Hi,
Recently some people I know stumbled upon a strange problem with the
following piece of code. This is reduced as far as possible.
The idea of the code is simple. There are four structs of which one
contains the other three structs. If we allocate a large double array
of that struct and try to initialize it to zero, or try to use one of
them in a function. The program gives a seg fault. If we make the array
slightly smaller, there is no problem. The problem only appears on
x86_64 architectures.
Below I past a copy of the failing code. If DATAMAX is set to 4820 the
program works, if it is set to 4830 it seg faults.
What could the reason be of this? We have calculated the amount of
memory and we have for sure no problem with that. A backtrace also does
not give any deeper insight.
Thanks for the help
#define DATAMAX 4830
#define ISOMAX 7
typedef struct
{
short iso;
char observable[15];
short ds_dt;
short ds_du;
double emin;
double emax;
double cos;
double ampli;
double error;
short tch;
} Photo;
typedef struct
{
short iso;
char observable[15];
short ds_dt;
double qsquared;
double s;
short cos_ang;
double t;
double cos;
double ampli;
double error;
short tch;
short beam_ener_input ;
double e_beam_ener;
double eps;
short cs_convention;
} Electro;
typedef struct
{
short iso;
char observable[10];
double pk;
double cos;
double ampli;
double ratio;
double error;
} Kaoncap;
typedef struct
{
short iso;
short photo_prod;
short electro_prod;
short kaoncapture;
Photo photo;
Electro elec;
Kaoncap kaoncap;
} Data;
int test(Data blub) {
return 0;
}
int main(int argc, char* argv)
{
// WORKS ALWAYS
Data datapoints[ISOMAX][DATAMAX];
// FAILS WITH DATAMAX = 4830 or larger
// Data datapoints[ISOMAX][DATAMAX] = {{{0}}};
// FAILS WITH DATAMAX = 4830 or lager
// Data datapoints[ISOMAX][DATAMAX];
// test(datapoints[0][0]);
return 0;
}