Jude wrote:
here is the source code:
#include <stdio.h>

/*
 * Read one float from stdin and print it with five decimal places.
 *
 * The original format string "%d%f" was undefined behavior: %d consumed
 * &f (which is a float*, not the int* %d requires), and %f was left with
 * no matching argument at all.  One float needs exactly one "%f".
 */
int main()
{
    float f;
    /* Check the conversion count: scanf returns the number of items
       successfully assigned, so anything other than 1 means bad input. */
    if (scanf("%f", &f) != 1) {
        fprintf(stderr, "invalid input\n");
        return 1;
    }
    printf("The float is %10.5f\n", f);
    return 0;
}
When I input 12345.11111, the output is 12345.11133.
What is wrong with it?
I compiled it with VC++ 6.0.
First, note that your format string "%d%f" is itself wrong: %d consumes
&f (a float*, not the int* it requires) and %f is left with no matching
argument at all — that is undefined behavior. It should be just "%f".
Beyond that, all floating point types have limits on their precision, and
interpreting digits beyond that limit is pointless. Run the following with
your implementation and see what happens:
#include <stdio.h>
#include <float.h>

/*
 * Parse the same two decimal strings into a float, a double, and a long
 * double, and print each value at its type's guaranteed decimal precision
 * (FLT_DIG / DBL_DIG / LDBL_DIG), making float's precision loss visible.
 */
int main()
{
    static const char *const inputs[] = { "12345.11111", "12345.11133" };
    const size_t count = sizeof inputs / sizeof inputs[0];

    for (size_t k = 0; k < count; k++) {
        float as_float;
        double as_double;
        long double as_long_double;

        printf("input string is \"%s\"\n", inputs[k]);

        sscanf(inputs[k], "%f", &as_float);
        printf("Read into a float: %.*g\n", FLT_DIG, as_float);

        sscanf(inputs[k], "%lf", &as_double);
        printf("Read into a double: %.*g\n", DBL_DIG, as_double);

        sscanf(inputs[k], "%Lf", &as_long_double);
        printf("Read into a long double: %.*Lg\n\n", LDBL_DIG, as_long_double);
    }
    return 0;
}
input string is "12345.11111"
Read into a float: 12345.1
Read into a double: 12345.11111
Read into a long double: 12345.11111
input string is "12345.11133"
Read into a float: 12345.1
Read into a double: 12345.11133
Read into a long double: 12345.11133