#include <stdio.h>int main(void){ float a, b; a = 0.0f, b = 1.0f; printf("%d\n", plus(a, b)); return 0;}int plus(double a, double b){ printf("%lf %lf\n", a, b); return a + b;}
gcc .\p1.c --std=c89 -o main
the output is:
0.000000 1.0000001
But when i changed the type from float to int
#include <stdio.h>int main(void){ int a, b; a = 0, b = 1; printf("%d\n", plus(a, b)); return 0;}int plus(double a, double b){ printf("%lf %lf\n", a, b); return a + b;}
gcc .\p1.c --std=c89 -o main
the output is:
32019693379112340.000000 622696491526558860000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.000000-2147483648
In the first stage, the default argument promotion changes float to double, and everything is right.
In the second, int -> unsigned int, why does it also call plus function i defined, and why the value of argument in the plus function is out of expectation
I tried to use following to simulate the process:
#include <stdio.h>int main(void){ unsigned int a1, b1; a1 = 0u, b1 = 1u; double da = a1; double db = b1; printf("%d\n", plus(da, db)); return 0;}int plus(double a, double b){ printf("%lf %lf\n", a, b); return a + b;}
gcc .\p1.c --std=c89 -o main
the output is:
0.000000 1.0000001
What confuses me even more is this:
#include <stdio.h>int main(void){ unsigned int a1, b1; a1 = 0u, b1 = 1u; double da = a1; double db = b1; printf("%d\n", plus(da, db)); int a, b; a = 0, b = 1; printf("%d\n", plus(a, b)); double x = 3.0; printf("%d\n", add5(x)); return 0;}int plus(double a, double b){ printf("%lf %lf\n", a, b); return a + b;
gcc .\p1.c --std=c89 -o main
the output is:
0.000000 1.00000010.000000 0.0000000