I would like to get the runtime of an application in milliseconds, so I wrote the following function.
#include <assert.h>
#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define PAGE_SIZE 4096                              /* assumed 4 KiB pages */

double writingFS(char* Filepath, char* PenDrivepath, clock_t* thput) {
    clock_t t1, t2;
    long size = 0;
    unsigned char buff[PAGE_SIZE];

    FILE* fs_file = fopen(PenDrivepath, "w+");
    assert(fs_file != NULL);
    setbuf(fs_file, NULL);                          /* disable stdio buffering */

    int fdrand = open("/dev/urandom", O_RDONLY);

    int i;
    int tam = (10 * 1000000) / PAGE_SIZE;           /* ~10 MB in PAGE_SIZE chunks */

    t1 = clock();
    for (i = 0; i < tam; i++) {
        read(fdrand, buff, PAGE_SIZE);              /* fill the buffer with random bytes */
        fwrite(buff, 1, PAGE_SIZE, fs_file);        /* write one chunk to the pen drive */
        size += PAGE_SIZE;                          /* keep track of total bytes written */
    }
    fflush(fs_file);
    t2 = clock();

    double diff = (double)(t2 - t1) / ((double)CLOCKS_PER_SEC / 1000);  /* elapsed in ms */
    *thput = (clock_t)(size / (diff / 1000.0));     /* bytes per second */

    fclose(fs_file);
    close(fdrand);
    return diff;
}
The time returned is around 351 ms, but the actual running time of the function is around 3 s (3000 ms). What am I doing wrong?
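For reference, this is roughly how I observe the ~3 s figure: a wall-clock timer around the call, using clock_gettime(CLOCK_MONOTONIC). This harness is just a sketch; the main function, the "/mnt/pendrive/test.bin" path, and passing NULL for the unused Filepath argument are placeholders, not part of my real code.

#include <stdio.h>
#include <time.h>

extern double writingFS(char* Filepath, char* PenDrivepath, clock_t* thput);

int main(void) {
    struct timespec start, end;
    clock_t thput;

    clock_gettime(CLOCK_MONOTONIC, &start);         /* wall-clock start */
    double cpu_ms = writingFS(NULL, "/mnt/pendrive/test.bin", &thput);  /* placeholder path */
    clock_gettime(CLOCK_MONOTONIC, &end);           /* wall-clock end */

    double wall_ms = (end.tv_sec - start.tv_sec) * 1000.0
                   + (end.tv_nsec - start.tv_nsec) / 1e6;

    printf("value returned by writingFS: %.0f ms, wall clock: %.0f ms\n", cpu_ms, wall_ms);
    return 0;
}

The second number is the one that comes out around 3000 ms, while the value returned by writingFS stays around 351 ms.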