All new accounts created on GitLab now require administrator approval. If you invite any collaborators, please let Flux staff know so they can approve the accounts.

Commit f20c9a3c authored by amaricq's avatar amaricq

Initial Commit

Modified Version of STREAM that comments out the Single-Threaded portion
parents
# Build configuration for this modified STREAM benchmark.
# -DSTREAM_ARRAY_SIZE=Number of elements in test arrays (default 10 million)
# -DNTIMES=Number of times to repeat the test cycle (default 10)
# -DOFFSET=Offset of test arrays, may affect array alignment (default 0)
# -DSTREAM_TYPE=Type of the test arrays (default 'double')
# -DVERBOSE=Verbose output
# set OMP_NUM_THREADS=N to set number of openmp threads (default system max)
CC = gcc
NTIMES = 500
STREAM_ARRAY_SIZE = 30000000
OFFSET = 0
STREAM_TYPE = double
OPT = O2
# -DFILEOUTPUT enables the stream_out.csv writer in stream.c.
CFLAGS = -$(OPT) -fopenmp -DNTIMES=$(NTIMES) -DSTREAM_ARRAY_SIZE=$(STREAM_ARRAY_SIZE) -DOFFSET=$(OFFSET) -DSTREAM_TYPE=$(STREAM_TYPE) -DFILEOUTPUT
# Default target: build the OpenMP binary "streamc".
all: streamc
streamc: stream.c
$(CC) $(CFLAGS) stream.c -lm -o streamc
# macOS target: uses Homebrew gcc-6 (Apple clang lacked -fopenmp); libm is
# part of libSystem on macOS, so no -lm.
mac: stream.c
gcc-6 $(CFLAGS) stream.c -o streamc
clean:
rm -f streamc *.o
/*-----------------------------------------------------------------------*/
/* Program: STREAM */
/* Revision: $Id: stream.c,v 5.10 2013/01/17 16:01:06 mccalpin Exp mccalpin $ */
/* Original code developed by John D. McCalpin */
/* Programmers: John D. McCalpin */
/* Joe R. Zagar */
/* */
/* This program measures memory transfer rates in MB/s for simple */
/* computational kernels coded in C. */
/*-----------------------------------------------------------------------*/
/* Copyright 1991-2013: John D. McCalpin */
/*-----------------------------------------------------------------------*/
/* License: */
/* 1. You are free to use this program and/or to redistribute */
/* this program. */
/* 2. You are free to modify this program for your own use, */
/* including commercial use, subject to the publication */
/* restrictions in item 3. */
/* 3. You are free to publish results obtained from running this */
/* program, or from works that you derive from this program, */
/* with the following limitations: */
/* 3a. In order to be referred to as "STREAM benchmark results", */
/* published results must be in conformance to the STREAM */
/* Run Rules, (briefly reviewed below) published at */
/* http://www.cs.virginia.edu/stream/ref.html */
/* and incorporated herein by reference. */
/* As the copyright holder, John McCalpin retains the */
/* right to determine conformity with the Run Rules. */
/* 3b. Results based on modified source code or on runs not in */
/* accordance with the STREAM Run Rules must be clearly */
/* labelled whenever they are published. Examples of */
/* proper labelling include: */
/* "tuned STREAM benchmark results" */
/* "based on a variant of the STREAM benchmark code" */
/* Other comparable, clear, and reasonable labelling is */
/* acceptable. */
/* 3c. Submission of results to the STREAM benchmark web site */
/* is encouraged, but not required. */
/* 4. Use of this program or creation of derived works based on this */
/* program constitutes acceptance of these licensing restrictions. */
/* 5. Absolutely no warranty is expressed or implied. */
/*-----------------------------------------------------------------------*/
# include <stdio.h>
# include <unistd.h>
# include <math.h>
# include <float.h>
# include <limits.h>
# include <sys/time.h>
# include <time.h>
/*-----------------------------------------------------------------------
* INSTRUCTIONS:
*
* 1) STREAM requires different amounts of memory to run on different
* systems, depending on both the system cache size(s) and the
* granularity of the system timer.
* You should adjust the value of 'STREAM_ARRAY_SIZE' (below)
* to meet *both* of the following criteria:
* (a) Each array must be at least 4 times the size of the
* available cache memory. I don't worry about the difference
* between 10^6 and 2^20, so in practice the minimum array size
* is about 3.8 times the cache size.
* Example 1: One Xeon E3 with 8 MB L3 cache
* STREAM_ARRAY_SIZE should be >= 4 million, giving
* an array size of 30.5 MB and a total memory requirement
* of 91.5 MB.
* Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP)
* STREAM_ARRAY_SIZE should be >= 20 million, giving
* an array size of 153 MB and a total memory requirement
* of 458 MB.
* (b) The size should be large enough so that the 'timing calibration'
* output by the program is at least 20 clock-ticks.
* Example: most versions of Windows have a 10 millisecond timer
* granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds.
* If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec.
* This means the each array must be at least 1 GB, or 128M elements.
*
* Version 5.10 increases the default array size from 2 million
* elements to 10 million elements in response to the increasing
* size of L3 caches. The new default size is large enough for caches
* up to 20 MB.
* Version 5.10 changes the loop index variables from "register int"
* to "ssize_t", which allows array indices >2^32 (4 billion)
* on properly configured 64-bit systems. Additional compiler options
* (such as "-mcmodel=medium") may be required for large memory runs.
*
* Array size can be set at compile time without modifying the source
* code for the (many) compilers that support preprocessor definitions
* on the compile line. E.g.,
* gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M
* will override the default size of 10M with a new size of 100M elements
* per array.
*/
/* Default array size: 10 million elements per array; override with
 * -DSTREAM_ARRAY_SIZE on the compile line (see instructions above). */
#ifndef STREAM_ARRAY_SIZE
#   define STREAM_ARRAY_SIZE	10000000
#endif
/* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result
* for any iteration after the first, therefore the minimum value
* for NTIMES is 2.
* There are no rules on maximum allowable values for NTIMES, but
* values larger than the default are unlikely to noticeably
* increase the reported performance.
* NTIMES can also be set on the compile line without changing the source
* code using, for example, "-DNTIMES=7".
*/
/* If NTIMES was supplied on the compile line but is unusable (the code
 * skips the first iteration, so NTIMES must be >= 2), fall back to the
 * default of 10.  The #undef is required: redefining an object-like
 * macro to a different token sequence without #undef is a constraint
 * violation (C11 6.10.3) and draws a "macro redefined" diagnostic from
 * every major compiler. */
#ifdef NTIMES
#if NTIMES<=1
#   undef NTIMES
#   define NTIMES	10
#endif
#endif
#ifndef NTIMES
#   define NTIMES	10
#endif
/* Users are allowed to modify the "OFFSET" variable, which *may* change the
* relative alignment of the arrays (though compilers may change the
* effective offset by making the arrays non-contiguous on some systems).
* Use of non-zero values for OFFSET can be especially helpful if the
* STREAM_ARRAY_SIZE is set to a value close to a large power of 2.
* OFFSET can also be set on the compile line without changing the source
* code using, for example, "-DOFFSET=56".
*/
/* Default inter-array offset of 0 elements; override with -DOFFSET. */
#ifndef OFFSET
#   define OFFSET	0
#endif
/*
* 3) Compile the code with optimization. Many compilers generate
* unreasonably bad code before the optimizer tightens things up.
* If the results are unreasonably good, on the other hand, the
* optimizer might be too smart for me!
*
* For a simple single-core version, try compiling with:
* cc -O stream.c -o stream
* This is known to work on many, many systems....
*
* To use multiple cores, you need to tell the compiler to obey the OpenMP
* directives in the code. This varies by compiler, but a common example is
* gcc -O -fopenmp stream.c -o stream_omp
* The environment variable OMP_NUM_THREADS allows runtime control of the
* number of threads/cores used when the resulting "stream_omp" program
* is executed.
*
* To run with single-precision variables and arithmetic, simply add
* -DSTREAM_TYPE=float
* to the compile line.
* Note that this changes the minimum array sizes required --- see (1) above.
*
* The preprocessor directive "TUNED" does not do much -- it simply causes the
* code to call separate functions to execute each kernel. Trivial versions
* of these functions are provided, but they are *not* tuned -- they just
* provide predefined interfaces to be replaced with tuned code.
*
*
* 4) Optional: Mail the results to mccalpin@cs.virginia.edu
* Be sure to include info that will help me understand:
* a) the computer hardware configuration (e.g., processor model, memory type)
* b) the compiler name/version and compilation flags
* c) any run-time information (such as OMP_NUM_THREADS)
* d) all of the output from the test case.
*
* Thanks!
*
*-----------------------------------------------------------------------*/
# define HLINE "-------------------------------------------------------------\n"
# ifndef MIN
# define MIN(x,y) ((x)<(y)?(x):(y))
# endif
# ifndef MAX
# define MAX(x,y) ((x)>(y)?(x):(y))
# endif
#ifndef STREAM_TYPE
#define STREAM_TYPE double
#endif
/* The three benchmark arrays; OFFSET elements of padding *may* change
 * their relative alignment (see comment above). */
static STREAM_TYPE a[STREAM_ARRAY_SIZE+OFFSET],
b[STREAM_ARRAY_SIZE+OFFSET],
c[STREAM_ARRAY_SIZE+OFFSET];
/* Per-kernel statistics, indexed 0..3 = Copy, Scale, Add, Triad.
 * mintime is seeded with FLT_MAX merely as a "very large" sentinel for
 * the MIN() reduction; the arrays themselves are double. */
static double avgtime[4] = {0}, maxtime[4] = {0}, stdevtime[4] = {0},
vartime[4] = {0}, mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX},
avgrate[4] = {0}, stdevrate[4] = {0}, varrate[4] = {0};
/* Same statistics for the OpenMP (multi-threaded) pass. */
static double avgtime_omp[4] = {0}, maxtime_omp[4] = {0}, stdevtime_omp[4] = {0},
vartime_omp[4] = {0}, mintime_omp[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX},
avgrate_omp[4] = {0}, stdevrate_omp[4] = {0}, varrate_omp[4] = {0};
static char *label[4] = {"Copy:      ", "Scale:     ",
"Add:       ", "Triad:     "};
/* Bytes moved per kernel iteration: Copy/Scale read one array and write
 * one (2x); Add/Triad read two and write one (3x). */
static double bytes[4] = {
2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE
};
extern double mysecond();
extern void checkSTREAMresults();
#ifdef TUNED
extern void tuned_STREAM_Copy();
extern void tuned_STREAM_Scale(STREAM_TYPE scalar);
extern void tuned_STREAM_Add();
extern void tuned_STREAM_Triad(STREAM_TYPE scalar);
#endif
#ifdef _OPENMP
extern int omp_get_num_threads();
#endif
/*
 * main -- driver for this modified STREAM benchmark.
 *
 * Initializes the three test arrays, estimates the system timer
 * granularity, then (when compiled with OpenMP) runs the four kernels
 * Copy/Scale/Add/Triad NTIMES times each and reports min/avg/max times
 * and bandwidths with standard deviations.  The stock single-threaded
 * pass and its summary are commented out in this variant.  With
 * -DFILEOUTPUT, the OpenMP rates are also written to ./stream_out.csv.
 *
 * Returns 0 on completion.
 */
int
main()
{
int quantum, checktick();  /* checktick(): timer granularity in usec */
int BytesPerWord;
int k;
ssize_t j;  /* ssize_t (not int) so indices can exceed 2^31 on 64-bit systems */
STREAM_TYPE scalar;
double t, times[4][NTIMES];  /* per-kernel, per-iteration wall times */
/* --- SETUP --- determine precision and check timing --- */
printf(HLINE);
printf("STREAM version $Revision: 5.10 $\n");
printf(HLINE);
BytesPerWord = sizeof(STREAM_TYPE);
printf("This system uses %d bytes per array element.\n",
BytesPerWord);
printf(HLINE);
#ifdef N
printf("*****  WARNING: ******\n");
printf("      It appears that you set the preprocessor variable N when compiling this code.\n");
printf("      This version of the code uses the preprocesor variable STREAM_ARRAY_SIZE to control the array size\n");
printf("      Reverting to default value of STREAM_ARRAY_SIZE=%llu\n",(unsigned long long) STREAM_ARRAY_SIZE);
printf("*****  WARNING: ******\n");
#endif
printf("Array size = %llu (elements), Offset = %d (elements)\n" , (unsigned long long) STREAM_ARRAY_SIZE, OFFSET);
printf("Memory per array = %.1f MiB (= %.1f GiB).\n",
BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0),
BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0/1024.0));
printf("Total memory required = %.1f MiB (= %.1f GiB).\n",
(3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.),
(3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024./1024.));
printf("Each kernel will be executed %d times.\n", NTIMES);
printf(" The *best* time for each kernel (excluding the first iteration)\n");
printf(" will be used to compute the reported bandwidth.\n");
/* Get initial value for system clock. */
/* First touch / initialization in parallel so pages land near the
 * threads that will use them (first-touch NUMA placement). */
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
a[j] = 1.0;
b[j] = 2.0;
c[j] = 0.0;
}
printf(HLINE);
if  ( (quantum = checktick()) >= 1)
printf("Your clock granularity/precision appears to be "
"%d microseconds.\n", quantum);
else {
printf("Your clock granularity appears to be "
"less than one microsecond.\n");
quantum = 1;
}
/* Time one pass over one array to estimate how long each kernel test
 * will take relative to the clock granularity. */
t = mysecond();
#pragma omp parallel for
for (j = 0; j < STREAM_ARRAY_SIZE; j++)
a[j] = 2.0E0 * a[j];
t = 1.0E6 * (mysecond() - t);
printf("Each test below will take on the order"
" of %d microseconds.\n", (int) t  );
printf("   (= %d clock ticks)\n", (int) (t/quantum) );
printf("Increase the size of the arrays if this shows that\n");
printf("you are not getting at least 20 clock ticks per test.\n");
printf(HLINE);
printf("WARNING -- The above is only a rough guideline.\n");
printf("For best results, please be sure you know the\n");
printf("precision of your system timer.\n");
printf(HLINE);
/*	--- MAIN LOOP --- repeat test cases NTIMES times --- */
/*  --- SINGLE THREADED --- */
/* The stock STREAM single-threaded pass below is intentionally
 * commented out in this variant (see commit message). */
/* scalar = 3.0;
for (k=0; k<NTIMES; k++)
{
times[0][k] = mysecond();
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j];
times[0][k] = mysecond() - times[0][k];
times[1][k] = mysecond();
for (j=0; j<STREAM_ARRAY_SIZE; j++)
b[j] = scalar*c[j];
times[1][k] = mysecond() - times[1][k];
times[2][k] = mysecond();
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j]+b[j];
times[2][k] = mysecond() - times[2][k];
times[3][k] = mysecond();
for (j=0; j<STREAM_ARRAY_SIZE; j++)
a[j] = b[j]+scalar*c[j];
times[3][k] = mysecond() - times[3][k];
}
*/
/*	--- SUMMARY --- */
// for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
/* {
for (j=0; j<4; j++)
{
avgtime[j] = avgtime[j] + times[j][k];
mintime[j] = MIN(mintime[j], times[j][k]);
maxtime[j] = MAX(maxtime[j], times[j][k]);
}
}
for (j=0; j<4; j++) {
avgtime[j] = avgtime[j]/(double)(NTIMES-1);
avgrate[j] = 1.0E-06 * bytes[j]/avgtime[j];
}
*/
// for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
/* {
for (j=0; j<4; j++)
{
vartime[j] += pow((times[j][k] - avgtime[j]), 2);
varrate[j] += pow(((1.0E-06 * bytes[j]/times[j][k]) - avgrate[j]), 2);
}
}
printf("Single-Threaded Results:\n\n");
printf("Function    Avg time     Min time     Max time     Standard Deviation\n");
for (j=0; j<4; j++) {
vartime[j] = vartime[j] / (double)(NTIMES-1);
varrate[j] = varrate[j] / (double)(NTIMES-1);
stdevtime[j] = sqrt(vartime[j]);
stdevrate[j] = sqrt(varrate[j]);
printf("%s%11.6f  %11.6f  %11.6f  %11.6f\n", label[j],
avgtime[j],
mintime[j],
maxtime[j],
stdevtime[j]);
}
printf("\n");
printf("Function    Avg Rate MB/s   Best Rate MB/s  Worst Rate MB/s  Standard Deviation MB/s\n");
for (j=0; j<4; j++) {
printf("%s%12.1f  %12.1f  %12.1f  %12.1f\n", label[j],
avgrate[j],
1.0E-06 * bytes[j]/mintime[j],
1.0E-06 * bytes[j]/maxtime[j],
stdevrate[j]);
}
printf(HLINE);
*/
#ifdef _OPENMP
printf("Multi-Threaded Results:\n\n");
/* Report the thread count from inside a parallel region (only the
 * master thread prints). */
#pragma omp parallel
{
#pragma omp master
{
k = omp_get_num_threads();
printf ("Number of Threads requested = %i\n",k);
}
}
/* Independently count the threads actually running: each thread
 * atomically increments k once. */
k = 0;
#pragma omp parallel
#pragma omp atomic
k++;
printf ("Number of Threads counted = %i\n\n",k);
/* Re-initialize the arrays before the timed runs. */
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++) {
a[j] = 1.0;
b[j] = 2.0;
c[j] = 0.0;
}
/*	--- MAIN LOOP --- repeat test cases NTIMES times --- */
/*  --- MULTI THREADED --- */
scalar = 3.0;
for (k=0; k<NTIMES; k++)
{
times[0][k] = mysecond();
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j];
times[0][k] = mysecond() - times[0][k];
times[1][k] = mysecond();
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
b[j] = scalar*c[j];
times[1][k] = mysecond() - times[1][k];
times[2][k] = mysecond();
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j]+b[j];
times[2][k] = mysecond() - times[2][k];
times[3][k] = mysecond();
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
a[j] = b[j]+scalar*c[j];
times[3][k] = mysecond() - times[3][k];
}
/*	--- SUMMARY --- */
for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
{
for (j=0; j<4; j++)
{
avgtime_omp[j] = avgtime_omp[j] + times[j][k];
mintime_omp[j] = MIN(mintime_omp[j], times[j][k]);
maxtime_omp[j] = MAX(maxtime_omp[j], times[j][k]);
}
}
for (j=0; j<4; j++) {
avgtime_omp[j] = avgtime_omp[j]/(double)(NTIMES-1);
avgrate_omp[j] = 1.0E-06 * bytes[j]/avgtime_omp[j];
}
/* Second pass over the samples to accumulate variances (needs the
 * means computed above). */
for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
{
for (j=0; j<4; j++)
{
vartime_omp[j] += pow((times[j][k] - avgtime_omp[j]), 2);
varrate_omp[j] += pow(((1.0E-06 * bytes[j]/times[j][k]) - avgrate_omp[j]), 2);
}
}
printf("Function    Avg time     Min time     Max time     Standard Deviation\n");
for (j=0; j<4; j++) {
vartime_omp[j] = vartime_omp[j] / (double)(NTIMES-1);
varrate_omp[j] = varrate_omp[j] / (double)(NTIMES-1);
stdevtime_omp[j] = sqrt(vartime_omp[j]);
stdevrate_omp[j] = sqrt(varrate_omp[j]);
printf("%s%11.6f  %11.6f  %11.6f  %11.6f\n", label[j],
avgtime_omp[j],
mintime_omp[j],
maxtime_omp[j],
stdevtime_omp[j]);
}
printf("\n");
printf("Function    Avg Rate MB/s   Best Rate MB/s  Worst Rate MB/s  Standard Deviation MB/s\n");
for (j=0; j<4; j++) {
printf("%s%12.1f  %12.1f  %12.1f  %12.1f\n", label[j],
avgrate_omp[j],
1.0E-06 * bytes[j]/mintime_omp[j],
1.0E-06 * bytes[j]/maxtime_omp[j],
stdevrate_omp[j]);
}
printf(HLINE);
#endif
#ifdef FILEOUTPUT
/* Append a one-line CSV record (header + data row) of the OpenMP
 * rates, thread count, units, and a Unix timestamp. */
FILE *outfile = NULL;
outfile = fopen("./stream_out.csv", "w");
if (outfile != NULL) {
/*
fprintf(outfile, "copy_max,copy_min,copy_mean,copy_stdev,");
fprintf(outfile, "scale_max,scale_min,scale_mean,scale_stdev,");
fprintf(outfile, "add_max,add_min,add_mean,add_stdev,");
fprintf(outfile, "triad_max,triad_min,triad_mean,triad_stdev,");
*/
#ifdef _OPENMP
fprintf(outfile, "copy_omp_max,copy_omp_min,copy_omp_mean,copy_omp_stdev,");
fprintf(outfile, "scale_omp_max,scale_omp_min,scale_omp_mean,scale_omp_stdev,");
fprintf(outfile, "add_omp_max,add_omp_min,add_omp_mean,add_omp_stdev,");
fprintf(outfile, "triad_omp_max,triad_omp_min,triad_omp_mean,triad_omp_stdev,");
fprintf(outfile, "omp_nthreads_used,");
#endif
fprintf(outfile, "units,timestamp\n");
/* for (j=0; j<4; j++) {
// Best Rate MB/s   Worst Rate MB/s  Avg Rate MB/s  Standard Deviation MB/s
fprintf(outfile, "%.2f,%.2f,%.2f,%.2f,",
1.0E-06 * bytes[j]/mintime[j],
1.0E-06 * bytes[j]/maxtime[j],
1.0E-06 * bytes[j]/avgtime[j],
stdevrate[j]);
}*/
#ifdef _OPENMP
for (j=0; j<4; j++) {
// Best Rate MB/s   Worst Rate MB/s  Avg Rate MB/s  Standard Deviation MB/s
fprintf(outfile, "%.2f,%.2f,%.2f,%.2f,",
1.0E-06 * bytes[j]/mintime_omp[j],
1.0E-06 * bytes[j]/maxtime_omp[j],
1.0E-06 * bytes[j]/avgtime_omp[j],
stdevrate_omp[j]);
}
/* Count the threads again (same atomic-increment trick as above). */
k = 0;
#pragma omp parallel
#pragma omp atomic
k++;
fprintf (outfile, "%i,",k); // Number of Threads used
#endif
fprintf(outfile, "MB/s,%lu", (long unsigned) time(NULL));
fclose(outfile);
}
#endif
/* --- Check Results --- */
/* Result verification is disabled in this variant. */
//checkSTREAMresults();
printf(HLINE);
return 0;
}
# define M	20

/*
 * checktick -- estimate the granularity of the mysecond() timer.
 *
 * Samples M clock values, each forced to be at least one microsecond
 * after the previous one, and returns the smallest non-negative gap
 * between consecutive samples, in whole microseconds.
 */
int
checktick()
    {
    double samples[M];
    int k;

/* Collect a sequence of M unique time values from the system. */
    for (k = 0; k < M; k++) {
	double start = mysecond();
	double now;
	do {
	    now = mysecond();
	} while (now - start < 1.0E-6);
	samples[k] = now;
	}

/*
 * Determine the minimum difference between these M values; this is our
 * estimate (in microseconds) for the clock granularity.
 */
    int best = 1000000;
    for (k = 1; k < M; k++) {
	int gap = (int)( 1.0E6 * (samples[k] - samples[k-1]));
	if (gap < 0)
	    gap = 0;
	if (gap < best)
	    best = gap;
	}

    return best;
    }
// Copyright 2013 Alex Reece.
//
// A cross platform monotonic timer.
#define NANOS_PER_SECF 1000000000.0
#define USECS_PER_SEC 1000000
#if _POSIX_TIMERS > 0 && defined(_POSIX_MONOTONIC_CLOCK)
// If we have it, use clock_gettime and CLOCK_MONOTONIC.
#include <time.h>
/*
 * mysecond -- wall-clock time in seconds from the POSIX monotonic
 * clock (CLOCK_MONOTONIC), so the value is unaffected by wall-clock
 * adjustments during a run.
 * NOTE(review): the local variable `time` shadows time() from
 * <time.h>; harmless here (time() is not called in this function) but
 * worth renaming.
 */
double mysecond() {
struct timespec time;
// Note: Make sure to link with -lrt to define clock_gettime.
clock_gettime(CLOCK_MONOTONIC, &time);
return ((double) time.tv_sec) + ((double) time.tv_nsec / (NANOS_PER_SECF));
}
#elif defined(__APPLE__)
// If we don't have CLOCK_MONOTONIC, we might be on a Mac. There we instead
// use mach_absolute_time().
#include <mach/mach_time.h>
/* Conversion factors (numer/denom) from mach timebase ticks to
 * nanoseconds, cached once at startup. */
static mach_timebase_info_data_t info;
/* Runs before main() (GCC/Clang constructor attribute) to populate
 * the timebase info. */
static void __attribute__((constructor)) init_info() {
mach_timebase_info(&info);
}
/*
 * mysecond -- monotonic wall-clock time in seconds on macOS, from
 * mach_absolute_time() ticks scaled by info.numer/info.denom to
 * nanoseconds and then divided down to seconds.
 */
double mysecond() {
uint64_t time = mach_absolute_time();
double dtime = (double) time;
dtime *= (double) info.numer;
dtime /= (double) info.denom;
return dtime / NANOS_PER_SECF;
}
#elif defined(_MSC_VER)
#include <windows.h>
/* QueryPerformanceCounter frequency divided by 1000 -- i.e. ticks per
 * *millisecond*.  NOTE(review): dividing counts by this yields
 * milliseconds, not seconds; verify the units expected by callers. */
static double PCFreq = 0.0;
// According to http://stackoverflow.com/q/1113409/447288, this will
// make this function a constructor.
// TODO(awreece) Actually attempt to compile on windows.
static void __cdecl init_pcfreq();
__declspec(allocate(".CRT$XCU")) void (__cdecl*init_pcfreq_)() = init_pcfreq;
/* Pre-main initializer: caches the performance-counter frequency. */
static void __cdecl init_pcfreq() {
// According to http://stackoverflow.com/a/1739265/447288, this will
// properly initialize the QueryPerformanceCounter.
LARGE_INTEGER li;
int has_qpc = QueryPerformanceFrequency(&li);
// NOTE(review): assert() is used but <assert.h> is never included in
// this file -- this branch will not compile as-is; verify on MSVC.
assert(has_qpc);
PCFreq = ((double) li.QuadPart) / 1000.0;
}
double mysecond_time() {
LARGE_INTEGER li;
QueryPerformanceCounter(&li);
return ((double) li.QuadPart) / PCFreq;