Test Linux/Windows 11 performance: the time cost of running uint32_t::max() increments
Ubuntu
#include <chrono>
#include <cstdlib>
#include <iostream>
#include <limits>

using namespace std;

void testTime(int x);

int main(int argc, char **argv)
{
    if (argc < 2)
    {
        cerr << "usage: " << argv[0] << " <rounds>" << endl;
        return 1;
    }
    int x = atoi(argv[1]);
    testTime(x);
}

void testTime(int x)
{
    cout << numeric_limits<uint32_t>::max() << endl;
    chrono::time_point<chrono::steady_clock> startTime;
    chrono::time_point<chrono::steady_clock> endTime;
    // Run x rounds; each round increments j from 0 up to uint32_t's max value.
    for (int i = 0; i < x; i++)
    {
        startTime = chrono::steady_clock::now();
        for (uint32_t j = 0; j < numeric_limits<uint32_t>::max(); j++)
        {
        }
        endTime = chrono::steady_clock::now();
        cout << i << ","
             << chrono::duration_cast<chrono::milliseconds>(endTime - startTime).count() << " milliseconds,"
             << chrono::duration_cast<chrono::nanoseconds>(endTime - startTime).count() << " nanos!" << endl;
    }
}
Compile
g++ -std=c++2a *.cpp -o h1
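Note that this command compiles without an optimization flag, which is what makes the measurement meaningful here. If you instead build with optimizations enabled, e.g.

g++ -std=c++2a -O2 *.cpp -o h1

GCC will typically recognize that the empty inner loop has no observable side effects and remove it entirely, so each round would report near-zero time.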
Run
./h1 10
Snapshot
As the snapshot above illustrates, running the 4,294,967,295 increments of the inner loop takes approximately 2.2-2.3 seconds in C++ on Ubuntu 20.04.
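That figure is consistent with an unoptimized build: roughly 2.25 s / 4,294,967,295 iterations ≈ 0.5 ns per iteration, on the order of one to two clock cycles per loop round-trip on a multi-GHz core.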
Win11/Visual Studio 2022/VC++
#include <chrono>
#include <limits.h>
#include <iostream>
#include <Windows.h>

using namespace std;

void testTime();

int main()
{
    testTime();
    cin.get();  // keep the console window open
}

void testTime()
{
    chrono::time_point<chrono::steady_clock> startTime;
    chrono::time_point<chrono::steady_clock> endTime;
    for (int i = 0; i < 10; i++)
    {
        startTime = chrono::steady_clock::now();
        // Empty loop: increments j from 0 up to UINT_MAX.
        // Beware: in a Release build the optimizer may remove this loop entirely.
        UINT j;
        for (j = 0; j < UINT_MAX; j++)
        {
        }
        endTime = chrono::steady_clock::now();
        cout << i << "," << j << ","
             << chrono::duration_cast<chrono::milliseconds>(endTime - startTime).count() << " milliseconds,"
             << chrono::duration_cast<chrono::nanoseconds>(endTime - startTime).count() << " nanos!" << endl;
    }
}
Run release/x64
As the snapshot above illustrates, incrementing from 0 to 4294967295 reportedly costs only about 100 nanoseconds. That is far too fast for four billion real increments: in a Release/x64 build, MSVC's optimizer sees that the empty loop has no observable side effects and eliminates it, so the measurement reflects a deleted loop rather than the increments themselves.
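One way to make the two platforms' numbers comparable is to force the compiler to keep the loop. Below is a minimal sketch, assuming that writing each value to a volatile variable (named sink here purely for illustration) is enough to stop the optimizer from discarding the work:

#include <chrono>
#include <cstdint>
#include <iostream>
#include <limits>

using namespace std;

int main()
{
    // Writing to a volatile variable is an observable side effect,
    // so the compiler cannot delete the loop even under -O2 or /O2.
    volatile uint32_t sink = 0;

    auto startTime = chrono::steady_clock::now();
    for (uint32_t j = 0; j < numeric_limits<uint32_t>::max(); j++)
    {
        sink = j;  // forces each iteration to actually execute
    }
    auto endTime = chrono::steady_clock::now();

    cout << chrono::duration_cast<chrono::milliseconds>(endTime - startTime).count()
         << " milliseconds" << endl;
    return 0;
}

With this change, both the GCC -O2 build and the MSVC Release build should report times in the same ballpark as the unoptimized Ubuntu run, since the loop body can no longer be removed (the volatile store adds some overhead of its own, so treat the numbers as a sanity check rather than a precise per-increment cost).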