The goal of the following code is to call the Win32 function FileTimeToSystemTime:
pub fn convert_times(s: SystemTime) -> Option<SYSTEMTIME> {
let mut st = SYSTEMTIME::default();
let x: u64 = unsafe { transmute(s) };
let low = (x & 0x00000000FFFFFFFF) as u32;
let high = ((x & 0xFFFFFFFF00000000) >> 32) as u32;
let fs = FILETIME {
dwLowDateTime: low,
dwHighDateTime: high,
};
if unsafe { FileTimeToSystemTime(transmute(&fs), transmute(&mut st)) } > 0 {
Some(st)
} else {
None
}
}
When I take a known file time, 131147233180069965, which was generated at 2016-08-03T14:41 US-EST (UTC-5) according to my computer's clock, the structure returned by this function gives 2016-08-03T18:41:58.006.
That is +4 hours.
But isn't US-EST UTC-5?
Is it because Daylight Saving Time is -1hr?
FileTimeToSystemTime() returns the time in UTC. In the United States, locations normally in EST observe EDT during Daylight Saving Time, so the local offset is UTC-5 shifted by the one-hour DST adjustment, i.e. UTC-4.
To get time in the local timezone and take DST into account, one would instead need to call SystemTimeToTzSpecificLocalTime().
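For reference, here is a minimal C-style sketch of that conversion (the helper name is just illustrative); passing NULL as the first argument makes SystemTimeToTzSpecificLocalTime use the currently active time zone, DST included:
#include <windows.h>

// Convert a UTC FILETIME into a SYSTEMTIME expressed in the local time zone,
// honouring the active DST rules.
BOOL FileTimeToLocalSystemTime(const FILETIME* ft, SYSTEMTIME* stLocal)
{
    SYSTEMTIME stUtc;
    if (!FileTimeToSystemTime(ft, &stUtc))
        return FALSE;

    // NULL time zone => use the current time zone settings (DST-aware)
    return SystemTimeToTzSpecificLocalTime(NULL, &stUtc, stLocal);
}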
Generally this is inadvisable: working in UTC is preferable for computers, since two intercommunicating computers are not necessarily in the same timezone.
I'm a student who is interested in learning Rust. For a class project I wrote a Rust program that parses an SSH log file, specifically capturing the dates and IP addresses in the log.
When I first finished the project, the script took 3 minutes to run through a log file with 655147 entries. After major optimizations I got the processing down to 30 seconds. That is fine, but other students' Python programs did it in 3 seconds, so I know it's definitely my fault and I want to know how to write it better. Could someone show me where I went wrong?
Here are the structs I made for reference:
struct DateLogins {
date: NaiveDate,
success: i32,
failure: i32,
}
struct IpAuth {
success: i32,
failure: i32,
first_attempt: NaiveDateTime,
successful_attempt: NaiveDateTime,
failed_reverse: bool,
break_in_attempt: bool,
ip_addr: String,
}
struct MinedReport {
start_date: NaiveDateTime,
end_date: NaiveDateTime,
total_success: i32,
total_failure: i32,
total_addrs: i32,
login_attempts: HashMap<String, DateLogins>,
unique_addrs: HashMap<String, IpAuth>,
}
And here is the main processing logic:
lazy_static! {
static ref IP_RGX: Regex = Regex::new(r"(\d{1,3}\.\d{1,3}\.\d{1,3}\d\.\d{1,3})").unwrap(); // regex for capturing the IP address of a message.
static ref LOGIN_GOOD_RGX: Regex = Regex::new(r"Accepted password").unwrap(); // regex for successful login attempts -- for total, date, and IP address.
static ref LOGIN_FAIL_RGX: Regex = Regex::new(r"Failed password").unwrap(); // regex for failed login attempts -- for total, date, and IP address
static ref REVERSE_LOOK_RGX: Regex = Regex::new(r"reverse mapping checking getaddrinfo").unwrap(); // regex for a failed reverse lookup
static ref BREAK_IN_RGX: Regex = Regex::new(r"POSSIBLE BREAK-IN ATTEMPT!").unwrap(); // regex for a break in attempt -- for IP address
}
fn main() {
let start: std::time::Instant;
let duration: std::time::Duration;
let file: File = File::open("./SSH.log").expect("Could not open log file!");
let reader: BufReader<File> = BufReader::new(file);
let origin_date: NaiveDateTime = NaiveDateTime::parse_from_str("0000 01 01 00:00:00", "%Y %m %d %H:%M:%S").expect("Could not parse start time!");
let mut report: MinedReport = MinedReport::new(origin_date.clone(), origin_date.clone());
start = Instant::now();
for line in reader.lines() {
let l = line.expect("Could not read a line!");
parse_line(l, &mut report, origin_date);
}
duration = start.elapsed();
store_log(report, "./report.txt");
println!("Total time elapsed: {:?}\n", duration);
}
// parses the values in each line
fn parse_line(line: String, report: &mut MinedReport, origin_date: NaiveDateTime) {
let time: &str = &line[..15];
let message: &str = &line[15..];
// parse the time to DateTime. Store things in the report.
let date_time: NaiveDateTime = parse_time(time, report, origin_date).unwrap();
// parse the IP address of the line. Store things in the report.
parse_ip(message, report, origin_date, date_time);
}
// parses the time value for each line.
fn parse_time(time_cap: &str, report: &mut MinedReport, origin_date: NaiveDateTime) -> Result<NaiveDateTime, ParseError> {
// add a random year just to have a string.
let time_str: String = time_cap.to_string();
let full_date: String;
let date_time: NaiveDateTime;
let date: NaiveDate;
let d: String;
// add a year. Move to the next year if it's january.
// (I know this is a bad solution, but the only months in the log are Dec and Jan, with no year)
if &time_str[..3] == "Dec" {
full_date = format!("{}{}", "0000 ", time_cap); // No year given, set it to 0000
} else {
full_date = format!("{}{}", "0001 ", time_cap); // No year given, set it to 0001
}
// get date time and date only.
date_time = NaiveDateTime::parse_from_str(&full_date, "%Y %b %d %H:%M:%S").unwrap();
date = date_time.date();
d = date.to_string();
if report.start_date == origin_date {
report.start_date = date_time;
}
report.end_date = date_time;
if !report.login_attempts.contains_key(&d) {
report.login_attempts.insert(d, DateLogins::new(date));
}
Ok(date_time)
}
fn parse_ip(message: &str, report: &mut MinedReport, origin_date: NaiveDateTime, current_date: NaiveDateTime) {
for ip in IP_RGX.captures_iter(message) {
let new_ip: String = String::from(&ip[1]);
let ip_clone: String = new_ip.clone();
let date: NaiveDate = current_date.date();
let d: String = date.to_string();
report.total_addrs += 1;
if !report.unique_addrs.contains_key(&new_ip) {
report.unique_addrs.insert(new_ip, IpAuth::new(ip_clone.clone(), origin_date.clone(), current_date.clone()));
}
let login_date = report.login_attempts.get_mut(&d).unwrap();
let unique_ip = report.unique_addrs.get_mut(&ip_clone).unwrap();
if LOGIN_FAIL_RGX.is_match(message) {
report.total_failure += 1;
login_date.failure += 1;
unique_ip.failure += 1;
} else if LOGIN_GOOD_RGX.is_match(message) {
report.total_success += 1;
login_date.success += 1;
unique_ip.success += 1;
unique_ip.successful_attempt = current_date.clone();
} else {
if REVERSE_LOOK_RGX.is_match(message) {
unique_ip.failed_reverse = true;
}
if BREAK_IN_RGX.is_match(message) {
unique_ip.break_in_attempt = true;
}
}
}
}
Like I said, I'm new to Rust and to programming in general, so there may be something I just don't know about. I already switched to using a hash map from a vector, but maybe there's something better I can use? I don't know. I have also wondered if the chrono or regex crates are my issue here and maybe there's a faster alternative. Either way, thanks to anyone who tries to understand and correct my code!
I am currently learning Rust because I want to use it in a project that requires very high performance. I initially fell in love with enums, but then I started to evaluate their performance and found something that is really puzzling me. Here is an example:
use std::time::{Instant};
pub enum MyEnum<'a> {
V1,
V2(&'a MyEnum<'a>),
V3,
}
impl MyEnum<'_> {
pub fn eval(&self) -> i64 {
match self {
MyEnum::V1 => 1,
MyEnum::V2(_) => 2,
MyEnum::V3 => 3,
}
}
pub fn eval2(&self) -> i64 {
match self {
MyEnum::V1 => 1,
MyEnum::V2(a) => a.eval2(),
MyEnum::V3 => 3,
}
}
}
fn main() {
const EXAMPLES: usize = 10000000;
let en = MyEnum::V1{};
let start = Instant::now();
let mut sum = 0;
for _ in 0..EXAMPLES {
sum += en.eval()
}
println!("enum without fields func call sum: {} at {:?}", sum, start.elapsed());
let start = Instant::now();
let mut sum = 0;
for _ in 0..EXAMPLES {
sum += en.eval2()
}
println!("enum with field func call sum: {} at {:?}", sum, start.elapsed());
}
Results I get:
enum without fields func call sum: 10000000 at 100ns
enum with field func call sum: 10000000 at 6.3425ms
The eval function should execute exactly the same instructions as eval2 for the V1 variant, but it runs about 60,000x slower. Why is this happening?
Viewing the assembly, your first loop is optimized entirely into a single mov of 10000000 (that is, the compiler reduces the whole loop to something equivalent to sum = EXAMPLES), while the second is not. I do not know why the second loop is not constant-optimized as heavily.
I see no difference in performance, as one would expect.
$ ./test
enum without fields func call sum: 10000000 at 307.543596ms
enum with field func call sum: 10000000 at 312.196195ms
$ rustc --version
rustc 1.43.1 (8d69840ab 2020-05-04)
$ uname -a
Darwin Windhund.local 18.7.0 Darwin Kernel Version 18.7.0: Mon Feb 10 21:08:45 PST 2020; root:xnu-4903.278.28~1/RELEASE_X86_64 x86_64 i386 MacBookPro15,2 Darwin
One problem might be the use of simple "wall clock" time for benchmarking. This simple count of how much time passed is vulnerable to anything else running which might consume resources. Anti-virus, a web browser, any program. Instead, use benchmark tests.
I have a Rust program where I want to do some simple benchmarking with start time and end time!
use chrono::{NaiveTime, Utc};
fn main() {
let start_time: NaiveTime = Utc::now().time();
let end_time: NaiveTime = Utc::now().time();
println!("Total time taken to run is {}", end_time - start_time);
}
The code above prints as:
Total time taken to run is PT520.532696S
I guess it is 520 seconds if I'm not wrong, but how can I convert that into minutes? Is there a better way?
A simple look at the docs gives the answer:
use chrono::Utc;
fn main() {
let start_time = Utc::now().time();
let end_time = Utc::now().time();
let diff = end_time - start_time;
println!("Total time taken to run is {}", diff.num_minutes());
}
But be aware that this is not a good way to measure time monotonically: this code could show -5 minutes if the user changes the system date somehow. Also, calling time() removes the date information, which is strange when you use chrono, because generally you don't want to ignore the date; just remove the time() call.
It seems the accepted answer has some potential flaws.
A potentially better way, as recommended in the Rust nursery, is the following:
use std::time::{Duration, Instant};
use std::thread;
fn expensive_function() {
thread::sleep(Duration::from_secs(1));
}
fn main() {
let start = Instant::now();
expensive_function();
let duration = start.elapsed();
println!("Time elapsed in expensive_function() is: {:?}", duration);
}
I'm looking for a way to obtain a guaranteed-monotonic clock which excludes time spent during suspend, just like POSIX CLOCK_MONOTONIC.
Solutions requiring Windows 7 (or later) are acceptable.
Here's an example of something that doesn't work:
LONGLONG suspendTime, uiTime1, uiTime2;
do {
QueryUnbiasedInterruptTime((ULONGLONG*)&uiTime1);
suspendTime = GetTickCount64()*10000 - uiTime1;
QueryUnbiasedInterruptTime((ULONGLONG*)&uiTime2);
} while (uiTime1 != uiTime2);
static LARGE_INTEGER firstSuspend = suspendTime;
static LARGE_INTEGER lastSuspend = suspendTime;
assert(suspendTime > lastSuspend);
lastSuspend = suspendTime;
LARGE_INTEGER now;
QueryPerformanceCounter(&now);
static LONGLONG firstQpc = now.QuadPart;
return (now.QuadPart - firstQpc)*qpcFreqNumer/qpcFreqDenom -
(suspendTime - firstSuspend);
The problem with this (my first attempt) is that GetTickCount only ticks every 15ms, whereas QueryUnbiasedInterruptTime seems to tick a little more often, so every now and then my method observes the suspend time go backwards by a little.
I've also tried using CallNtPowerInformation, but it's not clear how to use those values either to get a nice, race-free measure of suspend time.
The suspend bias time is available in kernel mode (_KUSER_SHARED_DATA.QpcBias in ntddk.h). A read-only copy is available in user mode.
#include <nt.h>
#include <ntrtl.h>
#include <nturtl.h>
LONGLONG suspendTime, uiTime1, uiTime2;
QueryUnbiasedInterruptTime((ULONGLONG*)&uiTime1);
uiTime1 -= USER_SHARED_DATA->QpcBias; // subtract off the suspend bias
The full procedure for calculating monotonic time, which does not tick during suspend, is as follows:
typedef struct _KSYSTEM_TIME {
ULONG LowPart;
LONG High1Time;
LONG High2Time;
} KSYSTEM_TIME;
#define KUSER_SHARED_DATA 0x7ffe0000
#define InterruptTime ((KSYSTEM_TIME volatile*)(KUSER_SHARED_DATA + 0x08))
#define InterruptTimeBias ((ULONGLONG volatile*)(KUSER_SHARED_DATA + 0x3b0))
static LONGLONG readInterruptTime() {
// Reading the InterruptTime from KUSER_SHARED_DATA is much better than
// using GetTickCount() because it doesn't wrap, and is even a little quicker.
// This works on all Windows NT versions (NT4 and up).
LONG timeHigh;
ULONG timeLow;
do {
timeHigh = InterruptTime->High1Time;
timeLow = InterruptTime->LowPart;
} while (timeHigh != InterruptTime->High2Time);
LONGLONG now = ((LONGLONG)timeHigh << 32) + timeLow;
static LONGLONG d = now;
return now - d;
}
static LONGLONG scaleQpc(LONGLONG qpc) {
// We do the actual scaling in fixed-point rather than floating, to make sure
// that we don't violate monotonicity due to rounding errors. There's no
// need to cache QueryPerformanceFrequency().
LARGE_INTEGER frequency;
QueryPerformanceFrequency(&frequency);
double fraction = 10000000/double(frequency.QuadPart);
LONGLONG denom = 1024;
LONGLONG numer = std::max(1LL, (LONGLONG)(fraction*denom + 0.5));
return qpc * numer / denom;
}
static ULONGLONG readUnbiasedQpc() {
// We remove the suspend bias added to QueryPerformanceCounter results by
// subtracting the interrupt time bias, which is not strictly speaking legal,
// but the units are correct and I think it's impossible for the resulting
// "unbiased QPC" value to go backwards.
LONGLONG interruptTimeBias, qpc;
do {
interruptTimeBias = *InterruptTimeBias;
LARGE_INTEGER counter;
QueryPerformanceCounter(&counter);
qpc = counter.QuadPart;
} while (interruptTimeBias != *InterruptTimeBias);
static std::pair<LONGLONG,LONGLONG> d(qpc, interruptTimeBias);
return scaleQpc(qpc - d.first) - (interruptTimeBias - d.second);
}
/// getMonotonicTime() returns the time elapsed since the application's first
/// call to getMonotonicTime(), in 100ns units. The values returned are
/// guaranteed to be monotonic. The time ticks in 15ms resolution and advances
/// during suspend on XP and Vista, but we manage to avoid this on Windows 7
/// and 8, which also use a high-precision timer. The time does not wrap after
/// 49 days.
uint64_t getMonotonicTime()
{
OSVERSIONINFOEX ver = { sizeof(OSVERSIONINFOEX), };
GetVersionEx(&ver);
bool win7OrLater = (ver.dwMajorVersion > 6 ||
(ver.dwMajorVersion == 6 && ver.dwMinorVersion >= 1));
// On Windows XP and earlier, QueryPerformanceCounter is not monotonic so we
// steer well clear of it; on Vista, it's just a bit slow.
return win7OrLater ? readUnbiasedQpc() : readInterruptTime();
}
How can I get the Windows system time with millisecond resolution?
If the above is not possible, then how can I get the operating system start time? I would like to use this value together with timeGetTime() in order to compute a system time with millisecond resolution.
Try this article from MSDN Magazine. It's actually quite complicated.
Implement a Continuously Updating, High-Resolution Time Provider for Windows
(archive link)
This is an elaboration of the above comments to explain some of the whys.
First, the GetSystemTime* calls are the only Win32 APIs providing the system's time. This time has a fairly coarse granularity, as most applications do not need the overhead required to maintain a higher resolution. Time is (likely) stored internally as a 64-bit count of milliseconds. Calling timeGetTime gets the low order 32 bits. Calling GetSystemTime, etc requests Windows to return this millisecond time, after converting into days, etc and including the system start time.
There are two time sources in a machine: the CPU's clock and an on-board clock (e.g., real-time clock (RTC), Programmable Interval Timers (PIT), and High Precision Event Timer (HPET)). The first has a resolution of around ~0.5ns (2GHz) and the second is generally programmable down to a period of 1ms (though newer chips (HPET) have higher resolution). Windows uses these periodic ticks to perform certain operations, including updating the system time.
Applications can change this period via timeBeginPeriod; however, this affects the entire system. The OS will check / update regular events at the requested frequency. Under low CPU loads / frequencies, there are idle periods for power savings. At high frequencies, there isn't time to put the processor into low power states. See Timer Resolution for further details. Finally, each tick has some overhead and increasing the frequency consumes more CPU cycles.
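For illustration, a minimal sketch of how an application raises the tick rate (remember this is system-wide and should be undone as soon as it is no longer needed):
#include <windows.h>
#pragma comment(lib, "winmm.lib")   // timeBeginPeriod / timeEndPeriod live in winmm

void DoTimingSensitiveWork()
{
    // Request a 1ms timer period for the duration of the work.
    if (timeBeginPeriod(1) == TIMERR_NOERROR)
    {
        // ... timing-sensitive work here ...
        timeEndPeriod(1);   // always pair with timeBeginPeriod
    }
}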
As for higher-resolution time: the system time is not maintained to that accuracy, any more than Big Ben has a second hand. Using QueryPerformanceCounter (QPC) or the CPU's ticks (rdtsc) can provide the resolution between the system time ticks. Such an approach was used in the MSDN magazine article Kevin cited. These approaches may drift, however (e.g., due to frequency scaling), and therefore need to be synced to the system time.
In Windows, the base of all time is a function called GetSystemTimeAsFileTime.
It returns a structure that is capable of holding a time with 100ns resolution.
It is kept in UTC.
The FILETIME structure records the number of 100ns intervals since January 1, 1601, meaning its resolution is limited to 100ns.
This forms our first function:
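For reference, the declarations involved look roughly like this (as found in <windows.h>):
// The function that forms the base of all time in Windows:
void GetSystemTimeAsFileTime(LPFILETIME lpSystemTimeAsFileTime);

// ...and the structure it fills in: a 64-bit count of 100ns ticks, split in two halves.
typedef struct _FILETIME {
    DWORD dwLowDateTime;    // low 32 bits of the tick count
    DWORD dwHighDateTime;   // high 32 bits
} FILETIME;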
A 64-bit number of 100ns ticks since January 1, 1601 is somewhat unwieldy. Windows provides a handy helper function, FileTimeToSystemTime, that can decode this 64-bit integer into useful parts:
typedef struct _SYSTEMTIME {
    WORD wYear;
    WORD wMonth;
    WORD wDayOfWeek;
    WORD wDay;
    WORD wHour;
    WORD wMinute;
    WORD wSecond;
    WORD wMilliseconds;
} SYSTEMTIME;
Notice that SYSTEMTIME has a built-in resolution limitation of 1ms
Now we have a way to go from FILETIME to SYSTEMTIME:
We could write the function to get the current system time as a SYSTEMTIME structure:
SYSTEMTIME GetSystemTime()
{
    // Get the current system time (UTC) in its native 100ns FILETIME structure
    FILETIME ftNow;
    GetSystemTimeAsFileTime(&ftNow);

    // Decode the 100ns intervals into a 1ms-resolution SYSTEMTIME for us
    SYSTEMTIME stNow;
    FileTimeToSystemTime(&ftNow, &stNow);

    return stNow;
}
Except Windows already wrote such a function for you: GetSystemTime
Local, rather than UTC
Now what if you don't want the current time in UTC? What if you want it in your local time? Windows provides a function to convert a FILETIME that is in UTC into your local time: FileTimeToLocalFileTime
You could write a function that returns you a FILETIME in local time already:
FILETIME GetLocalTimeAsFileTime()
{
    FILETIME ftNow;
    GetSystemTimeAsFileTime(&ftNow);

    // Convert from UTC into the local time zone
    FILETIME ftNowLocal;
    FileTimeToLocalFileTime(&ftNow, &ftNowLocal);

    return ftNowLocal;
}
And let's say you want to decode the local FILETIME into a SYSTEMTIME. That's no problem, you can use FileTimeToSystemTime again:
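A sketch of that decode step, reusing the helper above (the name GetLocalTimeAsSystemTime is just illustrative):
SYSTEMTIME GetLocalTimeAsSystemTime()
{
    // Get the local time as a FILETIME, then decode it into calendar parts
    FILETIME ftNowLocal = GetLocalTimeAsFileTime();

    SYSTEMTIME stNowLocal;
    FileTimeToSystemTime(&ftNowLocal, &stNowLocal);

    return stNowLocal;
}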
Fortunately, Windows already provides a function that returns that value for you: GetLocalTime
Precise
There is another consideration. Before Windows 8, the clock had a resolution of around 15ms. In Windows 8 they improved the clock to 100ns (matching the resolution of FILETIME).
GetSystemTimeAsFileTime (legacy, 15ms resolution)
GetSystemTimePreciseAsFileTime (Windows 8, 100ns resolution)
This means we should always prefer the new value:
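For example, a hypothetical helper (written as C++ so the one-time lookup can live in a local static) that prefers the precise call and falls back on older systems:
#include <windows.h>

typedef void (WINAPI *PFN_GetSystemTimePreciseAsFileTime)(LPFILETIME);

// Use GetSystemTimePreciseAsFileTime when it exists (Windows 8 and later),
// otherwise fall back to the classic GetSystemTimeAsFileTime.
void GetBestSystemTimeAsFileTime(FILETIME* ft)
{
    static PFN_GetSystemTimePreciseAsFileTime pfnPrecise =
        (PFN_GetSystemTimePreciseAsFileTime)GetProcAddress(
            GetModuleHandleW(L"kernel32.dll"), "GetSystemTimePreciseAsFileTime");

    if (pfnPrecise)
        pfnPrecise(ft);
    else
        GetSystemTimeAsFileTime(ft);
}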
You asked for the time
You asked for the time; but you have some choices.
The timezone:
UTC (system native)
Local timezone
The format:
FILETIME (system native, 100ns resolution)
SYSTEMTIME (decoded, 1ms resolution)
Summary
100ns resolution: FILETIME
UTC: GetSystemTimePreciseAsFileTime (or GetSystemTimeAsFileTime)
Local: (roll your own)
1ms resolution: SYSTEMTIME
UTC: GetSystemTime
Local: GetLocalTime
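To make the 1ms pair concrete, a minimal usage sketch (nothing here beyond the two documented calls):
#include <windows.h>
#include <stdio.h>

int main(void)
{
    SYSTEMTIME utc, local;
    GetSystemTime(&utc);    // UTC, 1ms resolution
    GetLocalTime(&local);   // local time zone, 1ms resolution

    printf("UTC:   %04hu-%02hu-%02hu %02hu:%02hu:%02hu.%03hu\n",
           utc.wYear, utc.wMonth, utc.wDay, utc.wHour, utc.wMinute, utc.wSecond, utc.wMilliseconds);
    printf("Local: %04hu-%02hu-%02hu %02hu:%02hu:%02hu.%03hu\n",
           local.wYear, local.wMonth, local.wDay, local.wHour, local.wMinute, local.wSecond, local.wMilliseconds);
    return 0;
}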
GetTickCount will not get it done for you.
Look into QueryPerformanceFrequency / QueryPerformanceCounter. The only gotcha here is CPU scaling though, so do your research.
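For relative timing, a minimal sketch of the usual pattern (elapsed ticks divided by the frequency gives seconds):
#include <windows.h>

// Time an arbitrary piece of work and return the elapsed seconds.
double TimeSomething()
{
    LARGE_INTEGER freq, t0, t1;
    QueryPerformanceFrequency(&freq);   // ticks per second
    QueryPerformanceCounter(&t0);

    // ... the work being timed ...

    QueryPerformanceCounter(&t1);
    return double(t1.QuadPart - t0.QuadPart) / double(freq.QuadPart);
}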
Starting with Windows 8, Microsoft introduced the new API call GetSystemTimePreciseAsFileTime.
Unfortunately you can't use it if you create software that must also run on older operating systems.
My current solution is as follows, but be aware: the determined time is not exact, it is only close to the real time. The result should always be smaller than or equal to the real time, with a fixed error (unless the computer went to standby). The result has millisecond resolution. For my purposes it is exact enough.
void GetHighResolutionSystemTime(SYSTEMTIME* pst)
{
static LARGE_INTEGER uFrequency = { 0 };
static LARGE_INTEGER uInitialCount;
static LARGE_INTEGER uInitialTime;
static bool bNoHighResolution = false;
if(!bNoHighResolution && uFrequency.QuadPart == 0)
{
// Initialize performance counter to system time mapping
bNoHighResolution = !QueryPerformanceFrequency(&uFrequency);
if(!bNoHighResolution)
{
FILETIME ftOld, ftInitial;
GetSystemTimeAsFileTime(&ftOld);
do
{
GetSystemTimeAsFileTime(&ftInitial);
QueryPerformanceCounter(&uInitialCount);
} while(ftOld.dwHighDateTime == ftInitial.dwHighDateTime && ftOld.dwLowDateTime == ftInitial.dwLowDateTime);
uInitialTime.LowPart = ftInitial.dwLowDateTime;
uInitialTime.HighPart = ftInitial.dwHighDateTime;
}
}
if(bNoHighResolution)
{
GetSystemTime(pst);
}
else
{
LARGE_INTEGER uNow, uSystemTime;
{
FILETIME ftTemp;
GetSystemTimeAsFileTime(&ftTemp);
uSystemTime.LowPart = ftTemp.dwLowDateTime;
uSystemTime.HighPart = ftTemp.dwHighDateTime;
}
QueryPerformanceCounter(&uNow);
LARGE_INTEGER uCurrentTime;
uCurrentTime.QuadPart = uInitialTime.QuadPart + (uNow.QuadPart - uInitialCount.QuadPart) * 10000000 / uFrequency.QuadPart;
if(uCurrentTime.QuadPart < uSystemTime.QuadPart || abs(uSystemTime.QuadPart - uCurrentTime.QuadPart) > 1000000)
{
// The performance counter has been frozen (e. g. after standby on laptops)
// -> Use current system time and determine the high performance time the next time we need it
uFrequency.QuadPart = 0;
uCurrentTime = uSystemTime;
}
FILETIME ftCurrent;
ftCurrent.dwLowDateTime = uCurrentTime.LowPart;
ftCurrent.dwHighDateTime = uCurrentTime.HighPart;
FileTimeToSystemTime(&ftCurrent, pst);
}
}
GetSystemTimeAsFileTime gives the best precision of any Win32 function for absolute time. QPF/QPC as Joel Clark suggested will give better relative time.
Since we all come here for quick snippets instead of boring explanations, I'll write one:
FILETIME t;
GetSystemTimeAsFileTime(&t); // unusable as is
ULARGE_INTEGER i;
i.LowPart = t.dwLowDateTime;
i.HighPart = t.dwHighDateTime;
int64_t ticks_since_1601 = i.QuadPart; // now usable
int64_t us_since_1601 = (i.QuadPart * 1e-1);
int64_t ms_since_1601 = (i.QuadPart * 1e-4);
int64_t sec_since_1601 = (i.QuadPart * 1e-7);
// unix epoch
int64_t unix_us = (i.QuadPart * 1e-1) - 11644473600LL * 1000000;
int64_t unix_ms = (i.QuadPart * 1e-4) - 11644473600LL * 1000;
double unix_sec = (i.QuadPart * 1e-7) - 11644473600LL;
// i.QuadPart is # of 100ns ticks since 1601-01-01T00:00:00Z
// difference to Unix Epoch is 11644473600 seconds (attention to units!)
I have no idea how the drifting performance-counter-based answers got upvoted; don't introduce slippage bugs, guys.
QueryPerformanceCounter() is built for fine-grained timer resolution.
It is the highest resolution timer that the system has to offer that you can use in your application code to identify performance bottlenecks
Here is a simple implementation for C# devs:
[DllImport("kernel32.dll")]
extern static short QueryPerformanceCounter(ref long x);
[DllImport("kernel32.dll")]
extern static short QueryPerformanceFrequency(ref long x);
private long m_endTime;
private long m_startTime;
private long m_frequency;
public Form1()
{
InitializeComponent();
}
public void Begin()
{
QueryPerformanceCounter(ref m_startTime);
}
public void End()
{
QueryPerformanceCounter(ref m_endTime);
}
private void button1_Click(object sender, EventArgs e)
{
QueryPerformanceFrequency(ref m_frequency);
Begin();
for (long i = 0; i < 1000; i++) ;
End();
MessageBox.Show((m_endTime - m_startTime).ToString());
}
If you are a C/C++ dev, then take a look here: How to use the QueryPerformanceCounter function to time code in Visual C++
Well, this one is very old, yet there is another useful function in the Windows C runtime library, _ftime, which returns a structure with the time as a time_t, the milliseconds, the timezone offset, and a daylight saving time flag.
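For illustration, a small sketch of what that looks like with the MSVC CRT (using _ftime_s, the checked variant; field names per <sys/timeb.h>):
#include <sys/timeb.h>
#include <stdio.h>

int main(void)
{
    struct _timeb tb;
    _ftime_s(&tb);

    // tb.time holds seconds since the Unix epoch, tb.millitm the millisecond part
    printf("seconds: %lld, milliseconds: %hu, DST flag: %d\n",
           (long long)tb.time, tb.millitm, (int)tb.dstflag);
    return 0;
}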
In C11 and above (or C++17 and above) you can use timespec_get() to portably get the time with higher precision:
#include <stdio.h>
#include <time.h>
int main(void)
{
struct timespec ts;
timespec_get(&ts, TIME_UTC);
char buff[100];
strftime(buff, sizeof buff, "%D %T", gmtime(&ts.tv_sec));
printf("Current time: %s.%09ld UTC\n", buff, ts.tv_nsec);
}
If you're using C++, then since C++11 you can use std::chrono::high_resolution_clock, std::chrono::system_clock (wall clock), or std::chrono::steady_clock (monotonic clock) from the <chrono> header. There is no need to use Windows-specific APIs anymore:
auto start1 = std::chrono::high_resolution_clock::now();
auto start2 = std::chrono::system_clock::now();
auto start3 = std::chrono::steady_clock::now();
// do some work
auto end1 = std::chrono::high_resolution_clock::now();
auto end2 = std::chrono::system_clock::now();
auto end3 = std::chrono::steady_clock::now();
auto diff1 = std::chrono::duration_cast<std::chrono::duration<long long, std::milli>>(end1 - start1);
std::chrono::duration<double, std::milli> diff2 = end2 - start2;
auto diff3 = std::chrono::duration_cast<std::chrono::milliseconds>(end3 - start3);
std::cout << diff1.count() << ' ' << diff2.count() << ' ' << diff3.count() << '\n';