C++: How to make a timer accurate on Linux

Consider this code:

#include <iostream>
#include <cstdint>
#include <cstdlib>
#include <chrono>
#include <thread>
#include <boost/asio.hpp>
#include <boost/asio/high_resolution_timer.hpp>

static const uint32_t FREQUENCY = 5000; // Hz
static const uint32_t MKSEC_IN_SEC = 1000000;

std::chrono::microseconds timeout(MKSEC_IN_SEC / FREQUENCY);
boost::asio::io_service ioservice;
boost::asio::high_resolution_timer timer(ioservice);

// Note: high_resolution_clock::time_point, not system_clock::time_point --
// the two are not guaranteed to be the same type on every standard library.
static std::chrono::high_resolution_clock::time_point lastCallTime = std::chrono::high_resolution_clock::now();
static uint64_t deviationSum = 0;
static uint64_t deviationMin = 100000000;
static uint64_t deviationMax = 0;
static uint32_t counter = 0;

void timerCallback(const boost::system::error_code &err) {
  if (err) {
    return; // the wait was cancelled, do not re-arm
  }

  // Measure how much time actually passed since the previous callback.
  auto actualTimeout = std::chrono::high_resolution_clock::now() - lastCallTime;
  std::chrono::microseconds actualTimeoutMkSec = std::chrono::duration_cast<std::chrono::microseconds>(actualTimeout);
  long timeoutDeviation = actualTimeoutMkSec.count() - timeout.count();
  uint64_t absDeviation = std::abs(timeoutDeviation);
  deviationSum += absDeviation;
  // Update min and max independently: a single sample can be both the new
  // minimum and the new maximum (e.g. the very first one).
  if(absDeviation > deviationMax) {
    deviationMax = absDeviation;
  }
  if(absDeviation < deviationMin) {
    deviationMin = absDeviation;
  }

  ++counter;
  //std::cout << "Actual timeout: " << actualTimeoutMkSec.count() << "\t\tDeviation: " << timeoutDeviation << "\t\tCounter: " << counter << std::endl;

  // Re-arm the timer relative to "now" and remember when we did it.
  timer.expires_from_now(timeout);
  timer.async_wait(timerCallback);
  lastCallTime = std::chrono::high_resolution_clock::now();
}

using namespace std::chrono_literals;

int main() {
  std::cout << "Frequency: " << FREQUENCY << " Hz" << std::endl;
  std::cout << "Callback should be called each: " << timeout.count() << " mkSec" << std::endl;
  std::cout << std::endl;

  timer.expires_from_now(timeout);
  timer.async_wait(timerCallback);
  lastCallTime = std::chrono::high_resolution_clock::now();
  std::thread worker([&] { ioservice.run(); });
  std::this_thread::sleep_for(1s);
  ioservice.stop();  // stop the event loop so the worker thread can be joined
  worker.join();

  std::cout << std::endl << "Messages posted: " << counter << std::endl;
  std::cout << "Frequency deviation: " << static_cast<int64_t>(FREQUENCY) - counter << std::endl;
  std::cout << "Min timeout deviation: " << deviationMin << std::endl;
  std::cout << "Max timeout deviation: " << deviationMax << std::endl;
  std::cout << "Avg timeout deviation: " << (counter ? deviationSum / counter : 0) << std::endl;

  return 0;
}

It starts a timer that calls timerCallback(..) periodically at a given frequency. In this example the callback should fire 5000 times per second. If you play with the frequency, you will see that the actual (measured) call rate differs from the requested one, and the higher the frequency, the larger the deviation. I took measurements at different frequencies; here is the summary: https://docs.google.com/spreadsheets/d/1SQtg2slNv-9VPdgS0RD4yKRnyDK1ijKrjVz7BBMSg24/edit?usp=sharing

If the desired frequency is 10,000 Hz, the system misses about 10% (~1,000) of the calls. At 100,000 Hz it skips about 40% (~40,000) of the calls.
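
To rule out my own code as the source of the misses, it helps to measure how long a deliberately tiny requested sleep actually takes. This standalone check is not part of the program above, just a sketch; on a typical desktop kernel the average it prints comes out far above the requested 1 mkSec, and that number is effectively a floor on the achievable period no matter which timer API sits on top:

#include <chrono>
#include <iostream>
#include <thread>

int main() {
  using clock = std::chrono::steady_clock;
  const int iterations = 1000;
  std::chrono::nanoseconds total(0);

  for (int i = 0; i < iterations; ++i) {
    auto start = clock::now();
    std::this_thread::sleep_for(std::chrono::microseconds(1)); // ask for 1 mkSec
    total += std::chrono::duration_cast<std::chrono::nanoseconds>(clock::now() - start);
  }

  std::cout << "Requested 1 mkSec, got on average "
            << total.count() / iterations / 1000.0 << " mkSec" << std::endl;
  return 0;
}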

The question: how do I make such a timer accurate on Linux in C++? Is it even possible to reach, say, 500000 calls per second?

P.S. Yes, I know that the body of timerCallback(..) takes time to execute too, but it is on the order of 1 mkSec and does not account for the deviation.

These numbers do not surprise me: at such rates you are fighting the scheduler's wakeup granularity, not the timer API, so the fix is in how you schedule the wakeups rather than in which timer you pick.

The main trick: attach every call to an absolute position on the time axis instead of re-arming the timer relative to the moment the callback happens to run. If you ask the kernel to sleep for 1 mkSec, in practice you will sleep for something like 8usec; that is roughly the wakeup granularity the scheduler gives you (the exact figure depends on the kernel, the hardware and the load, but the order of magnitude is the point). So for a 2usec period (the 500000 Hz case): plan call #1 at T, call #2 at T + 2usec, call #3 at T + 4usec, ..., call #N at T + 2 * (N-1) usec, and on every iteration sleep until the absolute deadline oldT + 2usec, e.g. with nanosleep(3), instead of sleeping for a fixed 2usec after the callback returns. A late wakeup is then simply followed by a shorter sleep, so the error never accumulates and the average rate stays exact. On top of that, give the thread real-time priority through the pthread scheduling API so ordinary processes cannot preempt it at the wrong moment. None of this is exotic; the same approach works fine even on Android.
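
A minimal sketch of that absolute-deadline loop, assuming plain POSIX on Linux: clock_nanosleep(2) with TIMER_ABSTIME provides the absolute deadlines, and pthread_setschedparam(3) requests the real-time priority (which usually needs root or CAP_SYS_NICE). The names PERIOD_NS and tick() are made up for the example:

#include <cerrno>
#include <cstdint>
#include <cstdio>
#include <ctime>
#include <pthread.h>
#include <sched.h>

static const long PERIOD_NS = 2000; // 2usec period = 500000 Hz

static void tick(uint64_t n) {
  (void)n; // payload goes here; it must finish well within the period
}

int main() {
  // Real-time priority so an ordinary process cannot preempt the loop.
  // Requires root or CAP_SYS_NICE; without it the loop still runs, only worse.
  sched_param sp = {};
  sp.sched_priority = 50;
  if (pthread_setschedparam(pthread_self(), SCHED_FIFO, &sp) != 0)
    std::fprintf(stderr, "warning: no real-time priority\n");

  timespec deadline;
  clock_gettime(CLOCK_MONOTONIC, &deadline);

  for (uint64_t n = 0; n < 1000000; ++n) {
    // Call #N is pinned to T + N * PERIOD_NS on the time axis, so a late
    // wakeup shortens the next sleep instead of shifting the whole schedule.
    deadline.tv_nsec += PERIOD_NS;
    if (deadline.tv_nsec >= 1000000000L) {
      deadline.tv_nsec -= 1000000000L;
      ++deadline.tv_sec;
    }
    // Sleep until an absolute point in time, not for a relative interval.
    while (clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &deadline, nullptr) == EINTR)
      ;
    tick(n);
  }
  return 0;
}

The important difference from the Boost code in the question is TIMER_ABSTIME: the expiry time is never recomputed from "now" inside the callback, so per-wakeup latency cannot accumulate into drift.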

And if the absolute timestamps matter, not just the spacing between calls, keep the system clock disciplined with NTP; over a GBit LAN it can hold machines to within a few usec of each other. Otherwise the schedule you compute is only as good as the clock it is anchored to.
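
If you want to verify from code that NTP is actually disciplining the clock, Linux exposes the kernel clock state through adjtimex(2); a read-only query is only a few lines (a Linux-specific sketch):

#include <cstdio>
#include <sys/timex.h>

int main() {
  timex tx = {};
  tx.modes = 0;               // read-only query, change nothing
  int state = adjtimex(&tx);  // returns the clock state, or -1 on error
  if (state == -1) {
    std::perror("adjtimex");
    return 1;
  }
  if (state == TIME_ERROR)
    std::printf("clock is NOT synchronized\n");
  else
    std::printf("clock synchronized, estimated error: %ld usec\n", tx.esterror);
  return 0;
}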


Source: https://habr.com/ru/post/1691366/

