Problem creating valid test case for promise rate limit function

I am trying to create the correct test case for the promiseRateLimit function below. The way promiseRateLimit works is to use a queue to store incoming promises and put a delay between them.

 import Promise from 'bluebird' export default function promiseRateLimit (fn, delay, count) { let working = 0 let queue = [] function work () { if ((queue.length === 0) || (working === count)) return working++ Promise.delay(delay).tap(() => working--).then(work) let {self, args, resolve} = queue.shift() resolve(fn.apply(self, args)) } return function debounced (...args) { return new Promise(resolve => { queue.push({self: this, args, resolve}) if (working < count) work() }) } } 

Here is an example of a function in action.

 async function main () { const example = (v) => Promise.delay(50) const exampleLimited = promiseRateLimit(example, 100, 1) const alpha = await exampleLimited('alpha') const beta = await exampleLimited('beta') const gamma = await exampleLimited('gamma') const epsilon = await exampleLimited('epsilon') const phi = await exampleLimited('phi') } 

The example promise takes 50 ms to run, and the promiseRateLimit function allows 1 promise every 100 ms. Therefore, the interval between promises should be at least 100 ms.

Here's a complete test that sometimes returns successful and sometimes fails:

import test from 'ava'
import Debug from 'debug'
import Promise from 'bluebird'
import promiseRateLimit from './index'
import {getIntervalsBetweenDates} from '../utilitiesForDates'
import {arraySum} from '../utilitiesForArrays'
import {filter} from 'lodash'

// Five sequential calls through a 1-per-100ms limiter should yield four
// inter-call intervals, each expected to be >= 100 ms.
test('using async await', async (t) => {
  let timeLog = []
  let runCount = 0
  // NOTE(review): the timestamp is pushed only AFTER the artificial 50 ms
  // delay resolves, so setTimeout jitter inside `example` itself shifts the
  // logged times — this is the source of the occasional ~98 ms intervals.
  const example = (v) => Promise.delay(50)
    .then(() => timeLog.push(new Date))
    .then(() => runCount++)
    .then(() => v)
  // NOTE(review): the fourth argument 'a' is not part of promiseRateLimit's
  // signature and appears to be ignored — confirm and remove.
  const exampleLimited = promiseRateLimit(example, 100, 1, 'a')
  const alpha = await exampleLimited('alpha')
  const beta = await exampleLimited('beta')
  const gamma = await exampleLimited('gamma')
  const epsilon = await exampleLimited('epsilon')
  const phi = await exampleLimited('phi')
  const intervals = getIntervalsBetweenDates(timeLog)
  // Any gap shorter than the configured delay counts as a violation; this
  // strict `< 100` check has no tolerance for timer inaccuracy.
  const invalidIntervals = filter(intervals, (interval) => interval < 100)
  const totalTime = arraySum(intervals)
  t.is(intervals.length, 4)
  t.deepEqual(invalidIntervals, [])
  t.deepEqual(totalTime >= 400, true)
  // The limiter must pass each argument through unchanged.
  t.is(alpha, 'alpha')
  t.is(beta, 'beta')
  t.is(gamma, 'gamma')
  t.is(epsilon, 'epsilon')
  t.is(phi, 'phi')
})

I created a getIntervalsBetweenDates function that simply takes the difference between consecutive Unix timestamps, returning the durations between the dates in an array.

 export function getIntervalsBetweenDates (dates) { let intervals = [] dates.forEach((date, index) => { let nextDate = dates[index + 1] if (nextDate) intervals.push(nextDate - date) }) return intervals } 

The problem is that the above test sometimes returns an interval that is less than delay . For example, if delay is 100ms , sometimes the interval returns 98ms or 96ms . There is no reason why this should happen.

Is there a way to make the above test pass in 100% of cases? I am trying to ensure that the delay argument works, and that there is at least a lot of time between promises.

Update 2016-12-28 9:20 AM (EST)

Here is the full test

import test from 'ava'
import Debug from 'debug'
import Promise from 'bluebird'
import promiseRateLimit from './index'
import {getIntervalsBetweenDates} from '../utilitiesForDates'
import {arraySum} from '../utilitiesForArrays'
import {filter} from 'lodash'

// Five sequential calls through a 1-per-100ms limiter: expect four
// inter-call intervals of at least `bufferInterval` each.
test('using async await', async (t) => {
  let timeLog = []
  let runCount = 0
  let bufferInterval = 100
  let promisesLength = 4
  // Timestamp is recorded synchronously at call time, before the artificial
  // 50 ms delay, so only the limiter's own timer affects the intervals.
  const example = v => {
    timeLog.push(new Date)
    runCount++
    return Promise.delay(50, v)
  }
  const exampleLimited = promiseRateLimit(example, bufferInterval, 1)
  const alpha = await exampleLimited('alpha')
  const beta = await exampleLimited('beta')
  const gamma = await exampleLimited('gamma')
  const epsilon = await exampleLimited('epsilon')
  const phi = await exampleLimited('phi')
  const intervals = getIntervalsBetweenDates(timeLog)
  // NOTE(review): setTimeout makes no exact-timing guarantee, so this strict
  // `< bufferInterval` check can still flag ~98-99 ms intervals as invalid.
  const invalidIntervals = filter(intervals, (interval) => interval < bufferInterval)
  const totalTime = arraySum(intervals)
  t.is(intervals.length, promisesLength)
  t.deepEqual(invalidIntervals, [])
  t.deepEqual(totalTime >= bufferInterval * promisesLength, true)
  // The limiter must pass each argument through unchanged.
  t.is(alpha, 'alpha')
  t.is(beta, 'beta')
  t.is(gamma, 'gamma')
  t.is(epsilon, 'epsilon')
  t.is(phi, 'phi')
})

// Two concurrent calls: one interval between the two start times.
test('using Promise.all with 2 promises', async (t) => {
  let timeLog = []
  let runCount = 0
  let bufferInterval = 100
  let promisesLength = 1
  const example = v => {
    timeLog.push(new Date)
    runCount++
    return Promise.delay(50, v)
  }
  const exampleLimited = promiseRateLimit(example, bufferInterval, 1)
  const results = await Promise.all([exampleLimited('alpha'), exampleLimited('beta')])
  const intervals = getIntervalsBetweenDates(timeLog)
  const invalidIntervals = filter(intervals, (interval) => interval < bufferInterval)
  const totalTime = arraySum(intervals)
  t.is(intervals.length, promisesLength)
  t.deepEqual(invalidIntervals, [])
  t.deepEqual(totalTime >= bufferInterval * promisesLength, true)
})

// Four concurrent calls whose work (200 ms) outlasts the 100 ms window:
// the limiter spacing, not the work duration, should set the intervals.
test('using Promise.props with 4 promises', async (t) => {
  let timeLog = []
  let runCount = 0
  let bufferInterval = 100
  let promisesLength = 3
  const example = v => {
    timeLog.push(new Date)
    runCount++
    return Promise.delay(200, v)
  }
  const exampleLimited = promiseRateLimit(example, bufferInterval, 1)
  const results = await Promise.props({
    'alpha': exampleLimited('alpha'),
    'beta': exampleLimited('beta'),
    'gamma': exampleLimited('gamma'),
    'delta': exampleLimited('delta')
  })
  const intervals = getIntervalsBetweenDates(timeLog)
  const invalidIntervals = filter(intervals, (interval) => interval < bufferInterval)
  const totalTime = arraySum(intervals)
  t.is(intervals.length, promisesLength)
  t.deepEqual(invalidIntervals, [])
  t.deepEqual(totalTime >= bufferInterval * promisesLength, true)
  t.is(results.alpha, 'alpha')
  t.is(results.beta, 'beta')
  t.is(results.gamma, 'gamma')
  t.is(results.delta, 'delta')
})

// Stress variant: 12 concurrent calls, 11 expected intervals. The logged
// output below shows 98/99 ms intervals slipping through, demonstrating
// the setTimeout jitter problem under load.
test('using Promise.props with 12 promises', async (t) => {
  let timeLog = []
  let runCount = 0
  let bufferInterval = 100
  let promisesLength = 11
  const example = v => {
    timeLog.push(new Date)
    runCount++
    return Promise.delay(200, v)
  }
  const exampleLimited = promiseRateLimit(example, bufferInterval, 1)
  const results = await Promise.props({
    'a': exampleLimited('a'),
    'b': exampleLimited('b'),
    'c': exampleLimited('c'),
    'd': exampleLimited('d'),
    'e': exampleLimited('e'),
    'f': exampleLimited('f'),
    'g': exampleLimited('g'),
    'h': exampleLimited('h'),
    'i': exampleLimited('i'),
    'j': exampleLimited('j'),
    'k': exampleLimited('k'),
    'l': exampleLimited('l')
  })
  const intervals = getIntervalsBetweenDates(timeLog)
  console.log(intervals)
  const invalidIntervals = filter(intervals, (interval) => interval < bufferInterval)
  const totalTime = arraySum(intervals)
  t.is(intervals.length, promisesLength)
  t.deepEqual(invalidIntervals, [])
  t.deepEqual(totalTime >= bufferInterval * promisesLength, true)
})

I still run into the problem even with changing example .

 [ 99, 98, 105, 106, 119, 106, 105, 105, 101, 106, 100 ] 2 passed 2 failed using Promise.props with 4 promises t.deepEqual(invalidIntervals, []) | [99] Generator.next (<anonymous>) using Promise.props with 12 promises t.deepEqual(invalidIntervals, []) | [99,98] Generator.next (<anonymous>) 
+6
source share
1 answer

setTimeout (which Promise.delay uses internally) does not guarantee exact timing; it only guarantees that the callback is not invoked before the specified timeout has elapsed. The actual time will depend on machine load, event-loop congestion, and possibly other factors.

In fact, Node.js docs only indicate that

The callback will likely not be invoked in precisely delay milliseconds. Node.js makes no guarantees about the exact timing of when callbacks will fire, nor of their ordering. The callback will be called as close as possible to the time specified.

What happens in your tests is that Promise.delay(50) sometimes takes more than 50 ms (not by much, but still), so the gap to the next log entry can come out below 100 ms whenever the next Promise.delay(50) happens to fire closer to on time.

You can reduce this effect if you simply record the time you called your example function immediately, and not after an artificial delay of about 50 ms:

 const example = v => { timeLog.push(new Date); runCount++; return Promise.delay(50, v) }; 

To cope with the inaccuracy of the 100 ms timeout itself, the simplest solution is to allow it some tolerance, perhaps 5% (which is 5 ms in your case):

// Allow 5% tolerance for setTimeout jitter: a scheduled 100 ms gap may be
// observed as ~95-99 ms, so compare against 95 ms (and 380 ms total)
// instead of the exact nominal values.
const invalidIntervals = filter(intervals, (interval) => interval < 100 * .95)
t.true(totalTime >= 400 * .95)

If you want to be absolutely sure that the delay is never too short, you can write your own function:

 Promise.delayAtLeast = function(delay, value) { const begin = Date.now() return Promise.delay(delay, value).then(function checkTime(v) { const duration = Date.now() - begin; return duration < delay ? Promise.delay(delay - duration, v).then(checkTime); : v; }); }; 

and use this in promiseRateLimit .

+1
source

Source: https://habr.com/ru/post/1013448/


All Articles