call method

Future<void> call(
  T argument
)

Executes the rate-limited function with the given argument. Depending on the current bucket and queue state, the call is processed through the queue, saved as a trailing call, or dropped.
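
A minimal usage sketch follows. The RateLimiter class name and its constructor parameters (bucketSize, trailing, rejectWhenBucketFull) are assumptions for illustration only; this page documents only the call method.

// Hypothetical construction; the parameter names mirror the private fields
// referenced in the implementation below and are assumptions, not a documented API.
final limiter = RateLimiter<String>(
  bucketSize: 5,
  trailing: true,
  rejectWhenBucketFull: false,
);

// Because the class defines `call`, an instance can be invoked like a function.
await limiter('event-payload');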

Implementation

Future<void> call(T argument) async {
  // Security check: prevent usage after disposal
  if (_isDisposed) {
    ObslyLogger.warn('Rate limit: Attempted to use disposed rate limiter, ignoring call');
    return;
  }

  // A new call has arrived - schedule/reschedule bucket reset
  _scheduleBucketReset();

  // Cache queue length and rate limit status for efficiency
  final int currentQueueLength = _queue.length;
  final bool isRateLimited = _executionCount >= _bucketSize;

  // Single queue size check - optimization to avoid duplicate checks
  if (currentQueueLength >= maxQueueSize) {
    ObslyLogger.warn('Rate limit: Queue size exceeded maximum limit ($maxQueueSize), dropping call');
    return;
  }

  if (isRateLimited) {
    if (_rejectWhenBucketFull) {
      ObslyLogger.debug('Rate limit: Bucket is full and rejectWhenBucketFull is true, dropping call');
      return;
    }

    // When rate-limited (throttling mode), we don't queue up calls
    if (_isProcessingQueue || currentQueueLength > 0) {
      if (_trailing) {
        ObslyLogger.debug('Rate limit: Rate limit active. Saving trailing call');
        // Release old trailing call if exists and acquire new one
        if (_pendingTrailingCall != null) {
          _objectPool.release(_pendingTrailingCall!);
        }
        _pendingTrailingCall = _objectPool.acquire(argument);
      } else {
        ObslyLogger.debug('Rate limit: Rate limit active. Dropping call');
      }
      return;
    }
  } else {
    // Before being rate-limited, we allow calls to queue up to the bucket size
    if (currentQueueLength >= _bucketSize) {
      ObslyLogger.debug('Rate limit: Rate limit queue is full, dropping call');
      return;
    }
  }

  _queue.add(_objectPool.acquire(argument));
  await _processQueue();
}
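
The implementation relies on helpers defined elsewhere on the class (_scheduleBucketReset, _processQueue, _objectPool). The following standalone sketch illustrates the same bucket-plus-trailing-call pattern in simplified form; the SimpleRateLimiter name, constructor, and behavior are assumptions for illustration and omit the queue, object pool, and disposal handling of the real code.

import 'dart:async';

// Simplified sketch: execute up to `bucketSize` calls per window and,
// optionally, keep the latest rejected argument as a trailing call.
class SimpleRateLimiter<T> {
  SimpleRateLimiter(this.bucketSize, this.window, this.action, {this.trailing = true});

  final int bucketSize;
  final Duration window;
  final void Function(T) action;
  final bool trailing;

  int _executionCount = 0;
  T? _pendingTrailingCall;
  Timer? _resetTimer;

  void call(T argument) {
    // A call has arrived: make sure a bucket reset is scheduled.
    _resetTimer ??= Timer(window, _resetBucket);

    if (_executionCount >= bucketSize) {
      // Bucket is full: remember the latest argument or drop it.
      if (trailing) _pendingTrailingCall = argument;
      return;
    }

    _executionCount++;
    action(argument);
  }

  void _resetBucket() {
    _executionCount = 0;
    _resetTimer = null;
    final pending = _pendingTrailingCall;
    _pendingTrailingCall = null;
    // Replay the saved trailing call in the new window, if any.
    if (pending != null) call(pending);
  }
}

void main() {
  final limiter = SimpleRateLimiter<int>(
    2,
    const Duration(seconds: 1),
    (n) => print('executed $n'),
  );
  for (var i = 1; i <= 5; i++) {
    limiter(i); // 1 and 2 execute immediately; 5 runs as the trailing call.
  }
}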