fetchHtml method

Future<String> fetchHtml({
  required String url,
  Map<String, String>? headers,
  int? timeout,
  int? retries,
  bool ignoreRobotsTxt = false,
})

Fetches HTML content from the given URL.

url is the URL to fetch.
headers are additional headers to send with the request.
timeout is the timeout for the request, in milliseconds.
retries is the number of retry attempts.
ignoreRobotsTxt is whether to ignore robots.txt rules (default: false).
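A minimal usage sketch follows. The WebScraper class name and its construction are illustrative placeholders; only fetchHtml and ScrapingException come from this API.

// Sketch only: 'WebScraper' stands in for whatever class exposes fetchHtml.
Future<void> main() async {
  final scraper = WebScraper();

  try {
    final html = await scraper.fetchHtml(
      url: 'https://example.com',
      headers: {'Accept-Language': 'en-US'},
      timeout: 5000, // milliseconds
      retries: 3,
    );
    print('Fetched ${html.length} characters');
  } on ScrapingException catch (e) {
    // Raised, for example, when robots.txt disallows the URL.
    print('Scrape failed: $e');
  }
}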

Implementation

Future<String> fetchHtml({
  required String url,
  Map<String, String>? headers,
  int? timeout,
  int? retries,
  bool ignoreRobotsTxt = false,
}) async {
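  // Merge headers: later spread entries win, so caller-supplied headers
  // override both the default User-Agent and the instance defaults.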
  final effectiveHeaders = {
    'User-Agent': _defaultUserAgent,
    ..._defaultHeaders,
    ...?headers,
  };

  // Per-call values fall back to the instance-wide defaults.
  final effectiveTimeout = timeout ?? _defaultTimeout;
  final effectiveRetries = retries ?? _maxRetries;

  // Check robots.txt if enabled and not explicitly ignored
  if (_respectRobotsTxt && !ignoreRobotsTxt) {
    final userAgent = effectiveHeaders['User-Agent'] ?? _defaultUserAgent;
    final isAllowed = await _robotsTxtHandler.isAllowed(url, userAgent);

    if (!isAllowed) {
      _logger.warning('URL not allowed by robots.txt: $url');
      throw ScrapingException.robotsTxt(
        'URL not allowed by robots.txt',
        url: url,
        isRetryable: false,
      );
    }
  }

  // Delegate the actual request and retry loop to the internal helper.
  return _fetchWithRetry(
    url: url,
    headers: effectiveHeaders,
    timeout: effectiveTimeout,
    retries: effectiveRetries,
  );
}
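Because the robots.txt check runs before any network request, a disallowed URL fails fast without spending any retries. To bypass the check for a single call (same illustrative scraper instance as above):

final html = await scraper.fetchHtml(
  url: 'https://example.com/private',
  ignoreRobotsTxt: true,
);

Note that the check honors the effective User-Agent: whichever value survives the header merge is the one passed to the robots.txt handler.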