processFrame method

Future<SegmentationResult> processFrame(
  1. Uint8List frameData, {
  2. required int width,
  3. required int height,
  4. int rotationDegrees = 0,
  5. SegmenterInputFormat format = SegmenterInputFormat.rgba8888,
})

Process a camera frame with the current virtual background.

This method mirrors the React `selfieSegmentationPreview` function, using a segmentation mask to separate the person from the background before compositing.

Parameters:

  • frameData: Raw frame data from the camera (RGBA or YUV)
  • width: Frame width in pixels
  • height: Frame height in pixels
  • rotationDegrees: Camera rotation (0, 90, 180, 270)
  • format: Input image format (defaults to RGBA)

Returns a SegmentationResult containing the processed frame, or an error result when the processor is not initialized, no background is set, or a previous frame is still being processed (frame drop).

Implementation

/// Processes a single camera frame, compositing the current virtual
/// background onto the person region identified by the segmentation mask.
///
/// Mirrors the React `selfieSegmentationPreview` function.
///
/// [frameData] is the raw frame buffer (RGBA or YUV, per [format]).
/// [width] and [height] are the frame dimensions in pixels.
/// [rotationDegrees] is the camera rotation (0, 90, 180, or 270).
/// [format] is the input pixel format; defaults to RGBA8888.
///
/// Returns a [SegmentationResult] with the processed frame, or an error
/// result when the processor is not ready or a frame is already in flight.
Future<SegmentationResult> processFrame(
  Uint8List frameData, {
  required int width,
  required int height,
  int rotationDegrees = 0,
  SegmenterInputFormat format = SegmenterInputFormat.rgba8888,
}) async {
  // Copy the nullable field to a local so the compiler can promote it,
  // avoiding the `!` assertions that were used below.
  final background = _currentBackground;
  if (!_isInitialized || background == null) {
    return SegmentationResult.error('Processor not initialized');
  }

  if (background.type == BackgroundType.none) {
    // No virtual background selected: pass the original frame through.
    return SegmentationResult(
      processedFrame: frameData,
      processingTimeMs: 0,
    );
  }

  if (_isProcessing) {
    // A previous frame is still in flight; drop this one rather than queue.
    return SegmentationResult.error('Already processing');
  }

  _isProcessing = true;
  final stopwatch = Stopwatch()..start();

  try {
    // Describe the raw buffer so the segmenter can interpret it.
    final metadata = SegmenterInputMetadata(
      width: width,
      height: height,
      format: format,
      rotation: rotationDegrees,
    );

    // Prefer the platform-specific segmenter; fall back to the legacy one.
    // Read the legacy segmenter once into a local: enables null promotion
    // and avoids re-evaluating the getter on each access.
    SegmentationResult result;
    final legacy = legacySegmenter;

    if (_platformSegmenter.isSupported && _platformSegmenter.isReady) {
      result = await _platformSegmenter.processFrame(frameData, metadata);
    } else if (legacy != null && legacy.isReady) {
      result = await legacy.processFrame(
        frameData,
        width: width,
        height: height,
      );
    } else {
      // No segmenter available: return the original frame unmodified.
      stopwatch.stop();
      return SegmentationResult(
        processedFrame: frameData,
        processingTimeMs: stopwatch.elapsedMilliseconds,
      );
    }

    // Composite the configured background using the segmentation mask.
    final processedFrame = await _applyBackgroundToFrame(
      result.processedFrame ?? frameData,
      result.mask,
      width: width,
      height: height,
    );

    stopwatch.stop();

    final finalResult = SegmentationResult(
      processedFrame: processedFrame,
      mask: result.mask,
      processingTimeMs: stopwatch.elapsedMilliseconds,
    );

    onFrameProcessed?.call(finalResult);
    return finalResult;
  } catch (e) {
    // Best-effort: surface the failure as an error result rather than
    // propagating, so the camera pipeline keeps running.
    return SegmentationResult.error('Processing error: $e');
  } finally {
    // Always release the in-flight flag, even on error or early return.
    _isProcessing = false;
  }
}