doc: add missing comments for Task APIs (#1021)
* doc: add missing Task API comments

* refactor: rename targets

* chore: fix comments
homuler authored Sep 2, 2023
1 parent e9f06d0 commit 4c21fa5
Showing 6 changed files with 121 additions and 7 deletions.
@@ -38,13 +38,30 @@ private FaceDetector(
_packetCallback = packetCallback;
}

/// <summary>
/// Creates a <see cref="FaceDetector" /> object from a TensorFlow Lite model and the default <see cref="FaceDetectorOptions" />.
///
/// Note that the created <see cref="FaceDetector" /> instance is in image mode,
/// for detecting faces on single image inputs.
/// </summary>
/// <param name="modelPath">Path to the model.</param>
/// <returns>
/// <see cref="FaceDetector" /> object that's created from the model and the default <see cref="FaceDetectorOptions" />.
/// </returns>
public static FaceDetector CreateFromModelPath(string modelPath)
{
var baseOptions = new Tasks.Core.BaseOptions(modelAssetPath: modelPath);
var options = new FaceDetectorOptions(baseOptions, runningMode: Core.RunningMode.IMAGE);
return CreateFromOptions(options);
}

/// <summary>
/// Creates the <see cref="FaceDetector" /> object from <see cref="FaceDetectorOptions" />.
/// </summary>
/// <param name="options">Options for the face detector task.</param>
/// <returns>
/// <see cref="FaceDetector" /> object that's created from <paramref name="options" />.
/// </returns>
public static FaceDetector CreateFromOptions(FaceDetectorOptions options)
{
var taskInfo = new Tasks.Core.TaskInfo<FaceDetectorOptions>(
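Not part of the diff: a minimal usage sketch of the two factory methods documented above. The model path is a placeholder, and the explicit-options variant simply mirrors the call inside CreateFromModelPath shown in the hunk; the namespace-relative type names (Tasks.Core.BaseOptions, Core.RunningMode) are written as they appear inside the file.

// Image-mode detector created from a model file with the default FaceDetectorOptions.
var detector = FaceDetector.CreateFromModelPath("path/to/face_detector.tflite");  // placeholder path

// Equivalent explicit-options variant, mirroring the body of CreateFromModelPath above.
var baseOptions = new Tasks.Core.BaseOptions(modelAssetPath: "path/to/face_detector.tflite");
var options = new FaceDetectorOptions(baseOptions, runningMode: Core.RunningMode.IMAGE);
var detectorFromOptions = FaceDetector.CreateFromOptions(options);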
@@ -6,6 +6,9 @@

namespace Mediapipe.Tasks.Vision.FaceDetector
{
/// <summary>
/// Options for the face detector task.
/// </summary>
public sealed class FaceDetectorOptions : Tasks.Core.ITaskOptions
{
/// <param name="detectionResult">
@@ -23,11 +26,44 @@ public sealed class FaceDetectorOptions : Tasks.Core.ITaskOptions
/// </param>
public delegate void ResultCallback(Components.Containers.DetectionResult detectionResult, Image image, int timestampMs);

/// <summary>
/// Base options for the face detector task.
/// </summary>
public Tasks.Core.BaseOptions baseOptions { get; }
/// <summary>
/// The running mode of the task. Defaults to the image mode.
/// The face detector task has three running modes:
/// <list type="number">
/// <item>
/// <description>The image mode for detecting faces on single image inputs.</description>
/// </item>
/// <item>
/// <description>The video mode for detecting faces on the decoded frames of a video.</description>
/// </item>
/// <item>
/// <description>
/// The live stream mode for detecting faces on a live stream of input data, such as from a camera.
/// </description>
/// </item>
/// </list>
/// </summary>
public Core.RunningMode runningMode { get; }
/// <summary>
/// The minimum confidence score for the face detection to be considered successful.
/// </summary>
public float minDetectionConfidence { get; }
/// <summary>
/// The minimum non-maximum-suppression threshold for face detection to be considered overlapped.
/// </summary>
public float minSuppressionThreshold { get; }
/// <summary>
/// The maximum number of faces that can be detected by the face detector.
/// </summary>
public int numFaces { get; }
/// <summary>
/// The user-defined result callback for processing live stream data.
/// The result callback should only be specified when the running mode is set to the live stream mode.
/// </summary>
public ResultCallback resultCallback { get; }

public FaceDetectorOptions(
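To illustrate the live-stream mode and resultCallback described above, a hedged configuration sketch: the constructor parameter names are assumed to match the property names, and RunningMode.LIVE_STREAM is an assumed enum member (only RunningMode.IMAGE appears in this diff).

var options = new FaceDetectorOptions(
    new Tasks.Core.BaseOptions(modelAssetPath: "path/to/face_detector.tflite"),  // placeholder path
    runningMode: Core.RunningMode.LIVE_STREAM,                                   // assumed enum member
    numFaces: 1,                                                                 // assumed parameter name
    resultCallback: (detectionResult, image, timestampMs) =>
    {
        // Invoked asynchronously with the DetectionResult for each input frame.
    });
var detector = FaceDetector.CreateFromOptions(options);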
@@ -6,6 +6,9 @@

namespace Mediapipe.Tasks.Vision.FaceLandmarker
{
/// <summary>
/// Options for the face landmarker task.
/// </summary>
public sealed class FaceLandmarkerOptions : Tasks.Core.ITaskOptions
{
/// <param name="faceLandmarksResult">
@@ -19,14 +22,60 @@ public sealed class FaceLandmarkerOptions : Tasks.Core.ITaskOptions
/// </param>
public delegate void ResultCallback(FaceLandmarkerResult faceLandmarksResult, Image image, int timestampMs);

/// <summary>
/// Base options for the face landmarker task.
/// </summary>
public Tasks.Core.BaseOptions baseOptions { get; }
/// <summary>
/// The running mode of the task. Defaults to the image mode.
/// FaceLandmarker has three running modes:
/// <list type="number">
/// <item>
/// <description>The image mode for detecting face landmarks on single image inputs.</description>
/// </item>
/// <item>
/// <description>The video mode for detecting face landmarks on the decoded frames of a video.</description>
/// </item>
/// <item>
/// <description>
/// The live stream mode for detecting face landmarks on a live stream of input data, such as from a camera.
/// In this mode, the <see cref="resultCallback" /> below must be specified to receive the detection results asynchronously.
/// </description>
/// </item>
/// </list>
/// </summary>
public Core.RunningMode runningMode { get; }
/// <summary>
/// The maximum number of faces that can be detected by the face detector.
/// </summary>
public int numFaces { get; }
/// <summary>
/// The minimum confidence score for the face detection to be considered successful.
/// </summary>
public float minFaceDetectionConfidence { get; }
/// <summary>
/// The minimum confidence score of face presence in the face landmark detection.
/// </summary>
public float minFacePresenceConfidence { get; }
/// <summary>
/// The minimum confidence score for the face tracking to be considered successful.
/// </summary>
public float minTrackingConfidence { get; }
/// <summary>
/// Whether FaceLandmarker outputs face blendshapes classification.
/// Face blendshapes are used for rendering the 3D face model.
/// </summary>
public bool outputFaceBlendshapes { get; }
/// <summary>
/// Whether FaceLandmarker outputs the facial transformation matrix.
/// The facial transformation matrix is used to transform the face landmarks from the canonical face to the detected face,
/// so that users can apply face effects on the detected landmarks.
/// </summary>
public bool outputFaceTransformationMatrixes { get; }
/// <summary>
/// The user-defined result callback for processing live stream data.
/// The result callback should only be specified when the running mode is set to the live stream mode.
/// </summary>
public ResultCallback resultCallback { get; }

public FaceLandmarkerOptions(
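Similarly, a sketch of enabling the optional blendshape and transformation-matrix outputs documented above. The constructor parameter names and the .task model file name are assumptions, and FaceLandmarker.CreateFromOptions is assumed to parallel the FaceDetector factory shown earlier.

var options = new FaceLandmarkerOptions(
    new Tasks.Core.BaseOptions(modelAssetPath: "path/to/face_landmarker.task"),  // placeholder path
    runningMode: Core.RunningMode.IMAGE,
    numFaces: 1,                              // assumed parameter name
    outputFaceBlendshapes: true,              // fills FaceLandmarkerResult.faceBlendshapes
    outputFaceTransformationMatrixes: true);  // fills FaceLandmarkerResult.facialTransformationMatrixes
var landmarker = FaceLandmarker.CreateFromOptions(options);  // assumed factory, paralleling FaceDetector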
@@ -11,10 +11,22 @@

namespace Mediapipe.Tasks.Vision.FaceLandmarker
{
/// <summary>
/// The face landmarks result from FaceLandmarker, where each list element represents a single face detected in the image.
/// </summary>
public readonly struct FaceLandmarkerResult
{
/// <summary>
/// Detected face landmarks in normalized image coordinates.
/// </summary>
public readonly IReadOnlyList<NormalizedLandmarks> faceLandmarks;
/// <summary>
/// Optional face blendshapes results.
/// </summary>
public readonly IReadOnlyList<Classifications> faceBlendshapes;
/// <summary>
/// Optional facial transformation matrixes.
/// </summary>
public readonly IReadOnlyList<Matrix4x4> facialTransformationMatrixes;

internal FaceLandmarkerResult(IReadOnlyList<NormalizedLandmarks> faceLandmarks,
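A sketch of consuming the result fields documented above. How the result is obtained (e.g. a detection call) is not shown in this diff and is assumed, as is the per-face index alignment of the optional lists.

// result: a FaceLandmarkerResult returned by a FaceLandmarker detection call (call name assumed).
for (var i = 0; i < result.faceLandmarks.Count; i++)
{
    var landmarks = result.faceLandmarks[i];                   // NormalizedLandmarks for face i
    if (result.faceBlendshapes != null)                        // populated only when outputFaceBlendshapes is true (assumed)
    {
        var blendshapes = result.faceBlendshapes[i];           // Classifications for face i
    }
    if (result.facialTransformationMatrixes != null)           // populated only when outputFaceTransformationMatrixes is true (assumed)
    {
        var matrix = result.facialTransformationMatrixes[i];   // Matrix4x4 for face i
    }
}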
@@ -60,7 +60,7 @@ public static HandLandmarker CreateFromModelPath(string modelPath)
/// <summary>
/// Creates the <see cref="HandLandmarker" /> object from <paramref name="HandLandmarkerOptions" />.
/// </summary>
- /// <param name="options">Options for the face landmarker task.</param>
+ /// <param name="options">Options for the hand landmarker task.</param>
/// <returns>
/// <see cref="HandLandmarker" /> object that's created from <paramref name="options" />.
/// </returns>
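And the analogous hedged sketch for the hand landmarker factory above; HandLandmarkerOptions' constructor is not shown in this diff, so its parameters are assumed to mirror the face tasks.

var options = new HandLandmarkerOptions(
    new Tasks.Core.BaseOptions(modelAssetPath: "path/to/hand_landmarker.task"),  // placeholder path
    runningMode: Core.RunningMode.IMAGE);                                        // assumed parameter name
var handLandmarker = HandLandmarker.CreateFromOptions(options);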
12 changes: 6 additions & 6 deletions mediapipe_api/tasks/cc/vision/face_landmarker/proto/BUILD
@@ -12,15 +12,15 @@ package(default_visibility = ["//visibility:public"])
pkg_files(
name = "proto_srcs",
srcs = [
- ":face_blendshapes_graph_options",
- ":face_landmarker_graph_options",
- ":face_landmarks_detector_graph_options",
+ ":face_blendshapes_graph_options_cs",
+ ":face_landmarker_graph_options_cs",
+ ":face_landmarks_detector_graph_options_cs",
],
prefix = "Tasks/Vision/FaceLandmarker/Proto",
)

csharp_proto_src(
- name = "face_blendshapes_graph_options",
+ name = "face_blendshapes_graph_options_cs",
proto_src = "mediapipe/tasks/cc/vision/face_landmarker/proto/face_blendshapes_graph_options.proto",
deps = [
"@com_google_mediapipe//mediapipe/tasks/cc/core/proto:protos_src",
@@ -31,7 +31,7 @@ csharp_proto_src(
)

csharp_proto_src(
- name = "face_landmarker_graph_options",
+ name = "face_landmarker_graph_options_cs",
proto_src = "mediapipe/tasks/cc/vision/face_landmarker/proto/face_landmarker_graph_options.proto",
deps = [
"@com_google_mediapipe//mediapipe/tasks/cc/core/proto:protos_src",
@@ -45,7 +45,7 @@ csharp_proto_src(
)

csharp_proto_src(
- name = "face_landmarks_detector_graph_options",
+ name = "face_landmarks_detector_graph_options_cs",
proto_src = "mediapipe/tasks/cc/vision/face_landmarker/proto/face_landmarks_detector_graph_options.proto",
deps = [
"@com_google_mediapipe//mediapipe/tasks/cc/core/proto:protos_src",
