
Face detection

Updated: 2024-11-08 20:01

Face detection is a computer vision capability that identifies and locates human faces in images or videos. It can be used in a wide range of scenarios, such as identity verification, surveillance systems, smart photo albums, and customer behavior analysis. This topic describes how to use the face detection feature to detect the faces in an image and obtain the related face information.

Feature overview

The face detection feature is based on image AI technology. It detects the faces in an image and returns information about each face; if an image contains multiple faces, all of the faces and their information are detected. The face information includes the face ID, age, gender, emotion, attractiveness, face quality, and face attributes. The face attributes include the face position, head pose, glasses, beard, and mask.


Scenarios

  • Identity verification: face detection combined with face similarity comparison can be used to authenticate users, most commonly for face unlock on mobile phones.

  • Facial expression analysis: face detection combined with expression recognition can be used to analyze facial expressions in applications such as sentiment analysis, augmented reality (AR), and virtual avatars.

Note
  • Cluttered background: a complex background can be confused with facial features and affect the detection result.

  • When an image contains multiple faces, the faces may occlude one another and detection performance may degrade. In both cases, you can discard unreliable detections on the client side by using the quality and confidence fields in the response, as shown in the sketch after this note.
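
One practical mitigation is to filter detections by the quality and confidence scores that the operation returns. The following is a minimal sketch, assuming the response has already been deserialized into a Python dict shaped like the sample response later in this topic; the field names come from that sample, while the function name and threshold values are illustrative assumptions rather than values recommended by IMM.

def filter_reliable_faces(faces, min_quality=0.2, min_gender_confidence=0.9):
    """Keep only the faces whose quality and confidence scores reach the given thresholds."""
    return [
        face
        for face in faces
        if face.get("FaceQuality", 0) >= min_quality
        and face.get("GenderConfidence", 0) >= min_gender_confidence
    ]

# Example: reliable = filter_reliable_faces(result["Faces"])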

Prerequisites

Usage

Call the DetectImageFaces operation to detect the faces in an image by using AI model capabilities and obtain face information such as age and gender.

Image information

  • IMM project name: test-project

  • Storage path of the image to detect: oss://test-bucket/test-object.jpg

  • Sample image: test-object

Sample request

{
    "ProjectName": "test-project",
    "SourceURI": "oss://test-bucket/test-object.jpg"
}

Sample response

{
  "RequestId": "47449201-245D-58A7-B56B-BDA483874B20",
  "Faces": [
    {
      "Beard": "none",
      "MaskConfidence": 0.724,
      "Gender": "male",
      "Boundary": {
        "Left": 138,
        "Top": 102,
        "Height": 19,
        "Width": 17
      },
      "BeardConfidence": 0.801,
      "FigureId": "b6525b63-cb12-4fab-a9f4-9c7de08b80c3",
      "Mouth": "close",
      "Emotion": "none",
      "Age": 36,
      "MouthConfidence": 0.984,
      "FigureType": "face",
      "GenderConfidence": 0.999,
      "HeadPose": {
        "Pitch": -9.386,
        "Roll": -3.478,
        "Yaw": 14.624
      },
      "Mask": "none",
      "EmotionConfidence": 0.998,
      "HatConfidence": 0.794,
      "GlassesConfidence": 0.999,
      "Sharpness": 0.025,
      "FigureClusterId": "figure-cluster-id-unavailable",
      "FaceQuality": 0.3,
      "Attractive": 0.002,
      "AgeSD": 8,
      "Glasses": "none",
      "FigureConfidence": 0.998,
      "Hat": "none"
    },
    {
      "Beard": "none",
      "MaskConfidence": 0.649,
      "Gender": "male",
      "Boundary": {
        "Left": 85,
        "Top": 108,
        "Height": 18,
        "Width": 14
      },
      "BeardConfidence": 0.975,
      "FigureId": "798ab164-ae05-4a9f-b8c9-4b69ca183c3f",
      "Mouth": "close",
      "Emotion": "none",
      "Age": 34,
      "MouthConfidence": 0.97,
      "FigureType": "face",
      "GenderConfidence": 0.917,
      "HeadPose": {
        "Pitch": -0.946,
        "Roll": -1.785,
        "Yaw": -39.264
      },
      "Mask": "mask",
      "EmotionConfidence": 0.966,
      "HatConfidence": 0.983,
      "GlassesConfidence": 1,
      "Sharpness": 0.095,
      "FigureClusterId": "figure-cluster-id-unavailable",
      "FaceQuality": 0.3,
      "Attractive": 0.022,
      "AgeSD": 9,
      "Glasses": "none",
      "FigureConfidence": 0.998,
      "Hat": "none"
    },
    {
      "Beard": "none",
      "MaskConfidence": 0.534,
      "Gender": "female",
      "Boundary": {
        "Left": 245,
        "Top": 128,
        "Height": 16,
        "Width": 13
      },
      "BeardConfidence": 0.998,
      "FigureId": "b9fb1552-cc98-454a-ac7c-18e5c55cc5bf",
      "Mouth": "close",
      "Emotion": "none",
      "Age": 6,
      "MouthConfidence": 0.999,
      "FigureType": "face",
      "GenderConfidence": 0.972,
      "HeadPose": {
        "Pitch": 21.686,
        "Roll": 16.806,
        "Yaw": 50.348
      },
      "Mask": "mask",
      "EmotionConfidence": 0.991,
      "HatConfidence": 0.999,
      "GlassesConfidence": 1,
      "Sharpness": 0.389,
      "FigureClusterId": "figure-cluster-id-unavailable",
      "FaceQuality": 0.3,
      "Attractive": 0.046,
      "AgeSD": 6,
      "Glasses": "none",
      "FigureConfidence": 0.991,
      "Hat": "none"
    },
    {
      "Beard": "none",
      "MaskConfidence": 0.654,
      "Gender": "male",
      "Boundary": {
        "Left": 210,
        "Top": 130,
        "Height": 18,
        "Width": 15
      },
      "BeardConfidence": 0.738,
      "FigureId": "a00154ad-6e5a-48a8-b79e-4cd3699e3281",
      "Mouth": "close",
      "Emotion": "none",
      "Age": 24,
      "MouthConfidence": 0.999,
      "FigureType": "face",
      "GenderConfidence": 0.999,
      "HeadPose": {
        "Pitch": -3.356,
        "Roll": 1.734,
        "Yaw": 12.431
      },
      "Mask": "none",
      "EmotionConfidence": 0.993,
      "HatConfidence": 1,
      "GlassesConfidence": 0.984,
      "Sharpness": 0.449,
      "FigureClusterId": "figure-cluster-id-unavailable",
      "FaceQuality": 0.3,
      "Attractive": 0.005,
      "AgeSD": 15,
      "Glasses": "none",
      "FigureConfidence": 0.985,
      "Hat": "none"
    }
  ]
}
Note

The sample response shows that the image contains four faces and returns the information for each face, such as gender, age, and emotion.
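
The following is a minimal sketch that walks the Faces array of a response like the one above and prints a one-line summary for each detected face, including the bounding box returned in Boundary. It assumes the raw JSON response has been parsed with json.loads; names such as summarize_faces and raw_response_json are illustrative.

import json


def summarize_faces(result: dict) -> None:
    """Print one summary line per face in a DetectImageFaces response dict."""
    for face in result["Faces"]:
        box = face["Boundary"]
        print(
            f'{face["Gender"]}, age {face["Age"]} (AgeSD {face["AgeSD"]}), '
            f'emotion {face["Emotion"]}, mask {face["Mask"]}, '
            f'box: left={box["Left"]}, top={box["Top"]}, '
            f'size={box["Width"]}x{box["Height"]}'
        )


# Example: summarize_faces(json.loads(raw_response_json))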

Sample code

The following sample code shows how to detect the faces in an image by using the IMM SDK for Python.

# -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
import sys
import os
from typing import List

from alibabacloud_imm20200930.client import Client as imm20200930Client
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_imm20200930 import models as imm_20200930_models
from alibabacloud_tea_util import models as util_models
from alibabacloud_tea_util.client import Client as UtilClient


class Sample:
    def __init__(self):
        pass

    @staticmethod
    def create_client(
        access_key_id: str,
        access_key_secret: str,
    ) -> imm20200930Client:
        """
        Initialize the account Client by using the AccessKey ID and AccessKey Secret.
        @param access_key_id:
        @param access_key_secret:
        @return: Client
        @throws Exception
        """
        config = open_api_models.Config(
            access_key_id=access_key_id,
            access_key_secret=access_key_secret
        )
        # Specify the IMM endpoint to access.
        config.endpoint = f'imm.cn-beijing.aliyuncs.com'
        return imm20200930Client(config)

    @staticmethod
    def main(
        args: List[str],
    ) -> None:
        # The AccessKey pair of an Alibaba Cloud account has access to all API operations. We recommend that you use a RAM user to call API operations or perform routine O&M.
        # Do not save the AccessKey ID and AccessKey Secret in your project code. Otherwise, the AccessKey pair may be leaked and the security of all resources in your account may be compromised.
        # In this example, the AccessKey pair is read from environment variables to authenticate API access. For information about how to configure environment variables, see https://help.aliyun.com/document_detail/2361894.html.
        imm_access_key_id = os.getenv("AccessKeyId")
        imm_access_key_secret = os.getenv("AccessKeySecret")
        client = Sample.create_client(imm_access_key_id, imm_access_key_secret)
        detect_image_faces_request = imm_20200930_models.DetectImageFacesRequest(
            project_name='test-project',
            source_uri='oss://test-bucket/test-object.jpg'
        )
        runtime = util_models.RuntimeOptions()
        try:
            # If you copy and run this code, print the return value of the API operation yourself.
            client.detect_image_faces_with_options(detect_image_faces_request, runtime)
        except Exception as error:
            # Print the error message if needed.
            UtilClient.assert_as_string(error.message)

    @staticmethod
    async def main_async(
        args: List[str],
    ) -> None:
        # The AccessKey pair of an Alibaba Cloud account has access to all API operations. We recommend that you use a RAM user to call API operations or perform routine O&M.
        # Do not save the AccessKey ID and AccessKey Secret in your project code. Otherwise, the AccessKey pair may be leaked and the security of all resources in your account may be compromised.
        # In this example, the AccessKey pair is read from environment variables to authenticate API access. For information about how to configure environment variables, see https://help.aliyun.com/document_detail/2361894.html.
        imm_access_key_id = os.getenv("AccessKeyId")
        imm_access_key_secret = os.getenv("AccessKeySecret")
        client = Sample.create_client(imm_access_key_id, imm_access_key_secret)
        detect_image_faces_request = imm_20200930_models.DetectImageFacesRequest(
            project_name='test-project',
            source_uri='oss://test-bucket/test-object.jpg'
        )
        runtime = util_models.RuntimeOptions()
        try:
            # If you copy and run this code, print the return value of the API operation yourself.
            await client.detect_image_faces_with_options_async(detect_image_faces_request, runtime)
        except Exception as error:
            # Print the error message if needed.
            UtilClient.assert_as_string(error.message)


if __name__ == '__main__':
    Sample.main(sys.argv[1:])
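
The sample above discards the return value of detect_image_faces_with_options. To inspect the detection result while testing, you can capture the response and print it, for example by replacing the call inside the try block of main() with the two lines below. This is a sketch that assumes the generated response model provides the to_map() method typically exposed by Tea-based SDK models.

            # Capture the response instead of discarding it, then print the body as a plain dict.
            response = client.detect_image_faces_with_options(detect_image_faces_request, runtime)
            print(response.body.to_map())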
