Commit

Merge remote-tracking branch 'upstream/main'
lw0404 committed Sep 25, 2024
2 parents 445078b + ec405d4 commit c4f1f11
Showing 2 changed files with 177 additions and 19 deletions.
114 changes: 114 additions & 0 deletions api/config/models.py
@@ -634,3 +634,117 @@ class TaskQueue(BaseModel):
#
# def ErrorResponseModel(error, code, message):
# return {"error": error, "code": code, "message": message}


class FaceSpecBQAT(str, Enum):
ipd = "Inter-pupillary distance."
confidence = "Confidence level of face dectection (not quality score)."
bbox_left = "Left border of the face bounding box coordinates in pixels."
bbox_right = "Right border of the face bounding box coordinates in pixels."
bbox_upper = "Upper border of the face bounding box coordinates in pixels."
bbox_bottom = "Bottom border of the face bounding box coordinates in pixels."
eye_closed_left = "Left eye closed or not."
eye_closed_right = "Right eye closed or not."
pupil_right_x = "X coordinates of right pupil in pixels."
pupil_right_y = "Y coordinates of right pupil in pixels."
pupil_left_x = "X coordinates of left pupil in pixels."
pupil_left_y = "Y coordinates of left pupil in pixels."
yaw_pose = "Yaw direction of the head pose."
yaw_degree = "Yaw angle of the head pose in degrees."
pitch_pose = "Pitch direction of the head pose."
pitch_degree = "Pitch angle of the head pose in degrees."
roll_pose = "Roll direction of the head pose."
roll_degree = "Roll angle of the head pose in degrees."
smile = "Smile detected or not."
glassed = "Glasses detected or not."


class FaceSpecOFIQ(str, Enum):
quality = "MagFace-based unified quality score measure."
background_uniformity = "Gradient-based background uniformity."
illumination_uniformity = "Illumination uniformity computed by summing the minima of the histograms of the left and right sides of the face."
luminance_mean = "Luminance mean measure computed from the luminance histogram."
luminance_variance = (
"Luminance variance measure computed from the luminance histogram."
)
under_exposure_prevention = "Under-exposure prevention by computing the proportion of low-intensity pixels in the luminance image to assess the absence of under-exposure."
over_exposure_prevention = "Over-exposure prevention by computing the proportion of high-intensity pixels in the luminance image to assess the absence of over-exposure."
dynamic_range = "Dynamic range computed from the luminance histogram."
sharpness = "Sharpness assessment based on a random forest classifier trained by the OFIQ development team."
compression_artifacts = "Assessment of the absence of compression artifacts (both JPEG and JPEG2000) based on a CNN trained by the OFIQ development team."
natural_colour = "Assessment of the naturalness of the colour based on the conversion of the RGB representation of the image to the CIELAB colour space."
single_face_present = "Assessment of the uniqueness of the most dominant face detected by comparing its size with the size of the second largest face detected."
eyes_open = "Eyes openness assessment based on computing eyes aspect ratio from eye landmarks."
mouth_closed = (
"Mouth closed assessment based on computing a ratio from mouth landmarks."
)
eyes_visible = "Eyes visibility assessment by measuring the coverage of the eye visibility zone with the result of face occlusion segmentation computed during pre-processing."
mouth_occlusion_prevention = "Assessment of the absence of mouth occlusion by measuring the coverage of the mouth region from mouth landmarks with the result of face occlusion segmentation computed during pre-processing."
face_occlusion_prevention = "Assessment of the absence of face occlusion by measuring the coverage of the landmarked region with the result of face occlusion segmentation computed during pre-processing."
inter_eye_distance = "Inter-eye distance assessment based on computing the Euclidean distance between the eye centres, multiplied by the secant of the yaw angle computed during pre-processing."
head_size = "Size of the head, based on comparing the height of the face computed from facial landmarks with the height of the image."
leftward_crop_of_the_face_image = "Left of the face image crop."
rightward_crop_of_the_face_image = "Right of the face image crop."
downward_crop_of_the_face_image = "Bottom of the face image crop."
upward_crop_of_the_face_image = "Top of the face image crop."
head_pose_yaw = "Pose angle yaw frontal alignment based on the 3DDFAV2."
head_pose_pitch = "Pose angle pitch frontal alignment based on the 3DDFAV2."
head_pose_roll = "Pose angle roll frontal alignment based on the 3DDFAV2."
expression_neutrality = "Expression neutrality estimation based on a fusion of HSEMotion with Efficient-Expression-Neutrality-Estimation."
no_head_coverings = "Assessment of the absence of head coverings by counting the pixels being labeled as head covers in the mask output by the face parsing computed during pre-processing."


class FaceSpecBIQT(str, Enum):
background_deviation = "Image background deviation."
background_grayness = "Image background grayness."
blur = "Overall image blurriness."
blur_face = "Face area blurriness."
focus = "Overall image focus."
focus_face = "Face area focus."
openbr_confidence = "Confidence value from OpenBR."
opencv_IPD = "Inter-eye distance from OpenCV."
over_exposure = "Overall image exposure value."
over_exposure_face = "Face area exposure value."
quality = "Overall quality score."
skin_ratio_face = "Skin to face area ratio."
skin_ratio_full = "Skin to full image area ratio."


class FingerprintSpecDefault(str, Enum):
NFIQ2 = "NIST/NFIQ2 quality score."
UniformImage = "Standard deviation of gray levels in the image, indicating uniformity."
EmptyImageOrContrastTooLow = "The image is blank or the contrast is too low."
FingerprintImageWithMinutiae = "Number of minutiae in the image."
SufficientFingerprintForeground = "Number of pixels in the computed foreground."
EdgeStd = "Metric to identify malformed images."


class IrisSpecDefault(str, Enum):
quality = "An overall quality score that leverages several statistics together."
contrast = "Raw score quantifying overall image contrast."
sharpness = "Raw score quantifying the sharpness of the image."
iris_diameter = "Raw diameter of the iris measured in pixels."
percent_visible_iris = "Percentage of visible iris area."
iris_pupil_gs = "Raw measure quantifying how distinguishable the boundary is between the pupil and the iris."
iris_sclera_gs = "Raw measure quantifying how distinguishable the boundary is between the iris and the sclera."
iso_overall_quality = "The overall ISO quality score based on the product of the normalized individual ISO metrics."
iso_greyscale_utilization = "The spread of intensity values of the pixels within the iris portion of the image, recommended value: 6 or greater."
iso_iris_pupil_concentricity = "The degree to which the pupil centre and the iris centre are in the same location, recommended value: 90 or greater."
iso_iris_pupil_contrast = "The image characteristics at the boundary between the iris region and the pupil, recommended value: 30 or greater."
iso_iris_pupil_ratio = "The degree to which the pupil is dilated or constricted, recommended value: between 20 and 70."
iso_iris_sclera_contrast = "The image characteristics at the boundary between the iris region and the sclera, recommended value: greater than 5."
iso_margin_adequacy = "The degree to which the iris portion of the image is centred relative to the edges of the entire image, recommended value: greater than 80."
iso_pupil_boundary_circularity = "The circularity of the iris-pupil boundary."
iso_sharpness = "The degree of focus present in the image."
iso_usable_iris_area = "The fraction of the iris portion of the image that is not occluded by eyelids, eyelashes, or specular reflections."


class SpeechSpecDefault(str, Enum):
Quality = "Overall quality estimation of the speech audio file."
Noisiness = "Quality degradation such as background, circuit, or coding noise."
Discontinuity = "Quality degradation caused by isolated or non-stationary distortions, e.g. introduced by packet-loss or clipping."
Coloration = "Quality degradation caused by frequency response distortions, e.g. introduced by bandwidth limitation, low bitrate codecs, or packet-loss concealment."
Naturalness = "Estimation of the naturalness of synthetic speech."
Loudness = (
"Influence of the loudness on the perceived quality of transmitted speech."
)
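
Note: each spec class above subclasses (str, Enum), so a member's name serves as the metric/column key and its value as the human-readable description. A minimal sketch of how such an enum flattens into a descriptions mapping, the same pattern generate_report() uses in the next file; the trimmed two-member enum and the print loop are illustrative only, not part of the commit:

from enum import Enum

class FaceSpecDemo(str, Enum):  # trimmed stand-in for FaceSpecBQAT
    ipd = "Inter-pupillary distance."
    confidence = "Confidence level of face detection (not quality score)."

# Build a {column_name: description} dict for the report generator.
descriptions = {item.name: item.value for item in FaceSpecDemo}
for name, text in descriptions.items():
    print(f"{name}: {text}")
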
82 changes: 63 additions & 19 deletions api/utils.py
@@ -37,6 +37,12 @@
from api.config import Settings
from api.config.models import (
CollectionLog,
FaceSpecBIQT,
FaceSpecBQAT,
FaceSpecOFIQ,
FingerprintSpecDefault,
IrisSpecDefault,
SpeechSpecDefault,
# DetectorOptions,
# ReportLog,
Status,
@@ -633,7 +639,16 @@ async def run_report_tasks(

print(f">> Generate report: {dataset_id}")

report = [report_task.remote(data, task.get("options"))]
options = task.get("options")
dataset_log = await log["datasets"].find_one({"collection": dataset_id})
options.update(
{
"mode": dataset_log["options"].get("mode"),
"engine": dataset_log["options"].get("engine"),
}
)

report = [report_task.remote(data, options)]
await log["reports"].find_one_and_update(
{"tid": task["tid"]},
{
@@ -1054,17 +1069,23 @@ def check_options(options, modality):


def generate_report(data, **options):
excluded_columns = ["file", "tag", "log"]
temp = "report.html"
df = pd.DataFrame.from_dict(data)

excluded_columns = ["file", "tag", "log"]
excluded_columns = [col for col in excluded_columns if col in df.columns]

df = df.drop(columns=excluded_columns)
df = df.loc[:, ~df.columns.str.endswith("_scalar")]

# Ensure numeric columns are not categorized
df = df.apply(lambda col: pd.to_numeric(col, errors="ignore"))
numeric_columns = df.select_dtypes(include='number').columns
# print(f'----------------{numeric_columns}')
try:
df = df.apply(lambda col: pd.to_numeric(col))
except Exception as e:
print(f"Error converting string columns to float: {e}")
numeric_columns = df.select_dtypes(include="number").columns
df[numeric_columns] = df[numeric_columns].apply(pd.to_numeric, downcast='float')
# df.set_index("file", inplace=True)
# df = df.drop(columns=['file'])
# pd.set_option('display.float_format', '{:.2e}'.format)

if options.get("downsample"):
df = df.sample(frac=options.get("downsample", 0.05))

@@ -1073,30 +1094,53 @@ def generate_report(data, **options):
if col not in excluded_columns and not pd.api.types.is_numeric_dtype(df[col]):
df[col] = df[col].astype('category')

match options.get("mode"):
case "face":
match options.get("engine"):
case "bqat":
descriptions = {item.name: item.value for item in FaceSpecBQAT}
case "ofiq":
descriptions = {item.name: item.value for item in FaceSpecOFIQ}
case "biqt":
descriptions = {item.name: item.value for item in FaceSpecBIQT}
case _:
descriptions = {}
case "fingerprint":
descriptions = {item.name: item.value for item in FingerprintSpecDefault}
case "iris":
descriptions = {item.name: item.value for item in IrisSpecDefault}
case "speech":
descriptions = {item.name: item.value for item in SpeechSpecDefault}
case _:
descriptions = {}

pd.set_option("display.float_format", "{:.4f}".format)

ProfileReport(
df,
title=f"EDA Report (BQAT v{__version__})",
explorative=True,
minimal=options.get("minimal", False),
progress_bar=False,
# correlations={
# "auto": {"calculate": False},
# "pearson": {"calculate": False},
# "spearman": {"calculate": True},
# "kendall": {"calculate": False},
# "phi_k": {"calculate": False},
# "cramers": {"calculate": False},
# },
correlations=None,
# progress_bar=False,
correlations={
"auto": {"calculate": False},
"pearson": {"calculate": True},
"spearman": {"calculate": True},
"kendall": {"calculate": True},
"phi_k": {"calculate": False},
"cramers": {"calculate": False},
},
# correlations=None,
vars={"num": {"low_categorical_threshold": 0}},
html={
"navbar_show": True,
# "full_width": True,
"style": {
"full_width": True,
"theme": "simplex",
"logo": "https://www.biometix.com/wp-content/uploads/2020/10/logo.png",
},
},
variables={"descriptions": descriptions},
).to_file(temp)

with open(temp, "r") as f:
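
For context, a minimal standalone sketch of how the per-modality descriptions mapping selected above is consumed by ydata-profiling. The DataFrame contents, report title, and minimal=True setting here are made up for illustration; the real call in generate_report() additionally configures correlations, vars, and html styling as shown in the diff:

import pandas as pd
from ydata_profiling import ProfileReport

from api.config.models import FaceSpecBQAT  # assumes the enums added in this commit are importable

# Hypothetical sample data keyed by the enum member names used as column headers.
df = pd.DataFrame({"ipd": [62.1, 58.4], "confidence": [0.98, 0.91]})

# Keep only descriptions for columns present in this toy frame.
descriptions = {item.name: item.value for item in FaceSpecBQAT if item.name in df.columns}

ProfileReport(
    df,
    title="EDA Report",
    minimal=True,
    variables={"descriptions": descriptions},  # rendered as per-column descriptions in the HTML report
).to_file("report.html")
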
