Expose audio analysis methods via API
shauneccles committed Feb 21, 2024
1 parent 3ffb12e commit c6219cd
Showing 4 changed files with 70 additions and 12 deletions.
2 changes: 0 additions & 2 deletions ledfx/api/config.py
@@ -183,7 +183,6 @@ async def put(self, request: web.Request) -> web.Response:
"""
try:
config = await request.json()
-
audio_config = validate_and_trim_config(
config.pop("audio", {}),
AudioInputSource.AUDIO_CONFIG_SCHEMA.fget(),
@@ -200,7 +199,6 @@ async def put(self, request: web.Request) -> web.Response:
core_config = validate_and_trim_config(
config, CORE_CONFIG_SCHEMA, "core"
)
-
self._ledfx.config["audio"].update(audio_config)
self._ledfx.config["melbanks"].update(melbanks_config)
self._ledfx.config.update(core_config)
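
The handler above validates the "audio" section of a PUT body against AUDIO_CONFIG_SCHEMA before applying it, which is what lets the new analysis settings be changed over the API. A client-side sketch, assuming the endpoint is served at /api/config on LedFx's default port; the values shown are illustrative, not taken from this commit:

import requests

payload = {
    "audio": {
        "min_volume": 0.2,
        "pitch_method": "yinfft",   # must be one of AudioAnalysisSource.PITCH_METHODS
        "onset_method": "hfc",      # must be one of AudioAnalysisSource.ONSET_METHODS
        "pitch_tolerance": 0.8,     # coerced to float, must fall within 0.0-2.0
    }
}

# Host, port, and path are assumptions for this sketch.
resp = requests.put("http://127.0.0.1:8888/api/config", json=payload, timeout=5)
print(resp.status_code, resp.json())
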
27 changes: 22 additions & 5 deletions ledfx/api/schema.py
@@ -6,7 +6,7 @@
from ledfx.api import RestEndpoint
from ledfx.api.utils import PERMITTED_KEYS, convertToJsonSchema
from ledfx.config import CORE_CONFIG_SCHEMA, WLED_CONFIG_SCHEMA
-from ledfx.effects.audio import AudioInputSource
+from ledfx.effects.audio import AudioAnalysisSource, AudioInputSource
from ledfx.effects.melbank import Melbank, Melbanks

_LOGGER = logging.getLogger(__name__)
@@ -119,15 +119,32 @@ async def get(self, request: web.Request) -> web.Response:

elif schema == "audio":
# Get audio schema
+            audio_input_schema = convertToJsonSchema(
+                AudioInputSource.AUDIO_CONFIG_SCHEMA.fget()
+            )
+            audio_analysis_schema = convertToJsonSchema(
+                AudioAnalysisSource.CONFIG_SCHEMA
+            )
+
+            merged_schema = {**audio_input_schema, **audio_analysis_schema}
+
+            for key in (
+                audio_input_schema.keys() & audio_analysis_schema.keys()
+            ):
+                if isinstance(
+                    audio_input_schema[key], dict
+                ) and isinstance(audio_analysis_schema[key], dict):
+                    merged_schema[key] = {
+                        **audio_input_schema[key],
+                        **audio_analysis_schema[key],
+                    }

response["audio"] = {
"schema": {
-                    **convertToJsonSchema(
-                        AudioInputSource.AUDIO_CONFIG_SCHEMA.fget(),
-                    ),
+                    **merged_schema,
**{"permitted_keys": PERMITTED_KEYS["audio"]},
}
}

elif schema == "melbanks":
# Get melbanks schema
response["melbanks"] = {
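
The block above merges the input-device schema with the new analysis schema so the audio entry of the schema response carries both sets of fields; keys present in both are merged one level deep when both values are dicts. A standalone sketch of that merge, with toy dicts standing in for the real converted schemas:

# Toy inputs; the real dicts come from convertToJsonSchema() on the two schemas.
audio_input_schema = {
    "audio_device": {"title": "Audio Device", "type": "integer"},
    "min_volume": {"type": "number", "default": 0.2},
}
audio_analysis_schema = {
    "min_volume": {"description": "Volume threshold used by the analysis"},
    "pitch_method": {"type": "string", "default": "yinfft"},
}

merged_schema = {**audio_input_schema, **audio_analysis_schema}

for key in audio_input_schema.keys() & audio_analysis_schema.keys():
    if isinstance(audio_input_schema[key], dict) and isinstance(
        audio_analysis_schema[key], dict
    ):
        merged_schema[key] = {
            **audio_input_schema[key],
            **audio_analysis_schema[key],
        }

# "min_volume" keeps its input fields and gains the analysis description:
# {'type': 'number', 'default': 0.2, 'description': 'Volume threshold used by the analysis'}
print(merged_schema["min_volume"])
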
9 changes: 8 additions & 1 deletion ledfx/api/utils.py
@@ -18,7 +18,14 @@
}

PERMITTED_KEYS = {
"audio": ("min_volume", "audio_device", "delay_ms"),
"audio": (
"min_volume",
"audio_device",
"delay_ms",
"pitch_method",
"onset_method",
"pitch_tolerance",
),
"melbanks": (
"max_frequencies",
"min_frequency",
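
PERMITTED_KEYS["audio"] now lists the three analysis settings next to the existing input keys, and it is surfaced to clients via the "permitted_keys" entry in the schema response above. The diff does not show any other consumer, so the following is only an illustration of how a client could trim a payload to those keys before an update:

PERMITTED_AUDIO_KEYS = (
    "min_volume",
    "audio_device",
    "delay_ms",
    "pitch_method",
    "onset_method",
    "pitch_tolerance",
)

user_audio_config = {
    "pitch_method": "yin",
    "onset_method": "specflux",
    "sample_rate": 60,  # not a permitted key, dropped below
}

trimmed = {k: v for k, v in user_audio_config.items() if k in PERMITTED_AUDIO_KEYS}
print(trimmed)  # {'pitch_method': 'yin', 'onset_method': 'specflux'}
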
44 changes: 40 additions & 4 deletions ledfx/effects/audio.py
@@ -110,7 +110,8 @@ def AUDIO_CONFIG_SCHEMA():
default_device_index = AudioInputSource.default_device_index()
valid_device_indexes = AudioInputSource.valid_device_indexes()
input_devices = AudioInputSource.input_devices()
-        # melbanks = Melbanks.CONFIG_SCHEMA
+        melbanks = Melbanks.CONFIG_SCHEMA
+        audio_analysis = AudioAnalysisSource.CONFIG_SCHEMA
return vol.Schema(
{
vol.Optional("sample_rate", default=60): int,
@@ -165,6 +166,7 @@ def update_config(self, config):
self.input_devices()[self._config["audio_device"]]
)
)
+        self._ledfx.config["audio"] = self._config

def activate(self):
if self._audio is None:
@@ -440,12 +442,46 @@ def volume(self, filtered=True):


class AudioAnalysisSource(AudioInputSource):
+    # https://aubio.org/doc/latest/pitch_8h.html
+    PITCH_METHODS = [
+        "yinfft",
+        "yin",
+        "yinfast",
+        "fcomb",
+        "mcomb",
+        "schmitt",
+        "specacf",
+    ]
+    # https://aubio.org/doc/latest/specdesc_8h.html
+    ONSET_METHODS = [
+        "energy",
+        "hfc",
+        "complex",
+        "phase",
+        "wphase",
+        "specdiff",
+        "kl",
+        "mkl",
+        "specflux",
+    ]
CONFIG_SCHEMA = vol.Schema(
{
vol.Optional("pitch_method", default="default"): str,
vol.Optional(
"pitch_method",
default="yinfft",
description="Method to detect pitch",
): vol.In(PITCH_METHODS),
vol.Optional("tempo_method", default="default"): str,
vol.Optional("onset_method", default="specflux"): str,
vol.Optional("pitch_tolerance", default=0.8): float,
vol.Optional(
"onset_method",
default="hfc",
description="Method used to detect onsets",
): vol.In(ONSET_METHODS),
vol.Optional(
"pitch_tolerance",
default=0.8,
description="Pitch detection tolerance",
): vol.All(vol.Coerce(float), vol.Range(min=0.0, max=2)),
},
extra=vol.ALLOW_EXTRA,
)
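
A minimal sketch of what the new AudioAnalysisSource.CONFIG_SCHEMA accepts, exercising the validators copied from the diff with voluptuous directly (trimmed to the three keys exposed through the API; tempo_method is omitted):

import voluptuous as vol

PITCH_METHODS = ["yinfft", "yin", "yinfast", "fcomb", "mcomb", "schmitt", "specacf"]
ONSET_METHODS = ["energy", "hfc", "complex", "phase", "wphase", "specdiff", "kl", "mkl", "specflux"]

schema = vol.Schema(
    {
        vol.Optional("pitch_method", default="yinfft"): vol.In(PITCH_METHODS),
        vol.Optional("onset_method", default="hfc"): vol.In(ONSET_METHODS),
        vol.Optional("pitch_tolerance", default=0.8): vol.All(
            vol.Coerce(float), vol.Range(min=0.0, max=2)
        ),
    },
    extra=vol.ALLOW_EXTRA,
)

print(schema({"pitch_tolerance": "1.5"}))   # string input is coerced to 1.5
print(schema({}))                           # defaults: yinfft / hfc / 0.8
try:
    schema({"onset_method": "loudest"})     # not in ONSET_METHODS
except vol.Invalid as err:
    print("rejected:", err)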
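
The PITCH_METHODS and ONSET_METHODS values are aubio algorithm names (see the pitch_8h.html and specdesc_8h.html pages linked in the comments above). A rough sketch of how such method names are typically handed to aubio, independent of LedFx; the buffer sizes and sample rate are assumptions, not values from this commit:

import aubio
import numpy as np

samplerate, buf_size, hop_size = 44100, 1024, 512

pitch_o = aubio.pitch("yinfft", buf_size, hop_size, samplerate)
pitch_o.set_tolerance(0.8)  # corresponds to the pitch_tolerance config key
onset_o = aubio.onset("hfc", buf_size, hop_size, samplerate)

# Feed one hop of (silent) float32 audio, as aubio expects.
frame = np.zeros(hop_size, dtype=np.float32)
print("pitch (Hz):", pitch_o(frame)[0])
print("onset detected:", bool(onset_o(frame)[0]))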
