Commit

Fix linting
danielaskdd committed Feb 1, 2025
1 parent ecf48a5 commit 0a693db
Showing 2 changed files with 5 additions and 3 deletions.
2 changes: 1 addition & 1 deletion lightrag/lightrag.py

@@ -916,7 +916,7 @@ async def aquery(
                 else self.key_string_value_json_storage_cls(
                     namespace="llm_response_cache",
                     global_config=asdict(self),
-                    embedding_func=self.embedding_func,
+                    embedding_func=self.embedding_func,
                 ),
                 prompt=prompt,
             )

(The removed and added embedding_func lines differ only in whitespace; this hunk is a pure formatting fix.)
6 changes: 4 additions & 2 deletions lightrag/utils.py

@@ -368,7 +368,9 @@ async def get_best_cached_response(
     original_prompt=None,
     cache_type=None,
 ) -> Union[str, None]:
-    logger.debug(f"get_best_cached_response: mode={mode} cache_type={cache_type} use_llm_check={use_llm_check}")
+    logger.debug(
+        f"get_best_cached_response: mode={mode} cache_type={cache_type} use_llm_check={use_llm_check}"
+    )
     mode_cache = await hashing_kv.get_by_id(mode)
     if not mode_cache:
         return None
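
Both functions touched in this file rely on hashing_kv behaving like an async key-value store with a get_by_id lookup, as in the mode_cache = await hashing_kv.get_by_id(mode) call above. The snippet below is a minimal illustrative sketch of that interface; the class name, the in-memory dict, and the upsert helper are assumptions made for the example, not LightRAG's actual storage implementation.

# Illustrative only: a minimal async key-value store exposing the get_by_id
# lookup that get_best_cached_response calls above. Not the real storage class.
import asyncio


class InMemoryKVStorage:
    def __init__(self):
        self._data = {}  # assumed in-memory backing dict

    async def get_by_id(self, key):
        # Return the cached entry for a mode, or None when absent, which is
        # what the "if not mode_cache: return None" guard above expects.
        return self._data.get(key)

    async def upsert(self, data: dict):
        self._data.update(data)


async def _demo():
    kv = InMemoryKVStorage()
    await kv.upsert({"global": {"some-hash": {"return": "cached answer"}}})
    print(await kv.get_by_id("global"))


asyncio.run(_demo())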
@@ -511,7 +513,7 @@ async def handle_cache(
     if is_embedding_cache_enabled:
         # Use embedding cache
         current_embedding = await hashing_kv.embedding_func([prompt])
-        llm_model_func = hashing_kv.global_config.get('llm_model_func')
+        llm_model_func = hashing_kv.global_config.get("llm_model_func")
         quantized, min_val, max_val = quantize_embedding(current_embedding[0])
         best_cached_response = await get_best_cached_response(
             hashing_kv,
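
The handle_cache hunk above embeds the prompt, quantizes the embedding with quantize_embedding, and then asks get_best_cached_response for the closest cached entry. As a rough sketch of what a quantize/dequantize pair with that return shape could look like, here is min-max scaling to uint8; the (quantized, min_val, max_val) tuple mirrors the call in the diff, but the scaling scheme itself is an assumption rather than LightRAG's confirmed implementation.

# Illustrative sketch: min-max quantization of an embedding vector to uint8.
# The (quantized, min_val, max_val) return shape matches the call in the diff;
# the concrete scaling scheme is an assumption.
import numpy as np


def quantize_embedding(embedding, bits: int = 8):
    embedding = np.asarray(embedding, dtype=np.float32)
    min_val, max_val = float(embedding.min()), float(embedding.max())
    scale = (max_val - min_val) or 1.0  # guard against constant vectors
    levels = 2**bits - 1
    quantized = np.round((embedding - min_val) / scale * levels).astype(np.uint8)
    return quantized, min_val, max_val


def dequantize_embedding(quantized, min_val, max_val, bits: int = 8):
    scale = (max_val - min_val) or 1.0
    return quantized.astype(np.float32) / (2**bits - 1) * scale + min_val


# Usage: round-trip a vector and check the reconstruction error stays small.
vec = np.random.rand(768).astype(np.float32)
q, lo, hi = quantize_embedding(vec)
print(float(np.abs(vec - dequantize_embedding(q, lo, hi)).max()))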
