Skip to content

Commit

Permalink
Merge pull request #1 from flare-research/dev-dependencies
Browse files Browse the repository at this point in the history
fix(project): prepare for dockerization, fix linter issues, add actions
  • Loading branch information
magurh authored Feb 12, 2025
2 parents 2c240e9 + a2909d9 commit ba2c641
Show file tree
Hide file tree
Showing 37 changed files with 474 additions and 469 deletions.
59 changes: 59 additions & 0 deletions .github/workflows/build_and_deploy.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
name: Build and Deploy

on:
  push:
    # tags: ["v*.*.*"] # Match semantic versioning tags like v1.0.0
    branches: ["main"]
  workflow_dispatch: # Allow manual triggering of the workflow

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

jobs:
  build-and-push:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
      attestations: write
      id-token: write
    steps:
      # Checkout repository
      - name: Checkout repository
        uses: actions/checkout@v4

      # Buildx is required for the local layer-cache export/import used below;
      # without it, build-push-action falls back to the plain docker driver,
      # which cannot consume a local cache.
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      # Log in to GHCR
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      # Generate Docker metadata (image tags and OCI labels derived from the ref)
      - name: Metadata for Docker
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}

      # Cache Docker layers to speed up builds.
      # actions/cache@v3 is deprecated; v4 is a drop-in replacement.
      - name: Cache Docker layers
        uses: actions/cache@v4
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-docker-${{ github.ref }}
          restore-keys: |
            ${{ runner.os }}-docker-

      # Build and push Docker image.
      # NOTE: `load: true` was removed — build-push-action rejects `push` and
      # `load` set together, and loading into the local daemon is not needed
      # when the image is only pushed to the registry.
      - name: Build and push Docker image
        uses: docker/build-push-action@v6
        id: push
        with:
          context: .
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          # Wire the restored cache into the build (previously restored but unused).
          cache-from: type=local,src=/tmp/.buildx-cache
          cache-to: type=local,dest=/tmp/.buildx-cache,mode=max
37 changes: 37 additions & 0 deletions .github/workflows/lint.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
name: Lint

on:
  push:
    branches: ["main"]
  pull_request:
    branches: ["main"]

jobs:
  lint:
    # Unquoted for consistency with the build workflow's `runs-on` value.
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      # Set up the uv package manager (resolves Python from .python-version)
      - name: Set up uv
        uses: astral-sh/setup-uv@v5
        with:
          version: "latest"

      # Install locked dependencies, including all extras
      - name: Install dependencies
        run: uv sync --frozen --all-extras

      # Fail if any file would be reformatted; --diff prints the changes
      # instead of writing them back.
      - name: Format with ruff
        run: uv run ruff format --diff

      # Lint with ruff. NOTE(review): `--diff` only surfaces violations that
      # have an auto-fix — confirm unfixable rules should not also gate CI.
      # (`continue-on-error: false` removed — it is the default.)
      - name: Lint with ruff
        run: uv run ruff check --diff

      # Static type checking
      - name: Type check with pyright
        run: uv run pyright
2 changes: 1 addition & 1 deletion .python-version
Original file line number Diff line number Diff line change
@@ -1 +1 @@
3.10
3.12
62 changes: 59 additions & 3 deletions pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
[project]
name = "consensus-llm"
name = "flare-ai-consensus"
version = "0.1.0"
description = "Add your description here"
description = "Flare AI SDK template for single-node multi-model Consensus Learning"
readme = "README.md"
requires-python = ">=3.10"
requires-python = ">=3.12"
dependencies = [
"asyncio>=3.4.3",
"httpx>=0.28.1",
Expand All @@ -13,5 +13,61 @@ dependencies = [
"pandas>=2.2.3",
"python-dotenv>=1.0.1",
"requests>=2.32.3",
"structlog>=25.1.0",
]

[dependency-groups]
dev = [
"pyright>=1.1.393",
"ruff>=0.9.4",
]

[project.scripts]
start-consensus = "flare_ai_consensus.main:main"

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[tool.ruff]
target-version = "py312"

[tool.ruff.lint]
select = ["ALL"]
ignore = ["D203", "D212", "COM812", "D", "S105", "ANN401", "ISC003"]

[tool.ruff.lint.extend-per-file-ignores]
"tests/**/*.py" = ["S101", "ARG"]

[tool.ruff.format]
docstring-code-format = true

[tool.pyright]
pythonVersion = "3.12"
strictListInference = true
strictDictionaryInference = true
strictSetInference = true
deprecateTypingAliases = true
disableBytesTypePromotions = true
reportUnusedClass = true
reportUnusedFunction = true
reportUntypedFunctionDecorator = true
reportUntypedClassDecorator = true
reportPrivateUsage = true
reportTypeCommentUsage = true
reportConstantRedefinition = true
reportDeprecated = true
reportInconsistentConstructor = true
reportUninitializedInstanceVariable = true
reportMissingParameterType = true
reportCallInDefaultInitializer = true
reportUnnecessaryIsInstance = true
reportUnnecessaryCast = true
reportUnnecessaryComparison = true
reportUnnecessaryContains = true
reportInvalidStubStatement = true
reportUnusedExpression = true
reportUnnecessaryTypeIgnoreComment = true
reportMatchNotExhaustive = true
reportImplicitOverride = true
reportShadowedImports = true
File renamed without changes.
8 changes: 8 additions & 0 deletions src/data/final_consensus.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
{
"aggregated_response": "Here's a combined response based on the majority of the models:\n\nHeat is emitted in a freshly painted room due to two primary reasons: evaporation of solvents and release of volatile organic compounds (VOCs) from the paint, and the exothermic chemical reactions that occur during the drying process.\n\nWhen paint is applied to a surface, it contains solvents and other chemicals that are volatile, meaning they can evaporate at room temperature. As the paint dries, these solvents and VOCs evaporate into the air, carrying heat with them. This process of evaporation is endothermic, meaning it absorbs heat from the surrounding environment, resulting in a cooler temperature in the room.\n\nAdditionally, the paint itself also emits heat as it dries due to the exothermic chemical reactions that occur during the drying process. These reactions involve the breakdown of the paint's components, releasing energy in the form of heat. This heat is then distributed throughout the room, contributing to the overall warmth",
"responses": {
"qwen/qwen-vl-plus:free": "Heat is emitted in a freshly painted room due to the evaporation of solvents and volatile organic compounds (VOCs) from the paint, which absorbs heat from the surrounding environment, and the chemical reactions that occur during the drying process, which release energy in the form of heat.",
"sophosympatheia/rogue-rose-103b-v0.2:free": " Heat is emitted in a freshly painted room due to two primary reasons: evaporation of solvents and release of volatile organic compounds (VOCs) from the paint, and the exothermic chemical reactions that occur during the drying process.\n\n1. Evaporation of solvents and VOCs: When paint is applied to a surface, it contains solvents and other chemicals that are volatile, meaning they can evaporate at room temperature. As the paint dries, these solvents and VOCs evaporate into the air, carrying heat with them. This process of evaporation is endothermic, meaning it absorbs heat from the surrounding environment, resulting in a cooler temperature in the room.\n2. Exothermic chemical reactions: The paint itself also emits heat as it dries due to the exothermic chemical reactions that occur during",
"meta-llama/llama-3.2-3b-instruct:free": "Heat is emitted in a freshly painted room because the paint contains volatile organic compounds (VOCs) that evaporate as the paint dries, absorbing heat from the surrounding environment, and the chemical reactions that occur during drying release energy in the form of heat."
}
}
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
2 changes: 1 addition & 1 deletion src/config.py → src/flare_ai_consensus/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,5 +32,5 @@ def create_path(folder_name: str) -> Path:
open_router_base_url=load_env_var("OPENROUTER_BASE_URL"),
open_router_api_key=load_env_var("OPENROUTER_API_KEY"),
data_path=create_path("data"),
input_path=create_path("src"),
input_path=create_path("flare_ai_consensus"),
)
File renamed without changes.
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
from src.consensus.config import AggregatorConfig
from src.router.client import OpenRouterClient, AsyncOpenRouterClient
from flare_ai_consensus.consensus.config import AggregatorConfig
from flare_ai_consensus.router.client import AsyncOpenRouterClient, OpenRouterClient


def concatenate_aggregator(responses: dict) -> str:
Expand All @@ -21,7 +21,8 @@ def centralized_llm_aggregator(
:param client: An OpenRouterClient instance.
:param aggregator_config: An instance of AggregatorConfig.
:param aggregated_responses: A string containing aggregated responses from individual models.
:param aggregated_responses: A string containing aggregated
responses from individual models.
:return: The aggregator's combined response.
"""
# Build the message list.
Expand All @@ -46,10 +47,7 @@ def centralized_llm_aggregator(

# Get aggregated response from the centralized LLM
response = client.send_chat_completion(payload)
aggregated_text = (
response.get("choices", [])[0].get("message", {}).get("content", "")
)
return aggregated_text
return response.get("choices", [])[0].get("message", {}).get("content", "")


async def async_centralized_llm_aggregator(
Expand All @@ -62,7 +60,8 @@ async def async_centralized_llm_aggregator(
:param client: An asynchronous OpenRouter client.
:param aggregator_config: An instance of AggregatorConfig.
:param aggregated_responses: A string containing aggregated responses from individual models.
:param aggregated_responses: A string containing aggregated
responses from individual models.
:return: The aggregator's combined response as a string.
"""
messages = []
Expand All @@ -80,7 +79,4 @@ async def async_centralized_llm_aggregator(
}

response = await client.send_chat_completion(payload)
aggregated_text = (
response.get("choices", [])[0].get("message", {}).get("content", "")
)
return aggregated_text
return response.get("choices", [])[0].get("message", {}).get("content", "")
File renamed without changes.
Original file line number Diff line number Diff line change
@@ -1,8 +1,12 @@
import asyncio

from src.consensus.config import ConsensusConfig, ModelConfig
from src.router.client import AsyncOpenRouterClient
from src.utils.parser import parse_chat_response
import structlog

from flare_ai_consensus.consensus.config import ConsensusConfig, ModelConfig
from flare_ai_consensus.router.client import AsyncOpenRouterClient
from flare_ai_consensus.utils.parser import parse_chat_response

logger = structlog.get_logger(__name__)


def build_improvement_conversation(
Expand Down Expand Up @@ -42,20 +46,21 @@ async def get_response_for_model(
:param client: An instance of an asynchronous OpenRouter client.
:param consensus_config: An instance of ConsensusConfig.
:param aggregated_response: The aggregated consensus response from the previous round (or None).
:param aggregated_response: The aggregated consensus response
from the previous round (or None).
:param model: A ModelConfig instance.
:return: A tuple of (model_id, response text).
"""
if aggregated_response is None:
# Use initial prompt for the first round.
conversation = consensus_config.initial_prompt
print(f"Sending initial prompt to {model.model_id}.")
logger.info("sending initial prompt", model_id=model.model_id)
else:
# Build the improvement conversation.
conversation = build_improvement_conversation(
consensus_config, aggregated_response
)
print(f"Sending improvement prompt to {model.model_id}.")
logger.info("sending improvement prompt", model_id=model.model_id)

payload = {
"model": model.model_id,
Expand All @@ -65,27 +70,27 @@ async def get_response_for_model(
}
response = await client.send_chat_completion(payload)
text = parse_chat_response(response)
print(f"{model.model_id} has provided a new response.")

logger.info("new response", model_id=model.model_id, response=text)
return model.model_id, text


async def send_round(
client: AsyncOpenRouterClient,
consensus_config: ConsensusConfig,
aggregated_response: str = None,
aggregated_response: str | None = None,
) -> dict:
"""
Asynchronously sends a round of chat completion requests for all models.
:param client: An instance of an asynchronous OpenRouter client.
:param consensus_config: An instance of ConsensusConfig.
:param aggregated_response: The aggregated consensus response from the previous round (or None).
:param aggregated_response: The aggregated consensus response from the
previous round (or None).
:return: A dictionary mapping model IDs to their response texts.
"""
tasks = [
get_response_for_model(client, consensus_config, model, aggregated_response)
for model in consensus_config.models
]
results = await asyncio.gather(*tasks)
return {model_id: text for model_id, text in results}
return dict(results)
File renamed without changes.
32 changes: 18 additions & 14 deletions src/main.py → src/flare_ai_consensus/main.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,18 @@
import asyncio

from src.config import config
from src.consensus import aggregator, consensus
from src.consensus.config import ConsensusConfig
from src.router.client import AsyncOpenRouterClient
from src.utils import (
saver,
import structlog

from flare_ai_consensus.config import config
from flare_ai_consensus.consensus import aggregator, consensus
from flare_ai_consensus.consensus.config import ConsensusConfig
from flare_ai_consensus.router.client import AsyncOpenRouterClient
from flare_ai_consensus.utils import (
loader,
saver,
)

logger = structlog.get_logger(__name__)


async def run_consensus(
client: AsyncOpenRouterClient,
Expand All @@ -22,17 +26,17 @@ async def run_consensus(
:param consensus_config: An instance of ConsensusConfig.
"""
response_data = {}
response_data['initial_conversation'] = consensus_config.initial_prompt
response_data["initial_conversation"] = consensus_config.initial_prompt

# Step 1: Initial round.
responses = await consensus.send_round(client, consensus_config)
aggregated_response = await aggregator.async_centralized_llm_aggregator(
client, consensus_config.aggregator_config, responses
)
print("\nInitial responses have been aggregated.")
logger.info("initial response aggregation complete")

response_data['iteration_0'] = responses
response_data['aggregate_0'] = aggregated_response
response_data["iteration_0"] = responses
response_data["aggregate_0"] = aggregated_response

# Step 2: Improvement rounds.
for i in range(consensus_config.iterations):
Expand All @@ -42,18 +46,18 @@ async def run_consensus(
aggregated_response = await aggregator.async_centralized_llm_aggregator(
client, consensus_config.aggregator_config, responses
)
print(f"\nThe responses have been aggregated after iteration {i + 1}.")
logger.info("responses aggregated", iteration=i + 1)

response_data[f'iteration_{i+1}'] = responses
response_data[f'aggregate_{i+1}'] = aggregated_response
response_data[f"iteration_{i + 1}"] = responses
response_data[f"aggregate_{i + 1}"] = aggregated_response

# Step 3: Save final consensus.
output_file = config.data_path / "final_consensus.json"
saver.save_json(
response_data,
output_file,
)
print(f"\nFinal consensus saved to {output_file}.")
logger.info("saved consensus", output_file=output_file)

# Close the async client to release resources.
await client.close()
Expand Down
File renamed without changes.
Loading

0 comments on commit ba2c641

Please sign in to comment.