chore: unify import style in examples

akx committed Dec 30, 2024
1 parent 42d80aa commit b51a8cd

Showing 20 changed files with 55 additions and 60 deletions.
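In short: every example now uses a single `import ollama` and reaches the API through the module namespace instead of importing individual symbols. A minimal sketch of the pattern this commit applies (assumes a local Ollama server with an illustrative `llama3.2` model pulled):

```python
# Old style, removed by this commit: per-symbol imports
# from ollama import chat

# New style: one namespace import; symbols are accessed as module attributes
import ollama

response = ollama.chat(
  model='llama3.2',  # any locally available model works; llama3.2 is illustrative
  messages=[{'role': 'user', 'content': 'Why is the sky blue?'}],
)
print(response['message']['content'])
```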
29 changes: 15 additions & 14 deletions README.md
@@ -17,10 +17,9 @@ pip install ollama
 ## Usage
 
 ```python
-from ollama import chat
-from ollama import ChatResponse
+import ollama
 
-response: ChatResponse = chat(model='llama3.2', messages=[
+response: ollama.ChatResponse = ollama.chat(model='llama3.2', messages=[
   {
     'role': 'user',
     'content': 'Why is the sky blue?',
@@ -38,12 +37,12 @@ See [_types.py](ollama/_types.py) for more information on the response types.
 Response streaming can be enabled by setting `stream=True`.
 
 ```python
-from ollama import chat
+import ollama
 
-stream = chat(
-  model='llama3.2',
-  messages=[{'role': 'user', 'content': 'Why is the sky blue?'}],
-  stream=True,
+stream = ollama.chat(
+  model='llama3.2',
+  messages=[{'role': 'user', 'content': 'Why is the sky blue?'}],
+  stream=True,
 )
 
 for chunk in stream:
@@ -56,8 +55,9 @@ A custom client can be created by instantiating `Client` or `AsyncClient` from `
 All extra keyword arguments are passed into the [`httpx.Client`](https://www.python-httpx.org/api/#client).
 
 ```python
-from ollama import Client
-client = Client(
+import ollama
+
+client = ollama.Client(
   host='http://localhost:11434',
   headers={'x-some-header': 'some-value'}
 )
@@ -75,11 +75,12 @@ The `AsyncClient` class is used to make asynchronous requests. It can be configu
 
 ```python
 import asyncio
-from ollama import AsyncClient
+import ollama
 
+
 async def chat():
   message = {'role': 'user', 'content': 'Why is the sky blue?'}
-  response = await AsyncClient().chat(model='llama3.2', messages=[message])
+  response = await ollama.AsyncClient().chat(model='llama3.2', messages=[message])
 
 asyncio.run(chat())
 ```
@@ -88,11 +89,11 @@ Setting `stream=True` modifies functions to return a Python asynchronous generat
 
 ```python
 import asyncio
-from ollama import AsyncClient
+import ollama
 
 async def chat():
   message = {'role': 'user', 'content': 'Why is the sky blue?'}
-  async for part in await AsyncClient().chat(model='llama3.2', messages=[message], stream=True):
+  async for part in await ollama.AsyncClient().chat(model='llama3.2', messages=[message], stream=True):
     print(part['message']['content'], end='', flush=True)
 
 asyncio.run(chat())
4 changes: 2 additions & 2 deletions examples/async-chat.py
@@ -1,6 +1,6 @@
 import asyncio
 
-from ollama import AsyncClient
+import ollama
 
 
 async def main():
@@ -11,7 +11,7 @@ async def main():
     },
   ]
 
-  client = AsyncClient()
+  client = ollama.AsyncClient()
   response = await client.chat('llama3.2', messages=messages)
   print(response['message']['content'])
 
4 changes: 2 additions & 2 deletions examples/async-structured-outputs.py
@@ -2,7 +2,7 @@
 
 from pydantic import BaseModel
 
-from ollama import AsyncClient
+import ollama
 
 
 # Define the schema for the response
@@ -17,7 +17,7 @@ class FriendList(BaseModel):
 
 
 async def main():
-  client = AsyncClient()
+  client = ollama.AsyncClient()
   response = await client.chat(
     model='llama3.1:8b',
     messages=[{'role': 'user', 'content': 'I have two friends. The first is Ollama 22 years old busy saving the world, and the second is Alonso 23 years old and wants to hang out. Return a list of friends in JSON format'}],
3 changes: 1 addition & 2 deletions examples/async-tools.py
@@ -1,7 +1,6 @@
 import asyncio
 
 import ollama
-from ollama import ChatResponse
 
 
 def add_two_numbers(a: int, b: int) -> int:
@@ -54,7 +53,7 @@ def subtract_two_numbers(a: int, b: int) -> int:
 async def main():
   client = ollama.AsyncClient()
 
-  response: ChatResponse = await client.chat(
+  response: ollama.ChatResponse = await client.chat(
     'llama3.1',
     messages=messages,
     tools=[add_two_numbers, subtract_two_numbers_tool],
4 changes: 2 additions & 2 deletions examples/chat-stream.py
@@ -1,4 +1,4 @@
-from ollama import chat
+import ollama
 
 messages = [
   {
@@ -7,7 +7,7 @@
   },
 ]
 
-for part in chat('llama3.2', messages=messages, stream=True):
+for part in ollama.chat('llama3.2', messages=messages, stream=True):
   print(part['message']['content'], end='', flush=True)
 
 print()
4 changes: 2 additions & 2 deletions examples/chat-with-history.py
@@ -1,4 +1,4 @@
-from ollama import chat
+import ollama
 
 messages = [
   {
@@ -21,7 +21,7 @@
 
 while True:
   user_input = input('Chat with history: ')
-  response = chat(
+  response = ollama.chat(
     'llama3.2',
     messages=messages
     + [
4 changes: 2 additions & 2 deletions examples/chat.py
@@ -1,4 +1,4 @@
-from ollama import chat
+import ollama
 
 messages = [
   {
@@ -7,5 +7,5 @@
   },
 ]
 
-response = chat('llama3.2', messages=messages)
+response = ollama.chat('llama3.2', messages=messages)
 print(response['message']['content'])
4 changes: 2 additions & 2 deletions examples/create.py
@@ -1,6 +1,6 @@
 import sys
 
-from ollama import create
+import ollama
 
 args = sys.argv[1:]
 if len(args) == 2:
@@ -25,5 +25,5 @@
 SYSTEM You are Mario from super mario bros, acting as an assistant.
 """
 
-for response in create(model=args[0], modelfile=modelfile, stream=True):
+for response in ollama.create(model=args[0], modelfile=modelfile, stream=True):
   print(response['status'])
4 changes: 2 additions & 2 deletions examples/embed.py
@@ -1,4 +1,4 @@
-from ollama import embed
+import ollama
 
-response = embed(model='llama3.2', input='Hello, world!')
+response = ollama.embed(model='llama3.2', input='Hello, world!')
 print(response['embeddings'])
4 changes: 2 additions & 2 deletions examples/fill-in-middle.py
@@ -1,4 +1,4 @@
-from ollama import generate
+import ollama
 
 prompt = '''def remove_non_ascii(s: str) -> str:
     """ '''
@@ -7,7 +7,7 @@
     return result
 """
 
-response = generate(
+response = ollama.generate(
   model='codellama:7b-code',
   prompt=prompt,
   suffix=suffix,
4 changes: 2 additions & 2 deletions examples/generate-stream.py
@@ -1,4 +1,4 @@
-from ollama import generate
+import ollama
 
-for part in generate('llama3.2', 'Why is the sky blue?', stream=True):
+for part in ollama.generate('llama3.2', 'Why is the sky blue?', stream=True):
   print(part['response'], end='', flush=True)
4 changes: 2 additions & 2 deletions examples/generate.py
@@ -1,4 +1,4 @@
-from ollama import generate
+import ollama
 
-response = generate('llama3.2', 'Why is the sky blue?')
+response = ollama.generate('llama3.2', 'Why is the sky blue?')
 print(response['response'])
6 changes: 2 additions & 4 deletions examples/list.py
@@ -1,8 +1,6 @@
-from ollama import ListResponse, list
+import ollama
 
-response: ListResponse = list()
-
-for model in response.models:
+for model in ollama.list().models:
   print('Name:', model.model)
   print('  Size (MB):', f'{(model.size.real / 1024 / 1024):.2f}')
   if model.details:
4 changes: 2 additions & 2 deletions examples/multimodal-chat.py
@@ -1,4 +1,4 @@
-from ollama import chat
+import ollama
 
 # from pathlib import Path
 
@@ -10,7 +10,7 @@
 # or the raw bytes
 # img = Path(path).read_bytes()
 
-response = chat(
+response = ollama.chat(
   model='llama3.2-vision',
   messages=[
     {
4 changes: 2 additions & 2 deletions examples/multimodal-generate.py
@@ -3,7 +3,7 @@
 
 import httpx
 
-from ollama import generate
+import ollama
 
 latest = httpx.get('https://xkcd.com/info.0.json')
 latest.raise_for_status()
@@ -23,7 +23,7 @@
 raw = httpx.get(comic.json().get('img'))
 raw.raise_for_status()
 
-for response in generate('llava', 'explain this comic:', images=[raw.content], stream=True):
+for response in ollama.generate('llava', 'explain this comic:', images=[raw.content], stream=True):
   print(response['response'], end='', flush=True)
 
 print()
11 changes: 4 additions & 7 deletions examples/ps.py
@@ -1,9 +1,8 @@
-from ollama import ProcessResponse, chat, ps, pull
+import ollama
 
 # Ensure at least one model is loaded
-response = pull('llama3.2', stream=True)
 progress_states = set()
-for progress in response:
+for progress in ollama.pull('llama3.2', stream=True):
   if progress.get('status') in progress_states:
     continue
   progress_states.add(progress.get('status'))
@@ -12,11 +11,9 @@
 print('\n')
 
 print('Waiting for model to load... \n')
-chat(model='llama3.2', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}])
+ollama.chat(model='llama3.2', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}])
 
-
-response: ProcessResponse = ps()
-for model in response.models:
+for model in ollama.ps().models:
   print('Model: ', model.model)
   print('  Digest: ', model.digest)
   print('  Expires at: ', model.expires_at)
4 changes: 2 additions & 2 deletions examples/pull.py
@@ -1,9 +1,9 @@
 from tqdm import tqdm
 
-from ollama import pull
+import ollama
 
 current_digest, bars = '', {}
-for progress in pull('llama3.2', stream=True):
+for progress in ollama.pull('llama3.2', stream=True):
   digest = progress.get('digest', '')
   if digest != current_digest and current_digest in bars:
     bars[current_digest].close()
4 changes: 2 additions & 2 deletions examples/structured-outputs-image.py
@@ -3,7 +3,7 @@
 
 from pydantic import BaseModel
 
-from ollama import chat
+import ollama
 
 
 # Define the schema for image objects
@@ -32,7 +32,7 @@ class ImageDescription(BaseModel):
   raise FileNotFoundError(f'Image not found at: {path}')
 
 # Set up chat as usual
-response = chat(
+response = ollama.chat(
   model='llama3.2-vision',
   format=ImageDescription.model_json_schema(),  # Pass in the schema for the response
   messages=[
4 changes: 2 additions & 2 deletions examples/structured-outputs.py
@@ -1,6 +1,6 @@
 from pydantic import BaseModel
 
-from ollama import chat
+import ollama
 
 
 # Define the schema for the response
@@ -15,7 +15,7 @@ class FriendList(BaseModel):
 
 
 # schema = {'type': 'object', 'properties': {'friends': {'type': 'array', 'items': {'type': 'object', 'properties': {'name': {'type': 'string'}, 'age': {'type': 'integer'}, 'is_available': {'type': 'boolean'}}, 'required': ['name', 'age', 'is_available']}}}, 'required': ['friends']}
-response = chat(
+response = ollama.chat(
   model='llama3.1:8b',
   messages=[{'role': 'user', 'content': 'I have two friends. The first is Ollama 22 years old busy saving the world, and the second is Alonso 23 years old and wants to hang out. Return a list of friends in JSON format'}],
   format=FriendList.model_json_schema(),  # Use Pydantic to generate the schema or format=schema
6 changes: 3 additions & 3 deletions examples/tools.py
@@ -1,4 +1,4 @@
-from ollama import ChatResponse, chat
+import ollama
 
 
 def add_two_numbers(a: int, b: int) -> int:
@@ -47,7 +47,7 @@ def subtract_two_numbers(a: int, b: int) -> int:
   'subtract_two_numbers': subtract_two_numbers,
 }
 
-response: ChatResponse = chat(
+response: ollama.ChatResponse = ollama.chat(
   'llama3.1',
   messages=messages,
   tools=[add_two_numbers, subtract_two_numbers_tool],
@@ -72,7 +72,7 @@ def subtract_two_numbers(a: int, b: int) -> int:
     messages.append({'role': 'tool', 'content': str(output), 'name': tool.function.name})
 
   # Get final response from model with function outputs
-  final_response = chat('llama3.1', messages=messages)
+  final_response = ollama.chat('llama3.1', messages=messages)
   print('Final response:', final_response.message.content)
 
 else:
