From 6f5565914f1e76536afcd1e21e0b8bb11208e4fe Mon Sep 17 00:00:00 2001
From: Michael Yang
Date: Wed, 20 Dec 2023 14:53:38 -0800
Subject: [PATCH] update README.md

---
 README.md | 70 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 70 insertions(+)

diff --git a/README.md b/README.md
index e69de29..0183092 100644
--- a/README.md
+++ b/README.md
@@ -0,0 +1,70 @@
+# Ollama Python Library
+
+The Ollama Python library provides the easiest way to integrate your Python 3 project with [Ollama](https://github.com/jmorganca/ollama).
+
+## Getting Started
+
+Requires Python 3.8 or higher.
+
+```sh
+pip install ollama
+```
+
+A global default client is provided for convenience and can be used in the same way as the synchronous client.
+
+```python
+import ollama
+response = ollama.chat(model='llama2', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}])
+```
+
+```python
+import ollama
+message = {'role': 'user', 'content': 'Why is the sky blue?'}
+for part in ollama.chat(model='llama2', messages=[message], stream=True):
+  print(part['message']['content'], end='', flush=True)
+```
+
+
+### Using the Synchronous Client
+
+```python
+from ollama import Client
+message = {'role': 'user', 'content': 'Why is the sky blue?'}
+response = Client().chat(model='llama2', messages=[message])
+```
+
+Response streaming can be enabled by setting `stream=True`. This modifies the function to return a Python generator where each part is an object in the stream.
+
+```python
+from ollama import Client
+message = {'role': 'user', 'content': 'Why is the sky blue?'}
+for part in Client().chat(model='llama2', messages=[message], stream=True):
+  print(part['message']['content'], end='', flush=True)
+```
+
+### Using the Asynchronous Client
+
+```python
+import asyncio
+from ollama import AsyncClient
+
+async def chat():
+  message = {'role': 'user', 'content': 'Why is the sky blue?'}
+  response = await AsyncClient().chat(model='llama2', messages=[message])
+
+asyncio.run(chat())
+```
+
+Similar to the synchronous client, setting `stream=True` modifies the function to return a Python asynchronous generator.
+
+```python
+import asyncio
+from ollama import AsyncClient
+
+async def chat():
+  message = {'role': 'user', 'content': 'Why is the sky blue?'}
+  async for part in await AsyncClient().chat(model='llama2', messages=[message], stream=True):
+    print(part['message']['content'], end='', flush=True)
+
+asyncio.run(chat())
+```
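Each `chat` call in the README above is stateless: the model only sees the `messages` list passed in, so a conversation is carried by resending the history. A minimal multi-turn sketch, assuming (as in the streaming examples above) that the reply exposes its text under `response['message']['content']`:

```python
import ollama

messages = [{'role': 'user', 'content': 'Why is the sky blue?'}]
response = ollama.chat(model='llama2', messages=messages)

# The reply has the same {'role': ..., 'content': ...} shape as the request
# messages, so it can be appended to the history for the next turn.
messages.append(response['message'])
messages.append({'role': 'user', 'content': 'Explain that to a five year old.'})

followup = ollama.chat(model='llama2', messages=messages)
print(followup['message']['content'])
```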
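The streaming examples print each part as it arrives; if the complete text is also needed afterwards, the fragments can simply be concatenated. A small sketch using the same `stream=True` call:

```python
import ollama

message = {'role': 'user', 'content': 'Why is the sky blue?'}

reply = ''
for part in ollama.chat(model='llama2', messages=[message], stream=True):
  # Each streamed part carries only an incremental fragment of the reply.
  reply += part['message']['content']

print(reply)
```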
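The main reason to reach for `AsyncClient` is keeping several requests in flight from one event loop. A sketch using `asyncio.gather` and the `chat` call shown above; sharing one client instance across tasks is assumed to be safe here:

```python
import asyncio
from ollama import AsyncClient

async def ask(client, prompt):
  message = {'role': 'user', 'content': prompt}
  response = await client.chat(model='llama2', messages=[message])
  return response['message']['content']

async def main():
  client = AsyncClient()
  # Both requests are issued concurrently; gather waits for both replies.
  answers = await asyncio.gather(
    ask(client, 'Why is the sky blue?'),
    ask(client, 'Why is the grass green?'),
  )
  for answer in answers:
    print(answer)

asyncio.run(main())
```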
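`Client()` and `AsyncClient()` above connect to a local Ollama server. If the server runs elsewhere, the client needs to be pointed at it; the `host` argument and the address below are assumptions for illustration, so verify them against the `Client` constructor in the installed version:

```python
from ollama import Client

# Assumed: a host argument naming an Ollama server other than the default
# http://localhost:11434. The address here is a hypothetical placeholder.
client = Client(host='http://my-ollama-server:11434')

message = {'role': 'user', 'content': 'Why is the sky blue?'}
response = client.chat(model='llama2', messages=[message])
print(response['message']['content'])
```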