Ollama is easy to integrate with a wide range of programming languages and frameworks:
Python Integration:
Using the ollama Python library:
```python
import ollama

# Generate text
response = ollama.generate(model='llama3.1', prompt='Hello, how are you?')
print(response['response'])

# Chat
messages = [
    {'role': 'user', 'content': 'Hello!'},
    {'role': 'assistant', 'content': 'Hi there!'},
    {'role': 'user', 'content': 'How are you?'}
]
response = ollama.chat(model='llama3.1', messages=messages)
print(response['message']['content'])

# Streaming response
for chunk in ollama.generate(model='llama3.1', prompt='Tell me a story', stream=True):
    print(chunk['response'], end='', flush=True)
```
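If Ollama runs on a non-default host, or you want to tune sampling parameters, the library also provides a `Client` class. A minimal sketch; the `options` keys shown are standard Ollama model parameters:

```python
from ollama import Client

# Point the client at a remote or non-default Ollama server
client = Client(host='http://localhost:11434')

# Model options such as temperature are passed via the `options` dict
response = client.generate(
    model='llama3.1',
    prompt='Hello, how are you?',
    options={'temperature': 0.2},
)
print(response['response'])
```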
JavaScript/Node.js Integration:
Using the ollama-js library (published on npm as `ollama`):
```javascript
import { Ollama } from 'ollama'

const client = new Ollama()

// Generate text
const response = await client.generate({
  model: 'llama3.1',
  prompt: 'Hello, how are you?'
})
console.log(response.response)

// Chat
const chat = await client.chat({
  model: 'llama3.1',
  messages: [
    { role: 'user', content: 'Hello!' },
    { role: 'assistant', content: 'Hi there!' },
    { role: 'user', content: 'How are you?' }
  ]
})
console.log(chat.message.content)
```
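Streaming works the same way as in Python: pass `stream: true` and the call returns an async iterable of chunks instead of a single response.

```javascript
// Streaming: print each chunk as it arrives
const stream = await client.generate({
  model: 'llama3.1',
  prompt: 'Tell me a story',
  stream: true
})
for await (const chunk of stream) {
  process.stdout.write(chunk.response)
}
```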
Go Integration:
```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

type GenerateRequest struct {
	Model  string `json:"model"`
	Prompt string `json:"prompt"`
	Stream bool   `json:"stream"`
}

type GenerateResponse struct {
	Response string `json:"response"`
}

func main() {
	// Disable streaming so the API returns a single JSON object
	req := GenerateRequest{
		Model:  "llama3.1",
		Prompt: "Hello, how are you?",
		Stream: false,
	}

	body, err := json.Marshal(req)
	if err != nil {
		log.Fatal(err)
	}

	resp, err := http.Post("http://localhost:11434/api/generate",
		"application/json", bytes.NewBuffer(body))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var result GenerateResponse
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		log.Fatal(err)
	}
	fmt.Println(result.Response)
}
```
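Note that `/api/generate` streams by default: the response body is newline-delimited JSON, one chunk per line, with a `done` flag on the final chunk. A sketch of consuming the stream with only the standard library:

```go
package main

import (
	"bufio"
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Leave streaming enabled (the default)
	body, _ := json.Marshal(map[string]any{
		"model":  "llama3.1",
		"prompt": "Tell me a story",
	})

	resp, err := http.Post("http://localhost:11434/api/generate",
		"application/json", bytes.NewBuffer(body))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Each line of the body is one JSON chunk containing a partial
	// "response" fragment; the final chunk sets "done" to true.
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		var chunk struct {
			Response string `json:"response"`
			Done     bool   `json:"done"`
		}
		if err := json.Unmarshal(scanner.Bytes(), &chunk); err != nil {
			log.Fatal(err)
		}
		fmt.Print(chunk.Response)
		if chunk.Done {
			fmt.Println()
			break
		}
	}
}
```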
LangChain Integration:
```python
from langchain_community.llms import Ollama

llm = Ollama(model="llama3.1")

# Simple call
response = llm.invoke("Tell me a joke")
print(response)

# Chain call
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser

prompt = ChatPromptTemplate.from_template("Tell me a {adjective} joke about {topic}")
chain = prompt | llm | StrOutputParser()
print(chain.invoke({"adjective": "funny", "topic": "programming"}))
```
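For chat-style interactions, a `ChatOllama` model class is also available. A minimal sketch using the `langchain_community` import path (newer releases move this class to the separate `langchain-ollama` package):

```python
from langchain_community.chat_models import ChatOllama
from langchain_core.messages import HumanMessage

chat = ChatOllama(model="llama3.1")

# Chat models take a list of messages and return an AIMessage
reply = chat.invoke([HumanMessage(content="Tell me a joke about programming")])
print(reply.content)
```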
REST API Integration:
Any language with an HTTP client can call the REST API directly:
```bash
curl http://localhost:11434/api/generate -d '{
  "model": "llama3.1",
  "prompt": "Hello, how are you?",
  "stream": false
}'
```
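The chat endpoint follows the same pattern, taking a list of messages instead of a single prompt:

```bash
curl http://localhost:11434/api/chat -d '{
  "model": "llama3.1",
  "messages": [
    {"role": "user", "content": "Hello, how are you?"}
  ],
  "stream": false
}'
```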