Files
lab_local_models/goodmorning.py
2026-04-08 10:13:12 -04:00

43 lines
1.3 KiB
Python

import ollama
# Ollama model tag to use for chat completions (small local llama3.2 variant).
MODEL = "llama3.2:1b"
# System prompt establishing the assistant's persona for every conversation.
SYSTEM_PROMPT = """
You are a cheerful morning assistant helping a high school student get ready for school.
Keep responses short and encouraging. Start by suggesting a list of a few things to do before school.
"""
def send_message(messages):
    """Stream the model's reply to stdout and return the full reply text.

    Args:
        messages: Conversation history as a list of role/content dicts,
            passed straight through to ``ollama.chat``.

    Returns:
        The assistant's complete reply, assembled from the streamed chunks.
    """
    print("\nAssistant: ", end="", flush=True)
    pieces = []
    stream = ollama.chat(model=MODEL, messages=messages, stream=True)
    for part in stream:
        text = part.message.content
        # Echo each chunk as it arrives so the reply appears incrementally.
        print(text, end="", flush=True)
        pieces.append(text)
    print()
    return "".join(pieces)
def run():
    """Run an interactive chat session, keeping the full message history.

    Seeds the conversation with the system prompt and an opening user
    greeting, then alternates user input and model replies until the
    user types 'quit'.
    """
    history = [
        {"role": "system", "content": SYSTEM_PROMPT},
        {"role": "user", "content": "Good morning!"},
    ]
    print("(Type 'quit' to exit.)")
    # Get the opening reply and record it so later turns have context.
    history.append({"role": "assistant", "content": send_message(history)})
    while True:
        line = input("\nYou: ")
        if line.strip().lower() == "quit":
            break
        history.append({"role": "user", "content": line})
        history.append({"role": "assistant", "content": send_message(history)})
# Entry point: start the chat loop only when executed as a script.
if __name__ == "__main__":
run()