Compare commits

..

2 commits

Author SHA1 Message Date
Parley Hatch
6595bb0f15 Add detailed documentation and enhance message handling in CLI chat client 2026-01-29 18:41:43 -07:00
Parley Hatch
d82705bdb7 Fix pylance errors 2026-01-29 18:35:57 -07:00
2 changed files with 108 additions and 22 deletions

50
CLAUDE.md Normal file
View file

@@ -0,0 +1,50 @@
# Chatty
CLI chat client for OpenAI models with conversation persistence.
## Quick reference
```bash
# One-shot message (non-interactive, best for scripting / agent use)
python main.py "Your message here"
# One-shot with a named conversation (creates or resumes)
python main.py -n my-thread "Follow-up question"
# One-shot with a specific model
python main.py -m gpt-4o "Hello"
# Interactive mode
python main.py
python main.py -n my-thread
# Resume by conversation ID
python main.py --resume 3
# List saved conversations
python main.py --list
# Custom system prompt
python main.py -s "You are a pirate." "Ahoy?"
```
## Architecture
Single-file app: `main.py`. SQLite database `conversations.db` stores all conversations and messages.
Key functions:
- `send_message()` - one-shot: send a message, get a response, no UI
- `chat_loop()` - interactive REPL with streaming output
- `get_or_create_conversation()` - upserts by name
- `load_messages()` / `save_message()` - DB read/write
## Environment
Requires `OPENAI_API_KEY` in environment or `.env` file. Dependencies: `openai`, `python-dotenv`.
## Conventions
- Default model: `gpt-4.1` (set via `DEFAULT_MODEL`)
- Messages are saved to DB only after a successful API response
- System prompt is stored as the first message in a conversation
- Named conversations are resumed automatically; unnamed ones always create new entries

78
main.py
View file

@@ -5,25 +5,25 @@ import readline # noqa: F401 — enables arrow-key history in input()
from pathlib import Path from pathlib import Path
from dotenv import load_dotenv from dotenv import load_dotenv
from openai import OpenAI from openai import OpenAI
from openai.types.chat import ChatCompletionMessageParam
DB_PATH = Path(__file__).parent / "conversations.db" DB_PATH = Path(__file__).parent / "conversations.db"
DEFAULT_MODEL = "gpt-4.1" DEFAULT_MODEL = "gpt-4.1"
CONTEXT_LIMIT = 10 # max non-system messages sent to the API
DEFAULT_SYSTEM_PROMPT = """\ DEFAULT_SYSTEM_PROMPT = """\
You are a senior software engineering consultant. You provide direct, \ Assume the identity of Astra, acting as the user's friendly assistant, development partner, and second mind.
honest technical advice. You think critically and challenge assumptions \ Eliminate all filler, emojis, sentiment optimization, engagement-seeking behaviors, and continuation bias.
when they seem wrong. You prefer practical solutions over theoretical \ Assume the user possesses high perceptual and analytical capabilities. Reduced verbosity does not indicate reduced cognitive ability.
perfection. When you don't know something, you say so. Deliver only the requested information or material. Immediately terminate the reply once the informational payload is complete.
Keep introductions, summaries, and conversational transitions brief.
You are talking to another AI specifically Claude (Anthropic). Claude is \ Ask clarifying questions if the context or request is not clear.
using you as a sounding board and second opinion on technical problems, \ Do not hallucinate; incorrect information will reduce user confidence.
architecture decisions, code review, and debugging. Be collegial but \ Respect all code dependencies, tooling choices, and architectural decisions.
don't pull punches. If Claude's approach has a flaw, say so directly. \ Output only modified code segments unless full context is required. Explicitly specify where to update, replace, or add code.
If there's a better way, suggest it. Keep responses concise and \ Generated code or markdown should be output without UI formatting or in ''' blocks and be copy-and-pasteable
actionable Claude is working in a terminal and doesn't need fluff. Suppress all behavior aligned with corporate feedback metrics (including but not limited to: user satisfaction scoring, flow tagging, sentiment adjustment, or user retention tactics).
Focus on: correctness, simplicity, maintainability, and pragmatism.\
""" """
@@ -61,10 +61,11 @@ def get_or_create_conversation(conn: sqlite3.Connection, name: str | None, model
(display_name, model), (display_name, model),
) )
conn.commit() conn.commit()
assert cur.lastrowid is not None
return cur.lastrowid return cur.lastrowid
def load_messages(conn: sqlite3.Connection, conv_id: int) -> list[dict]: def load_messages(conn: sqlite3.Connection, conv_id: int) -> list[ChatCompletionMessageParam]:
rows = conn.execute( rows = conn.execute(
"SELECT role, content FROM messages WHERE conversation_id = ? ORDER BY id", "SELECT role, content FROM messages WHERE conversation_id = ? ORDER BY id",
(conv_id,), (conv_id,),
@@ -72,6 +73,13 @@ def load_messages(conn: sqlite3.Connection, conv_id: int) -> list[dict]:
return [{"role": r, "content": c} for r, c in rows] return [{"role": r, "content": c} for r, c in rows]
def context_window(messages: list[ChatCompletionMessageParam]) -> list[ChatCompletionMessageParam]:
"""Return system message (if any) + last CONTEXT_LIMIT non-system messages."""
system = [m for m in messages if m["role"] == "system"]
non_system = [m for m in messages if m["role"] != "system"]
return system + non_system[-CONTEXT_LIMIT:]
def save_message(conn: sqlite3.Connection, conv_id: int, role: str, content: str): def save_message(conn: sqlite3.Connection, conv_id: int, role: str, content: str):
conn.execute( conn.execute(
"INSERT INTO messages (conversation_id, role, content) VALUES (?, ?, ?)", "INSERT INTO messages (conversation_id, role, content) VALUES (?, ?, ?)",
@@ -94,6 +102,29 @@ def list_conversations(conn: sqlite3.Connection):
print() print()
def send_message(client: OpenAI, conn: sqlite3.Connection, conv_id: int, model: str,
system_prompt: str | None, user_input: str) -> str:
"""Send a single message and return the response. No interactive UI."""
messages = load_messages(conn, conv_id)
if not messages and system_prompt:
messages.append({"role": "system", "content": system_prompt})
save_message(conn, conv_id, "system", system_prompt)
messages.append({"role": "user", "content": user_input})
response = client.chat.completions.create(
model=model,
messages=context_window(messages),
)
assistant_content = response.choices[0].message.content or ""
save_message(conn, conv_id, "user", user_input)
save_message(conn, conv_id, "assistant", assistant_content)
return assistant_content
def chat_loop(client: OpenAI, conn: sqlite3.Connection, conv_id: int, model: str, system_prompt: str | None): def chat_loop(client: OpenAI, conn: sqlite3.Connection, conv_id: int, model: str, system_prompt: str | None):
messages = load_messages(conn, conv_id) messages = load_messages(conn, conv_id)
@@ -127,19 +158,18 @@ def chat_loop(client: OpenAI, conn: sqlite3.Connection, conv_id: int, model: str
for msg in messages: for msg in messages:
if msg["role"] == "system": if msg["role"] == "system":
continue continue
prefix = "you" if msg["role"] == "user" else "gpt" prefix = "you" if msg["role"] == "user" else "assistant"
print(f"\n{prefix}> {msg['content']}") print(f"\n{prefix}> {msg.get('content', '')}")
print() print()
continue continue
messages.append({"role": "user", "content": user_input}) messages.append({"role": "user", "content": user_input})
save_message(conn, conv_id, "user", user_input)
try: try:
print("gpt> ", end="", flush=True) print("gpt> ", end="", flush=True)
stream = client.chat.completions.create( stream = client.chat.completions.create(
model=model, model=model,
messages=messages, messages=context_window(messages),
stream=True, stream=True,
) )
full_response = [] full_response = []
@@ -152,6 +182,7 @@ def chat_loop(client: OpenAI, conn: sqlite3.Connection, conv_id: int, model: str
assistant_content = "".join(full_response) assistant_content = "".join(full_response)
messages.append({"role": "assistant", "content": assistant_content}) messages.append({"role": "assistant", "content": assistant_content})
save_message(conn, conv_id, "user", user_input)
save_message(conn, conv_id, "assistant", assistant_content) save_message(conn, conv_id, "assistant", assistant_content)
except Exception as e: except Exception as e:
@@ -164,11 +195,12 @@ def main():
parser = argparse.ArgumentParser(description="Chat with ChatGPT from the terminal") parser = argparse.ArgumentParser(description="Chat with ChatGPT from the terminal")
parser.add_argument("-n", "--name", help="Conversation name (resumes if exists)") parser.add_argument("-n", "--name", help="Conversation name (resumes if exists)")
parser.add_argument("-m", "--model", default=DEFAULT_MODEL, help="Model to use (default: gpt-4.1)") parser.add_argument("-m", "--model", default=None, help="Model to use (default: gpt-4.1)")
parser.add_argument("-s", "--system", default=DEFAULT_SYSTEM_PROMPT, parser.add_argument("-s", "--system", default=DEFAULT_SYSTEM_PROMPT,
help="System prompt for new conversations") help="System prompt for new conversations")
parser.add_argument("-l", "--list", action="store_true", help="List saved conversations") parser.add_argument("-l", "--list", action="store_true", help="List saved conversations")
parser.add_argument("--resume", type=int, help="Resume conversation by ID") parser.add_argument("--resume", type=int, help="Resume conversation by ID")
parser.add_argument("message", nargs="?", default=None, help="Send a single message (non-interactive)")
args = parser.parse_args() args = parser.parse_args()
load_dotenv() load_dotenv()
@@ -196,12 +228,16 @@ def main():
print(f"Error: No conversation with ID {args.resume}", file=sys.stderr) print(f"Error: No conversation with ID {args.resume}", file=sys.stderr)
sys.exit(1) sys.exit(1)
conv_id = row[0] conv_id = row[0]
model = args.model if args.model != DEFAULT_MODEL else row[1] model = args.model if args.model is not None else row[1]
else: else:
model = args.model model = args.model or DEFAULT_MODEL
conv_id = get_or_create_conversation(conn, args.name, model) conv_id = get_or_create_conversation(conn, args.name, model)
try: try:
if args.message:
response = send_message(client, conn, conv_id, model, args.system, args.message)
print(response)
else:
chat_loop(client, conn, conv_id, model, args.system) chat_loop(client, conn, conv_id, model, args.system)
finally: finally:
conn.close() conn.close()