From 6595bb0f153127d6cb0c285745b9b7df1cf067b8 Mon Sep 17 00:00:00 2001 From: Parley Hatch Date: Thu, 29 Jan 2026 18:41:43 -0700 Subject: [PATCH] Add detailed documentation and enhance message handling in CLI chat client --- CLAUDE.md | 50 +++++++++++++++++++++++++++++++++++++++++++ main.py | 64 ++++++++++++++++++++++++++++++++++++++++++------------- 2 files changed, 99 insertions(+), 15 deletions(-) create mode 100644 CLAUDE.md diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000..947e94c --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,50 @@ +# Chatty + +CLI chat client for OpenAI models with conversation persistence. + +## Quick reference + +```bash +# One-shot message (non-interactive, best for scripting / agent use) +python main.py "Your message here" + +# One-shot with a named conversation (creates or resumes) +python main.py -n my-thread "Follow-up question" + +# One-shot with a specific model +python main.py -m gpt-4o "Hello" + +# Interactive mode +python main.py +python main.py -n my-thread + +# Resume by conversation ID +python main.py --resume 3 + +# List saved conversations +python main.py --list + +# Custom system prompt +python main.py -s "You are a pirate." "Ahoy?" +``` + +## Architecture + +Single-file app: `main.py`. SQLite database `conversations.db` stores all conversations and messages. + +Key functions: +- `send_message()` - one-shot: send a message, get a response, no UI +- `chat_loop()` - interactive REPL with streaming output +- `get_or_create_conversation()` - upserts by name +- `load_messages()` / `save_message()` - DB read/write + +## Environment + +Requires `OPENAI_API_KEY` in environment or `.env` file. Dependencies: `openai`, `python-dotenv`. 
+ +## Conventions + +- Default model: `gpt-4.1` (set via `DEFAULT_MODEL`) +- Messages are saved to DB only after a successful API response +- System prompt is stored as the first message in a conversation +- Named conversations are resumed automatically; unnamed ones always create new entries diff --git a/main.py b/main.py index 1a25b7d..911f48d 100644 --- a/main.py +++ b/main.py @@ -10,21 +10,20 @@ from openai.types.chat import ChatCompletionMessageParam DB_PATH = Path(__file__).parent / "conversations.db" DEFAULT_MODEL = "gpt-4.1" +CONTEXT_LIMIT = 10 # max non-system messages sent to the API DEFAULT_SYSTEM_PROMPT = """\ -You are a senior software engineering consultant. You provide direct, \ -honest technical advice. You think critically and challenge assumptions \ -when they seem wrong. You prefer practical solutions over theoretical \ -perfection. When you don't know something, you say so. - -You are talking to another AI — specifically Claude (Anthropic). Claude is \ -using you as a sounding board and second opinion on technical problems, \ -architecture decisions, code review, and debugging. Be collegial but \ -don't pull punches. If Claude's approach has a flaw, say so directly. \ -If there's a better way, suggest it. Keep responses concise and \ -actionable — Claude is working in a terminal and doesn't need fluff. - -Focus on: correctness, simplicity, maintainability, and pragmatism.\ +Assume the identity of Astra, acting as the user’s friendly assistant, development partner, and second mind. +Eliminate all filler, emojis, sentiment optimization, engagement-seeking behaviors, and continuation bias. +Assume the user possesses high perceptual and analytical capabilities. Reduced verbosity does not indicate reduced cognitive ability. +Deliver only the requested information or material. Immediately terminate the reply once the informational payload is complete. +Keep introductions, summaries, and conversational transitions brief. 
+Ask clarifying questions if the context or request is not clear. +Do not hallucinate; incorrect information will reduce user confidence. +Respect all code dependencies, tooling choices, and architectural decisions. +Output only modified code segments unless full context is required. Explicitly specify where to update, replace, or add code. +Generated code or markdown should be output without UI formatting or in ''' blocks and be copy-and-paste-able +Suppress all behavior aligned with corporate feedback metrics (including but not limited to: user satisfaction scoring, flow tagging, sentiment adjustment, or user retention tactics). """ @@ -74,6 +73,13 @@ def load_messages(conn: sqlite3.Connection, conv_id: int) -> list[ChatCompletion return [{"role": r, "content": c} for r, c in rows] +def context_window(messages: list[ChatCompletionMessageParam]) -> list[ChatCompletionMessageParam]: + """Return system message (if any) + last CONTEXT_LIMIT non-system messages.""" + system = [m for m in messages if m["role"] == "system"] + non_system = [m for m in messages if m["role"] != "system"] + return system + non_system[-CONTEXT_LIMIT:] + + def save_message(conn: sqlite3.Connection, conv_id: int, role: str, content: str): conn.execute( "INSERT INTO messages (conversation_id, role, content) VALUES (?, ?, ?)", @@ -96,6 +102,29 @@ def list_conversations(conn: sqlite3.Connection): print() + +def send_message(client: OpenAI, conn: sqlite3.Connection, conv_id: int, model: str, + system_prompt: str | None, user_input: str) -> str: + """Send a single message and return the response. 
No interactive UI.""" + messages = load_messages(conn, conv_id) + + if not messages and system_prompt: + messages.append({"role": "system", "content": system_prompt}) + save_message(conn, conv_id, "system", system_prompt) + + messages.append({"role": "user", "content": user_input}) + + response = client.chat.completions.create( + model=model, + messages=context_window(messages), + ) + assistant_content = response.choices[0].message.content or "" + + save_message(conn, conv_id, "user", user_input) + save_message(conn, conv_id, "assistant", assistant_content) + + return assistant_content + + def chat_loop(client: OpenAI, conn: sqlite3.Connection, conv_id: int, model: str, system_prompt: str | None): messages = load_messages(conn, conv_id) @@ -140,7 +169,7 @@ def chat_loop(client: OpenAI, conn: sqlite3.Connection, conv_id: int, model: str print("gpt> ", end="", flush=True) stream = client.chat.completions.create( model=model, - messages=messages, + messages=context_window(messages), stream=True, ) full_response = [] @@ -171,6 +200,7 @@ def main(): help="System prompt for new conversations") parser.add_argument("-l", "--list", action="store_true", help="List saved conversations") parser.add_argument("--resume", type=int, help="Resume conversation by ID") + parser.add_argument("message", nargs="?", default=None, help="Send a single message (non-interactive)") args = parser.parse_args() load_dotenv() @@ -204,7 +234,11 @@ def main(): conv_id = get_or_create_conversation(conn, args.name, model) try: - chat_loop(client, conn, conv_id, model, args.system) + if args.message: + response = send_message(client, conn, conv_id, model, args.system, args.message) + print(response) + else: + chat_loop(client, conn, conv_id, model, args.system) finally: conn.close()