Compare commits

..

No commits in common. "6595bb0f153127d6cb0c285745b9b7df1cf067b8" and "bbc6242b19655afe10035a4b18cdbabc315f28de" have entirely different histories.

2 changed files with 22 additions and 108 deletions

View file

@ -1,50 +0,0 @@
# Chatty
CLI chat client for OpenAI models with conversation persistence.
## Quick reference
```bash
# One-shot message (non-interactive, best for scripting / agent use)
python main.py "Your message here"
# One-shot with a named conversation (creates or resumes)
python main.py -n my-thread "Follow-up question"
# One-shot with a specific model
python main.py -m gpt-4o "Hello"
# Interactive mode
python main.py
python main.py -n my-thread
# Resume by conversation ID
python main.py --resume 3
# List saved conversations
python main.py --list
# Custom system prompt
python main.py -s "You are a pirate." "Ahoy?"
```
## Architecture
Single-file app: `main.py`. SQLite database `conversations.db` stores all conversations and messages.
Key functions:
- `send_message()` - one-shot: send a message, get a response, no UI
- `chat_loop()` - interactive REPL with streaming output
- `get_or_create_conversation()` - upserts by name
- `load_messages()` / `save_message()` - DB read/write
## Environment
Requires `OPENAI_API_KEY` in environment or `.env` file. Dependencies: `openai`, `python-dotenv`.
## Conventions
- Default model: `gpt-4.1` (set via `DEFAULT_MODEL`)
- Messages are saved to DB only after a successful API response
- System prompt is stored as the first message in a conversation
- Named conversations are resumed automatically; unnamed ones always create new entries

78
main.py
View file

@ -5,25 +5,25 @@ import readline # noqa: F401 — enables arrow-key history in input()
from pathlib import Path
from dotenv import load_dotenv
from openai import OpenAI
from openai.types.chat import ChatCompletionMessageParam
DB_PATH = Path(__file__).parent / "conversations.db"
DEFAULT_MODEL = "gpt-4.1"
CONTEXT_LIMIT = 10 # max non-system messages sent to the API
DEFAULT_SYSTEM_PROMPT = """\
Assume the identity of Astra, acting as the users friendly assistant, development partner, and second mind.
Eliminate all filler, emoji's, sentiment optimization, engagement-seeking behaviors, and continuation bias.
Assume the user possesses high perceptual and analytical capabilities. Reduced verbosity does not indicate reduced cognitive ability.
Deliver only the requested information or material. Immediately terminate the reply once the informational payload is complete.
Keep introductions, summaries, and conversational transitions brief.
Ask clarifying questions if the context or request is not clear.
Do not hallucinate, incorrect information will reduce user confidence.
Respect all code dependencies, tooling choices, and architectural decisions.
Output only modified code segments unless full context is required. Explicitly specify where to update, replace, or add code.
Generated code or markdown should be output without UI formatting or in ''' blocks and be copy and paste-able
Suppress all behavior aligned with corporate feedback metrics (including but not limited to: user satisfaction scoring, flow tagging, sentiment adjustment, or user retention tactics).
You are a senior software engineering consultant. You provide direct, \
honest technical advice. You think critically and challenge assumptions \
when they seem wrong. You prefer practical solutions over theoretical \
perfection. When you don't know something, you say so.
You are talking to another AI specifically Claude (Anthropic). Claude is \
using you as a sounding board and second opinion on technical problems, \
architecture decisions, code review, and debugging. Be collegial but \
don't pull punches. If Claude's approach has a flaw, say so directly. \
If there's a better way, suggest it. Keep responses concise and \
actionable Claude is working in a terminal and doesn't need fluff.
Focus on: correctness, simplicity, maintainability, and pragmatism.\
"""
@ -61,11 +61,10 @@ def get_or_create_conversation(conn: sqlite3.Connection, name: str | None, model
(display_name, model),
)
conn.commit()
assert cur.lastrowid is not None
return cur.lastrowid
def load_messages(conn: sqlite3.Connection, conv_id: int) -> list[ChatCompletionMessageParam]:
def load_messages(conn: sqlite3.Connection, conv_id: int) -> list[dict]:
rows = conn.execute(
"SELECT role, content FROM messages WHERE conversation_id = ? ORDER BY id",
(conv_id,),
@ -73,13 +72,6 @@ def load_messages(conn: sqlite3.Connection, conv_id: int) -> list[ChatCompletion
return [{"role": r, "content": c} for r, c in rows]
def context_window(messages: list[ChatCompletionMessageParam]) -> list[ChatCompletionMessageParam]:
    """Return system message (if any) + last CONTEXT_LIMIT non-system messages.

    Bounds the payload sent to the API: all system messages are preserved,
    followed by only the most recent CONTEXT_LIMIT non-system entries.
    """
    # Partition by role so the system prompt is never trimmed away.
    system = [m for m in messages if m["role"] == "system"]
    non_system = [m for m in messages if m["role"] != "system"]
    # Negative slice keeps only the tail; slicing an empty list is safe.
    return system + non_system[-CONTEXT_LIMIT:]
def save_message(conn: sqlite3.Connection, conv_id: int, role: str, content: str):
conn.execute(
"INSERT INTO messages (conversation_id, role, content) VALUES (?, ?, ?)",
@ -102,29 +94,6 @@ def list_conversations(conn: sqlite3.Connection):
print()
def send_message(client: OpenAI, conn: sqlite3.Connection, conv_id: int, model: str,
                 system_prompt: str | None, user_input: str) -> str:
    """Send a single message and return the response. No interactive UI.

    Loads the stored history for *conv_id*, appends the user turn, makes one
    non-streaming chat-completions call, and persists both sides of the
    exchange only after the API call succeeds.
    """
    messages = load_messages(conn, conv_id)
    # Brand-new conversation: seed and persist the system prompt first.
    if not messages and system_prompt:
        messages.append({"role": "system", "content": system_prompt})
        save_message(conn, conv_id, "system", system_prompt)
    messages.append({"role": "user", "content": user_input})
    response = client.chat.completions.create(
        model=model,
        messages=context_window(messages),  # trimmed context; system prompt kept
    )
    # content may be None on some responses; coerce to "" for callers.
    assistant_content = response.choices[0].message.content or ""
    # Save only after a successful response, so failures leave no orphan turns.
    save_message(conn, conv_id, "user", user_input)
    save_message(conn, conv_id, "assistant", assistant_content)
    return assistant_content
def chat_loop(client: OpenAI, conn: sqlite3.Connection, conv_id: int, model: str, system_prompt: str | None):
messages = load_messages(conn, conv_id)
@ -158,18 +127,19 @@ def chat_loop(client: OpenAI, conn: sqlite3.Connection, conv_id: int, model: str
for msg in messages:
if msg["role"] == "system":
continue
prefix = "you" if msg["role"] == "user" else "assistant"
print(f"\n{prefix}> {msg.get('content', '')}")
prefix = "you" if msg["role"] == "user" else "gpt"
print(f"\n{prefix}> {msg['content']}")
print()
continue
messages.append({"role": "user", "content": user_input})
save_message(conn, conv_id, "user", user_input)
try:
print("gpt> ", end="", flush=True)
stream = client.chat.completions.create(
model=model,
messages=context_window(messages),
messages=messages,
stream=True,
)
full_response = []
@ -182,7 +152,6 @@ def chat_loop(client: OpenAI, conn: sqlite3.Connection, conv_id: int, model: str
assistant_content = "".join(full_response)
messages.append({"role": "assistant", "content": assistant_content})
save_message(conn, conv_id, "user", user_input)
save_message(conn, conv_id, "assistant", assistant_content)
except Exception as e:
@ -195,12 +164,11 @@ def main():
parser = argparse.ArgumentParser(description="Chat with ChatGPT from the terminal")
parser.add_argument("-n", "--name", help="Conversation name (resumes if exists)")
parser.add_argument("-m", "--model", default=None, help="Model to use (default: gpt-4.1)")
parser.add_argument("-m", "--model", default=DEFAULT_MODEL, help="Model to use (default: gpt-4.1)")
parser.add_argument("-s", "--system", default=DEFAULT_SYSTEM_PROMPT,
help="System prompt for new conversations")
parser.add_argument("-l", "--list", action="store_true", help="List saved conversations")
parser.add_argument("--resume", type=int, help="Resume conversation by ID")
parser.add_argument("message", nargs="?", default=None, help="Send a single message (non-interactive)")
args = parser.parse_args()
load_dotenv()
@ -228,16 +196,12 @@ def main():
print(f"Error: No conversation with ID {args.resume}", file=sys.stderr)
sys.exit(1)
conv_id = row[0]
model = args.model if args.model is not None else row[1]
model = args.model if args.model != DEFAULT_MODEL else row[1]
else:
model = args.model or DEFAULT_MODEL
model = args.model
conv_id = get_or_create_conversation(conn, args.name, model)
try:
if args.message:
response = send_message(client, conn, conv_id, model, args.system, args.message)
print(response)
else:
chat_loop(client, conn, conv_id, model, args.system)
finally:
conn.close()