Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
30 changes: 28 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -41,8 +41,8 @@ Some ideas for commands include:
* Calculator
* Sending messages
* Setting alarms
* Programming with Codex
* Question answering
* Programming with AI assistants (GPT-4, Claude 3.7 Sonnet)
* Question answering with AI
* Hearing the Time

## Hardware Parts
Expand All @@ -67,6 +67,32 @@ See the [hardware guide](hardware.md) to know exactly what to buy.

See the [installation instructions](installation.md) to get started.

## LLM Configuration

Go Note Go supports multiple LLM providers for AI assistant features:

* **OpenAI**: Configure with your OpenAI API key to use GPT-4
* **Anthropic**: Configure with your Anthropic API key to use Claude 3.7 Sonnet

### Configuration

In `secure_settings.py`, set the following:

```python
OPENAI_API_KEY = 'your-openai-api-key' # Required for OpenAI models
ANTHROPIC_API_KEY = 'your-anthropic-api-key' # Required for Claude models
LLM_PROVIDER = 'openai' # Options: 'openai', 'anthropic'
```

### Available Commands

* `:ai` - Use the default AI model based on your configured provider
* `:gpt4` - Specifically use GPT-4 (requires OpenAI API key)
* `:sonnet37` - Specifically use Claude 3.7 Sonnet (requires Anthropic API key)
* `:set_llm openai` - Switch to using OpenAI models
* `:set_llm anthropic` - Switch to using Anthropic models
* `:llm_status` - Check current LLM configuration

## History

[Learn about Go Note Go's predecessor "Shh Shell" here.](https://davidbieber.com/projects/shh-shell/)
Expand Down
99 changes: 65 additions & 34 deletions gonotego/command_center/assistant_commands.py
Original file line number Diff line number Diff line change
@@ -1,49 +1,19 @@
"""Assistant commands. Commands for using the AI assistant."""

import openai

from gonotego.command_center import note_commands
from gonotego.command_center import registry
from gonotego.command_center import system_commands
from gonotego.command_center import llm_provider
from gonotego.common import events
from gonotego.common import interprocess
from gonotego.settings import settings

register_command = registry.register_command


def create_completion(
    prompt,
    *,
    model='gpt-3.5-turbo-instruct',
    temperature=0.7,
    max_tokens=256,
    top_p=1,
    frequency_penalty=0,
    presence_penalty=0,
    **kwargs
):
  """Request a text completion from OpenAI and return the raw response."""
  base_params = {
      'model': model,
      'prompt': prompt,
      'temperature': temperature,
      'max_tokens': max_tokens,
      'top_p': top_p,
      'frequency_penalty': frequency_penalty,
      'presence_penalty': presence_penalty,
  }
  client = openai.OpenAI(api_key=settings.get('OPENAI_API_KEY'))
  # Duplicate keys between base_params and kwargs raise TypeError,
  # matching the original call's keyword-collision behavior.
  return client.completions.create(**base_params, **kwargs)


def chat_completion(messages, model='gpt-3.5-turbo'):
  """Send a chat transcript to OpenAI and return the raw response."""
  api_key = settings.get('OPENAI_API_KEY')
  return openai.OpenAI(api_key=api_key).chat.completions.create(
      model=model, messages=messages)
# Use the LLM provider module for completions and chat completions.
# These aliases keep the old module-level names importable so existing
# callers of assistant_commands.create_completion/chat_completion still work.
create_completion = llm_provider.create_completion
chat_completion = llm_provider.chat_completion


@register_command('ask {}')
Expand Down Expand Up @@ -83,12 +53,40 @@ def chat_with_context3(prompt=None):

@register_command('ai')
@register_command('ai {}')
def chat_with_context_default(prompt=None):
  """Chat using the default model for the configured LLM provider."""
  # OpenAI defaults to GPT-4; every other provider value falls back to
  # Claude 3 Sonnet (same mapping as the original if/else).
  default_models = {'openai': 'gpt-4'}
  provider = llm_provider.get_provider()
  model = default_models.get(provider, 'claude-3-sonnet-20240229')
  return chat_with_context(prompt=prompt, model=model)


@register_command('ai4')
@register_command('ai4 {}')
def chat_with_context4(prompt=None):
  """Chat using GPT-4 regardless of the configured provider."""
  return chat_with_context(model='gpt-4', prompt=prompt)


@register_command('sonnet37')
@register_command('sonnet37 {}')
def chat_with_context_sonnet(prompt=None):
  """Chat using Claude 3.7 Sonnet (requires an Anthropic API key)."""
  if not llm_provider.has_anthropic_key():
    system_commands.say("No Anthropic API key available")
    return None
  # The command is named sonnet37 and documented in the README as
  # "Claude 3.7 Sonnet", so pass the Claude 3.7 Sonnet model id rather
  # than the Claude 3 Sonnet id the original hard-coded.
  return chat_with_context(prompt=prompt, model='claude-3-7-sonnet-20250219')


@register_command('gpt4')
@register_command('gpt4 {}')
def chat_with_context_gpt4(prompt=None):
  """Chat using GPT-4 (requires an OpenAI API key)."""
  if llm_provider.has_openai_key():
    return chat_with_context(prompt=prompt, model='gpt-4')
  system_commands.say("No OpenAI API key available")
  return None


def chat_with_context(prompt=None, model='gpt-3.5-turbo'):
messages = get_messages(prompt=prompt)
messages.insert(0, {"role": "system", "content": "You are a helpful assistant."})
Expand All @@ -102,6 +100,39 @@ def chat_with_context(prompt=None, model='gpt-3.5-turbo'):
return response_text


@register_command('set_llm openai')
def set_llm_openai():
  """Switch the active LLM provider to OpenAI."""
  settings.set('LLM_PROVIDER', 'openai')
  system_commands.say("LLM provider set to OpenAI")
  # Warn (but still switch) when no usable key is configured.
  if llm_provider.has_openai_key():
    return
  system_commands.say("Warning: No OpenAI API key configured")


@register_command('set_llm anthropic')
def set_llm_anthropic():
  """Switch the active LLM provider to Anthropic."""
  settings.set('LLM_PROVIDER', 'anthropic')
  system_commands.say("LLM provider set to Anthropic")
  # Warn (but still switch) when no usable key is configured.
  if llm_provider.has_anthropic_key():
    return
  system_commands.say("Warning: No Anthropic API key configured")


@register_command('llm_status')
def llm_status():
  """Speak and note the current LLM provider and API key status."""
  provider = llm_provider.get_provider()
  openai_status = 'configured' if llm_provider.has_openai_key() else 'not configured'
  anthropic_status = 'configured' if llm_provider.has_anthropic_key() else 'not configured'

  status_msg = f"Current LLM provider: {provider}\n"
  status_msg += f"OpenAI API key: {openai_status}\n"
  status_msg += f"Anthropic API key: {anthropic_status}"

  # Use say(), the speech helper defined in system_commands and used by
  # every other command here; the original called system_commands.speak,
  # which is not defined in the visible system_commands module.
  system_commands.say(status_msg)
  note_commands.add_note(status_msg)


def get_messages(prompt=None):
note_events_session_queue = interprocess.get_note_events_session_queue()
note_event_bytes_list = note_events_session_queue.peek_all()
Expand Down
164 changes: 164 additions & 0 deletions gonotego/command_center/llm_provider.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,164 @@
"""LLM provider module for different AI model providers.

This module handles creating completions and chat completions using different LLM
providers like OpenAI and Anthropic.
"""

import openai
import anthropic
from typing import List, Dict, Any, Optional

from gonotego.settings import settings
from gonotego.command_center import system_commands

def get_provider() -> str:
    """Return the configured LLM provider name, defaulting to 'openai'.

    Installs whose secure_settings predate the LLM_PROVIDER setting would
    otherwise yield a falsy value here, making every provider dispatch in
    this module raise ValueError; 'openai' preserves the pre-Anthropic
    behavior for those installs.
    """
    # NOTE(review): assumes settings.get returns a falsy value (not raise)
    # for unset keys — confirm against gonotego.settings.
    return settings.get('LLM_PROVIDER') or 'openai'

def has_openai_key() -> bool:
    """Return True if a real (non-placeholder) OpenAI API key is configured."""
    api_key = settings.get('OPENAI_API_KEY')
    # bool() so the function honors its -> bool annotation instead of
    # leaking the key string (or None) to callers.
    return bool(api_key and api_key != '<OPENAI_API_KEY>')

def has_anthropic_key() -> bool:
    """Return True if a real (non-placeholder) Anthropic API key is configured."""
    api_key = settings.get('ANTHROPIC_API_KEY')
    # bool() so the function honors its -> bool annotation instead of
    # leaking the key string (or None) to callers.
    return bool(api_key and api_key != '<ANTHROPIC_API_KEY>')

def create_completion(
    prompt: str,
    *,
    model: str = 'gpt-3.5-turbo-instruct',
    temperature: float = 0.7,
    max_tokens: int = 256,
    top_p: float = 1,
    frequency_penalty: float = 0,
    presence_penalty: float = 0,
    **kwargs
) -> Any:
    """Create a text completion using the configured LLM provider.

    Returns the provider's response; for Anthropic the response is wrapped
    so that `response.choices[0].text` works the same as for OpenAI.
    Speaks a warning and returns None when the provider's API key is
    missing.

    Raises:
      ValueError: if LLM_PROVIDER names an unknown provider.
    """
    from types import SimpleNamespace

    provider = get_provider()

    if provider == 'openai':
        if not has_openai_key():
            system_commands.say("No OpenAI API key available")
            return None

        client = openai.OpenAI(api_key=settings.get('OPENAI_API_KEY'))
        return client.completions.create(
            model=model,
            prompt=prompt,
            temperature=temperature,
            max_tokens=max_tokens,
            top_p=top_p,
            frequency_penalty=frequency_penalty,
            presence_penalty=presence_penalty,
            **kwargs
        )

    elif provider == 'anthropic':
        if not has_anthropic_key():
            system_commands.say("No Anthropic API key available")
            return None

        # Anthropic has no completions endpoint, so send the prompt through
        # Claude's messages API as a single user turn. `model`, `top_p`, and
        # the penalty arguments are OpenAI-specific and intentionally unused
        # on this path.
        client = anthropic.Anthropic(api_key=settings.get('ANTHROPIC_API_KEY'))
        response = client.messages.create(
            model="claude-3-sonnet-20240229",
            max_tokens=max_tokens,
            temperature=temperature,
            system="You are a helpful assistant.",
            messages=[
                {"role": "user", "content": prompt}
            ]
        )

        # Mimic OpenAI's response shape (`response.choices[0].text`) with
        # stdlib SimpleNamespace instead of defining throwaway classes on
        # every call.
        return SimpleNamespace(
            choices=[SimpleNamespace(text=response.content[0].text)])

    else:
        raise ValueError(f"Unknown LLM provider: {provider}")

def chat_completion(
    messages: List[Dict[str, str]],
    model: Optional[str] = None
) -> Any:
    """Create a chat completion using the configured LLM provider.

    Args:
      messages: OpenAI-style message dicts with 'role' and 'content' keys.
      model: Optional model name. Defaults to gpt-3.5-turbo for OpenAI and
        Claude 3 Sonnet for Anthropic.

    Returns the provider's response; for Anthropic the response is wrapped
    so that `response.choices[0].message.content` works the same as for
    OpenAI. Speaks a warning and returns None when the provider's API key
    is missing.

    Raises:
      ValueError: if LLM_PROVIDER names an unknown provider.
    """
    from types import SimpleNamespace

    provider = get_provider()

    if provider == 'openai':
        if not has_openai_key():
            system_commands.say("No OpenAI API key available")
            return None

        if model is None:
            model = 'gpt-3.5-turbo'

        client = openai.OpenAI(api_key=settings.get('OPENAI_API_KEY'))
        return client.chat.completions.create(
            model=model,
            messages=messages
        )

    elif provider == 'anthropic':
        if not has_anthropic_key():
            system_commands.say("No Anthropic API key available")
            return None

        # Honor an explicitly requested Claude model; anything else (None or
        # an OpenAI model name) falls back to Claude 3 Sonnet. The original
        # if/else assigned the same model on both branches, silently
        # discarding any requested Claude model.
        if model and model.startswith('claude'):
            claude_model = model
        else:
            claude_model = "claude-3-sonnet-20240229"

        # Anthropic takes the system prompt as a separate argument, so split
        # it out of the OpenAI-style message list (the last system message
        # wins, matching the original loop).
        system_message = "You are a helpful assistant."
        user_assistant_messages = []

        for message in messages:
            if message['role'] == 'system':
                system_message = message['content']
            else:
                user_assistant_messages.append(message)

        client = anthropic.Anthropic(api_key=settings.get('ANTHROPIC_API_KEY'))
        response = client.messages.create(
            model=claude_model,
            max_tokens=1024,
            system=system_message,
            messages=user_assistant_messages
        )

        # Mimic OpenAI's response shape
        # (`response.choices[0].message.content`) with stdlib
        # SimpleNamespace instead of defining throwaway classes per call.
        return SimpleNamespace(choices=[
            SimpleNamespace(message=SimpleNamespace(
                content=response.content[0].text))])

    else:
        raise ValueError(f"Unknown LLM provider: {provider}")
6 changes: 6 additions & 0 deletions gonotego/command_center/system_commands.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,6 +80,12 @@ def say(text):
@register_command('say_openai {}')
def say_with_openai(text):
import openai
from gonotego.command_center import llm_provider

if not llm_provider.has_openai_key():
say("No OpenAI API key available")
return

client = openai.OpenAI(api_key=settings.get('OPENAI_API_KEY'))
response = client.audio.speech.create(
model="tts-1",
Expand Down
2 changes: 2 additions & 0 deletions gonotego/settings/secure_settings_template.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,8 @@
DROPBOX_ACCESS_TOKEN = '<DROPBOX_ACCESS_TOKEN>'

OPENAI_API_KEY = '<OPENAI_API_KEY>'
ANTHROPIC_API_KEY = '<ANTHROPIC_API_KEY>'
LLM_PROVIDER = 'openai' # Options: 'openai', 'anthropic'

WIFI_NETWORKS = []
CUSTOM_COMMAND_PATHS = []
5 changes: 5 additions & 0 deletions gonotego/transcription/transcriber.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,11 +3,16 @@
import fire
import openai
from gonotego.settings import settings
from gonotego.command_center import llm_provider


class Transcriber:

def transcribe(self, filepath):
if not llm_provider.has_openai_key():
print("No OpenAI API key available for transcription")
return ""

client = openai.OpenAI(api_key=settings.get('OPENAI_API_KEY'))
with io.open(filepath, 'rb') as audio_file:
response = client.audio.transcriptions.create(
Expand Down
1 change: 1 addition & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ classifiers = [

dependencies = [
'absl-py<=2.1.0',
'anthropic<=0.21.2', # For Claude API
'apscheduler<=3.10.4',
'dropbox<=12.0.2',
'fire<=0.7.0',
Expand Down