1 change: 1 addition & 0 deletions .gitignore
@@ -185,3 +185,4 @@ examples/hf_demo_space/.chainlit/*
examples/hf_demo_space/chainlit.md

examples/hf_demo_space/public/
.chainlit/
43 changes: 35 additions & 8 deletions codetide/agents/tide/agent.py
@@ -5,9 +5,10 @@
from ...autocomplete import AutoComplete
from .models import Steps
from .prompts import (
AGENT_TIDE_SYSTEM_PROMPT, GET_CODE_IDENTIFIERS_SYSTEM_PROMPT, STAGED_DIFFS_TEMPLATE, STEPS_SYSTEM_PROMPT, WRITE_PATCH_SYSTEM_PROMPT
AGENT_TIDE_SYSTEM_PROMPT, GET_CODE_IDENTIFIERS_SYSTEM_PROMPT, REJECT_PATCH_FEEDBACK_TEMPLATE,
STAGED_DIFFS_TEMPLATE, STEPS_SYSTEM_PROMPT, WRITE_PATCH_SYSTEM_PROMPT
)
from .utils import parse_commit_blocks, parse_patch_blocks, parse_steps_markdown, trim_to_patch_section
from .utils import delete_file, parse_commit_blocks, parse_patch_blocks, parse_steps_markdown, trim_to_patch_section
from .consts import AGENT_TIDE_ASCII_ART

try:
@@ -46,14 +47,37 @@ class AgentTide(BaseModel):
steps :Optional[Steps]=None
session_id :str=Field(default_factory=ulid)
changed_paths :List[str]=Field(default_factory=list)
request_human_confirmation :bool=False

_skip_context_retrieval :bool=False
_last_code_identifers :Optional[Set[str]]=set()
_last_code_context :Optional[str] = None
_has_patch :bool=False

@model_validator(mode="after")
def pass_custom_logger_fn(self)->Self:
self.llm.logger_fn = partial(custom_logger_fn, session_id=self.session_id, filepath=self.patch_path)
return self

def approve(self):
self._has_patch = False
if os.path.exists(self.patch_path):
changed_paths = process_patch(self.patch_path, open_file, write_file, remove_file, file_exists)
self.changed_paths.extend(changed_paths)

previous_response = self.history[-1]
diffPatches = parse_patch_blocks(previous_response, multiple=True)
if diffPatches:
for patch in diffPatches:
# TODO this deletes previous patches from history to make sure changes are always focused on the latest version of the file
previous_response = previous_response.replace(f"*** Begin Patch\n{patch}*** End Patch", "")
self.history[-1] = previous_response

def reject(self, feedback :str):
self._has_patch = False
self.history.append(REJECT_PATCH_FEEDBACK_TEMPLATE.format(
FEEDBACK=feedback
))

@property
def patch_path(self)->Path:
Expand Down Expand Up @@ -105,6 +129,7 @@ async def agent_loop(self, codeIdentifiers :Optional[List[str]]=None):
codeContext = self.tide.get(validatedCodeIdentifiers, as_string=True)
self._last_code_context = codeContext

await delete_file(self.patch_path)
response = await self.llm.acomplete(
self.history,
system_prompt=[
@@ -116,9 +141,8 @@ async def agent_loop(self, codeIdentifiers :Optional[List[str]]=None):
)

await trim_to_patch_section(self.patch_path)
if os.path.exists(self.patch_path):
changed_paths = process_patch(self.patch_path, open_file, write_file, remove_file, file_exists)
self.changed_paths.extend(changed_paths)
if not self.request_human_confirmation:
self.approve()

commitMessage = parse_commit_blocks(response, multiple=False)
if commitMessage:
@@ -130,9 +154,12 @@ async def agent_loop(self, codeIdentifiers :Optional[List[str]]=None):

diffPatches = parse_patch_blocks(response, multiple=True)
if diffPatches:
for patch in diffPatches:
# TODO this deletes previous patches from history to make sure changes are always focused on the latest version of the file
response = response.replace(f"*** Begin Patch\n{patch}*** End Patch", "")
if self.request_human_confirmation:
self._has_patch = True
else:
for patch in diffPatches:
# TODO this deletes previous patches from history to make sure changes are always focused on the latest version of the file
response = response.replace(f"*** Begin Patch\n{patch}*** End Patch", "")

self.history.append(response)

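Taken together, the agent.py changes add an optional human-in-the-loop gate: when `request_human_confirmation` is set, `agent_loop` leaves the generated patch unapplied and flags it via `_has_patch`, and the caller later invokes `approve()` to apply it or `reject(feedback)` to send the reviewer's feedback back to the model. A minimal caller sketch, assuming an already-constructed `AgentTide` instance named `agent` and a hypothetical `ask_reviewer()` coroutine (neither is part of this diff):

```python
# Sketch only: `agent` is an AgentTide built elsewhere with
# request_human_confirmation=True; `ask_reviewer` is a hypothetical hook
# returning ("approve", None) or ("reject", "<feedback text>").
async def run_one_turn(agent, ask_reviewer):
    await agent.agent_loop()              # generates a patch but does not apply it
    if agent._has_patch:                  # a patch is pending human review
        verdict, feedback = await ask_reviewer()
        if verdict == "approve":
            agent.approve()               # applies the patch and prunes the patch block from history
        else:
            agent.reject(feedback)        # appends the rejection feedback to the history
            await agent.agent_loop()      # the next turn revises the patch using that feedback
```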
10 changes: 10 additions & 0 deletions codetide/agents/tide/prompts.py
@@ -362,4 +362,14 @@
** The following diffs are currently staged and will be committed once you generate an appropriate description:**

{diffs}
"""

REJECT_PATCH_FEEDBACK_TEMPLATE = """
**PATCH REJECTED** - The patch(es) you proposed in the previous message were **not applied** to the codebase.

**Feedback to address:**

{FEEDBACK}

**Next steps:** Please revise your approach to fulfill the task requirements based on the feedback above.
"""
56 changes: 42 additions & 14 deletions codetide/agents/tide/ui/app.py
@@ -213,7 +213,7 @@ async def on_execute_steps(action :cl.Action):
author="Agent Tide"
).send()

await agent_loop(step_instructions_msg, codeIdentifiers=step.context_identifiers)
await agent_loop(step_instructions_msg, codeIdentifiers=step.context_identifiers, agent_tide_ui=agent_tide_ui)

task_list.status = f"Waiting feedback on step {current_task_idx}"
await task_list.send()
@@ -264,18 +264,20 @@ async def on_inspect_context(action :cl.Action):


@cl.on_message
async def agent_loop(message: cl.Message, codeIdentifiers: Optional[list] = None):
agent_tide_ui = await loadAgentTideUi()
async def agent_loop(message: Optional[cl.Message]=None, codeIdentifiers: Optional[list] = None, agent_tide_ui :Optional[AgentTideUi]=None):
if agent_tide_ui is None:
agent_tide_ui = await loadAgentTideUi()

chat_history = cl.user_session.get("chat_history")

if message is not None:
if message.command:
command_prompt = await agent_tide_ui.get_command_prompt(message.command)
if command_prompt:
message.content = "\n\n---\n\n".join([command_prompt, message.content])

if message.command:
command_prompt = await agent_tide_ui.get_command_prompt(message.command)
if command_prompt:
message.content = "\n\n---\n\n".join([command_prompt, message.content])

chat_history.append({"role": "user", "content": message.content})
await agent_tide_ui.add_to_history(message.content)
chat_history.append({"role": "user", "content": message.content})
await agent_tide_ui.add_to_history(message.content)

msg = cl.Message(content="", author="Agent Tide")
async with cl.Step("ApplyPatch", type="tool") as diff_step:
@@ -347,11 +349,37 @@ async def agent_loop(message: cl.Message, codeIdentifiers: Optional[list] = None
)
)

# # Send the final message
await msg.send()
# Send the final message
await msg.send()

chat_history.append({"role": "assistant", "content": msg.content})
await agent_tide_ui.add_to_history(msg.content)

await asyncio.sleep(0.5)
if agent_tide_ui.agent_tide._has_patch:
choice = await cl.AskActionMessage(
content="AgentTide is asking you to review the Patch before applying it.",
actions=[
cl.Action(name="approve_patch", payload={"lgtm": True}, label="✔️ Approve"),
cl.Action(name="reject_patch", payload={"lgtm": False}, label="❌ Reject"),
],
timeout=3600
).send()

if choice:
lgtm = choice.get("payload", {}).get("lgtm")
if lgtm:
agent_tide_ui.agent_tide.approve()
else:
response = await cl.AskUserMessage(
content="""Please provide specific feedback explaining why the patch was rejected. Include what's wrong, which parts are problematic, and what needs to change. Avoid vague responses like "doesn't work" - instead be specific like "missing error handling for FileNotFoundError" or "function should return boolean, not None." Your detailed feedback helps generate a better solution.""",
timeout=3600
).send()

feedback = response.get("output")
agent_tide_ui.agent_tide.reject(feedback)
chat_history.append({"role": "user", "content": feedback})
await agent_loop(agent_tide_ui=agent_tide_ui)

# def generate_temp_password(length=16):
# characters = string.ascii_letters + string.digits + string.punctuation
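For readers unfamiliar with Chainlit's Ask APIs, the review gate added to the UI boils down to the pattern below. This is a condensed sketch rather than the exact app code, with `agent` standing in for `agent_tide_ui.agent_tide`:

```python
import chainlit as cl

async def review_pending_patch(agent):
    # AskActionMessage resolves to a dict holding the chosen action's payload,
    # or None if the reviewer never answers before the timeout.
    choice = await cl.AskActionMessage(
        content="Review the pending patch?",
        actions=[
            cl.Action(name="approve_patch", payload={"lgtm": True}, label="Approve"),
            cl.Action(name="reject_patch", payload={"lgtm": False}, label="Reject"),
        ],
        timeout=3600,
    ).send()

    if choice and choice.get("payload", {}).get("lgtm"):
        agent.approve()  # apply the staged patch
    elif choice:
        # AskUserMessage resolves to a dict whose "output" key holds the reply text.
        reply = await cl.AskUserMessage(content="Why was the patch rejected?", timeout=3600).send()
        agent.reject(reply.get("output"))  # feed the reason back to the agent
```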
57 changes: 44 additions & 13 deletions examples/hf_demo_space/app.py
@@ -81,6 +81,7 @@ async def start_chatr():
cl.Action(name="gpt-oss-20b", payload={"model": "openai/gpt-oss-20b:free"}, label="gpt-oss-20b"),
cl.Action(name="custom", payload={"model": "custom"}, label="bring your model")
],
timeout=3600
).send()


@@ -238,7 +239,7 @@ async def on_execute_steps(action :cl.Action):
author="Agent Tide"
).send()

await agent_loop(step_instructions_msg, codeIdentifiers=step.context_identifiers)
await agent_loop(step_instructions_msg, codeIdentifiers=step.context_identifiers, agent_tide_ui=agent_tide_ui)

task_list.status = f"Waiting feedback on step {current_task_idx}"
await task_list.send()
@@ -294,18 +295,21 @@ async def on_inspect_context(action :cl.Action):
await inspect_msg.send()

@cl.on_message
async def agent_loop(message: cl.Message, codeIdentifiers: Optional[list] = None):
agent_tide_ui = cl.user_session.get("AgentTideUi")
async def agent_loop(message: Optional[cl.Message]=None, codeIdentifiers: Optional[list] = None, agent_tide_ui :Optional[AgentTideUi]=None):
if agent_tide_ui is None:
agent_tide_ui = cl.user_session.get("AgentTideUi")

chat_history = cl.user_session.get("chat_history")

if message is not None:
if message.command:
command_prompt = await agent_tide_ui.get_command_prompt(message.command)
if command_prompt:
message.content = "\n\n---\n\n".join([command_prompt, message.content])

if message.command:
command_prompt = await agent_tide_ui.get_command_prompt(message.command)
if command_prompt:
message.content = "\n\n---\n\n".join([command_prompt, message.content])
chat_history.append({"role": "user", "content": message.content})
await agent_tide_ui.add_to_history(message.content)

chat_history.append({"role": "user", "content": message.content})
await agent_tide_ui.add_to_history(message.content)

msg = cl.Message(content="", author="Agent Tide")

@@ -386,11 +390,38 @@ async def agent_loop(message: cl.Message, codeIdentifiers: Optional[list] = None
payload={"msg_id": msg.id}
)
)
# # Send the final message
await msg.send()

chat_history.append({"role": "assistant", "content": msg.content})
await agent_tide_ui.add_to_history(msg.content)
# Send the final message
await msg.send()

chat_history.append({"role": "assistant", "content": msg.content})
await agent_tide_ui.add_to_history(msg.content)

await asyncio.sleep(0.5)
if agent_tide_ui.agent_tide._has_patch:
choice = await cl.AskActionMessage(
content="AgentTide is asking you to review the Patch before applying it.",
actions=[
cl.Action(name="approve_patch", payload={"lgtm": True}, label="✔️ Approve"),
cl.Action(name="reject_patch", payload={"lgtm": False}, label="❌ Reject"),
],
timeout=3600
).send()

if choice:
lgtm = choice.get("payload", {}).get("lgtm")
if lgtm:
agent_tide_ui.agent_tide.approve()
else:
response = await cl.AskUserMessage(
content="""Please provide specific feedback explaining why the patch was rejected. Include what's wrong, which parts are problematic, and what needs to change. Avoid vague responses like "doesn't work" - instead be specific like "missing error handling for FileNotFoundError" or "function should return boolean, not None." Your detailed feedback helps generate a better solution.""",
timeout=3600
).send()

feedback = response.get("output")
agent_tide_ui.agent_tide.reject(feedback)
chat_history.append({"role": "user", "content": feedback})
await agent_loop(agent_tide_ui=agent_tide_ui)

if __name__ == "__main__":
from dotenv import load_dotenv
5 changes: 3 additions & 2 deletions examples/hf_demo_space/static/landing_page.html
@@ -134,8 +134,9 @@
}

.logo {
max-width: 650px;
height: auto;
max-width: 650px; /* won’t exceed 650px on big screens */
width: 100%; /* scales down to fit smaller screens */
height: auto; /* maintains aspect ratio */
margin-bottom: 0.5rem;
filter: drop-shadow(0 4px 8px hsl(var(--foreground) / 0.1));
animation: logoGlow 3s ease-in-out infinite alternate;