Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
29 changes: 29 additions & 0 deletions .roo/mcp.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
{
"mcpServers": {
"context7": {
"command": "npx",
"args": [
"-y",
"@upstash/context7-mcp"
],
"env": {
"DEFAULT_MINIMUM_TOKENS": ""
}
},
"filesystem": {
"command": "npx",
"args": [
"-y",
"@modelcontextprotocol/server-filesystem",
"/Users/henry"
]
},
"playwright": {
"command": "npx",
"args": [
"-y",
"@playwright/mcp@0.0.38"
]
}
}
}
33 changes: 33 additions & 0 deletions code/homework/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
# Homework Project

This project demonstrates a simple FastMCP assistant with basic tools.

## Available Tools

- **add(a: int, b: int) -> int**: Returns the sum of two integers.
- **scrape(url: str) -> str**: Scrapes the given webpage using the Jina AI reader and returns the content as markdown.

## Usage Example

```python
from fastmcp import FastMCP
from scrape_web import scrape_webpage

mcp = FastMCP("Demo 🚀")

@mcp.tool
def add(a: int, b: int) -> int:
return a + b

@mcp.tool
def scrape(url: str) -> str:
return scrape_webpage(url)

if __name__ == "__main__":
# Test the scraping tool
    content = scrape.fn("https://example.com")  # FastMCP tools are called via .fn, not directly
print(f"Scraped {len(content)} characters")
mcp.run()
```

Run the script with `python code/homework/main.py` to start the FastMCP server and see the tool in action.
13 changes: 13 additions & 0 deletions code/homework/count_data.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# Count whole-word occurrences of "data" on the DataTalks.Club homepage.
import urllib.request
import re, sys

url = 'https://datatalks.club/'
try:
    # Context manager ensures the HTTP connection is closed even if
    # reading/decoding fails (the original leaked the response object).
    with urllib.request.urlopen(url) as response:
        html = response.read().decode('utf-8', errors='ignore')
except Exception as e:
    print(f'Error fetching URL: {e}', file=sys.stderr)
    sys.exit(1)
# \b anchors match whole words only, so e.g. "database" is not counted.
matches = re.findall(r'\bdata\b', html, flags=re.IGNORECASE)
print(len(matches))
126 changes: 126 additions & 0 deletions code/homework/main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,126 @@
# main.py
from fastmcp import FastMCP
import requests
import os
import zipfile
from pathlib import Path
import minsearch
import re

# Give your server a descriptive name
mcp = FastMCP("Homework MCP Server")

# Global variables for the search index
documents = []  # list of {"content": str, "filename": str} dicts, filled by initialize_search_index()
index = None  # minsearch.Index once initialize_search_index() has run; None until then

def initialize_search_index():
    """Download the FastMCP repo zip (if needed) and build the search index.

    Populates the module-level ``documents`` list with every ``.md``/``.mdx``
    file found in local ``*.zip`` archives, then fits a ``minsearch.Index``
    over them and stores it in the module-level ``index``.

    Safe to call more than once: the document list is rebuilt from scratch
    each time, so repeated calls do not duplicate documents.
    """
    global documents, index

    print("Initializing search index...")

    # Download the zip file if it doesn't exist
    url = "https://github.com/jlowin/fastmcp/archive/refs/heads/main.zip"
    zip_path = "main.zip"

    if not os.path.exists(zip_path):
        print("Downloading FastMCP documentation...")
        # timeout= prevents hanging forever; raise_for_status() avoids
        # silently saving an HTML error page to disk as "main.zip".
        response = requests.get(url, timeout=60)
        response.raise_for_status()
        with open(zip_path, "wb") as f:
            f.write(response.content)

    # Rebuild from scratch so a second call does not append duplicates.
    documents = []

    # Process all zip files in the working directory
    for zip_file in Path(".").glob("*.zip"):
        with zipfile.ZipFile(zip_file, "r") as zip_ref:
            for file_info in zip_ref.infolist():
                if file_info.filename.endswith((".md", ".mdx")):
                    # Strip the leading archive folder (e.g. "fastmcp-main/")
                    parts = file_info.filename.split("/", 1)
                    clean_path = parts[1] if len(parts) > 1 else file_info.filename

                    # Read the file content
                    with zip_ref.open(file_info) as f:
                        content = f.read().decode("utf-8")
                    documents.append({
                        "content": content,
                        "filename": clean_path
                    })

    # Create and fit the index over both text fields
    index = minsearch.Index(
        text_fields=["content", "filename"],
        keyword_fields=[]
    )

    index.fit(documents)
    print(f"Search index initialized with {len(documents)} documents.")

# --- MCP Tools ---

@mcp.tool
def add(a: int, b: int) -> int:
    """Return the sum of the two integers *a* and *b*."""
    total = a + b
    return total

@mcp.tool
def scrape_webpage(url: str) -> str:
    """Scrape the content of a webpage and return it as markdown.

    Fetches the page through the Jina AI reader (https://r.jina.ai), which
    converts it to markdown. Raises ``requests.HTTPError`` on a non-2xx
    response and ``requests.Timeout`` after 30 seconds.
    """
    jina_url = f"https://r.jina.ai/{url}"
    # Timeout so a hung reader request cannot block the MCP server forever.
    response = requests.get(jina_url, timeout=30)
    response.raise_for_status()  # Raise an exception for bad status codes
    return response.text

@mcp.tool
def count_characters(text: str) -> int:
    """Return the number of characters in *text*."""
    length = len(text)
    return length

@mcp.tool
def count_word_occurrences(text: str, word: str) -> int:
    """Count how many times a specific word appears in a text (case-insensitive)."""
    # Lowercase both sides for case-insensitive matching; re.escape makes
    # any regex metacharacters in the word match literally, and \b anchors
    # restrict matches to whole words.
    pattern = r'\b' + re.escape(word.lower()) + r'\b'
    haystack = text.lower()
    return sum(1 for _ in re.finditer(pattern, haystack))

@mcp.tool
def search_docs(query: str) -> str:
    """Search through the fastmcp documentation for a given query."""
    global index

    # Lazily build the index the first time a search is requested.
    if index is None:
        initialize_search_index()

    hits = index.search(
        query=query,
        boost_dict={"filename": 2.0},  # weight filename matches above body text
        num_results=5,
    )

    lines = []
    for hit in hits:
        # Single-line snippet: collapse newlines in the first 200 chars.
        snippet = hit["content"][:200].replace("\n", " ")
        lines.append(f"Filename: {hit['filename']}")
        lines.append(f"Content Snippet: {snippet}...")
        lines.append("---")

    return "\n".join(lines)

def count_data_on_site(url: str) -> int:
    """Fetch a page via the scrape tool and count occurrences of 'data'.

    NOTE(review): ``scrape_webpage`` and ``count_word_occurrences`` are
    ``@mcp.tool``-decorated, so in FastMCP 2.x they are tool objects, not
    plain functions — calling them directly raises a TypeError. The
    underlying functions are reached through the tool's ``.fn`` attribute.
    """
    content = scrape_webpage.fn(url)
    return count_word_occurrences.fn(content, "data")

if __name__ == "__main__":
    # Initialize the index on startup so it's ready for the first search
    initialize_search_index()
    print("Starting MCP server...")
    # Blocks here, serving tool calls until the process is stopped.
    mcp.run()
Binary file added code/homework/main.zip
Binary file not shown.
10 changes: 10 additions & 0 deletions code/homework/pyproject.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
[project]
name = "homework"
version = "0.1.0"
description = "FastMCP homework server with web-scraping, word-counting, and docs-search tools"
readme = "README.md"
requires-python = ">=3.10"
dependencies = [
"fastmcp>=2.12.3",
"minsearch>=0.0.7",
]
12 changes: 12 additions & 0 deletions code/homework/scrape_web.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
import requests

def scrape_webpage(url: str) -> str:
    """Scrape the content of a webpage and return it as markdown.

    Fetches the page through the Jina AI reader (https://r.jina.ai), which
    converts it to markdown. Raises ``requests.HTTPError`` on a non-2xx
    response and ``requests.Timeout`` after 30 seconds — matching the
    behavior of the tool of the same name in main.py.
    """
    jina_url = f"https://r.jina.ai/{url}"
    # Timeout so a hung reader request cannot block the caller forever.
    response = requests.get(jina_url, timeout=30)
    # Fail loudly instead of returning an error-page body as "content".
    response.raise_for_status()
    return response.text

# Smoke test: fetch a known page and report its size (requires network).
if __name__ == "__main__":
    content = scrape_webpage("https://github.com/alexeygrigorev/minsearch")
    print(f"Character count: {len(content)}")
5 changes: 5 additions & 0 deletions code/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,11 @@ dependencies = [
[tool.setuptools]
py-modules = ["client", "server"]

[tool.uv.workspace]
members = [
"homework",
]

[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
Loading