-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathinstall.sh
More file actions
executable file
·101 lines (91 loc) · 3.54 KB
/
install.sh
File metadata and controls
executable file
·101 lines (91 loc) · 3.54 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
#!/usr/bin/env bash
#
# install.sh - Top-level installer for the Intel GPU inference stack.
#
# Installs the llama-server systemd user service, and optionally the MCP
# web-search server, whisper.cpp speech recognition, and the embedding server.
#
# Usage:
#   ./install.sh                   # Install llama-server service
#   ./install.sh --with-mcp        # Also install MCP web search server
#   ./install.sh --update          # Pull latest submodules + rebuild all
#   ./install.sh --with-whisper    # Also install whisper speech recognition
#   ./install.sh --with-embedding  # Also install embedding server
#   ./install.sh --all             # Install everything (mcp + whisper + embedding)
#   ./install.sh --update --all    # Update + reinstall everything
set -euo pipefail

# Absolute path of the directory holding this script; everything below is
# resolved relative to it so the repo can live anywhere.
INSTALL_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Parse flags
# Component toggles default to off; --all switches every optional component
# on, and --update forces the sub-installers to pull + rebuild.
WITH_MCP=false
WITH_WHISPER=false
WITH_EMBEDDING=false
UPDATE=false
for arg in "$@"; do
  case "$arg" in
    --with-mcp) WITH_MCP=true ;;
    --with-whisper) WITH_WHISPER=true ;;
    --with-embedding) WITH_EMBEDDING=true ;;
    --all) WITH_MCP=true; WITH_WHISPER=true; WITH_EMBEDDING=true ;;
    --update) UPDATE=true ;;
    *)
      # Fail fast on unrecognized flags instead of silently ignoring typos
      # (previously `--with-msp` would be dropped without a word).
      echo "Unknown option: $arg" >&2
      echo "Usage: $0 [--with-mcp] [--with-whisper] [--with-embedding] [--all] [--update]" >&2
      exit 1
      ;;
  esac
done
# 1. Init submodules
# Fetch/refresh the vendored dependencies (llama.cpp etc.) at the commits
# pinned by this repository. Must run from the repo root, hence the cd.
echo "[intel-gpu-inference] Initializing submodules..."
cd "$INSTALL_DIR"
git submodule update --init --recursive
# 2. Build llama.cpp
# --update always delegates a pull + rebuild to scripts/install.sh; otherwise
# build only when no llama-server binary is present. Two paths are probed
# because llama.cpp versions differ in where the build places binaries
# (build/bin/ vs build/).
if [[ "$UPDATE" == "true" ]]; then
echo "[intel-gpu-inference] Updating and rebuilding llama.cpp..."
bash "$INSTALL_DIR/scripts/install.sh" --update
elif [ ! -x "$INSTALL_DIR/llama.cpp/build/bin/llama-server" ] &&
[ ! -x "$INSTALL_DIR/llama.cpp/build/llama-server" ]; then
echo "[intel-gpu-inference] llama-server not built — running scripts/install.sh..."
bash "$INSTALL_DIR/scripts/install.sh"
else
echo "[intel-gpu-inference] llama-server already built (use --update to rebuild)"
fi
# 3. XDG config — render the env template exactly once; an existing config
# is the user's and is never clobbered.
mkdir -p "$HOME/.config/intel-gpu-inference"
if [ -f "$HOME/.config/intel-gpu-inference/env" ]; then
  echo "Config already exists at ~/.config/intel-gpu-inference/env — skipping"
else
  # Substitute the template placeholders with this machine's paths.
  sed -e "s|__HOME__|$HOME|g" \
      -e "s|__INSTALL_DIR__|$INSTALL_DIR|g" \
      "$INSTALL_DIR/configs/llama-server.env.template" \
      > "$HOME/.config/intel-gpu-inference/env"
  echo "Config installed at ~/.config/intel-gpu-inference/env — edit before starting"
fi
# 4. Service file
# Unlike the env config above, the unit file is owned by the installer and is
# re-rendered on every run so template changes propagate.
mkdir -p "$HOME/.config/systemd/user"
sed -e "s|__HOME__|$HOME|g" \
-e "s|__INSTALL_DIR__|$INSTALL_DIR|g" \
"$INSTALL_DIR/llama-server.service.template" \
> "$HOME/.config/systemd/user/llama-server.service"
# 5. Enable + start
# daemon-reload picks up the (re)written unit; restart (not start) ensures a
# running instance is bounced onto the new binary/config.
systemctl --user daemon-reload
systemctl --user enable llama-server.service
systemctl --user restart llama-server.service
echo "llama-server installed and started"
# 6. (Optional) MCP web search server
if [[ "$WITH_MCP" == "true" ]]; then
  echo ""
  echo "[intel-gpu-inference] Installing open-websearch MCP server..."
  # Build the sub-installer argument list, forwarding --update when requested.
  mcp_args=()
  if [[ "$UPDATE" == "true" ]]; then
    mcp_args+=(--update)
  fi
  # ${arr[@]+...} expands to nothing for an empty array even under set -u.
  bash "$INSTALL_DIR/scripts/install-mcp.sh" ${mcp_args[@]+"${mcp_args[@]}"}
fi
# 7. (Optional) whisper.cpp speech recognition server
if [[ "$WITH_WHISPER" == "true" ]]; then
  echo ""
  echo "[intel-gpu-inference] Installing whisper.cpp speech recognition server..."
  # Delegate to the sub-installer, forwarding --update when requested.
  if [[ "$UPDATE" != "true" ]]; then
    bash "$INSTALL_DIR/scripts/install-whisper.sh"
  else
    bash "$INSTALL_DIR/scripts/install-whisper.sh" --update
  fi
fi
# 8. (Optional) llama.cpp embedding server
if [[ "$WITH_EMBEDDING" == "true" ]]; then
  echo ""
  echo "[intel-gpu-inference] Installing llama.cpp embedding server..."
  # Forward --update like the MCP and whisper sections do, so that
  # `./install.sh --update --all` also refreshes the embedding server.
  # NOTE(review): confirm scripts/install-embedding.sh accepts --update.
  if [[ "$UPDATE" == "true" ]]; then
    bash "$INSTALL_DIR/scripts/install-embedding.sh" --update
  else
    bash "$INSTALL_DIR/scripts/install-embedding.sh"
  fi
fi