-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmakefile
More file actions
335 lines (300 loc) · 13.4 KB
/
makefile
File metadata and controls
335 lines (300 loc) · 13.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
# Every target in this makefile is a command, not a file — declare them all
# phony so a stray file with a matching name can never shadow a rule.
.PHONY: init deploy stop clean \
        kind-start kind-stop kind-dashboard \
        kind-generate-key-pair generate-ingress-key \
        containers-build containers-load containers-all docker-clean \
        kind-deploy kind-start-traefik kind-start-cleaner \
        kind-undeploy kind-stop-traefik kind-clean \
        enable-wsl integration-test
# ------------------------
# Kind targets
# ------------------------
# Initialize kind cluster, build/load containers, generate keys, start cleaner.
# Prerequisite order matters (the cluster must exist before images are loaded
# and secrets created) — NOTE(review): not safe under `make -j`.
init: kind-start containers-all kind-generate-key-pair generate-ingress-key kind-start-cleaner
# Start kind cluster (idempotent).
# If the 'aggregator' cluster already exists with a usable kubectl context it
# is left alone; if the cluster exists but its context is gone, the cluster is
# deleted and recreated. Either way the context is selected at the end.
# FIX: `grep -qx` requires an exact line match — the original `grep -q` was a
# substring match and would false-positive on e.g. a cluster named
# "aggregator-test".
kind-start:
	@echo "🚀 Creating kind cluster..."
	@if kind get clusters 2>/dev/null | grep -qx "aggregator"; then \
		echo "Kind cluster 'aggregator' already exists."; \
		if ! kubectl config get-contexts kind-aggregator >/dev/null 2>&1; then \
			echo "⚠️ Context 'kind-aggregator' not found, deleting and recreating cluster..."; \
			kind delete cluster --name aggregator; \
			kind create cluster --name aggregator --config k8s/kind-config.yaml; \
			echo "⏳ Waiting for cluster to be ready..."; \
			kubectl wait --for=condition=Ready nodes --all --timeout=120s; \
		fi; \
	else \
		kind create cluster --name aggregator --config k8s/kind-config.yaml; \
		echo "⏳ Waiting for cluster to be ready..."; \
		kubectl wait --for=condition=Ready nodes --all --timeout=120s; \
	fi
	@kubectl config use-context kind-aggregator
	@echo "✅ Kind cluster is ready!"
# Stop and delete kind cluster.
# Destroys the 'aggregator' cluster and everything inside it (all namespaces,
# loaded images, secrets). Fails if the cluster does not exist.
kind-stop:
	@echo "🧹 Deleting kind cluster..."
	@kind delete cluster --name aggregator
# Optional: dashboard (kubectl proxy)
# Get token: kubectl get secret admin-user -n kubernetes-dashboard -o jsonpath="{.data.token}" | base64 -d
# Installs the Kubernetes Dashboard Helm chart (repo added only if missing,
# since `helm repo add` can fail on older helm when the repo already exists),
# applies the admin-user manifest, waits for the pods, prints the login token,
# then BLOCKS on a port-forward (https://localhost:8443) until interrupted.
kind-dashboard:
	@echo "🚀 Configuring kubernetes dashboard"
	@kubectl config use-context kind-aggregator
	@if ! helm repo list | grep -q "kubernetes-dashboard"; then \
		helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/; \
	fi
	@helm repo update
	@helm upgrade --install kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard --create-namespace --namespace kubernetes-dashboard
	@kubectl apply -f k8s/dashboard-admin.yaml
	@echo "🚀 Starting kubectl proxy for Kubernetes dashboard..."
	@kubectl wait --namespace kubernetes-dashboard \
		--for=condition=ready pod \
		--selector=app.kubernetes.io/instance=kubernetes-dashboard \
		--timeout=120s
	@echo "🔑 The token is:"
	@kubectl get secret admin-user -n kubernetes-dashboard -o jsonpath="{.data.token}" | base64 -d && echo ""
	@kubectl -n kubernetes-dashboard port-forward svc/kubernetes-dashboard-kong-proxy 8443:443
# Set up key pair for uma-proxy.
# Generates a throwaway 4096-bit RSA key and self-signed CA cert, stores both
# in the 'uma-proxy-key-pair' secret (namespace: default, recreated each run),
# then deletes the local PEM files so private-key material does not linger in
# the working tree.
# FIX: cleanup uses `rm -f` — the original bare `rm` errored (failing the
# target) if one of the two files was missing, leaving the other behind.
kind-generate-key-pair:
	@echo "🔑 Generating key pair for uma-proxy..."
	@kubectl config use-context kind-aggregator
	@openssl genrsa -out uma-proxy.key 4096
	@openssl req -x509 -new -nodes -key uma-proxy.key -sha256 -days 3650 -out uma-proxy.crt -subj "/CN=Aggregator MITM CA"
	@echo "🗑️ Deleting existing Kubernetes secret for uma-proxy key pair if it exists..."
	@kubectl delete secret uma-proxy-key-pair -n default --ignore-not-found
	@echo "🔐 Creating Kubernetes secret for uma-proxy key pair..."
	@kubectl create secret generic uma-proxy-key-pair --from-file=uma-proxy.crt=uma-proxy.crt --from-file=uma-proxy.key=uma-proxy.key -n default
	@echo "🗑️ Cleaning up generated key pair files..."
	@rm -f uma-proxy.crt uma-proxy.key
# Generate RSA private key for ingress-uma.
# Idempotent: an existing private_key.pem is kept untouched, otherwise a new
# 2048-bit key is written to the repo root (consumed later by kind-deploy).
generate-ingress-key:
	@echo "🔑 Generating RSA private key for ingress-uma..."
	@if [ -f private_key.pem ]; then \
		echo "ℹ️ private_key.pem already exists, skipping generation"; \
	else \
		openssl genrsa -out private_key.pem 2048; \
		echo "✅ Generated private_key.pem"; \
	fi
# ------------------------
# Container targets
# ------------------------
# add CONTAINER=<container name> to handle a specific container
# Build Docker images.
# One image per directory under containers/ (image name = directory name,
# tag :latest); stale images with the same repo name are force-removed first.
# With CONTAINER=<name>, only that container is handled and the target fails
# if containers/<name> does not exist. The bulk path builds in parallel
# (one job per CPU via `xargs -P $(nproc)`); `exit 1` inside the per-job
# sub-shell fails that job, and the trailing
# `|| (echo "❌ Build failed"; exit 1)` turns any job failure into a failure
# of the whole target.
containers-build:
	@echo "🔨 Building Docker images for containers..."
	@if [ -n "$(CONTAINER)" ]; then \
		dir="containers/$(CONTAINER)"; \
		if [ -d "$$dir" ]; then \
			echo "🗑️ Removing old $(CONTAINER) images..."; \
			docker images "$(CONTAINER)" --format "{{.ID}}" | xargs -r docker rmi -f 2>/dev/null || true; \
			echo "📦 Building $(CONTAINER)..."; \
			docker build "$$dir" -t "$(CONTAINER):latest"; \
		else \
			echo "❌ Container $(CONTAINER) does not exist!"; \
			exit 1; \
		fi \
	else \
		echo "🗑️ Removing old container images..."; \
		find containers -maxdepth 1 -mindepth 1 -type d -exec basename {} \; | \
		xargs -I {} sh -c 'docker images "{}" --format "{{.ID}}" | xargs -r docker rmi -f 2>/dev/null || true'; \
		find containers -maxdepth 1 -mindepth 1 -type d | \
		xargs -I {} -P $$(nproc) sh -c '\
			name=$$(basename {}); \
			echo "📦 Building $$name..."; \
			if docker build {} -t "$$name:latest"; then \
				echo "✅ Built $$name"; \
			else \
				echo "❌ Failed to build $$name"; \
				exit 1; \
			fi \
		' && echo "✅ All containers built successfully" || (echo "❌ Build failed"; exit 1); \
	fi
# Load Docker images into kind.
# Pushes locally-built <name>:latest images into the 'aggregator' kind
# cluster so pods can use imagePullPolicy without a registry. With
# CONTAINER=<name>, only that image is loaded; otherwise every directory
# under containers/ is loaded with up to 4 parallel jobs. Bails out early
# if the kind-aggregator kubectl context is unavailable.
containers-load:
	@echo "📤 Loading container images into kind..."
	@kubectl config use-context kind-aggregator 2>/dev/null || (echo "❌ Kind cluster not ready"; exit 1)
	@if [ -n "$(CONTAINER)" ]; then \
		name="$(CONTAINER)"; \
		echo "📥 Loading $$name into kind..."; \
		kind load docker-image "$$name:latest" --name aggregator; \
	else \
		find containers -maxdepth 1 -mindepth 1 -type d | \
		xargs -I {} -P 4 sh -c '\
			name=$$(basename {}); \
			echo "📥 Loading $$name into kind..."; \
			if kind load docker-image "$$name:latest" --name aggregator; then \
				echo "✅ Loaded $$name"; \
			else \
				echo "❌ Failed to load $$name"; \
				exit 1; \
			fi \
		' && echo "✅ All containers loaded successfully" || (echo "❌ Loading failed"; exit 1); \
	fi
# Build and load all containers (honours CONTAINER=<name> via both
# prerequisites; build must finish before load, so not `-j` safe).
containers-all: containers-build containers-load
# Clean up Docker dangling and unused images.
# Two-pass prune: first drop dangling layers, then force-remove any image
# that has gone unused for at least 24 hours.
docker-clean:
	@echo "🧹 Cleaning up Docker images..."
	@echo "🗑️ Removing dangling images..."
	@docker image prune --force
	@echo "🗑️ Removing unused images..."
	@docker image prune --all --force --filter "until=24h"
	@echo "✅ Docker cleanup complete"
# ------------------------
# Deploy YAML manifests with temporary key pair for uma-proxy
# ------------------------
# Install/upgrade the Traefik ingress controller into its own namespace,
# exposing host ports 80/443, and wait for the rollout.
# FIX: the helm repo is only added when missing — an unconditional
# `helm repo add` aborts the target on older helm versions when the repo is
# already registered (the kind-dashboard target already guards this way).
kind-start-traefik:
	@echo "📄 Deploying Traefik Ingress Controller..."
	@kubectl config use-context kind-aggregator
	@if ! helm repo list | grep -q "traefik"; then \
		helm repo add traefik https://traefik.github.io/charts; \
	fi
	@helm repo update
	@helm upgrade --install aggregator-traefik traefik/traefik \
		--namespace aggregator-traefik \
		--create-namespace \
		--set ingressClass.enabled=true \
		--set ingressClass.name=aggregator-traefik \
		--set ports.web.hostPort=80 \
		--set ports.websecure.hostPort=443 \
		--set service.type=ClusterIP \
		--set providers.kubernetesCRD.allowCrossNamespace=true
	@echo "⏳ Waiting for Traefik deployment to be ready..."
	@kubectl rollout status deployment aggregator-traefik -n aggregator-traefik --timeout=180s
	@echo "✅ Traefik deployment is ready!"
# Deploy the aggregator-cleaner controller (namespace + deployment from
# k8s/ops/). The readiness wait is deliberately best-effort (`|| true`):
# a slow image pull must not fail the target.
kind-start-cleaner:
	@echo "📄 Deploying aggregator-cleaner controller..."
	@kubectl config use-context kind-aggregator
	@kubectl apply -f k8s/ops/ns.yaml
	@kubectl apply -f k8s/ops/cleaner.yaml
	@echo "⏳ Waiting for aggregator-cleaner to be ready..."
	@kubectl wait --namespace aggregator-ops \
		--for=condition=available deployment/aggregator-cleaner \
		--timeout=60s || true
	@echo "✅ Aggregator cleaner deployed"
# Deploy the aggregator application: namespace, traefik config, the
# ingress-uma signing-key secret (built from private_key.pem — run
# generate-ingress-key first), ConfigMap, /etc/hosts entries (needs sudo),
# the ingress-uma deployment, and finally the aggregator server.
# FIX: the JWKS readiness poll now iterates with `$$(seq 1 30)` — recipes run
# under /bin/sh, where the bash-only `{1..30}` brace expansion of the
# original does not expand, so the loop ran exactly once. The poll remains
# best-effort: exhausting all 30 attempts does not fail the target.
kind-deploy:
	@echo "📄 Deploying aggregator application..."
	@kubectl config use-context kind-aggregator
	@echo "📄 Applying aggregator namespace..."
	@kubectl apply -f k8s/app/ns.yaml
	@echo "📄 Applying traefik config..."
	@kubectl apply -f k8s/app/traefik-config.yaml
	@echo "📄 Creating secret for ingress-uma..."
	@kubectl -n aggregator-app create secret generic ingress-uma-key \
		--from-file=private_key.pem=private_key.pem \
		--dry-run=client -o yaml | kubectl apply -f -
	@echo "📄 Applying aggregator ConfigMap..."
	@kubectl apply -f k8s/app/config.yaml
	@echo "📄 Adding localhost entries for ingress hosts..."
	@grep -qxF "127.0.0.1 aggregator.local" /etc/hosts || sudo -- sh -c "echo '127.0.0.1 aggregator.local' >> /etc/hosts"
	@grep -qxF "127.0.0.1 wsl.local" /etc/hosts || sudo -- sh -c "echo '127.0.0.1 wsl.local' >> /etc/hosts"
	@echo "📄 Applying ingress-uma..."
	@kubectl apply -f k8s/app/ingress-uma.yaml
	@echo "⏳ Waiting for ingress-uma deployment to be ready..."
	@kubectl rollout status deployment ingress-uma -n aggregator-app --timeout=90s
	@echo "⏳ Waiting for ingress-uma via Ingress to be reachable..."
	@for i in $$(seq 1 30); do \
		STATUS=$$(curl -s -o /dev/null -w "%{http_code}" http://aggregator.local/uma/.well-known/jwks.json || echo "000"); \
		if [ "$$STATUS" = "200" ]; then \
			echo "✅ Ingress-uma endpoint is ready"; \
			break; \
		else \
			echo "Waiting for Ingress JWKS endpoint... (status=$$STATUS)"; \
			sleep 2; \
		fi; \
	done
	@echo "📄 Applying aggregator deployment and service..."
	@kubectl apply -f k8s/app/aggregator.yaml
	@echo "⏳ Waiting for aggregator deployment to be ready..."
	@kubectl rollout status deployment aggregator-server -n aggregator-app --timeout=120s
	@echo "✅ Resources deployed to kind"
# Full deploy: Traefik first, then the application (order matters — the
# application's Ingress needs the controller).
deploy: kind-start-traefik kind-deploy
	@echo "✅ Aggregator deployment complete"
# ------------------------
# Cleanup kind deployment
# ------------------------
# Remove the aggregator-app namespace and the /etc/hosts entries while
# leaving the cluster, Traefik, and the cleaner controller running.
# FIX: `grep -qx` requires an exact cluster-name match (the original
# substring `grep -q` matched any name containing "aggregator").
kind-undeploy:
	@echo "🧹 Stopping aggregator deployment (keeping Traefik and cleaner running)..."
	@if kind get clusters 2>/dev/null | grep -qx "aggregator"; then \
		echo "🔧 Setting kubectl context..."; \
		kubectl config use-context kind-aggregator || true; \
		echo "🧹 Deleting aggregator namespace..."; \
		kubectl delete namespace aggregator-app --ignore-not-found || true; \
	else \
		echo "ℹ️ Kind cluster 'aggregator' does not exist, skipping deployment cleanup"; \
	fi
	@echo "🧹 Removing localhost entries..."
	@sudo sed -i.bak '/aggregator\.local/d' /etc/hosts || true
	@sudo sed -i.bak '/wsl\.local/d' /etc/hosts || true
	@echo "✅ Deployment stopped (Traefik and cleaner still running)"
# Remove the Traefik ingress controller by deleting its namespace (the helm
# release objects live there; helm metadata is left behind intentionally).
# FIX: `grep -qx` requires an exact cluster-name match (the original
# substring `grep -q` matched any name containing "aggregator").
kind-stop-traefik:
	@if kind get clusters 2>/dev/null | grep -qx "aggregator"; then \
		echo "🧹 Deleting Traefik Ingress Controller..."; \
		kubectl config use-context kind-aggregator || true; \
		kubectl delete namespace aggregator-traefik --ignore-not-found || true; \
		echo "✅ Traefik Ingress Controller removed successfully."; \
	else \
		echo "ℹ️ Kind cluster 'aggregator' does not exist, skipping Traefik cleanup"; \
	fi
# Full in-cluster cleanup: cluster-wide RBAC objects, the application
# namespace, the cleaner, and Traefik; then /etc/hosts entries and the
# generated ingress key. The cluster itself is left running (see `clean`).
# FIXES:
#  - `grep -qx` requires an exact cluster-name match (the original substring
#    `grep -q` matched any name containing "aggregator").
#  - The original ran `$(MAKE) kind-stop-cleaner`, a target defined nowhere
#    in this makefile — the sub-make error was silently swallowed by the `;`
#    chain. The cleaner deployed by kind-start-cleaner lives in the
#    aggregator-ops namespace, so that namespace is deleted directly instead.
kind-clean:
	@echo "🧹 Cleaning up aggregator deployment..."
	@if kind get clusters 2>/dev/null | grep -qx "aggregator"; then \
		echo "🔧 Setting kubectl context..."; \
		kubectl config use-context kind-aggregator || true; \
		echo "🧹 Deleting aggregator cluster-wide roles..."; \
		kubectl delete clusterrole aggregator-namespace-manager --ignore-not-found || true; \
		kubectl delete clusterrolebinding aggregator-namespace-manager-binding --ignore-not-found || true; \
		kubectl delete clusterrole aggregator-cleaner-role --ignore-not-found || true; \
		kubectl delete clusterrolebinding aggregator-cleaner-binding --ignore-not-found || true; \
		echo "🧹 Deleting aggregator namespace..."; \
		kubectl delete namespace aggregator-app --ignore-not-found || true; \
		echo "🧹 Deleting aggregator-ops namespace (cleaner)..."; \
		kubectl delete namespace aggregator-ops --ignore-not-found || true; \
		$(MAKE) kind-stop-traefik; \
	else \
		echo "ℹ️ Kind cluster 'aggregator' does not exist, skipping Kubernetes cleanup"; \
	fi
	@echo "🧹 Removing localhost entries..."
	@sudo sed -i.bak '/aggregator\.local/d' /etc/hosts || true
	@sudo sed -i.bak '/wsl\.local/d' /etc/hosts || true
	@echo "🗑️ Removing generated key files..."
	@rm -f private_key.pem
	@echo "✅ Cleanup complete"
# Clean everything and delete the entire kind cluster
# (in-cluster cleanup first, then cluster deletion, then Docker image prune).
clean: kind-clean kind-stop docker-clean
	@echo "✅ Complete cleanup finished - cluster deleted"
# Stop deployment and Traefik
# (lighter than `clean`: the cluster and the cleaner keep running).
stop: kind-undeploy kind-stop-traefik
	@echo "✅ All services stopped (cluster and cleaner still running)"
# -------------------------
# wsl support
# -------------------------
# Patch CoreDNS so pods inside the cluster resolve 'wsl.local' to the WSL2
# host IP: back up the coredns ConfigMap, insert a wsl.local:53 server block
# right after the 'Corefile:' key, apply, and restart CoreDNS.
# The $(eval ...) / $(shell ...) pair runs when the recipe is expanded, so
# WSL_IP is captured before any command executes.
# FIX: the awk pattern uses the POSIX class [[:space:]] — the original '\s'
# is a GNU-awk extension that silently matches nothing under mawk/POSIX awk.
# NOTE(review): the inserted block's indentation is reconstructed to match
# the standard coredns ConfigMap block scalar (4-space base); verify against
# the cluster's actual Corefile layout.
enable-wsl:
	@echo "🔍 Detecting WSL2 IP..."
	$(eval WSL_IP := $(shell hostname -I | awk '{print $$1}'))
	@echo "Detected WSL2 IP: $(WSL_IP)"
	@echo "🧠 Backing up CoreDNS ConfigMap..."
	@kubectl -n kube-system get configmap coredns -o yaml > /tmp/coredns.yaml
	@echo "🧩 Patching CoreDNS..."
	@awk -v ip="$(WSL_IP)" '\
	/^data:/ {print; inData=1; next} \
	inData && /^[[:space:]]*Corefile:/ { \
		print; \
		print "    wsl.local:53 {"; \
		print "        hosts {"; \
		print "            " ip " wsl.local"; \
		print "            fallthrough"; \
		print "        }"; \
		print "    }"; \
		next \
	} \
	{print} \
	' /tmp/coredns.yaml > /tmp/coredns-patched.yaml
	@echo "📦 Applying patched ConfigMap..."
	@kubectl -n kube-system apply -f /tmp/coredns-patched.yaml >/dev/null
	@echo "♻️ Restarting CoreDNS deployment..."
	@kubectl -n kube-system rollout restart deployment coredns >/dev/null
	@echo "✅ Done! 'wsl.local' now resolves to $(WSL_IP)"
# ------------------------
# Integration Tests
# ------------------------
# Fetch module dependencies, then run the Go integration suite (20-minute
# budget). Each line re-enters the module directory because every recipe
# line runs in its own shell; a failed download stops before the tests run.
integration-test:
	@echo "🧪 Running integration tests..."
	@cd integration-test && go mod download
	@cd integration-test && go test -v -timeout 20m ./...