Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
58 changes: 30 additions & 28 deletions scripts/update.sh
Original file line number Diff line number Diff line change
Expand Up @@ -27,13 +27,6 @@ REMOTE_BASE_DIR="/opt/paikka/data"
GEOCODER_ADMIN_URL="http://localhost:8080/admin/refresh-db"
GEOCODER_TEST_URL_BASE="http://localhost:8080/v1/reverse"

# --- Verification Test Cases ---
declare -A TEST_CASES=(
["lat=52.516280&lon=13.377635"]="518071791" # Brandenburger Tor
["lat=48.85826&lon=2.2945008"]="5013364" # Eiffel Tower
["lat=40.68924&lon=-74.044502"]="32965412" # Statue of Liberty
)

# Global variables that will be set by parse_args_and_configure or environment
REMOTE_USER=""
REMOTE_HOST=""
Expand Down Expand Up @@ -166,64 +159,73 @@ remote_sync_bundle() {
remote_deploy_and_verify() {
log "REMOTE: Executing remote deployment (Atomic Swap)"

# Convert TEST_CASES to a format that can be passed to remote shell
local test_cases_str=""
for key in "${!TEST_CASES}"; do
test_cases_str+="[\"$key\"]=\"${TEST_CASES[$key]}\" "
done

# shellcheck disable=SC2087
ssh "${REMOTE_USER}@${REMOTE_HOST}" /bin/bash << EOF
set -e
BASE_DIR="${REMOTE_BASE_DIR}"
BASE_DIR="/opt/paikka/data"
API_TOKEN="${GEOCODER_API_TOKEN}"
ADMIN_URL="${GEOCODER_ADMIN_URL}"
TEST_URL_BASE="${GEOCODER_TEST_URL_BASE}"
ADMIN_URL="http://localhost:8080/admin/refresh-db"
TEST_URL_BASE="http://localhost:8080/api/v1/reverse"
NEW_RELEASE_DIR="releases/${LATEST_RELEASE_DIR_NAME}"
LIVE_DATA_SYMLINK="live_data"

# Define TESTS array on remote side
declare -A TESTS=($test_cases_str)

echo_remote() {
echo "[REMOTE] \$1"
}

cd "\$BASE_DIR"

OLD_RELEASE_DIR=""
[ -L "\$LIVE_DATA_SYMLINK" ] && OLD_RELEASE_DIR=\$(readlink \$LIVE_DATA_SYMLINK)
[ -L "\$LIVE_DATA_SYMLINK" ] && OLD_RELEASE_DIR=\$(readlink "\$LIVE_DATA_SYMLINK")

echo_remote "Switching symlink: \$LIVE_DATA_SYMLINK -> \$NEW_RELEASE_DIR"
ln -sfn "\$NEW_RELEASE_DIR" "\$LIVE_DATA_SYMLINK"

echo_remote "Refreshing Geocoder DB..."
HTTP_STATUS=\$(curl -s -o /dev/null -w "%{http_code}" -X POST -H "X-Admin-Token: \$API_TOKEN" "\$ADMIN_URL")
HTTP_STATUS=\$(curl -s -o /dev/null -w "%{http_code}" --max-time 300 -X POST -H "X-Admin-Token: \$API_TOKEN" "\$ADMIN_URL")

if [ "\$HTTP_STATUS" -ne 200 ]; then
echo_remote "ERROR: Refresh failed (\$HTTP_STATUS). Rolling back."
[ -n "\$OLD_RELEASE_DIR" ] && ln -sfn "\$OLD_RELEASE_DIR" "\$LIVE_DATA_SYMLINK"
exit 1
fi

echo_remote "Refresh completed successfully"

# --- 2. Verify ---
echo_remote "Verifying new data..."
VERIFICATION_FAILED=0
for query in "\${!TESTS}"; do
ACTUAL_ID=\$(curl -s "\$TEST_URL_BASE?\$query" | jq -r '.[0].id // "not_found"')
if [ "\$ACTUAL_ID" != "\${TESTS[\$query]}" ]; then
echo_remote " --> FAILED: For \$query, expected '\${TESTS[\$query]}', got '\$ACTUAL_ID'"

QUERIES[0]="lat=52.516280&lon=13.377635"
QUERIES[1]="lat=48.85826&lon=2.2945008"
QUERIES[2]="lat=40.68924&lon=-74.044502"

EXPECTED_IDS[0]="518071791"
EXPECTED_IDS[1]="5013364"
EXPECTED_IDS[2]="32965412"

# Get the number of elements
NUM_TESTS=3

for ((i=0; i<NUM_TESTS; i++)); do
query="\${QUERIES[\$i]}"
expected_id="\${EXPECTED_IDS[\$i]}"
echo_remote "Testing URL: \$TEST_URL_BASE?\$query"
ACTUAL_ID=\$(curl -s --max-time 30 "\$TEST_URL_BASE?\$query" | jq -r '.results[0].id // "not_found"')
echo_remote "Got ID: \$ACTUAL_ID, Expected: \$expected_id"
if [ "\$ACTUAL_ID" != "\$expected_id" ]; then
echo_remote " --> FAILED: For \$query, expected '\$expected_id', got '\$ACTUAL_ID'"
VERIFICATION_FAILED=1
else
echo_remote " --> SUCCESS: Verified query for \$query"
fi
done

# --- 3. Finalize or Rollback ---
if [ \$VERIFICATION_FAILED -eq 1 ]; then
echo_remote "VERIFICATION FAILED. Rolling back and re-refreshing."
if [ -n "\$OLD_RELEASE_DIR" ] && [ -d "\$OLD_RELEASE_DIR" ]; then
ln -sfn "\$OLD_RELEASE_DIR" "\$LIVE_DATA_SYMLINK"
curl -s -o /dev/null -X POST -H "X-Admin-Token: \$API_TOKEN" "\$ADMIN_URL"
curl -s -o /dev/null --max-time 300 -X POST -H "X-Admin-Token: \$API_TOKEN" "\$ADMIN_URL"
echo_remote "Rollback to \$OLD_RELEASE_DIR complete. Faulty data in \$NEW_RELEASE_DIR is kept for inspection."
exit 1
else
Expand Down Expand Up @@ -310,4 +312,4 @@ main() {
###
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
main "$@"
fi
fi
35 changes: 1 addition & 34 deletions src/main/java/com/dedicatedcode/paikka/dto/POIResponse.java
Original file line number Diff line number Diff line change
Expand Up @@ -56,9 +56,6 @@ public class POIResponse {
@JsonProperty("boundary")
private GeoJsonGeometry boundary;

@JsonProperty("query")
private QueryInfo query;

// Constructors
public POIResponse() {}

Expand Down Expand Up @@ -96,9 +93,6 @@ public POIResponse() {}
public GeoJsonGeometry getBoundary() { return boundary; }
public void setBoundary(GeoJsonGeometry boundary) { this.boundary = boundary; }

public QueryInfo getQuery() { return query; }
public void setQuery(QueryInfo query) { this.query = query; }

public static class HierarchyItem {
@JsonProperty("level")
private int level;
Expand Down Expand Up @@ -155,32 +149,5 @@ public HierarchyItem(int level, String type, String name, String code, long osmI
public String getCode() { return code; }
public void setCode(String code) { this.code = code; }
}

public static class QueryInfo {
@JsonProperty("lat")
private double lat;

@JsonProperty("lon")
private double lon;

@JsonProperty("lang")
private String lang;

public QueryInfo() {}

public QueryInfo(double lat, double lon, String lang) {
this.lat = lat;
this.lon = lon;
this.lang = lang;
}

public double getLat() { return lat; }
public void setLat(double lat) { this.lat = lat; }

public double getLon() { return lon; }
public void setLon(double lon) { this.lon = lon; }

public String getLang() { return lang; }
public void setLang(String lang) { this.lang = lang; }
}

}
Original file line number Diff line number Diff line change
Expand Up @@ -415,9 +415,6 @@ private POIResponse convertPOIToResponse(POIData poi, double queryLat, double qu
}
}

// Query information
response.setQuery(new POIResponse.QueryInfo(queryLat, queryLon, lang));

return response;
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -123,7 +123,9 @@ public record CachedBoundary(int level, String name, String code, long osmId, En
public boolean contains(double lon, double lat) {
if (mir != null && mir.contains(lon, lat)) return true;
if (!mbr.contains(lon, lat)) return false;
return locator.locate(new Coordinate(lon, lat)) != Location.EXTERIOR;
synchronized (this) {
return locator.locate(new Coordinate(lon, lat)) != Location.EXTERIOR;
}
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -105,6 +105,7 @@ public void importData(String pbfFilePath, String dataDir) throws Exception {
Path appendDbPath = dataDirectory.resolve("tmp/append_poi");
Path nodeCacheDbPath = dataDirectory.resolve("tmp/node_cache");
Path wayIndexDbPath = dataDirectory.resolve("tmp/way_index");
Path boundaryWayIndexDbPath = dataDirectory.resolve("tmp/boundary_way_index");
Path neededNodesDbPath = dataDirectory.resolve("tmp/needed_nodes");
Path relIndexDbPath = dataDirectory.resolve("tmp/rel_index");
Path poiIndexDbPath = dataDirectory.resolve("tmp/poi_index");
Expand All @@ -114,6 +115,7 @@ public void importData(String pbfFilePath, String dataDir) throws Exception {
cleanupDatabase(gridIndexDbPath);
cleanupDatabase(nodeCacheDbPath);
cleanupDatabase(wayIndexDbPath);
cleanupDatabase(boundaryWayIndexDbPath);
cleanupDatabase(neededNodesDbPath);
cleanupDatabase(relIndexDbPath);
cleanupDatabase(poiIndexDbPath);
Expand Down Expand Up @@ -185,6 +187,7 @@ public void importData(String pbfFilePath, String dataDir) throws Exception {
RocksDB gridIndexDb = RocksDB.open(gridOpts, gridIndexDbPath.toString());
RocksDB nodeCache = RocksDB.open(nodeOpts, nodeCacheDbPath.toString());
RocksDB wayIndexDb = RocksDB.open(wayIndexOpts, wayIndexDbPath.toString());
RocksDB neededBoundaryWaysDb = RocksDB.open(wayIndexOpts, boundaryWayIndexDbPath.toString());
RocksDB neededNodesDb = RocksDB.open(neededNodesOpts, neededNodesDbPath.toString());
RocksDB relIndexDb = RocksDB.open(wayIndexOpts, relIndexDbPath.toString());
RocksDB poiIndexDb = RocksDB.open(poiIndexOpts, poiIndexDbPath.toString());
Expand All @@ -194,21 +197,23 @@ public void importData(String pbfFilePath, String dataDir) throws Exception {
stats.printPhaseHeader("PASS 1: Discovery & Indexing");
long pass1Start = System.currentTimeMillis();
stats.setCurrentPhase(1, "1.1.1: Discovery & Indexing");
pass1DiscoveryAndIndexing(pbfFile, wayIndexDb, neededNodesDb, relIndexDb, poiIndexDb, stats);
pass1DiscoveryAndIndexing(pbfFile, wayIndexDb, neededBoundaryWaysDb, neededNodesDb, relIndexDb, poiIndexDb, stats);
stats.setCurrentPhase(2, "1.1.2: Indexing boundary member ways");
indexBoundaryMemberWays(pbfFile, neededBoundaryWaysDb, wayIndexDb, neededNodesDb, stats);
stats.printPhaseSummary("PASS 1", pass1Start);

// PASS 2: Nodes Cache, Boundaries, POIs
stats.printPhaseHeader("PASS 2: Nodes Cache, Boundaries, POIs");
long pass2Start = System.currentTimeMillis();
stats.setCurrentPhase(2, "1.1.2: Caching node coordinates");
stats.setCurrentPhase(3, "1.1.3: Caching node coordinates");
cacheNeededNodeCoordinates(pbfFile, neededNodesDb, nodeCache, stats);

stats.setCurrentPhase(3, "1.2: Processing administrative boundaries");
stats.setCurrentPhase(4, "1.2: Processing administrative boundaries");
processAdministrativeBoundariesFromIndex(relIndexDb, nodeCache, wayIndexDb, gridIndexDb, boundariesDb, stats);
stats.setCurrentPhase(4, "2.1: Processing POIs & Sharding");
stats.setCurrentPhase(5, "2.1: Processing POIs & Sharding");
pass2PoiShardingFromIndex(nodeCache, wayIndexDb, appendDb, boundariesDb, poiIndexDb, gridIndexDb, stats);

stats.setCurrentPhase(5, "2.2: Compacting POIs");
stats.setCurrentPhase(6, "2.2: Compacting POIs");
compactShards(appendDb, shardsDb, stats);
stats.stop();
stats.printPhaseSummary("PASS 2", pass2Start);
Expand All @@ -226,6 +231,7 @@ public void importData(String pbfFilePath, String dataDir) throws Exception {
gridIndexDbPath,
nodeCacheDbPath,
wayIndexDbPath,
boundaryWayIndexDbPath,
neededNodesDbPath,
relIndexDbPath,
poiIndexDbPath,
Expand Down Expand Up @@ -264,6 +270,43 @@ private void writeMetadataFile(Path pbfFile, Path dataDirectory) throws IOExcept
System.out.println("\n\033[1;32mMetadata file written to: " + metadataPath + "\033[0m");
}

/**
 * Supplementary indexing pass: captures the node lists of ways that are
 * members of boundary relations but were not indexed during pass 1.
 *
 * Streams the PBF file once more and, for every way whose id is flagged in
 * {@code neededBoundaryWaysDb} and not already present in {@code wayIndexDb},
 * writes the way's ordered node-id array into {@code wayIndexDb} and marks
 * each referenced node id in {@code neededNodesDb} so a later pass can cache
 * its coordinates.
 *
 * @param pbfFile              path to the OSM PBF input file
 * @param neededBoundaryWaysDb presence set of way ids referenced by boundary
 *                             relations (value is a single marker byte)
 * @param wayIndexDb           way id -> encoded node-id array; written here
 * @param neededNodesDb        presence set of node ids whose coordinates must
 *                             be cached later; written here
 * @param stats                import statistics; incremented once per way indexed
 * @throws Exception on RocksDB access failure or PBF iteration failure
 */
private void indexBoundaryMemberWays(Path pbfFile,
                                     RocksDB neededBoundaryWaysDb,
                                     RocksDB wayIndexDb,
                                     RocksDB neededNodesDb,
                                     ImportStatistics stats) throws Exception {
    // Marker value for presence-set entries; only key existence matters.
    final byte[] ONE = new byte[]{1};
    // Batch writers buffer puts and flush in bulk; try-with-resources closes
    // them even if iteration throws. Node writes are far more frequent than
    // way writes, hence the larger batch size (500_000 vs 10_000).
    try (RocksBatchWriter wayWriter = new RocksBatchWriter(wayIndexDb, 10_000, stats);
         RocksBatchWriter neededWriter = new RocksBatchWriter(neededNodesDb, 500_000, stats)) {

        withPbfIterator(pbfFile, iterator -> {

            while (iterator.hasNext()) {
                EntityContainer container = iterator.next();
                // This pass only cares about ways; nodes/relations are skipped.
                if (container.getType() != EntityType.Way) continue;

                OsmWay way = (OsmWay) container.getEntity();
                byte[] wayKey = s2Helper.longToByteArray(way.getId());

                // Only process ways that are needed AND not already indexed
                if (neededBoundaryWaysDb.get(wayKey) == null) continue;
                if (wayIndexDb.get(wayKey) != null) { continue; }
                // Collect the way's node ids in order; each node is also
                // flagged as needed so its coordinates get cached later.
                int n = way.getNumberOfNodes();
                long[] nodeIds = new long[n];
                for (int j = 0; j < n; j++) {
                    long nid = way.getNodeId(j);
                    nodeIds[j] = nid;
                    neededWriter.put(s2Helper.longToByteArray(nid), ONE);
                }
                wayWriter.put(wayKey, s2Helper.longArrayToByteArray(nodeIds));
                stats.incrementWaysProcessed();
            }
        });

        // NOTE(review): explicit flushes before close — presumably close()
        // also flushes, but this makes the ordering explicit; confirm against
        // RocksBatchWriter's close semantics.
        wayWriter.flush();
        neededWriter.flush();
    }
}

private void updateGridIndexEntry(RocksDB gridIndexDb, long cellId, long osmId) throws Exception {
byte[] key = s2Helper.longToByteArray(cellId);
Expand All @@ -282,10 +325,11 @@ private void updateGridIndexEntry(RocksDB gridIndexDb, long cellId, long osmId)
}
}

private void pass1DiscoveryAndIndexing(Path pbfFile, RocksDB wayIndexDb, RocksDB neededNodesDb, RocksDB relIndexDb, RocksDB poiIndexDb, ImportStatistics stats) throws Exception {
private void pass1DiscoveryAndIndexing(Path pbfFile, RocksDB wayIndexDb, RocksDB boundaryWayIndexDb, RocksDB neededNodesDb, RocksDB relIndexDb, RocksDB poiIndexDb, ImportStatistics stats) throws Exception {

final byte[] ONE = new byte[]{1};
try (RocksBatchWriter wayWriter = new RocksBatchWriter(wayIndexDb, 10_000, stats);
RocksBatchWriter boundaryWayWriter = new RocksBatchWriter(boundaryWayIndexDb, 10_000, stats);
RocksBatchWriter neededWriter = new RocksBatchWriter(neededNodesDb, 500_000, stats);
RocksBatchWriter relWriter = new RocksBatchWriter(relIndexDb, 2_000, stats);
RocksBatchWriter poiWriter = new RocksBatchWriter(poiIndexDb, 20_000, stats)) {
Expand Down Expand Up @@ -336,6 +380,13 @@ private void pass1DiscoveryAndIndexing(Path pbfFile, RocksDB wayIndexDb, RocksDB
stats.incrementRelationsFound();
RelRec rec = buildRelRec(relation);
relWriter.put(s2Helper.longToByteArray(relation.getId()), encodeRelRec(rec));
for (long wid : rec.outer) {
boundaryWayWriter.put(s2Helper.longToByteArray(wid), ONE);
}
for (long wid : rec.inner) {
boundaryWayWriter.put(s2Helper.longToByteArray(wid), ONE);
}

}
}
} catch (Exception e) {
Expand Down Expand Up @@ -1447,19 +1498,30 @@ private long computeDirectorySize(Path root) {
}
}

private void recordSizeMetrics(ImportStatistics stats, Path shardsDbPath, Path boundariesDbPath, Path gridIndexDbPath, Path nodeCacheDbPath, Path wayIndexDbPath, Path neededNodesDbPath, Path relIndexDbPath, Path poiIndexDbPath, Path appendDbPath) {
private void recordSizeMetrics(ImportStatistics stats,
Path shardsDbPath,
Path boundariesDbPath,
Path gridIndexDbPath,
Path nodeCacheDbPath,
Path wayIndexDbPath,
Path boundaryWayIndexDbPath,
Path neededNodesDbPath,
Path relIndexDbPath,
Path poiIndexDbPath,
Path appendDbPath) {
long shards = computeDirectorySize(shardsDbPath);
long boundaries = computeDirectorySize(boundariesDbPath);
long dataset = shards + boundaries;

long grid = computeDirectorySize(gridIndexDbPath);
long node = computeDirectorySize(nodeCacheDbPath);
long way = computeDirectorySize(wayIndexDbPath);
long boundaryWay = computeDirectorySize(boundaryWayIndexDbPath);
long needed = computeDirectorySize(neededNodesDbPath);
long rel = computeDirectorySize(relIndexDbPath);
long poi = computeDirectorySize(poiIndexDbPath);
long append = computeDirectorySize(appendDbPath);
long tmpTotal = grid + node + way + needed + rel + poi + append;
long tmpTotal = grid + node + way + boundaryWay + needed + rel + poi + append;

stats.setShardsBytes(shards);
stats.setBoundariesBytes(boundaries);
Expand All @@ -1468,6 +1530,7 @@ private void recordSizeMetrics(ImportStatistics stats, Path shardsDbPath, Path b
stats.setTmpGridBytes(grid);
stats.setTmpNodeBytes(node);
stats.setTmpWayBytes(way);
stats.setTmpBoundaryWayBytes(boundaryWay);
stats.setTmpNeededBytes(needed);
stats.setTmpRelBytes(rel);
stats.setTmpPoiBytes(poi);
Expand Down
Loading
Loading