Skip to content

Commit 0cb51d2

Browse files
author
Sofia Kirsanova
committed
Finalize Yuankun updates: remove old JPEG; update publications.bib
1 parent de073f0 commit 0cb51d2

File tree

2 files changed

+10
-7
lines changed

2 files changed

+10
-7
lines changed

images/people/yuankun.jpeg

-286 KB
Binary file not shown.

publications.bib

Lines changed: 10 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,16 @@
% arXiv preprint; eprint/archiveprefix added so arXiv-aware styles can link it
% (journal field kept for classic styles that only read `journal`).
@article{namgung2025less,
  title         = {Less is More: Multimodal Region Representation via Pairwise Inter-view Learning},
  author        = {Namgung, Min and Lin, Yijun and Lee, JangHyeon and Chiang, Yao-Yi},
  journal       = {arXiv preprint arXiv:2505.18178},
  eprint        = {2505.18178},
  archiveprefix = {arXiv},
  url           = {https://arxiv.org/abs/2505.18178},
  year          = {2025},
}
17
% arXiv preprint; {MapQA} is braced so sentence-casing styles do not
% lowercase the acronym, and eprint/archiveprefix identify the arXiv record.
@article{li2025mapqa,
  title         = {{MapQA}: Open-domain Geospatial Question Answering on Map Data},
  author        = {Li, Zekun and Grossman, Malcolm and Qasemi, Eric and Kulkarni, Mihir and Chen, Muhao and Chiang, Yao-Yi},
  journal       = {arXiv preprint arXiv:2503.07871},
  eprint        = {2503.07871},
  archiveprefix = {arXiv},
  year          = {2025},
  url           = {https://arxiv.org/abs/2503.07871},
  abstract      = {Geospatial question answering (QA) is a fundamental task in navigation and point of interest (POI) searches. While existing geospatial QA datasets exist, they are limited in both scale and diversity, often relying solely on textual descriptions of geo-entities without considering their geometries. A major challenge in scaling geospatial QA datasets for reasoning lies in the complexity of geospatial relationships, which require integrating spatial structures, topological dependencies, and multi-hop reasoning capabilities that most text-based QA datasets lack. To address these limitations, we introduce MapQA, a novel dataset that not only provides question-answer pairs but also includes the geometries of geo-entities referenced in the questions. MapQA is constructed using SQL query templates to extract question-answer pairs from OpenStreetMap (OSM) for two study regions: Southern California and Illinois. It consists of 3,154 QA pairs spanning nine question types that require geospatial reasoning, such as neighborhood inference and geo-entity type identification. Compared to existing datasets, MapQA expands both the number and diversity of geospatial question types. We explore two approaches to tackle this challenge: (1) a retrieval-based language model that ranks candidate geo-entities by embedding similarity, and (2) a large language model (LLM) that generates SQL queries from natural language questions and geo-entity attributes, which are then executed against an OSM database. Our findings indicate that retrieval-based methods effectively capture concepts like closeness and direction but struggle with questions that require explicit computations (e.g., distance calculations). LLMs (e.g., GPT and Gemini) excel at generating SQL queries for one-hop reasoning but face challenges with multi-hop reasoning, highlighting a key bottleneck in advancing geospatial QA systems.},
}
715

816
@article{MAI2025104368,
@@ -247,11 +255,6 @@ @inproceedings{lin2022semi
247255
year={2022},
248256
organization={IEEE}
249257
}
250-
@article{li2025mapqa,
251-
title={MapQA: Open-domain Geospatial Question Answering on Map Data},
252-
author={Li, Zekun and Grossman, Malcolm and Qasemi, Eric (Ehsan) and Kulkarni, Mihir and Chen, Muhao and Chiang, Yao-Yi},
253-
journal={arXiv preprint arXiv:2503.07871},
254-
year={2025}
255258
}
256259
@article{li2022spabert,
257260
title={SpaBERT: A Pretrained Language Model from Geographic Data for Geo-Entity Representation},

0 commit comments

Comments (0)