-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathapp.py
More file actions
6024 lines (5060 loc) · 249 KB
/
app.py
File metadata and controls
6024 lines (5060 loc) · 249 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#!/usr/bin/env python3
"""
ProfOlaf Web Application - Entry Point
"""
import os
import json
import threading
import shutil
import sqlite3
from flask import Flask, render_template, request, jsonify, flash, redirect, url_for, send_file
from pathlib import Path
from collections import defaultdict
from utils.db_management import DBManager, SelectionStage, initialize_db
from utils.article_search.article_search_method import SearchMethod, ArticleSearch, SemanticScholarSearchMethod, GoogleScholarSearchMethod, DBLPSearchMethod
from werkzeug.utils import secure_filename
# Import from pipeline modules
from utils.pipeline.generate_snowball_start_utils import generate_snowball_start, extract_titles_from_file
from utils.pipeline.start_iteration_utils import get_articles
from utils.pipeline.get_bibtex import process_articles_optimized, get_bibtex_single
from utils.pipeline.generate_conf_rank_utils import get_venues, find_similar_venues, _get_scimago_rank, _get_core_rank
from utils.pipeline.filter_by_metadata_utils import automated_check_venue_and_peer_reviewed
from utils.pipeline.screening import apply_decision
from utils.pipeline.solve_disagreements import settle_agreements
from utils.pipeline.llm_screening import screen_papers, download_pdfs, get_articles_from_db
from utils.article_processing.download_pdfs import is_valid_pdf
def _new_task_state(**extra):
    """Return a fresh per-task progress record; *extra* adds task-specific keys."""
    state = {
        'is_running': False,   # True while the background thread is active
        'progress': 0,         # units of work completed so far
        'total': 0,            # total units of work (0 until known)
        'current_step': '',    # human-readable description of the current phase
        'logs': [],            # accumulated log lines for the UI to poll
        'cancel_flag': None    # cancellation handle set when a cancel is requested
    }
    state.update(extra)
    return state


# Global state for tracking running tasks, keyed by task name.
# Previously three near-identical dict literals; built via a factory now so the
# shared shape is defined once.
running_tasks = {
    'generate_snowball_start': _new_task_state(),
    'start_iteration': _new_task_state(
        # Extra bookkeeping for articles that could not be resolved to an ID.
        articles_without_id_count=0,
        articles_without_id_iteration=None,
    ),
    'get_bibtex': _new_task_state(),
}
ITERATION_0 = 0  # Sentinel for the seed ("snowball start") iteration.

# Configuration for file uploads
UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = {'txt', 'json'}
os.makedirs(UPLOAD_FOLDER, exist_ok=True)

app = Flask(__name__)
# Random secret per process: Flask sessions/flash messages do not survive a restart.
app.secret_key = os.urandom(24)

# Configuration file path
CONFS_DIR = "confs"
DATABASES_DIR = "databases"
SEARCH_CONF_PATH = os.path.join(CONFS_DIR, "search_conf.json")
WORKFLOW_STATE_PATH = os.path.join(CONFS_DIR, "workflow_state.json")
ANALYSIS_CONF_PATH = os.path.join(CONFS_DIR, "analysis_conf.json")
LLM_CONFIG_PATH = os.path.join("utils", "article_llm_analysis", "llm_config.json")

# Ensure directories exist
os.makedirs(CONFS_DIR, exist_ok=True)
os.makedirs(DATABASES_DIR, exist_ok=True)
def _default_workflow_state():
    """Return a fresh default workflow-state record (new dict each call)."""
    return {
        'db_path': None,
        'current_iteration': None,
        'last_step': None,
        'skipped_steps': []
    }


def load_workflow_state():
    """Load workflow state from JSON file.

    Returns:
        The parsed state dict, or a fresh default state when the file does
        not exist or cannot be read/parsed.
    """
    try:
        if os.path.exists(WORKFLOW_STATE_PATH):
            with open(WORKFLOW_STATE_PATH, 'r', encoding='utf-8') as f:
                return json.load(f)
        # No state file yet: start from a clean default.
        return _default_workflow_state()
    except Exception as e:
        # Best-effort: a corrupt state file should not take the app down.
        # (The default literal was previously duplicated in both branches.)
        print(f"Error loading workflow state: {e}")
        return _default_workflow_state()
def save_workflow_state(state):
    """Persist the workflow *state* dict to its JSON file; True on success."""
    try:
        os.makedirs(CONFS_DIR, exist_ok=True)  # confs/ may not exist yet
        with open(WORKFLOW_STATE_PATH, 'w', encoding='utf-8') as fh:
            json.dump(state, fh, indent=2, ensure_ascii=False)
    except Exception as exc:
        print(f"Error saving workflow state: {exc}")
        return False
    return True
def get_db_manager_for_workflow():
    """Helper function to get DBManager for workflow metadata operations.

    Returns:
        An open DBManager for the configured database (caller is responsible
        for closing ``db_manager.conn``), or None when the database file does
        not exist or cannot be opened.
    """
    search_conf = load_search_conf()
    if search_conf and 'db_path' in search_conf:
        db_path = search_conf['db_path']
    else:
        db_path = os.path.join(DATABASES_DIR, 'database.db')
    if not os.path.exists(db_path):
        return None
    try:
        db_manager = DBManager(db_path)
        # Ensure workflow metadata table exists (for existing databases)
        try:
            db_manager.create_workflow_metadata_table()
        except Exception:
            # Fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            pass  # Table might already exist
        return db_manager
    except Exception as e:
        print(f"Error creating DBManager for workflow: {e}")
        return None
def update_workflow_state(db_path=None, current_iteration=None, last_step=None, skip_step=None):
    """Update workflow state in both database and JSON file (for backwards compatibility)"""
    state = load_workflow_state()
    # Older state files may predate the skipped_steps field.
    skipped = state.setdefault('skipped_steps', [])
    if db_path is not None:
        state['db_path'] = db_path
    if current_iteration is not None:
        state['current_iteration'] = current_iteration
    if last_step is not None:
        state['last_step'] = last_step
    if skip_step is not None and skip_step not in skipped:
        skipped.append(skip_step)
    # The database metadata table is the primary source of truth.
    db_manager = get_db_manager_for_workflow()
    if db_manager:
        try:
            if current_iteration is not None:
                db_manager.update_current_iteration(current_iteration)
            if last_step is not None:
                db_manager.update_last_step(last_step)
            db_manager.conn.close()
        except Exception as exc:
            print(f"Error updating workflow metadata in database: {exc}")
    # Mirror the state into JSON so older tooling keeps working.
    save_workflow_state(state)
    return state
def get_next_step_after_skip(current_step):
    """Determine the next logical step after skipping a step.

    Returns the name of the step that follows *current_step* in the
    pipeline order, or None for an unknown step or the final step.
    """
    ordered_steps = [
        "Step 0: Generate Snowball Start",
        "Step 1: Start Iteration",
        "Step 2: Remove Duplicates",
        "Step 3: Get BibTeX",
        "Step 4: Assign Venue Ranks",
        "Step 5: Filter by Metadata",
        "Step 6: Filter by Title",
        "Step 7: Solve Title Disagreements",
        "Step 8: Filter by Content",
        "Step 9: Solve Content Disagreements",
        "Step 10: Generate CSV",
    ]
    # Each step maps to its successor in the ordered pipeline.
    successor = dict(zip(ordered_steps, ordered_steps[1:]))
    return successor.get(current_step)
def generate_search_conf(data):
    """
    Build the search-configuration dictionary from submitted form data.

    Args:
        data: Dictionary containing form data

    Returns:
        Dictionary containing the search configuration

    Raises:
        ValueError: if the proxy key was requested from a file that cannot
            be read.
    """
    # Venue ranks arrive as a single comma-separated string.
    venue_list = [
        item.strip()
        for item in data.get('venue_rank_list', '').split(',')
        if item.strip()
    ]
    # Annotations arrive one per line.
    annotations = [
        line.strip()
        for line in data.get('annotations', '').split('\n')
        if line.strip()
    ]
    # The proxy key may be given directly or as a path to a file holding it.
    proxy_key = data.get('proxy_key', '').strip()
    if proxy_key and data.get('proxy_from_file') == 'true':
        try:
            with open(proxy_key, 'r', encoding='utf-8') as fh:
                proxy_key = fh.read().strip()
        except Exception as e:
            raise ValueError(f"Failed to read proxy key file: {str(e)}")
    # Default the database into databases/ only when no path was given;
    # otherwise resolve the provided (possibly relative) path against cwd.
    db_path = data.get('db_path', '').strip()
    db_path = os.path.abspath(db_path) if db_path else os.path.join(DATABASES_DIR, 'database.db')
    return {
        "start_year": int(data.get('start_year', 2020)),
        "end_year": int(data.get('end_year', 2024)),
        "venue_rank_list": venue_list,
        "proxy_key": proxy_key,
        "initial_file": data.get('initial_file', 'confs/seed.txt'),
        "db_path": db_path,
        "csv_path": data.get('csv_path', 'results.csv'),
        "search_method": data.get('search_method', 'google_scholar'),
        "annotations": annotations,
        "rater": data.get('rater', 'default')
    }
def load_search_conf():
    """Return the parsed search configuration, or None when missing/unreadable."""
    if not os.path.exists(SEARCH_CONF_PATH):
        return None
    try:
        with open(SEARCH_CONF_PATH, 'r') as fh:
            return json.load(fh)
    except Exception:
        # Unreadable/corrupt config is treated the same as no config.
        return None
def get_current_iteration_from_db(db_manager, all_articles):
    """
    Determine the current iteration from the database.

    Resolution order:
      1. ``current_iteration`` stored in search_conf.json;
      2. ``db_manager.check_current_iteration()``;
      3. MAX(iteration) over *all_articles*.

    Returns:
        The iteration as an int, or None when it cannot be determined.
    """
    # Try to get from search_conf first
    search_conf = load_search_conf()
    if search_conf and 'current_iteration' in search_conf:
        stored_iteration = search_conf['current_iteration']
        if stored_iteration is not None:
            try:
                return int(stored_iteration)
            except (ValueError, TypeError):
                pass
    # Fall back to database method
    try:
        result = db_manager.check_current_iteration()
        if result:
            iteration = result[0]
            if iteration is not None:
                try:
                    return int(iteration)
                except (ValueError, TypeError):
                    pass
    except Exception:
        # Fix: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        pass
    # Last resort: calculate from articles
    if all_articles:
        iteration_max = 0
        for article in all_articles:
            try:
                # iteration may be stored as a string in the DB
                iter_num = int(article.iteration) if article.iteration else 0
                iteration_max = max(iteration_max, iter_num)
            except (ValueError, TypeError):
                continue
        return iteration_max if iteration_max >= 0 else None
    return None
def update_current_iteration(iteration):
    """
    Persist *iteration* as ``current_iteration`` in search_conf.json.

    Returns:
        True on success; False when the value is not coercible to int or
        the file cannot be written.
    """
    search_conf = load_search_conf()
    if search_conf is None:
        search_conf = {}
    try:
        # int() raises ValueError/TypeError for non-numeric input.
        search_conf['current_iteration'] = int(iteration)
        # Ensure confs directory exists
        os.makedirs(CONFS_DIR, exist_ok=True)
        with open(SEARCH_CONF_PATH, 'w') as f:
            json.dump(search_conf, f, indent=4)
        return True
    except Exception:
        # Fix: was `except (ValueError, TypeError, Exception)` — Exception
        # already subsumes the other two, so the tuple was redundant.
        return False
def get_workflow_info():
    """
    Get workflow information (current iteration, step, counts, etc.)
    Returns a dictionary with workflow_info or None if database doesn't exist

    Resolution of iteration/step: database metadata table first, then
    workflow_state.json (migrated into the DB when found), then inference
    from the articles themselves.  On any unexpected failure a sentinel
    dict with current_step == "Error" and an 'error' key is returned.
    """
    search_conf = load_search_conf()
    # Get database path
    if search_conf and 'db_path' in search_conf:
        db_path = search_conf['db_path']
    else:
        db_path = os.path.join(DATABASES_DIR, 'database.db')
    db_exists = os.path.exists(db_path)
    # Load workflow state (used as fallback/primary source)
    workflow_state = load_workflow_state()
    if not db_exists:
        # If no database exists and no workflow state, show default message
        last_step = workflow_state.get('last_step')
        if not last_step:
            last_step = None  # Will be displayed as "No steps performed yet" in templates
        return {
            'current_iteration': workflow_state.get('current_iteration'),
            'current_step': last_step,
            'content_approved_count': 0,
            'new_articles_count': 0,
            'total_articles': 0,
            'search_method': None
        }
    try:
        db_manager = DBManager(db_path)
        # Ensure workflow metadata table exists
        try:
            db_manager.create_workflow_metadata_table()
        except:
            pass  # Table might already exist
        # Try to get current iteration and last step from database metadata (primary source)
        current_iteration = db_manager.get_current_iteration()
        current_step = db_manager.get_last_step()
        # If database doesn't have metadata yet, try to migrate from workflow_state.json
        if current_iteration is None and workflow_state.get('current_iteration') is not None:
            current_iteration = workflow_state.get('current_iteration')
            db_manager.update_current_iteration(current_iteration)
        if current_step is None and workflow_state.get('last_step') is not None:
            current_step = workflow_state.get('last_step')
            db_manager.update_last_step(current_step)
        # If still no current iteration, try to infer from database
        if current_iteration is None:
            all_articles = db_manager.get_iteration_data()
            current_iteration = get_current_iteration_from_db(db_manager, all_articles)
            if current_iteration is not None:
                db_manager.update_current_iteration(current_iteration)
        # Get all articles to calculate stats
        all_articles = db_manager.get_iteration_data()
        # Get max_selected and search_method for the current iteration
        max_selected = None
        search_method = None
        if current_iteration is not None:
            try:
                result = db_manager.check_current_iteration()
                if result:
                    iter_from_db, max_selected, search_method = result
            except:
                pass
        # For iteration 0, use "Step 0" only if we don't have a stored step that's later
        # (iteration 0 articles are auto-approved but didn't go through the steps)
        # IMPORTANT: Only override if we're actually on iteration 0 AND we don't have a stored step
        # If we have a stored step that's later (e.g., "Step 1: Start Iteration"), respect it
        if current_iteration == 0:
            # Only override to Step 0 if we don't have a stored step, or if the stored step is Step 0
            # This ensures that if step 1 has been executed (which updates current_iteration to >= 1),
            # we won't be in this branch, and the stored step will be used
            if current_step is None or current_step == "Step 0: Generate Snowball Start":
                current_step = "Step 0: Generate Snowball Start"
                # Save this to database to ensure it's persisted
                try:
                    db_manager.update_last_step(current_step)
                except:
                    pass
            # If we have a stored step that's later than Step 0 (e.g., Step 1), keep it
            # This handles edge cases where step 1 might have been executed but current_iteration
            # hasn't been updated yet (shouldn't happen, but defensive programming)
        # If still no current step, set default
        elif current_step is None:
            current_step = "Step 0: Generate Snowball Start"
        # Only infer step if we don't have an explicit last_step from database
        # (and we're not on iteration 0, which is handled above)
        if current_step is None and current_iteration != 0:
            if max_selected is not None:
                # max_selected encodes how far articles have progressed through
                # the selection stages; map it to a displayable step name.
                try:
                    max_selected_int = int(max_selected)
                    if max_selected_int == 0:
                        current_step = "Step 1-2: Initial Setup & BibTeX"
                    elif max_selected_int == 1:
                        current_step = "Step 5: Filter by Metadata"
                    elif max_selected_int == 2:
                        current_step = "Step 6: Title Screening"
                    elif max_selected_int == 3:
                        # Only infer Step 7 if we're not on iteration 0
                        # (iteration 0 articles are auto-approved, but iteration > 0 articles need to go through the step)
                        current_step = "Step 8: Content Screening"
                    else:
                        current_step = f"Step: Selection Stage {max_selected_int}"
                except (ValueError, TypeError):
                    current_step = "Step: Unknown"
                # Save inferred step to database
                try:
                    db_manager.update_last_step(current_step)
                except:
                    pass
            else:
                if all_articles:
                    # Presence of any bibtex suggests Step 3 already ran.
                    has_bibtex = any(getattr(a, 'bibtex', '') for a in all_articles if hasattr(a, 'bibtex'))
                    if has_bibtex:
                        current_step = "Step 4: Assign Venue Ranks"
                    else:
                        current_step = "Step 3: Get BibTeX"
                else:
                    current_step = "Step 0: Generate Snowball Start"
                # Save inferred step to database
                try:
                    db_manager.update_last_step(current_step)
                except:
                    pass
        elif current_step == "Step 0: Generate Snowball Start" and current_iteration == 0:
            # If we have Step 0 and we're on iteration 0, make sure it's saved (might have been inferred before)
            try:
                db_manager.update_last_step(current_step)
            except:
                pass
        # Count content approved papers (selected = 3)
        content_approved_count = 0
        for article in all_articles:
            try:
                selected = int(article.selected) if article.selected is not None else 0
                if selected == 3:  # CONTENT_APPROVED
                    content_approved_count += 1
            except (ValueError, TypeError):
                continue
        # Count articles in current iteration
        new_articles_count = 0
        if current_iteration is not None:
            try:
                current_iter_int = int(current_iteration)
                for article in all_articles:
                    try:
                        article_iter = getattr(article, 'iteration', None)
                        if article_iter is not None:
                            article_iter_int = int(article_iter)
                            if article_iter_int == current_iter_int:
                                new_articles_count += 1
                    except (ValueError, TypeError, AttributeError):
                        continue
            except (ValueError, TypeError):
                new_articles_count = 0
        workflow_info = {
            'current_iteration': current_iteration,
            'current_step': current_step,
            'content_approved_count': content_approved_count,
            'new_articles_count': new_articles_count,
            'total_articles': len(all_articles),
            'search_method': search_method
        }
        # Close database connection
        db_manager.conn.close()
        # Sync workflow state JSON with database for backwards compatibility
        if workflow_state.get('db_path') != db_path or workflow_state.get('current_iteration') != current_iteration or workflow_state.get('last_step') != current_step:
            update_workflow_state(
                db_path=db_path,
                current_iteration=current_iteration,
                last_step=current_step
            )
        return workflow_info
    except Exception as e:
        # NOTE(review): the connection is not closed on this error path —
        # consider a try/finally around the DBManager usage.
        return {
            'current_iteration': None,
            'current_step': "Error",
            'content_approved_count': 0,
            'new_articles_count': 0,
            'total_articles': 0,
            'search_method': None,
            'error': str(e)
        }
@app.route('/')
def index():
    """Main dashboard page"""
    # A saved search configuration tells us which database file to look for.
    config_exists = os.path.exists(SEARCH_CONF_PATH)
    db_path = None
    db_exists = False
    search_conf = load_search_conf()
    if search_conf and 'db_path' in search_conf:
        db_path = search_conf['db_path']
        db_exists = os.path.exists(db_path)
    return render_template(
        'index.html',
        config_exists=config_exists,
        db_exists=db_exists,
        db_path=db_path,
    )
@app.route('/generate_search_conf', methods=['GET', 'POST'])
def generate_search_conf_route():
    """Generate search configuration.

    GET renders the form pre-filled from any existing config and seed file;
    POST validates the form, writes confs/seed.txt (when provided) and
    search_conf.json, then redirects to the configuration page.
    """
    if request.method == 'GET':
        # Load existing configuration if it exists
        existing_config = load_search_conf()
        # Load existing seed.txt if it exists
        seed_content = ""
        seed_file_path = os.path.join(CONFS_DIR, "seed.txt")
        if os.path.exists(seed_file_path):
            try:
                with open(seed_file_path, 'r', encoding='utf-8') as f:
                    seed_content = f.read()
            except Exception:
                pass
        # Prepare default values from existing config or use defaults
        initial_file = existing_config.get('initial_file', 'confs/seed.txt') if existing_config else 'confs/seed.txt'
        # Normalize old format to new format
        if initial_file == 'seed.txt':
            initial_file = 'confs/seed.txt'
        defaults = {
            'start_year': existing_config.get('start_year', 2020) if existing_config else 2020,
            'end_year': existing_config.get('end_year', 2024) if existing_config else 2024,
            'venue_rank_list': ', '.join(existing_config.get('venue_rank_list', ['A*', 'A', 'B', 'C', 'Q1', 'Q2'])) if existing_config else 'A*, A, B, C, Q1, Q2',
            'search_method': existing_config.get('search_method', 'google_scholar') if existing_config else 'google_scholar',
            'proxy_key': existing_config.get('proxy_key', '') if existing_config else '',
            'initial_file': initial_file,
            'db_path': existing_config.get('db_path', os.path.join(DATABASES_DIR, 'database.db')) if existing_config else os.path.join(DATABASES_DIR, 'database.db'),
            'csv_path': existing_config.get('csv_path', 'results.csv') if existing_config else 'results.csv',
            'rater': existing_config.get('rater', 'default') if existing_config else 'default',
            'annotations': '\n'.join(existing_config.get('annotations', [])) if existing_config else '',
            'seed_content': seed_content
        }
        # Show the form
        return render_template('generate_search_conf.html', **defaults)
    # Handle POST request
    try:
        # Get form data
        form_data = request.form.to_dict()
        # Validate required fields
        if not form_data.get('start_year') or not form_data.get('end_year'):
            flash('Start year and end year are required', 'error')
            return redirect(url_for('generate_search_conf_route'))
        if not form_data.get('initial_file'):
            flash('Initial file is required', 'error')
            return redirect(url_for('generate_search_conf_route'))
        if not form_data.get('db_path'):
            flash('Database path is required', 'error')
            return redirect(url_for('generate_search_conf_route'))
        if not form_data.get('csv_path'):
            flash('CSV path is required', 'error')
            return redirect(url_for('generate_search_conf_route'))
        # Validate year range
        start_year = int(form_data.get('start_year'))
        end_year = int(form_data.get('end_year'))
        if start_year >= end_year:
            flash('Starting year must be less than ending year', 'error')
            return redirect(url_for('generate_search_conf_route'))
        # Generate configuration
        config = generate_search_conf(form_data)
        # Ensure confs directory exists
        os.makedirs(CONFS_DIR, exist_ok=True)
        # Save seed.txt content if provided
        seed_content = form_data.get('seed_content', '').strip()
        if seed_content:
            seed_file_path = os.path.join(CONFS_DIR, "seed.txt")
            with open(seed_file_path, 'w', encoding='utf-8') as f:
                f.write(seed_content)
            # Update initial_file to use confs/seed.txt if it's the default
            initial_file = config.get('initial_file', 'confs/seed.txt')
            if initial_file == 'seed.txt' or initial_file == 'confs/seed.txt':
                config['initial_file'] = 'confs/seed.txt'
        # Save configuration
        with open(SEARCH_CONF_PATH, 'w') as f:
            json.dump(config, f, indent=4)
        flash('Search configuration generated successfully!', 'success')
        return redirect(url_for('configuration'))
    except ValueError as e:
        # int() conversion or generate_search_conf's proxy-file failure.
        flash(f'Validation error: {str(e)}', 'error')
        return redirect(url_for('generate_search_conf_route'))
    except Exception as e:
        flash(f'Error generating configuration: {str(e)}', 'error')
        return redirect(url_for('generate_search_conf_route'))
@app.route('/configuration', methods=['GET'])
def configuration():
    """Configuration page - view and generate search configuration"""
    current_config = None
    config_exists = os.path.exists(SEARCH_CONF_PATH)
    if config_exists:
        # An unreadable file is shown the same as no configuration at all.
        try:
            with open(SEARCH_CONF_PATH, 'r') as fh:
                current_config = json.load(fh)
        except Exception:
            current_config = None
    return render_template('configuration.html',
                           config_exists=config_exists,
                           current_config=current_config)
# File extensions accepted for SQLite database uploads/loads.
ALLOWED_DB_EXTENSIONS = {'db', 'sqlite', 'sqlite3'}
def _validate_and_set_db_path(db_path: str):
    """Validate db_path and update search_conf. Returns (success, response_tuple).

    The response_tuple is a (flask response, status code) pair suitable for
    returning directly from a route.
    """
    if not db_path or not db_path.strip():
        return False, (jsonify({'success': False, 'error': 'Database path cannot be empty'}), 400)
    db_path = db_path.strip()
    if not os.path.exists(db_path):
        return False, (jsonify({
            'success': False,
            'error': f'Database file not found: {db_path}. Please check the path and try again.'
        }), 404)
    # Open the database once to confirm it is a readable article database.
    try:
        db_manager = DBManager(db_path)
        try:
            _ = db_manager.get_iteration_data()
        finally:
            # Fix: the validation connection was previously never closed.
            try:
                db_manager.conn.close()
            except Exception:
                pass
    except Exception as e:
        return False, (jsonify({
            'success': False,
            'error': f'Invalid database file: {str(e)}'
        }), 400)
    search_conf = load_search_conf()
    if search_conf is None:
        # No config yet: create one with sensible defaults around the new path.
        search_conf = {
            'start_year': 2020,
            'end_year': 2024,
            'venue_rank_list': ['A*', 'A', 'B', 'C', 'Q1', 'Q2'],
            'proxy_key': '',
            'initial_file': 'confs/seed.txt',
            'db_path': db_path,
            'csv_path': 'results.csv',
            'search_method': 'google_scholar',
            'annotations': ['Methods', 'Area'],
            'current_iteration': None
        }
    else:
        search_conf['db_path'] = db_path
    os.makedirs(CONFS_DIR, exist_ok=True)
    with open(SEARCH_CONF_PATH, 'w') as f:
        json.dump(search_conf, f, indent=4)
    return True, (jsonify({
        'success': True,
        'message': f'Database loaded successfully: {db_path}',
        'db_path': db_path
    }), 200)
@app.route('/api/database/upload', methods=['POST'])
def upload_database():
    """Upload a database file to the project's databases folder and set it as the current database."""
    try:
        if 'database_file' not in request.files:
            return jsonify({'success': False, 'error': 'No file provided'}), 400
        uploaded = request.files['database_file']
        if not uploaded or uploaded.filename == '':
            return jsonify({'success': False, 'error': 'No file selected'}), 400
        extension = (uploaded.filename.rsplit('.', 1)[-1] or '').lower()
        if extension not in ALLOWED_DB_EXTENSIONS:
            return jsonify({
                'success': False,
                'error': f'Invalid file type. Allowed: {", ".join(ALLOWED_DB_EXTENSIONS)}'
            }), 400
        # Sanitize the client-supplied name and guarantee a DB-ish suffix.
        safe_name = secure_filename(uploaded.filename) or 'uploaded.db'
        if not safe_name.lower().endswith(('.db', '.sqlite', '.sqlite3')):
            safe_name += '.db'
        os.makedirs(DATABASES_DIR, exist_ok=True)
        target = os.path.abspath(os.path.join(DATABASES_DIR, safe_name))
        uploaded.save(target)
        # Delegate validation and search_conf update to the shared helper.
        _, response = _validate_and_set_db_path(target)
        return response
    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500
@app.route('/api/database/upload_file', methods=['POST'])
def upload_database_file():
    """Upload a database file to the project's databases folder. Returns the path for use in merge steps (does not set as current database)."""
    try:
        if 'database_file' not in request.files:
            return jsonify({'success': False, 'error': 'No file provided'}), 400
        uploaded = request.files['database_file']
        if not uploaded or uploaded.filename == '':
            return jsonify({'success': False, 'error': 'No file selected'}), 400
        extension = (uploaded.filename.rsplit('.', 1)[-1] or '').lower()
        if extension not in ALLOWED_DB_EXTENSIONS:
            return jsonify({
                'success': False,
                'error': f'Invalid file type. Allowed: {", ".join(ALLOWED_DB_EXTENSIONS)}'
            }), 400
        # Sanitize the client-supplied name and guarantee a DB-ish suffix.
        safe_name = secure_filename(uploaded.filename) or 'uploaded.db'
        if not safe_name.lower().endswith(('.db', '.sqlite', '.sqlite3')):
            safe_name += '.db'
        os.makedirs(DATABASES_DIR, exist_ok=True)
        # Path is kept relative to the project root so merge APIs can resolve it.
        db_path = os.path.join(DATABASES_DIR, safe_name)
        uploaded.save(db_path)
        return jsonify({'success': True, 'db_path': db_path})
    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500
@app.route('/api/database/load', methods=['POST'])
def load_database():
    """Load a new database file and update search_conf.json (by path or from JSON body)."""
    try:
        # Accept the path either as a JSON body or as form data.
        if request.is_json:
            payload = request.get_json() or {}
            raw_path = (payload.get('db_path') or '').strip()
        else:
            raw_path = (request.form.get('db_path') or '').strip()
        if not raw_path:
            return jsonify({'success': False, 'error': 'Database path not provided'}), 400
        # Resolve relative/bare paths against the cwd; the file is used in
        # place (no copy into databases/).
        _, response = _validate_and_set_db_path(os.path.abspath(raw_path))
        return response
    except Exception as e:
        return jsonify({
            'success': False,
            'error': f'Error loading database: {str(e)}'
        }), 500
@app.route('/database', methods=['GET'])
def database_state():
    """Database state page - displays database statistics.

    Renders per-iteration counts by selection stage, overall stats, and a
    (capped) listing of articles; database errors are surfaced via db_error
    rather than a 500 page.
    """
    search_conf = load_search_conf()
    if search_conf and 'db_path' in search_conf:
        db_path = search_conf['db_path']
    else:
        db_path = os.path.join(DATABASES_DIR, 'database.db')
    db_exists = os.path.exists(db_path)
    db_error = None
    db_stats = None
    iteration_stats = []
    articles_display = []
    if db_exists:
        try:
            db_manager = DBManager(db_path)
            # Get all iteration data
            all_articles = db_manager.get_iteration_data()
            # Calculate statistics
            total_articles = len(all_articles)
            # Group by iteration
            by_iteration = defaultdict(int)
            by_selection_stage = defaultdict(int)
            iteration_max = 0
            for article in all_articles:
                # Convert iteration to int (it might be stored as string in DB)
                iter_num = 0
                if hasattr(article, 'iteration'):
                    try:
                        iter_num = int(article.iteration) if article.iteration else 0
                    except (ValueError, TypeError):
                        iter_num = 0
                by_iteration[iter_num] += 1
                iteration_max = max(iteration_max, iter_num)
                # Convert selected to int (it might be stored as string in DB)
                selected = 0
                if hasattr(article, 'selected'):
                    try:
                        selected = int(article.selected) if article.selected is not None else 0
                    except (ValueError, TypeError):
                        selected = 0
                by_selection_stage[selected] += 1
            # Get iteration statistics
            for iter_num in range(iteration_max + 1):
                # Filter articles for this iteration, converting to int for comparison
                iter_articles = []
                for a in all_articles:
                    if hasattr(a, 'iteration'):
                        try:
                            article_iter = int(a.iteration) if a.iteration else 0
                            if article_iter == iter_num:
                                iter_articles.append(a)
                        except (ValueError, TypeError):
                            continue
                iter_count = len(iter_articles)
                if iter_count > 0:
                    # Count by selection stage for this iteration
                    iter_selection_counts = defaultdict(int)
                    for article in iter_articles:
                        selected = 0
                        if hasattr(article, 'selected'):
                            try:
                                selected = int(article.selected) if article.selected is not None else 0
                            except (ValueError, TypeError):
                                selected = 0
                        iter_selection_counts[selected] += 1
                    # Selection-stage codes: 0 not selected, 1 metadata-approved,
                    # 2 title-approved, 3 content-approved, -1 duplicate.
                    iteration_stats.append({
                        'iteration': iter_num,
                        'total': iter_count,
                        'not_selected': iter_selection_counts.get(0, 0),
                        'metadata_approved': iter_selection_counts.get(1, 0),
                        'title_approved': iter_selection_counts.get(2, 0),
                        'content_approved': iter_selection_counts.get(3, 0),
                        'duplicate': iter_selection_counts.get(-1, 0)
                    })
            # Try to get current iteration info
            try:
                result = db_manager.check_current_iteration()
                if result:
                    current_iteration, max_selected, search_method = result
                    if current_iteration is None:
                        current_iteration = iteration_max if iteration_max >= 0 else None
                else:
                    current_iteration = iteration_max if iteration_max >= 0 else None
                    max_selected = None
                    search_method = None
            except:
                current_iteration = iteration_max if iteration_max >= 0 else None
                max_selected = None
                search_method = None
            # Get seen titles count
            try:
                seen_titles = db_manager.get_seen_titles_data()
                seen_titles_count = len(seen_titles) if seen_titles else 0
            except:
                seen_titles_count = 0
            db_stats = {
                'total_articles': total_articles,
                'total_iterations': iteration_max + 1 if iteration_max >= 0 else 0,
                'current_iteration': current_iteration,
                'search_method': search_method,
                'seen_titles_count': seen_titles_count,
                'by_selection_stage': dict(by_selection_stage)
            }
            # Prepare articles for display (limit to first 1000 for performance)
            articles_display = []
            for article in all_articles[:1000]:  # Limit for performance
                try:
                    iter_num = int(article.iteration) if article.iteration else 0
                except (ValueError, TypeError):
                    iter_num = 0
                try:
                    selected = int(article.selected) if article.selected is not None else 0
                except (ValueError, TypeError):
                    selected = 0
                articles_display.append({
                    'id': getattr(article, 'id', ''),
                    'title': getattr(article, 'title', '') or '(No title)',
                    'authors': getattr(article, 'authors', '') or '(No authors)',
                    'venue': getattr(article, 'venue', '') or '(No venue)',
                    'pub_year': getattr(article, 'pub_year', '') or '',
                    'iteration': iter_num,
                    'selected': selected,
                    'eprint_url': getattr(article, 'eprint_url', ''),
                    'num_citations': getattr(article, 'num_citations', '') or 0
                })
        except Exception as e:
            # Surface the failure on the page instead of crashing the request.
            db_error = str(e)
            articles_display = []
    return render_template('database_state.html',
                           db_exists=db_exists,
                           db_path=db_path,
                           db_stats=db_stats,
                           iteration_stats=iteration_stats,
                           articles=articles_display,
                           db_error=db_error,
                           SelectionStage=SelectionStage)
@app.route('/api/database/screening_raters', methods=['GET'])
def api_screening_raters():
    """Return distinct rater names from the screening table.

    Responds with {'success': True, 'raters': [...]}; a missing config or
    database file yields an error payload with 400/404.
    """
    try:
        search_conf = load_search_conf()
        if not search_conf or 'db_path' not in search_conf:
            return jsonify({'success': False, 'error': 'Database not configured'}), 400
        db_path = search_conf.get('db_path')
        if not os.path.exists(db_path):
            return jsonify({'success': False, 'error': 'Database file not found'}), 404
        db_manager = DBManager(db_path)
        try:
            raters = db_manager.get_screening_raters()
        except Exception:
            raters = []  # e.g. screening table does not exist
        finally:
            # Fix: close the per-request connection instead of leaking it.
            try:
                db_manager.conn.close()
            except Exception:
                pass
        return jsonify({'success': True, 'raters': raters})
    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500
@app.route('/api/database/screening_rows', methods=['GET'])
def api_screening_rows():
    """Return screening table rows for a given rater (optional iteration filter).

    Query params: rater (required), iteration (optional int).
    """
    try:
        search_conf = load_search_conf()
        if not search_conf or 'db_path' not in search_conf:
            return jsonify({'success': False, 'error': 'Database not configured'}), 400
        db_path = search_conf.get('db_path')
        if not os.path.exists(db_path):
            return jsonify({'success': False, 'error': 'Database file not found'}), 404
        rater = request.args.get('rater', '').strip()
        if not rater:
            return jsonify({'success': False, 'error': 'Rater is required'}), 400
        iteration = request.args.get('iteration', type=int)
        db_manager = DBManager(db_path)
        try:
            rows = db_manager.get_screening_rows_by_rater(rater, iteration=iteration)
        except Exception:
            rows = []  # best-effort: screening table may not exist yet
        finally:
            # Fix: close the per-request connection instead of leaking it.
            try:
                db_manager.conn.close()
            except Exception:
                pass
        return jsonify({'success': True, 'rows': rows})
    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500
@app.route('/api/database/annotations_table', methods=['GET'])
def api_annotations_table():
"""Return all rows from the annotations table."""