VyLala committed on
Commit d839145 · verified · 1 Parent(s): ccbc9f4

Update mtdna_classifier.py

Files changed (1)
  1. mtdna_classifier.py +706 -706
mtdna_classifier.py CHANGED
@@ -1,707 +1,707 @@
1
- # mtDNA Location Classifier MVP (Google Colab)
2
- # Accepts accession number → Fetches PubMed ID + isolate name → Gets abstract → Predicts location
3
- import os
4
- #import streamlit as st
5
- import subprocess
6
- import re
7
- from Bio import Entrez
8
- import fitz
9
- import spacy
10
- from spacy.cli import download
11
- from NER.PDF import pdf
12
- from NER.WordDoc import wordDoc
13
- from NER.html import extractHTML
14
- from NER.word2Vec import word2vec
15
- from transformers import pipeline
16
- import urllib.parse, requests
17
- from pathlib import Path
18
- from upgradeClassify import filter_context_for_sample, infer_location_for_sample
19
-
20
- # Set your email (required by NCBI Entrez)
21
- #Entrez.email = "[email protected]"
22
- import nltk
23
-
24
- nltk.download("stopwords")
25
- nltk.download("punkt")
26
- nltk.download('punkt_tab')
27
- # Step 1: Get PubMed ID from Accession using EDirect
28
- from Bio import Entrez, Medline
29
- import re
30
-
31
- Entrez.email = "[email protected]"
32
-
33
- # --- Helper Functions (Re-organized and Upgraded) ---
34
-
35
- def fetch_ncbi_metadata(accession_number):
36
- """
37
- Fetches metadata directly from NCBI GenBank using Entrez.
38
- Includes robust error handling and improved field extraction.
39
- Prioritizes location extraction from geo_loc_name, then notes, then other qualifiers.
40
- Also attempts to extract ethnicity and sample_type (ancient/modern).
41
-
42
- Args:
43
- accession_number (str): The NCBI accession number (e.g., "ON792208").
44
-
45
- Returns:
46
- dict: A dictionary containing 'country', 'specific_location', 'ethnicity',
47
- 'sample_type', 'collection_date', 'isolate', 'title', 'doi', 'pubmed_id'.
48
- """
49
- Entrez.email = "[email protected]" # Required by NCBI, REPLACE WITH YOUR EMAIL
50
-
51
- country = "unknown"
52
- specific_location = "unknown"
53
- ethnicity = "unknown"
54
- sample_type = "unknown"
55
- collection_date = "unknown"
56
- isolate = "unknown"
57
- title = "unknown"
58
- doi = "unknown"
59
- pubmed_id = None
60
- all_feature = "unknown"
61
-
62
- KNOWN_COUNTRIES = [
63
- "Afghanistan", "Albania", "Algeria", "Andorra", "Angola", "Antigua and Barbuda", "Argentina", "Armenia", "Australia", "Austria", "Azerbaijan",
64
- "Bahamas", "Bahrain", "Bangladesh", "Barbados", "Belarus", "Belgium", "Belize", "Benin", "Bhutan", "Bolivia", "Bosnia and Herzegovina", "Botswana", "Brazil", "Brunei", "Bulgaria", "Burkina Faso", "Burundi",
65
- "Cabo Verde", "Cambodia", "Cameroon", "Canada", "Central African Republic", "Chad", "Chile", "China", "Colombia", "Comoros", "Congo (Brazzaville)", "Congo (Kinshasa)", "Costa Rica", "Croatia", "Cuba", "Cyprus", "Czechia",
66
- "Denmark", "Djibouti", "Dominica", "Dominican Republic", "Ecuador", "Egypt", "El Salvador", "Equatorial Guinea", "Eritrea", "Estonia", "Eswatini", "Ethiopia",
67
- "Fiji", "Finland", "France", "Gabon", "Gambia", "Georgia", "Germany", "Ghana", "Greece", "Grenada", "Guatemala", "Guinea", "Guinea-Bissau", "Guyana",
68
- "Haiti", "Honduras", "Hungary", "Iceland", "India", "Indonesia", "Iran", "Iraq", "Ireland", "Israel", "Italy", "Ivory Coast", "Jamaica", "Japan", "Jordan",
69
- "Kazakhstan", "Kenya", "Kiribati", "Kosovo", "Kuwait", "Kyrgyzstan", "Laos", "Latvia", "Lebanon", "Lesotho", "Liberia", "Libya", "Liechtenstein", "Lithuania", "Luxembourg",
70
- "Madagascar", "Malawi", "Malaysia", "Maldives", "Mali", "Malta", "Marshall Islands", "Mauritania", "Mauritius", "Mexico", "Micronesia", "Moldova", "Monaco", "Mongolia", "Montenegro", "Morocco", "Mozambique", "Myanmar",
71
- "Namibia", "Nauru", "Nepal", "Netherlands", "New Zealand", "Nicaragua", "Niger", "Nigeria", "North Korea", "North Macedonia", "Norway", "Oman",
72
- "Pakistan", "Palau", "Palestine", "Panama", "Papua New Guinea", "Paraguay", "Peru", "Philippines", "Poland", "Portugal", "Qatar", "Romania", "Russia", "Rwanda",
73
- "Saint Kitts and Nevis", "Saint Lucia", "Saint Vincent and the Grenadines", "Samoa", "San Marino", "Sao Tome and Principe", "Saudi Arabia", "Senegal", "Serbia", "Seychelles", "Sierra Leone", "Singapore", "Slovakia", "Slovenia", "Solomon Islands", "Somalia", "South Africa", "South Korea", "South Sudan", "Spain", "Sri Lanka", "Sudan", "Suriname", "Sweden", "Switzerland", "Syria",
74
- "Taiwan", "Tajikistan", "Tanzania", "Thailand", "Timor-Leste", "Togo", "Tonga", "Trinidad and Tobago", "Tunisia", "Turkey", "Turkmenistan", "Tuvalu",
75
- "Uganda", "Ukraine", "United Arab Emirates", "United Kingdom", "United States", "Uruguay", "Uzbekistan", "Vanuatu", "Vatican City", "Venezuela", "Vietnam",
76
- "Yemen", "Zambia", "Zimbabwe"
77
- ]
78
- COUNTRY_PATTERN = re.compile(r'\b(' + '|'.join(re.escape(c) for c in KNOWN_COUNTRIES) + r')\b', re.IGNORECASE)
79
-
80
- try:
81
- handle = Entrez.efetch(db="nucleotide", id=str(accession_number), rettype="gb", retmode="xml")
82
- record = Entrez.read(handle)
83
- handle.close()
84
-
85
- gb_seq = None
86
- # Validate record structure: It should be a list with at least one element (a dict)
87
- if isinstance(record, list) and len(record) > 0:
88
- if isinstance(record[0], dict):
89
- gb_seq = record[0]
90
- else:
91
- print(f"Warning: record[0] is not a dictionary for {accession_number}. Type: {type(record[0])}")
92
- else:
93
- print(f"Warning: No valid record or empty record list from NCBI for {accession_number}.")
94
-
95
- # If gb_seq is still None, return defaults
96
- if gb_seq is None:
97
- return {"country": "unknown", "specific_location": "unknown", "ethnicity": "unknown",
98
- "sample_type": "unknown", "collection_date": "unknown", "isolate": "unknown",
99
- "title": "unknown", "doi": "unknown", "pubmed_id": None}
100
-
101
-
102
- # If gb_seq is valid, proceed with extraction
103
- collection_date = gb_seq.get("GBSeq_create-date","unknown")
104
-
105
- references = gb_seq.get("GBSeq_references", [])
106
- for ref in references:
107
- if not pubmed_id:
108
- pubmed_id = ref.get("GBReference_pubmed",None)
109
- if title == "unknown":
110
- title = ref.get("GBReference_title","unknown")
111
- for xref in ref.get("GBReference_xref", []):
112
- if xref.get("GBXref_dbname") == "doi":
113
- doi = xref.get("GBXref_id")
114
- break
115
-
116
- features = gb_seq.get("GBSeq_feature-table", [])
117
-
118
- context_for_flagging = "" # Accumulate text for ancient/modern detection
119
- features_context = ""
120
- for feature in features:
121
- if feature.get("GBFeature_key") == "source":
122
- feature_context = ""
123
- qualifiers = feature.get("GBFeature_quals", [])
124
- found_country = "unknown"
125
- found_specific_location = "unknown"
126
- found_ethnicity = "unknown"
127
-
128
- temp_geo_loc_name = "unknown"
129
- temp_note_origin_locality = "unknown"
130
- temp_country_qual = "unknown"
131
- temp_locality_qual = "unknown"
132
- temp_collection_location_qual = "unknown"
133
- temp_isolation_source_qual = "unknown"
134
- temp_env_sample_qual = "unknown"
135
- temp_pop_qual = "unknown"
136
- temp_organism_qual = "unknown"
137
- temp_specimen_qual = "unknown"
138
- temp_strain_qual = "unknown"
139
-
140
- for qual in qualifiers:
141
- qual_name = qual.get("GBQualifier_name")
142
- qual_value = qual.get("GBQualifier_value")
143
- feature_context += qual_name + ": " + qual_value +"\n"
144
- if qual_name == "collection_date":
145
- collection_date = qual_value
146
- elif qual_name == "isolate":
147
- isolate = qual_value
148
- elif qual_name == "population":
149
- temp_pop_qual = qual_value
150
- elif qual_name == "organism":
151
- temp_organism_qual = qual_value
152
- elif qual_name == "specimen_voucher" or qual_name == "specimen":
153
- temp_specimen_qual = qual_value
154
- elif qual_name == "strain":
155
- temp_strain_qual = qual_value
156
- elif qual_name == "isolation_source":
157
- temp_isolation_source_qual = qual_value
158
- elif qual_name == "environmental_sample":
159
- temp_env_sample_qual = qual_value
160
-
161
- if qual_name == "geo_loc_name": temp_geo_loc_name = qual_value
162
- elif qual_name == "note":
163
- if qual_value.startswith("origin_locality:"):
164
- temp_note_origin_locality = qual_value
165
- context_for_flagging += qual_value + " " # Capture all notes for flagging
166
- elif qual_name == "country": temp_country_qual = qual_value
167
- elif qual_name == "locality": temp_locality_qual = qual_value
168
- elif qual_name == "collection_location": temp_collection_location_qual = qual_value
169
-
170
-
171
- # --- Aggregate all relevant info into context_for_flagging ---
172
- context_for_flagging += f" {isolate} {temp_isolation_source_qual} {temp_specimen_qual} {temp_strain_qual} {temp_organism_qual} {temp_geo_loc_name} {temp_collection_location_qual} {temp_env_sample_qual}"
173
- context_for_flagging = context_for_flagging.strip()
174
-
175
- # --- Determine final country and specific_location based on priority ---
176
- if temp_geo_loc_name != "unknown":
177
- parts = [p.strip() for p in temp_geo_loc_name.split(':')]
178
- if len(parts) > 1:
179
- found_specific_location = parts[-1]; found_country = parts[0]
180
- else: found_country = temp_geo_loc_name; found_specific_location = "unknown"
181
- elif temp_note_origin_locality != "unknown":
182
- match = re.search(r"origin_locality:\s*(.*)", temp_note_origin_locality, re.IGNORECASE)
183
- if match:
184
- location_string = match.group(1).strip()
185
- parts = [p.strip() for p in location_string.split(':')]
186
- if len(parts) > 1: found_country = parts[-1]; found_specific_location = parts[0]
187
- else: found_country = location_string; found_specific_location = "unknown"
188
- elif temp_locality_qual != "unknown":
189
- found_country_match = COUNTRY_PATTERN.search(temp_locality_qual)
190
- if found_country_match: found_country = found_country_match.group(1); temp_loc = re.sub(re.escape(found_country), '', temp_locality_qual, flags=re.IGNORECASE).strip().replace(',', '').replace(':', '').replace(';', '').strip(); found_specific_location = temp_loc if temp_loc else "unknown"
191
- else: found_specific_location = temp_locality_qual; found_country = "unknown"
192
- elif temp_collection_location_qual != "unknown":
193
- found_country_match = COUNTRY_PATTERN.search(temp_collection_location_qual)
194
- if found_country_match: found_country = found_country_match.group(1); temp_loc = re.sub(re.escape(found_country), '', temp_collection_location_qual, flags=re.IGNORECASE).strip().replace(',', '').replace(':', '').replace(';', '').strip(); found_specific_location = temp_loc if temp_loc else "unknown"
195
- else: found_specific_location = temp_collection_location_qual; found_country = "unknown"
196
- elif temp_isolation_source_qual != "unknown":
197
- found_country_match = COUNTRY_PATTERN.search(temp_isolation_source_qual)
198
- if found_country_match: found_country = found_country_match.group(1); temp_loc = re.sub(re.escape(found_country), '', temp_isolation_source_qual, flags=re.IGNORECASE).strip().replace(',', '').replace(':', '').replace(';', '').strip(); found_specific_location = temp_loc if temp_loc else "unknown"
199
- else: found_specific_location = temp_isolation_source_qual; found_country = "unknown"
200
- elif temp_env_sample_qual != "unknown":
201
- found_country_match = COUNTRY_PATTERN.search(temp_env_sample_qual)
202
- if found_country_match: found_country = found_country_match.group(1); temp_loc = re.sub(re.escape(found_country), '', temp_env_sample_qual, flags=re.IGNORECASE).strip().replace(',', '').replace(':', '').replace(';', '').strip(); found_specific_location = temp_loc if temp_loc else "unknown"
203
- else: found_specific_location = temp_env_sample_qual; found_country = "unknown"
204
- if found_country == "unknown" and temp_country_qual != "unknown":
205
- found_country_match = COUNTRY_PATTERN.search(temp_country_qual)
206
- if found_country_match: found_country = found_country_match.group(1)
207
-
208
- country = found_country
209
- specific_location = found_specific_location
210
- # --- Determine final ethnicity ---
211
- if temp_pop_qual != "unknown":
212
- found_ethnicity = temp_pop_qual
213
- elif isolate != "unknown" and re.fullmatch(r'[A-Za-z\s\-]+', isolate) and get_country_from_text(isolate) == "unknown":
214
- found_ethnicity = isolate
215
- elif context_for_flagging != "unknown": # Use the broader context for ethnicity patterns
216
- eth_match = re.search(r'(?:population|ethnicity|isolate source):\s*([A-Za-z\s\-]+)', context_for_flagging, re.IGNORECASE)
217
- if eth_match:
218
- found_ethnicity = eth_match.group(1).strip()
219
-
220
- ethnicity = found_ethnicity
221
-
222
- # --- Determine sample_type (ancient/modern) ---
223
- if context_for_flagging:
224
- sample_type, explain = detect_ancient_flag(context_for_flagging)
225
- features_context += feature_context + "\n"
226
- break
227
-
228
- if specific_location != "unknown" and specific_location.lower() == country.lower():
229
- specific_location = "unknown"
230
- if not features_context: features_context = "unknown"
231
- return {"country": country.lower(),
232
- "specific_location": specific_location.lower(),
233
- "ethnicity": ethnicity.lower(),
234
- "sample_type": sample_type.lower(),
235
- "collection_date": collection_date,
236
- "isolate": isolate,
237
- "title": title,
238
- "doi": doi,
239
- "pubmed_id": pubmed_id,
240
- "all_features": features_context}
241
-
242
- except Exception as e:
243
- print(f"Error fetching NCBI data for {accession_number}: {e}")
244
- return {"country": "unknown",
245
- "specific_location": "unknown",
246
- "ethnicity": "unknown",
247
- "sample_type": "unknown",
248
- "collection_date": "unknown",
249
- "isolate": "unknown",
250
- "title": "unknown",
251
- "doi": "unknown",
252
- "pubmed_id": None,
253
- "all_features": "unknown"}
254
-
255
- # --- Helper function for country matching (re-defined from main code to be self-contained) ---
256
- _country_keywords = {
257
- "thailand": "Thailand", "laos": "Laos", "cambodia": "Cambodia", "myanmar": "Myanmar",
258
- "philippines": "Philippines", "indonesia": "Indonesia", "malaysia": "Malaysia",
259
- "china": "China", "chinese": "China", "india": "India", "taiwan": "Taiwan",
260
- "vietnam": "Vietnam", "russia": "Russia", "siberia": "Russia", "nepal": "Nepal",
261
- "japan": "Japan", "sumatra": "Indonesia", "borneu": "Indonesia",
262
- "yunnan": "China", "tibet": "China", "northern mindanao": "Philippines",
263
- "west malaysia": "Malaysia", "north thailand": "Thailand", "central thailand": "Thailand",
264
- "northeast thailand": "Thailand", "east myanmar": "Myanmar", "west thailand": "Thailand",
265
- "central india": "India", "east india": "India", "northeast india": "India",
266
- "south sibera": "Russia", "mongolia": "China", "beijing": "China", "south korea": "South Korea",
267
- "north asia": "unknown", "southeast asia": "unknown", "east asia": "unknown"
268
- }
269
-
270
- def get_country_from_text(text):
271
- text_lower = text.lower()
272
- for keyword, country in _country_keywords.items():
273
- if keyword in text_lower:
274
- return country
275
- return "unknown"
276
- # The results will be passed as manualLinks to the function get_paper_text
277
- def search_google_custom(query, max_results=3):
278
- # query should be the title from ncbi or paper/source title
279
- GOOGLE_CSE_API_KEY = "AIzaSyAg_Hi5DPit2bvvwCs1PpUkAPRZun7yCRQ"
280
- GOOGLE_CSE_CX = "25a51c433f148490c"
281
- endpoint = "https://www.googleapis.com/customsearch/v1"
282
- params = {
283
- "key": GOOGLE_CSE_API_KEY,
284
- "cx": GOOGLE_CSE_CX,
285
- "q": query,
286
- "num": max_results
287
- }
288
- try:
289
- response = requests.get(endpoint, params=params)
290
- if response.status_code == 429:
291
- print("Rate limit hit. Try again later.")
292
- return []
293
- response.raise_for_status()
294
- data = response.json().get("items", [])
295
- return [item.get("link") for item in data if item.get("link")]
296
- except Exception as e:
297
- print("Google CSE error:", e)
298
- return []
299
- # Step 3: Extract Text: Get the paper (html text), sup. materials (pdf, doc, excel) and do text-preprocessing
300
- # Step 3.1: Extract Text
301
- # sub: download excel file
302
- def download_excel_file(url, save_path="temp.xlsx"):
303
- if "view.officeapps.live.com" in url:
304
- parsed_url = urllib.parse.parse_qs(urllib.parse.urlparse(url).query)
305
- real_url = urllib.parse.unquote(parsed_url["src"][0])
306
- response = requests.get(real_url)
307
- with open(save_path, "wb") as f:
308
- f.write(response.content)
309
- return save_path
310
- elif url.startswith("http") and (url.endswith(".xls") or url.endswith(".xlsx")):
311
- response = requests.get(url)
312
- response.raise_for_status() # Raises error if download fails
313
- with open(save_path, "wb") as f:
314
- f.write(response.content)
315
- return save_path
316
- else:
317
- print("URL must point directly to an .xls or .xlsx file\n or it already downloaded.")
318
- return url
319
- def get_paper_text(doi,id,manualLinks=None):
320
- # create the temporary folder to contain the texts
321
- folder_path = Path("data/"+str(id))
322
- if not folder_path.exists():
323
- cmd = f'mkdir data/{id}'
324
- result = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
325
- print("data/"+str(id) +" created.")
326
- else:
327
- print("data/"+str(id) +" already exists.")
328
- saveLinkFolder = "data/"+id
329
-
330
- link = 'https://doi.org/' + doi
331
- '''textsToExtract = { "doiLink":"paperText"
332
- "file1.pdf":"text1",
333
- "file2.doc":"text2",
334
- "file3.xlsx":excelText3'''
335
- textsToExtract = {}
336
- # get the file to create listOfFile for each id
337
- html = extractHTML.HTML("",link)
338
- jsonSM = html.getSupMaterial()
339
- text = ""
340
- links = [link] + sum((jsonSM[key] for key in jsonSM),[])
341
- if manualLinks != None:
342
- links += manualLinks
343
- for l in links:
344
- # get the main paper
345
- name = l.split("/")[-1]
346
- file_path = folder_path / name
347
- if l == link:
348
- text = html.getListSection()
349
- textsToExtract[link] = text
350
- elif l.endswith(".pdf"):
351
- if file_path.is_file():
352
- l = saveLinkFolder + "/" + name
353
- print("File exists.")
354
- p = pdf.PDF(l,saveLinkFolder,doi)
355
- f = p.openPDFFile()
356
- pdf_path = saveLinkFolder + "/" + l.split("/")[-1]
357
- doc = fitz.open(pdf_path)
358
- text = "\n".join([page.get_text() for page in doc])
359
- textsToExtract[l] = text
360
- elif l.endswith(".doc") or l.endswith(".docx"):
361
- d = wordDoc.wordDoc(l,saveLinkFolder)
362
- text = d.extractTextByPage()
363
- textsToExtract[l] = text
364
- elif l.split(".")[-1].lower() in "xlsx":
365
- wc = word2vec.word2Vec()
366
- # download the Excel file if it has not been downloaded yet
367
- savePath = saveLinkFolder +"/"+ l.split("/")[-1]
368
- excelPath = download_excel_file(l, savePath)
369
- corpus = wc.tableTransformToCorpusText([],excelPath)
370
- text = ''
371
- for c in corpus:
372
- para = corpus[c]
373
- for words in para:
374
- text += " ".join(words)
375
- textsToExtract[l] = text
376
- # delete folder after finishing getting text
377
- #cmd = f'rm -r data/{id}'
378
- #result = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
379
- return textsToExtract
380
- # Step 3.2: Extract context
381
- def extract_context(text, keyword, window=500):
382
- # firstly try accession number
383
- idx = text.find(keyword)
384
- if idx == -1:
385
- return "Sample ID not found."
386
- return text[max(0, idx-window): idx+window]
387
- def extract_relevant_paragraphs(text, accession, keep_if=None, isolate=None):
388
- if keep_if is None:
389
- keep_if = ["sample", "method", "mtdna", "sequence", "collected", "dataset", "supplementary", "table"]
390
-
391
- outputs = ""
392
- text = text.lower()
393
-
394
- # If the accession is present, prioritize paragraphs that mention it
395
- # If the isolate is provided, prioritize paragraphs that mention it
396
- if accession and accession.lower() in text:
397
- if extract_context(text, accession.lower(), window=700) != "Sample ID not found.":
398
- outputs += extract_context(text, accession.lower(), window=700)
399
- if isolate and isolate.lower() in text:
400
- if extract_context(text, isolate.lower(), window=700) != "Sample ID not found.":
401
- outputs += extract_context(text, isolate.lower(), window=700)
402
- for keyword in keep_if:
403
- para = extract_context(text, keyword)
404
- if para and para not in outputs:
405
- outputs += para + "\n"
406
- return outputs
407
- # Step 4: Classification for now (demo purposes)
408
- # 4.1: Using a HuggingFace model (question-answering)
409
- def infer_fromQAModel(context, question="Where is the mtDNA sample from?"):
410
- try:
411
- qa = pipeline("question-answering", model="distilbert-base-uncased-distilled-squad")
412
- result = qa({"context": context, "question": question})
413
- return result.get("answer", "Unknown")
414
- except Exception as e:
415
- return f"Error: {str(e)}"
416
-
417
- # 4.2: Infer from haplogroup
418
- # Load pre-trained spaCy model for NER
419
- try:
420
- nlp = spacy.load("en_core_web_sm")
421
- except OSError:
422
- download("en_core_web_sm")
423
- nlp = spacy.load("en_core_web_sm")
424
-
425
- # Define the haplogroup-to-region mapping (simple rule-based)
426
- import csv
427
-
428
- def load_haplogroup_mapping(csv_path):
429
- mapping = {}
430
- with open(csv_path) as f:
431
- reader = csv.DictReader(f)
432
- for row in reader:
433
- mapping[row["haplogroup"]] = [row["region"],row["source"]]
434
- return mapping
435
-
436
- # Function to extract haplogroup from the text
437
- def extract_haplogroup(text):
438
- match = re.search(r'\bhaplogroup\s+([A-Z][0-9a-z]*)\b', text)
439
- if match:
440
- submatch = re.match(r'^[A-Z][0-9]*', match.group(1))
441
- if submatch:
442
- return submatch.group(0)
443
- else:
444
- return match.group(1) # fallback
445
- fallback = re.search(r'\b([A-Z][0-9a-z]{1,5})\b', text)
446
- if fallback:
447
- return fallback.group(1)
448
- return None
449
-
450
-
451
- # Function to extract location based on NER
452
- def extract_location(text):
453
- doc = nlp(text)
454
- locations = []
455
- for ent in doc.ents:
456
- if ent.label_ == "GPE": # GPE = Geopolitical Entity (location)
457
- locations.append(ent.text)
458
- return locations
459
-
460
- # Function to infer location from haplogroup
461
- def infer_location_from_haplogroup(haplogroup):
462
- haplo_map = load_haplogroup_mapping("data/haplogroup_regions_extended.csv")
463
- return haplo_map.get(haplogroup, ["Unknown","Unknown"])
464
-
465
- # Function to classify the mtDNA sample
466
- def classify_mtDNA_sample_from_haplo(text):
467
- # Extract haplogroup
468
- haplogroup = extract_haplogroup(text)
469
- # Extract location based on NER
470
- locations = extract_location(text)
471
- # Infer location based on haplogroup
472
- inferred_location, sourceHaplo = infer_location_from_haplogroup(haplogroup)
473
- return {
474
- "source":sourceHaplo,
475
- "locations_found_in_context": locations,
476
- "haplogroup": haplogroup,
477
- "inferred_location": inferred_location
478
-
479
- }
480
- # 4.3 Get from available NCBI
481
- def infer_location_fromNCBI(accession):
482
- try:
483
- handle = Entrez.efetch(db="nuccore", id=accession, rettype="medline", retmode="text")
484
- text = handle.read()
485
- handle.close()
486
- match = re.search(r'/(geo_loc_name|country|location)\s*=\s*"([^"]+)"', text)
487
- if match:
488
- return match.group(2), match.group(0) # This is the value like "Brunei"
489
- return "Not found", "Not found"
490
-
491
- except Exception as e:
492
- print("❌ Entrez error:", e)
493
- return "Not found", "Not found"
494
-
495
- ### ANCIENT/MODERN FLAG
496
- from Bio import Entrez
497
- import re
498
-
499
- def flag_ancient_modern(accession, textsToExtract, isolate=None):
500
- """
501
- Try to classify a sample as Ancient or Modern using:
502
- 1. NCBI accession (if available)
503
- 2. Supplementary text or context fallback
504
- """
505
- context = ""
506
- label, explain = "", ""
507
-
508
- try:
509
- # Check if we can fetch metadata from NCBI using the accession
510
- handle = Entrez.efetch(db="nuccore", id=accession, rettype="medline", retmode="text")
511
- text = handle.read()
512
- handle.close()
513
-
514
- isolate_source = re.search(r'/(isolation_source)\s*=\s*"([^"]+)"', text)
515
- if isolate_source:
516
- context += isolate_source.group(0) + " "
517
-
518
- specimen = re.search(r'/(specimen|specimen_voucher)\s*=\s*"([^"]+)"', text)
519
- if specimen:
520
- context += specimen.group(0) + " "
521
-
522
- if context.strip():
523
- label, explain = detect_ancient_flag(context)
524
- if label!="Unknown":
525
- return label, explain + " from NCBI\n(" + context + ")"
526
-
527
- # If no useful NCBI metadata, check supplementary texts
528
- if textsToExtract:
529
- labels = {"modern": [0, ""], "ancient": [0, ""], "unknown": 0}
530
-
531
- for source in textsToExtract:
532
- text_block = textsToExtract[source]
533
- context = extract_relevant_paragraphs(text_block, accession, isolate=isolate) # Reduce to informative paragraph(s)
534
- label, explain = detect_ancient_flag(context)
535
-
536
- if label == "Ancient":
537
- labels["ancient"][0] += 1
538
- labels["ancient"][1] += f"{source}:\n{explain}\n\n"
539
- elif label == "Modern":
540
- labels["modern"][0] += 1
541
- labels["modern"][1] += f"{source}:\n{explain}\n\n"
542
- else:
543
- labels["unknown"] += 1
544
-
545
- if max(labels["modern"][0],labels["ancient"][0]) > 0:
546
- if labels["modern"][0] > labels["ancient"][0]:
547
- return "Modern", labels["modern"][1]
548
- else:
549
- return "Ancient", labels["ancient"][1]
550
- else:
551
- return "Unknown", "No strong keywords detected"
552
- else:
553
- print("No DOI or PubMed ID available for inference.")
554
- return "", ""
555
-
556
- except Exception as e:
557
- print("Error:", e)
558
- return "", ""
559
-
560
-
561
- def detect_ancient_flag(context_snippet):
562
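- # Keyword-vote heuristic: counts ancient vs. modern keyword hits in the snippet and returns (label, explanation); mixed contexts go to the side with more hits, and if no keywords match it falls back to the QA model.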
- context = context_snippet.lower()
563
-
564
- ancient_keywords = [
565
- "ancient", "archaeological", "prehistoric", "neolithic", "mesolithic", "paleolithic",
566
- "bronze age", "iron age", "burial", "tomb", "skeleton", "14c", "radiocarbon", "carbon dating",
567
- "postmortem damage", "udg treatment", "adna", "degradation", "site", "excavation",
568
- "archaeological context", "temporal transect", "population replacement", "cal bp", "calbp", "carbon dated"
569
- ]
570
-
571
- modern_keywords = [
572
- "modern", "hospital", "clinical", "consent","blood","buccal","unrelated", "blood sample","buccal sample","informed consent", "donor", "healthy", "patient",
573
- "genotyping", "screening", "medical", "cohort", "sequencing facility", "ethics approval",
574
- "we analysed", "we analyzed", "dataset includes", "new sequences", "published data",
575
- "control cohort", "sink population", "genbank accession", "sequenced", "pipeline",
576
- "bioinformatic analysis", "samples from", "population genetics", "genome-wide data", "imr collection"
577
- ]
578
-
579
- ancient_hits = [k for k in ancient_keywords if k in context]
580
- modern_hits = [k for k in modern_keywords if k in context]
581
-
582
- if ancient_hits and not modern_hits:
583
- return "Ancient", f"Flagged as ancient due to keywords: {', '.join(ancient_hits)}"
584
- elif modern_hits and not ancient_hits:
585
- return "Modern", f"Flagged as modern due to keywords: {', '.join(modern_hits)}"
586
- elif ancient_hits and modern_hits:
587
- if len(ancient_hits) >= len(modern_hits):
588
- return "Ancient", f"Mixed context, leaning ancient due to: {', '.join(ancient_hits)}"
589
- else:
590
- return "Modern", f"Mixed context, leaning modern due to: {', '.join(modern_hits)}"
591
-
592
- # Fallback to QA
593
- answer = infer_fromQAModel(context, question="Are the mtDNA samples ancient or modern? Explain why.")
594
- if answer.startswith("Error"):
595
- return "Unknown", answer
596
- if "ancient" in answer.lower():
597
- return "Ancient", f"Leaning ancient based on QA: {answer}"
598
- elif "modern" in answer.lower():
599
- return "Modern", f"Leaning modern based on QA: {answer}"
600
- else:
601
- return "Unknown", f"No strong keywords or QA clues. QA said: {answer}"
602
-
603
- # STEP 5: Main pipeline: accession -> 1. get pubmed id and isolate -> 2. get doi -> 3. get text -> 4. prediction -> 5. output: inferred location + explanation + confidence score
604
- def classify_sample_location(accession):
605
- outputs = {}
606
- keyword, context, location, qa_result, haplo_result = "", "", "", "", ""
607
- # Step 1: get pubmed id and isolate
608
- pubmedID, isolate = get_info_from_accession(accession)
609
- '''if not pubmedID:
610
- return {"error": f"Could not retrieve PubMed ID for accession {accession}"}'''
611
- if not isolate:
612
- isolate = "UNKNOWN_ISOLATE"
613
- # Step 2: get doi
614
- doi = get_doi_from_pubmed_id(pubmedID)
615
- '''if not doi:
616
- return {"error": "DOI not found for this accession. Cannot fetch paper or context."}'''
617
- # Step 3: get text
618
- '''textsToExtract = { "doiLink":"paperText"
619
- "file1.pdf":"text1",
620
- "file2.doc":"text2",
621
- "file3.xlsx":excelText3'''
622
- if doi and pubmedID:
623
- textsToExtract = get_paper_text(doi,pubmedID)
624
- else: textsToExtract = {}
625
- '''if not textsToExtract:
626
- return {"error": f"No texts extracted for DOI {doi}"}'''
627
- if isolate not in [None, "UNKNOWN_ISOLATE"]:
628
- label, explain = flag_ancient_modern(accession,textsToExtract,isolate)
629
- else:
630
- label, explain = flag_ancient_modern(accession,textsToExtract)
631
- # Step 4: prediction
632
- outputs[accession] = {}
633
- outputs[isolate] = {}
634
- # 4.0 Infer from NCBI
635
- location, outputNCBI = infer_location_fromNCBI(accession)
636
- NCBI_result = {
637
- "source": "NCBI",
638
- "sample_id": accession,
639
- "predicted_location": location,
640
- "context_snippet": outputNCBI}
641
- outputs[accession]["NCBI"]= {"NCBI": NCBI_result}
642
- if textsToExtract:
643
- long_text = ""
644
- for key in textsToExtract:
645
- text = textsToExtract[key]
646
- # try accession number first
647
- outputs[accession][key] = {}
648
- keyword = accession
649
- context = extract_context(text, keyword, window=500)
650
- # 4.1: Using a HuggingFace model (question-answering)
651
- location = infer_fromQAModel(context, question=f"Where is the mtDNA sample {keyword} from?")
652
- qa_result = {
653
- "source": key,
654
- "sample_id": keyword,
655
- "predicted_location": location,
656
- "context_snippet": context
657
- }
658
- outputs[keyword][key]["QAModel"] = qa_result
659
- # 4.2: Infer from haplogroup
660
- haplo_result = classify_mtDNA_sample_from_haplo(context)
661
- outputs[keyword][key]["haplogroup"] = haplo_result
662
- # try isolate
663
- keyword = isolate
664
- outputs[isolate][key] = {}
665
- context = extract_context(text, keyword, window=500)
666
- # 4.1.1: Using a HuggingFace model (question-answering)
667
- location = infer_fromQAModel(context, question=f"Where is the mtDNA sample {keyword} from?")
668
- qa_result = {
669
- "source": key,
670
- "sample_id": keyword,
671
- "predicted_location": location,
672
- "context_snippet": context
673
- }
674
- outputs[keyword][key]["QAModel"] = qa_result
675
- # 4.2.1: Infer from haplogroup
676
- haplo_result = classify_mtDNA_sample_from_haplo(context)
677
- outputs[keyword][key]["haplogroup"] = haplo_result
678
- # add long text
679
- long_text += text + ". \n"
680
- # 4.3: UpgradeClassify
681
- # try sample_id as accession number
682
- sample_id = accession
683
- if sample_id:
684
- filtered_context = filter_context_for_sample(sample_id.upper(), long_text, window_size=1)
685
- locations = infer_location_for_sample(sample_id.upper(), filtered_context)
686
- if locations!="No clear location found in top matches":
687
- outputs[sample_id]["upgradeClassifier"] = {}
688
- outputs[sample_id]["upgradeClassifier"]["upgradeClassifier"] = {
689
- "source": "From these sources combined: "+ ", ".join(list(textsToExtract.keys())),
690
- "sample_id": sample_id,
691
- "predicted_location": ", ".join(locations),
692
- "context_snippep": "First 1000 words: \n"+ filtered_context[:1000]
693
- }
694
- # try sample_id as isolate name
695
- sample_id = isolate
696
- if sample_id:
697
- filtered_context = filter_context_for_sample(sample_id.upper(), long_text, window_size=1)
698
- locations = infer_location_for_sample(sample_id.upper(), filtered_context)
699
- if locations!="No clear location found in top matches":
700
- outputs[sample_id]["upgradeClassifier"] = {}
701
- outputs[sample_id]["upgradeClassifier"]["upgradeClassifier"] = {
702
- "source": "From these sources combined: "+ ", ".join(list(textsToExtract.keys())),
703
- "sample_id": sample_id,
704
- "predicted_location": ", ".join(locations),
705
- "context_snippep": "First 1000 words: \n"+ filtered_context[:1000]
706
- }
707
  return outputs, label, explain
 
1
+ # mtDNA Location Classifier MVP (Google Colab)
2
+ # Accepts accession number → Fetches PubMed ID + isolate name → Gets abstract → Predicts location
3
+ import os
4
+ #import streamlit as st
5
+ import subprocess
6
+ import re
7
+ from Bio import Entrez
8
+ import fitz
9
+ import spacy
10
+ from spacy.cli import download
11
+ from NER.PDF import pdf
12
+ from NER.WordDoc import wordDoc
13
+ from NER.html import extractHTML
14
+ from NER.word2Vec import word2vec
15
+ from transformers import pipeline
16
+ import urllib.parse, requests
17
+ from pathlib import Path
18
+ from upgradeClassify import filter_context_for_sample, infer_location_for_sample
19
+
20
+ # Set your email (required by NCBI Entrez)
21
+ #Entrez.email = "[email protected]"
22
+ import nltk
23
+
24
+ nltk.download("stopwords")
25
+ nltk.download("punkt")
26
+ nltk.download('punkt_tab')
27
+ # Step 1: Get PubMed ID from Accession using EDirect
28
+ from Bio import Entrez, Medline
29
+ import re
30
+
31
+ Entrez.email = "[email protected]"
32
+
33
+ # --- Helper Functions (Re-organized and Upgraded) ---
34
+
35
+ def fetch_ncbi_metadata(accession_number):
36
+ """
37
+ Fetches metadata directly from NCBI GenBank using Entrez.
38
+ Includes robust error handling and improved field extraction.
39
+ Prioritizes location extraction from geo_loc_name, then notes, then other qualifiers.
40
+ Also attempts to extract ethnicity and sample_type (ancient/modern).
41
+
42
+ Args:
43
+ accession_number (str): The NCBI accession number (e.g., "ON792208").
44
+
45
+ Returns:
46
+ dict: A dictionary containing 'country', 'specific_location', 'ethnicity',
47
+ 'sample_type', 'collection_date', 'isolate', 'title', 'doi', 'pubmed_id'.
48
+ """
49
+ Entrez.email = "[email protected]" # Required by NCBI, REPLACE WITH YOUR EMAIL
50
+
51
+ country = "unknown"
52
+ specific_location = "unknown"
53
+ ethnicity = "unknown"
54
+ sample_type = "unknown"
55
+ collection_date = "unknown"
56
+ isolate = "unknown"
57
+ title = "unknown"
58
+ doi = "unknown"
59
+ pubmed_id = None
60
+ all_feature = "unknown"
61
+
62
+ KNOWN_COUNTRIES = [
63
+ "Afghanistan", "Albania", "Algeria", "Andorra", "Angola", "Antigua and Barbuda", "Argentina", "Armenia", "Australia", "Austria", "Azerbaijan",
64
+ "Bahamas", "Bahrain", "Bangladesh", "Barbados", "Belarus", "Belgium", "Belize", "Benin", "Bhutan", "Bolivia", "Bosnia and Herzegovina", "Botswana", "Brazil", "Brunei", "Bulgaria", "Burkina Faso", "Burundi",
65
+ "Cabo Verde", "Cambodia", "Cameroon", "Canada", "Central African Republic", "Chad", "Chile", "China", "Colombia", "Comoros", "Congo (Brazzaville)", "Congo (Kinshasa)", "Costa Rica", "Croatia", "Cuba", "Cyprus", "Czechia",
66
+ "Denmark", "Djibouti", "Dominica", "Dominican Republic", "Ecuador", "Egypt", "El Salvador", "Equatorial Guinea", "Eritrea", "Estonia", "Eswatini", "Ethiopia",
67
+ "Fiji", "Finland", "France", "Gabon", "Gambia", "Georgia", "Germany", "Ghana", "Greece", "Grenada", "Guatemala", "Guinea", "Guinea-Bissau", "Guyana",
68
+ "Haiti", "Honduras", "Hungary", "Iceland", "India", "Indonesia", "Iran", "Iraq", "Ireland", "Israel", "Italy", "Ivory Coast", "Jamaica", "Japan", "Jordan",
69
+ "Kazakhstan", "Kenya", "Kiribati", "Kosovo", "Kuwait", "Kyrgyzstan", "Laos", "Latvia", "Lebanon", "Lesotho", "Liberia", "Libya", "Liechtenstein", "Lithuania", "Luxembourg",
70
+ "Madagascar", "Malawi", "Malaysia", "Maldives", "Mali", "Malta", "Marshall Islands", "Mauritania", "Mauritius", "Mexico", "Micronesia", "Moldova", "Monaco", "Mongolia", "Montenegro", "Morocco", "Mozambique", "Myanmar",
71
+ "Namibia", "Nauru", "Nepal", "Netherlands", "New Zealand", "Nicaragua", "Niger", "Nigeria", "North Korea", "North Macedonia", "Norway", "Oman",
72
+ "Pakistan", "Palau", "Palestine", "Panama", "Papua New Guinea", "Paraguay", "Peru", "Philippines", "Poland", "Portugal", "Qatar", "Romania", "Russia", "Rwanda",
73
+ "Saint Kitts and Nevis", "Saint Lucia", "Saint Vincent and the Grenadines", "Samoa", "San Marino", "Sao Tome and Principe", "Saudi Arabia", "Senegal", "Serbia", "Seychelles", "Sierra Leone", "Singapore", "Slovakia", "Slovenia", "Solomon Islands", "Somalia", "South Africa", "South Korea", "South Sudan", "Spain", "Sri Lanka", "Sudan", "Suriname", "Sweden", "Switzerland", "Syria",
74
+ "Taiwan", "Tajikistan", "Tanzania", "Thailand", "Timor-Leste", "Togo", "Tonga", "Trinidad and Tobago", "Tunisia", "Turkey", "Turkmenistan", "Tuvalu",
75
+ "Uganda", "Ukraine", "United Arab Emirates", "United Kingdom", "United States", "Uruguay", "Uzbekistan", "Vanuatu", "Vatican City", "Venezuela", "Vietnam",
76
+ "Yemen", "Zambia", "Zimbabwe"
77
+ ]
78
+ COUNTRY_PATTERN = re.compile(r'\b(' + '|'.join(re.escape(c) for c in KNOWN_COUNTRIES) + r')\b', re.IGNORECASE)
79
+
80
+ try:
81
+ handle = Entrez.efetch(db="nucleotide", id=str(accession_number), rettype="gb", retmode="xml")
82
+ record = Entrez.read(handle)
83
+ handle.close()
84
+
85
+ gb_seq = None
86
+ # Validate record structure: It should be a list with at least one element (a dict)
87
+ if isinstance(record, list) and len(record) > 0:
88
+ if isinstance(record[0], dict):
89
+ gb_seq = record[0]
90
+ else:
91
+ print(f"Warning: record[0] is not a dictionary for {accession_number}. Type: {type(record[0])}")
92
+ else:
93
+ print(f"Warning: No valid record or empty record list from NCBI for {accession_number}.")
94
+
95
+ # If gb_seq is still None, return defaults
96
+ if gb_seq is None:
97
+ return {"country": "unknown", "specific_location": "unknown", "ethnicity": "unknown",
98
+ "sample_type": "unknown", "collection_date": "unknown", "isolate": "unknown",
99
+ "title": "unknown", "doi": "unknown", "pubmed_id": None}
100
+
101
+
102
+ # If gb_seq is valid, proceed with extraction
103
+ collection_date = gb_seq.get("GBSeq_create-date","unknown")
104
+
105
+ references = gb_seq.get("GBSeq_references", [])
106
+ for ref in references:
107
+ if not pubmed_id:
108
+ pubmed_id = ref.get("GBReference_pubmed",None)
109
+ if title == "unknown":
110
+ title = ref.get("GBReference_title","unknown")
111
+ for xref in ref.get("GBReference_xref", []):
112
+ if xref.get("GBXref_dbname") == "doi":
113
+ doi = xref.get("GBXref_id")
114
+ break
115
+
116
+ features = gb_seq.get("GBSeq_feature-table", [])
117
+
118
+ context_for_flagging = "" # Accumulate text for ancient/modern detection
119
+ features_context = ""
120
+ for feature in features:
121
+ if feature.get("GBFeature_key") == "source":
122
+ feature_context = ""
123
+ qualifiers = feature.get("GBFeature_quals", [])
124
+ found_country = "unknown"
125
+ found_specific_location = "unknown"
126
+ found_ethnicity = "unknown"
127
+
128
+ temp_geo_loc_name = "unknown"
129
+ temp_note_origin_locality = "unknown"
130
+ temp_country_qual = "unknown"
131
+ temp_locality_qual = "unknown"
132
+ temp_collection_location_qual = "unknown"
133
+ temp_isolation_source_qual = "unknown"
134
+ temp_env_sample_qual = "unknown"
135
+ temp_pop_qual = "unknown"
136
+ temp_organism_qual = "unknown"
137
+ temp_specimen_qual = "unknown"
138
+ temp_strain_qual = "unknown"
139
+
140
+ for qual in qualifiers:
141
+ qual_name = qual.get("GBQualifier_name")
142
+ qual_value = qual.get("GBQualifier_value")
143
+ feature_context += qual_name + ": " + qual_value +"\n"
144
+ if qual_name == "collection_date":
145
+ collection_date = qual_value
146
+ elif qual_name == "isolate":
147
+ isolate = qual_value
148
+ elif qual_name == "population":
149
+ temp_pop_qual = qual_value
150
+ elif qual_name == "organism":
151
+ temp_organism_qual = qual_value
152
+ elif qual_name == "specimen_voucher" or qual_name == "specimen":
153
+ temp_specimen_qual = qual_value
154
+ elif qual_name == "strain":
155
+ temp_strain_qual = qual_value
156
+ elif qual_name == "isolation_source":
157
+ temp_isolation_source_qual = qual_value
158
+ elif qual_name == "environmental_sample":
159
+ temp_env_sample_qual = qual_value
160
+
161
+ if qual_name == "geo_loc_name": temp_geo_loc_name = qual_value
162
+ elif qual_name == "note":
163
+ if qual_value.startswith("origin_locality:"):
164
+ temp_note_origin_locality = qual_value
165
+ context_for_flagging += qual_value + " " # Capture all notes for flagging
166
+ elif qual_name == "country": temp_country_qual = qual_value
167
+ elif qual_name == "locality": temp_locality_qual = qual_value
168
+ elif qual_name == "collection_location": temp_collection_location_qual = qual_value
169
+
170
+
171
+ # --- Aggregate all relevant info into context_for_flagging ---
172
+ context_for_flagging += f" {isolate} {temp_isolation_source_qual} {temp_specimen_qual} {temp_strain_qual} {temp_organism_qual} {temp_geo_loc_name} {temp_collection_location_qual} {temp_env_sample_qual}"
173
+ context_for_flagging = context_for_flagging.strip()
174
+
175
+ # --- Determine final country and specific_location based on priority ---
176
+ if temp_geo_loc_name != "unknown":
177
+ parts = [p.strip() for p in temp_geo_loc_name.split(':')]
178
+ if len(parts) > 1:
179
+ found_specific_location = parts[-1]; found_country = parts[0]
180
+ else: found_country = temp_geo_loc_name; found_specific_location = "unknown"
181
+ elif temp_note_origin_locality != "unknown":
182
+ match = re.search(r"origin_locality:\s*(.*)", temp_note_origin_locality, re.IGNORECASE)
183
+ if match:
184
+ location_string = match.group(1).strip()
185
+ parts = [p.strip() for p in location_string.split(':')]
186
+ if len(parts) > 1: found_country = parts[-1]; found_specific_location = parts[0]
187
+ else: found_country = location_string; found_specific_location = "unknown"
188
+ elif temp_locality_qual != "unknown":
189
+ found_country_match = COUNTRY_PATTERN.search(temp_locality_qual)
190
+ if found_country_match: found_country = found_country_match.group(1); temp_loc = re.sub(re.escape(found_country), '', temp_locality_qual, flags=re.IGNORECASE).strip().replace(',', '').replace(':', '').replace(';', '').strip(); found_specific_location = temp_loc if temp_loc else "unknown"
191
+ else: found_specific_location = temp_locality_qual; found_country = "unknown"
192
+ elif temp_collection_location_qual != "unknown":
193
+ found_country_match = COUNTRY_PATTERN.search(temp_collection_location_qual)
194
+ if found_country_match: found_country = found_country_match.group(1); temp_loc = re.sub(re.escape(found_country), '', temp_collection_location_qual, flags=re.IGNORECASE).strip().replace(',', '').replace(':', '').replace(';', '').strip(); found_specific_location = temp_loc if temp_loc else "unknown"
195
+ else: found_specific_location = temp_collection_location_qual; found_country = "unknown"
196
+ elif temp_isolation_source_qual != "unknown":
197
+ found_country_match = COUNTRY_PATTERN.search(temp_isolation_source_qual)
198
+ if found_country_match: found_country = found_country_match.group(1); temp_loc = re.sub(re.escape(found_country), '', temp_isolation_source_qual, flags=re.IGNORECASE).strip().replace(',', '').replace(':', '').replace(';', '').strip(); found_specific_location = temp_loc if temp_loc else "unknown"
199
+ else: found_specific_location = temp_isolation_source_qual; found_country = "unknown"
200
+ elif temp_env_sample_qual != "unknown":
201
+ found_country_match = COUNTRY_PATTERN.search(temp_env_sample_qual)
202
+ if found_country_match: found_country = found_country_match.group(1); temp_loc = re.sub(re.escape(found_country), '', temp_env_sample_qual, flags=re.IGNORECASE).strip().replace(',', '').replace(':', '').replace(';', '').strip(); found_specific_location = temp_loc if temp_loc else "unknown"
203
+ else: found_specific_location = temp_env_sample_qual; found_country = "unknown"
204
+ if found_country == "unknown" and temp_country_qual != "unknown":
205
+ found_country_match = COUNTRY_PATTERN.search(temp_country_qual)
206
+ if found_country_match: found_country = found_country_match.group(1)
207
+
208
+ country = found_country
209
+ specific_location = found_specific_location
210
+ # --- Determine final ethnicity ---
211
+ if temp_pop_qual != "unknown":
212
+ found_ethnicity = temp_pop_qual
213
+ elif isolate != "unknown" and re.fullmatch(r'[A-Za-z\s\-]+', isolate) and get_country_from_text(isolate) == "unknown":
214
+ found_ethnicity = isolate
215
+ elif context_for_flagging != "unknown": # Use the broader context for ethnicity patterns
216
+ eth_match = re.search(r'(?:population|ethnicity|isolate source):\s*([A-Za-z\s\-]+)', context_for_flagging, re.IGNORECASE)
217
+ if eth_match:
218
+ found_ethnicity = eth_match.group(1).strip()
219
+
220
+ ethnicity = found_ethnicity
221
+
222
+ # --- Determine sample_type (ancient/modern) ---
223
+ if context_for_flagging:
224
+ sample_type, explain = detect_ancient_flag(context_for_flagging)
225
+ features_context += feature_context + "\n"
226
+ break
227
+
228
+ if specific_location != "unknown" and specific_location.lower() == country.lower():
229
+ specific_location = "unknown"
230
+ if not features_context: features_context = "unknown"
231
+ return {"country": country.lower(),
232
+ "specific_location": specific_location.lower(),
233
+ "ethnicity": ethnicity.lower(),
234
+ "sample_type": sample_type.lower(),
235
+ "collection_date": collection_date,
236
+ "isolate": isolate,
237
+ "title": title,
238
+ "doi": doi,
239
+ "pubmed_id": pubmed_id,
240
+ "all_features": features_context}
241
+
242
+ except Exception as e:
243
+ print(f"Error fetching NCBI data for {accession_number}: {e}")
244
+ return {"country": "unknown",
245
+ "specific_location": "unknown",
246
+ "ethnicity": "unknown",
247
+ "sample_type": "unknown",
248
+ "collection_date": "unknown",
249
+ "isolate": "unknown",
250
+ "title": "unknown",
251
+ "doi": "unknown",
252
+ "pubmed_id": None,
253
+ "all_features": "unknown"}
254
+
255
+ # --- Helper function for country matching (re-defined from main code to be self-contained) ---
256
+ _country_keywords = {
257
+ "thailand": "Thailand", "laos": "Laos", "cambodia": "Cambodia", "myanmar": "Myanmar",
258
+ "philippines": "Philippines", "indonesia": "Indonesia", "malaysia": "Malaysia",
259
+ "china": "China", "chinese": "China", "india": "India", "taiwan": "Taiwan",
260
+ "vietnam": "Vietnam", "russia": "Russia", "siberia": "Russia", "nepal": "Nepal",
261
+ "japan": "Japan", "sumatra": "Indonesia", "borneu": "Indonesia",
262
+ "yunnan": "China", "tibet": "China", "northern mindanao": "Philippines",
263
+ "west malaysia": "Malaysia", "north thailand": "Thailand", "central thailand": "Thailand",
264
+ "northeast thailand": "Thailand", "east myanmar": "Myanmar", "west thailand": "Thailand",
265
+ "central india": "India", "east india": "India", "northeast india": "India",
266
+ "south sibera": "Russia", "mongolia": "China", "beijing": "China", "south korea": "South Korea",
267
+ "north asia": "unknown", "southeast asia": "unknown", "east asia": "unknown"
268
+ }
269
+
270
+ def get_country_from_text(text):
271
+ text_lower = text.lower()
272
+ for keyword, country in _country_keywords.items():
273
+ if keyword in text_lower:
274
+ return country
275
+ return "unknown"
276
+ # The results will be passed as manualLinks to the function get_paper_text
277
+ def search_google_custom(query, max_results=3):
278
+ # query should be the title from ncbi or paper/source title
279
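+ # These values are read from environment variables (GOOGLE_CSE_API_KEY, GOOGLE_CSE_CX,
+ # SEARCH_ENDPOINT); os.environ[...] raises KeyError if any is missing, so set them
+ # (e.g. via your platform's secret store) before calling this function.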
+ GOOGLE_CSE_API_KEY = os.environ["GOOGLE_CSE_API_KEY"]
280
+ GOOGLE_CSE_CX = os.environ["GOOGLE_CSE_CX"]
281
+ endpoint = os.environ["SEARCH_ENDPOINT"]
282
+ params = {
283
+ "key": GOOGLE_CSE_API_KEY,
284
+ "cx": GOOGLE_CSE_CX,
285
+ "q": query,
286
+ "num": max_results
287
+ }
288
+ try:
289
+ response = requests.get(endpoint, params=params)
290
+ if response.status_code == 429:
291
+ print("Rate limit hit. Try again later.")
292
+ return []
293
+ response.raise_for_status()
294
+ data = response.json().get("items", [])
295
+ return [item.get("link") for item in data if item.get("link")]
296
+ except Exception as e:
297
+ print("Google CSE error:", e)
298
+ return []
299
+ # Step 3: Extract Text: Get the paper (html text), sup. materials (pdf, doc, excel) and do text-preprocessing
300
+ # Step 3.1: Extract Text
301
+ # sub: download excel file
302
+ def download_excel_file(url, save_path="temp.xlsx"):
303
+ if "view.officeapps.live.com" in url:
304
+ parsed_url = urllib.parse.parse_qs(urllib.parse.urlparse(url).query)
305
+ real_url = urllib.parse.unquote(parsed_url["src"][0])
306
+ response = requests.get(real_url)
307
+ with open(save_path, "wb") as f:
308
+ f.write(response.content)
309
+ return save_path
310
+ elif url.startswith("http") and (url.endswith(".xls") or url.endswith(".xlsx")):
311
+ response = requests.get(url)
312
+ response.raise_for_status() # Raises error if download fails
313
+ with open(save_path, "wb") as f:
314
+ f.write(response.content)
315
+ return save_path
316
+ else:
317
+ print("URL must point directly to an .xls or .xlsx file\n or it already downloaded.")
318
+ return url
319
+ def get_paper_text(doi,id,manualLinks=None):
320
+ # create the temporary folder to contain the texts
321
+ folder_path = Path("data/"+str(id))
322
+ if not folder_path.exists():
323
+ cmd = f'mkdir data/{id}'
324
+ result = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
325
+ print("data/"+str(id) +" created.")
326
+ else:
327
+ print("data/"+str(id) +" already exists.")
328
+ saveLinkFolder = "data/"+id
329
+
330
+ link = 'https://doi.org/' + doi
331
+ '''textsToExtract = { "doiLink":"paperText"
332
+ "file1.pdf":"text1",
333
+ "file2.doc":"text2",
334
+ "file3.xlsx":excelText3'''
335
+ textsToExtract = {}
336
+ # get the file to create listOfFile for each id
337
+ html = extractHTML.HTML("",link)
338
+ jsonSM = html.getSupMaterial()
339
+ text = ""
340
+ links = [link] + sum((jsonSM[key] for key in jsonSM),[])
341
+ if manualLinks != None:
342
+ links += manualLinks
343
+ for l in links:
344
+ # get the main paper
345
+ name = l.split("/")[-1]
346
+ file_path = folder_path / name
347
+ if l == link:
348
+ text = html.getListSection()
349
+ textsToExtract[link] = text
350
+ elif l.endswith(".pdf"):
351
+ if file_path.is_file():
352
+ l = saveLinkFolder + "/" + name
353
+ print("File exists.")
354
+ p = pdf.PDF(l,saveLinkFolder,doi)
355
+ f = p.openPDFFile()
356
+ pdf_path = saveLinkFolder + "/" + l.split("/")[-1]
357
+ doc = fitz.open(pdf_path)
358
+ text = "\n".join([page.get_text() for page in doc])
359
+ textsToExtract[l] = text
360
+ elif l.endswith(".doc") or l.endswith(".docx"):
361
+ d = wordDoc.wordDoc(l,saveLinkFolder)
362
+ text = d.extractTextByPage()
363
+ textsToExtract[l] = text
364
+ elif l.split(".")[-1].lower() in "xlsx":
365
+ wc = word2vec.word2Vec()
366
+ # download the Excel file if it has not been downloaded yet
367
+ savePath = saveLinkFolder +"/"+ l.split("/")[-1]
368
+ excelPath = download_excel_file(l, savePath)
369
+ corpus = wc.tableTransformToCorpusText([],excelPath)
370
+ text = ''
371
+ for c in corpus:
372
+ para = corpus[c]
373
+ for words in para:
374
+ text += " ".join(words)
375
+ textsToExtract[l] = text
376
+ # delete folder after finishing getting text
377
+ #cmd = f'rm -r data/{id}'
378
+ #result = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
379
+ return textsToExtract
380
+ # Step 3.2: Extract context
381
+ def extract_context(text, keyword, window=500):
382
+ # firstly try accession number
383
+ idx = text.find(keyword)
384
+ if idx == -1:
385
+ return "Sample ID not found."
386
+ return text[max(0, idx-window): idx+window]
387
+ def extract_relevant_paragraphs(text, accession, keep_if=None, isolate=None):
388
+ if keep_if is None:
389
+ keep_if = ["sample", "method", "mtdna", "sequence", "collected", "dataset", "supplementary", "table"]
390
+
391
+ outputs = ""
392
+ text = text.lower()
393
+
394
+ # If isolate is provided, prioritize paragraphs that mention it
395
+ # If isolate is provided, prioritize paragraphs that mention it
396
+ if accession and accession.lower() in text:
397
+ if extract_context(text, accession.lower(), window=700) != "Sample ID not found.":
398
+ outputs += extract_context(text, accession.lower(), window=700)
399
+ if isolate and isolate.lower() in text:
400
+ if extract_context(text, isolate.lower(), window=700) != "Sample ID not found.":
401
+ outputs += extract_context(text, isolate.lower(), window=700)
402
+ for keyword in keep_if:
403
+ para = extract_context(text, keyword)
404
+ if para and para not in outputs:
405
+ outputs += para + "\n"
406
+ return outputs
407
+ # Step 4: Classification for now (demo purposes)
408
+ # 4.1: Using a HuggingFace model (question-answering)
409
+ def infer_fromQAModel(context, question="Where is the mtDNA sample from?"):
410
+ try:
411
+ qa = pipeline("question-answering", model="distilbert-base-uncased-distilled-squad")
412
+ result = qa({"context": context, "question": question})
413
+ return result.get("answer", "Unknown")
414
+ except Exception as e:
415
+ return f"Error: {str(e)}"
416
+
417
+ # 4.2: Infer from haplogroup
418
+ # Load pre-trained spaCy model for NER
419
+ try:
420
+ nlp = spacy.load("en_core_web_sm")
421
+ except OSError:
422
+ download("en_core_web_sm")
423
+ nlp = spacy.load("en_core_web_sm")
424
+
425
+ # Define the haplogroup-to-region mapping (simple rule-based)
426
+ import csv
427
+
428
+ def load_haplogroup_mapping(csv_path):
429
+ mapping = {}
430
+ with open(csv_path) as f:
431
+ reader = csv.DictReader(f)
432
+ for row in reader:
433
+ mapping[row["haplogroup"]] = [row["region"],row["source"]]
434
+ return mapping
435
+
436
+ # Function to extract haplogroup from the text
437
+ def extract_haplogroup(text):
438
+ match = re.search(r'\bhaplogroup\s+([A-Z][0-9a-z]*)\b', text)
439
+ if match:
440
+ submatch = re.match(r'^[A-Z][0-9]*', match.group(1))
441
+ if submatch:
442
+ return submatch.group(0)
443
+ else:
444
+ return match.group(1) # fallback
445
+ fallback = re.search(r'\b([A-Z][0-9a-z]{1,5})\b', text)
446
+ if fallback:
447
+ return fallback.group(1)
448
+ return None
449
+
450
+
451
+ # Function to extract location based on NER
452
+ def extract_location(text):
453
+ doc = nlp(text)
454
+ locations = []
455
+ for ent in doc.ents:
456
+ if ent.label_ == "GPE": # GPE = Geopolitical Entity (location)
457
+ locations.append(ent.text)
458
+ return locations
459
+
460
+ # Function to infer location from haplogroup
461
+ def infer_location_from_haplogroup(haplogroup):
462
+ haplo_map = load_haplogroup_mapping("data/haplogroup_regions_extended.csv")
463
+ return haplo_map.get(haplogroup, ["Unknown","Unknown"])
464
+
465
+ # Function to classify the mtDNA sample
466
+ def classify_mtDNA_sample_from_haplo(text):
467
+ # Extract haplogroup
468
+ haplogroup = extract_haplogroup(text)
469
+ # Extract location based on NER
470
+ locations = extract_location(text)
471
+ # Infer location based on haplogroup
472
+ inferred_location, sourceHaplo = infer_location_from_haplogroup(haplogroup)
473
+ return {
474
+ "source":sourceHaplo,
475
+ "locations_found_in_context": locations,
476
+ "haplogroup": haplogroup,
477
+ "inferred_location": inferred_location
478
+
479
+ }
480
+ # 4.3 Get from available NCBI
481
+ def infer_location_fromNCBI(accession):
482
+ try:
483
+ handle = Entrez.efetch(db="nuccore", id=accession, rettype="medline", retmode="text")
484
+ text = handle.read()
485
+ handle.close()
486
+ match = re.search(r'/(geo_loc_name|country|location)\s*=\s*"([^"]+)"', text)
487
+ if match:
488
+ return match.group(2), match.group(0) # This is the value like "Brunei"
489
+ return "Not found", "Not found"
490
+
491
+ except Exception as e:
492
+ print("❌ Entrez error:", e)
493
+ return "Not found", "Not found"
494
+
495
+### ANCIENT/MODERN FLAG
+from Bio import Entrez
+import re
+
+def flag_ancient_modern(accession, textsToExtract, isolate=None):
+    """
+    Try to classify a sample as Ancient or Modern using:
+    1. NCBI accession (if available)
+    2. Supplementary text or context fallback
+    """
+    context = ""
+    label, explain = "", ""
+
+    try:
+        # Check if we can fetch metadata from NCBI using the accession
+        handle = Entrez.efetch(db="nuccore", id=accession, rettype="medline", retmode="text")
+        text = handle.read()
+        handle.close()
+
+        isolate_source = re.search(r'/(isolation_source)\s*=\s*"([^"]+)"', text)
+        if isolate_source:
+            context += isolate_source.group(0) + " "
+
+        specimen = re.search(r'/(specimen|specimen_voucher)\s*=\s*"([^"]+)"', text)
+        if specimen:
+            context += specimen.group(0) + " "
+
+        if context.strip():
+            label, explain = detect_ancient_flag(context)
+            if label != "Unknown":
+                return label, explain + " from NCBI\n(" + context + ")"
+
+        # If no useful NCBI metadata, check supplementary texts
+        if textsToExtract:
+            labels = {"modern": [0, ""], "ancient": [0, ""], "unknown": 0}
+
+            for source in textsToExtract:
+                text_block = textsToExtract[source]
+                context = extract_relevant_paragraphs(text_block, accession, isolate=isolate)  # Reduce to informative paragraph(s)
+                label, explain = detect_ancient_flag(context)
+
+                if label == "Ancient":
+                    labels["ancient"][0] += 1
+                    labels["ancient"][1] += f"{source}:\n{explain}\n\n"
+                elif label == "Modern":
+                    labels["modern"][0] += 1
+                    labels["modern"][1] += f"{source}:\n{explain}\n\n"
+                else:
+                    labels["unknown"] += 1
+
+            if max(labels["modern"][0], labels["ancient"][0]) > 0:
+                if labels["modern"][0] > labels["ancient"][0]:
+                    return "Modern", labels["modern"][1]
+                else:
+                    return "Ancient", labels["ancient"][1]
+            else:
+                return "Unknown", "No strong keywords detected"
+        else:
+            print("No DOI or PubMed ID available for inference.")
+            return "", ""
+
+    except Exception as e:
+        print("Error:", e)
+        return "", ""
+
+
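+# Minimal usage sketch (accession, text and isolate below are placeholders):
+# label, explain = flag_ancient_modern("AB123456", {"doiLink": "paper text ..."}, isolate="SampleX")
+# `label` is "Ancient", "Modern", "Unknown", or "" when nothing could be inferred.
+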
+def detect_ancient_flag(context_snippet):
+    context = context_snippet.lower()
+
+    ancient_keywords = [
+        "ancient", "archaeological", "prehistoric", "neolithic", "mesolithic", "paleolithic",
+        "bronze age", "iron age", "burial", "tomb", "skeleton", "14c", "radiocarbon", "carbon dating",
+        "postmortem damage", "udg treatment", "adna", "degradation", "site", "excavation",
+        "archaeological context", "temporal transect", "population replacement", "cal bp", "calbp", "carbon dated"
+    ]
+
+    modern_keywords = [
+        "modern", "hospital", "clinical", "consent", "blood", "buccal", "unrelated", "blood sample", "buccal sample", "informed consent", "donor", "healthy", "patient",
+        "genotyping", "screening", "medical", "cohort", "sequencing facility", "ethics approval",
+        "we analysed", "we analyzed", "dataset includes", "new sequences", "published data",
+        "control cohort", "sink population", "genbank accession", "sequenced", "pipeline",
+        "bioinformatic analysis", "samples from", "population genetics", "genome-wide data", "imr collection"
+    ]
+
+    ancient_hits = [k for k in ancient_keywords if k in context]
+    modern_hits = [k for k in modern_keywords if k in context]
+
+    if ancient_hits and not modern_hits:
+        return "Ancient", f"Flagged as ancient due to keywords: {', '.join(ancient_hits)}"
+    elif modern_hits and not ancient_hits:
+        return "Modern", f"Flagged as modern due to keywords: {', '.join(modern_hits)}"
+    elif ancient_hits and modern_hits:
+        if len(ancient_hits) >= len(modern_hits):
+            return "Ancient", f"Mixed context, leaning ancient due to: {', '.join(ancient_hits)}"
+        else:
+            return "Modern", f"Mixed context, leaning modern due to: {', '.join(modern_hits)}"
+
+    # Fallback to QA
+    answer = infer_fromQAModel(context, question="Are the mtDNA samples ancient or modern? Explain why.")
+    if answer.startswith("Error"):
+        return "Unknown", answer
+    if "ancient" in answer.lower():
+        return "Ancient", f"Leaning ancient based on QA: {answer}"
+    elif "modern" in answer.lower():
+        return "Modern", f"Leaning modern based on QA: {answer}"
+    else:
+        return "Unknown", f"No strong keywords or QA clues. QA said: {answer}"
+
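+# Illustrative keyword-only cases (made-up snippets):
+# detect_ancient_flag("radiocarbon dated bone from an excavation")
+#   -> ("Ancient", "Flagged as ancient due to keywords: ...")
+# detect_ancient_flag("buccal swabs collected with informed consent")
+#   -> ("Modern", "Flagged as modern due to keywords: ...")
+# When no keyword hits, the function falls back to the QA model above.
+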
+# STEP 5: Main pipeline: accession -> 1. get pubmed id and isolate -> 2. get doi -> 3. get text -> 4. prediction -> 5. output: inferred location + explanation + confidence score
+def classify_sample_location(accession):
+    outputs = {}
+    keyword, context, location, qa_result, haplo_result = "", "", "", "", ""
+    # Step 1: get pubmed id and isolate
+    pubmedID, isolate = get_info_from_accession(accession)
+    '''if not pubmedID:
+        return {"error": f"Could not retrieve PubMed ID for accession {accession}"}'''
+    if not isolate:
+        isolate = "UNKNOWN_ISOLATE"
+    # Step 2: get doi
+    doi = get_doi_from_pubmed_id(pubmedID)
+    '''if not doi:
+        return {"error": "DOI not found for this accession. Cannot fetch paper or context."}'''
+    # Step 3: get text
+    '''textsToExtract = { "doiLink": "paperText",
+                          "file1.pdf": "text1",
+                          "file2.doc": "text2",
+                          "file3.xlsx": excelText3 }'''
+    if doi and pubmedID:
+        textsToExtract = get_paper_text(doi, pubmedID)
+    else:
+        textsToExtract = {}
+    '''if not textsToExtract:
+        return {"error": f"No texts extracted for DOI {doi}"}'''
+    if isolate not in [None, "UNKNOWN_ISOLATE"]:
+        label, explain = flag_ancient_modern(accession, textsToExtract, isolate)
+    else:
+        label, explain = flag_ancient_modern(accession, textsToExtract)
+    # Step 4: prediction
+    outputs[accession] = {}
+    outputs[isolate] = {}
+    # 4.0 Infer from NCBI
+    location, outputNCBI = infer_location_fromNCBI(accession)
+    NCBI_result = {
+        "source": "NCBI",
+        "sample_id": accession,
+        "predicted_location": location,
+        "context_snippet": outputNCBI}
+    outputs[accession]["NCBI"] = {"NCBI": NCBI_result}
+    if textsToExtract:
+        long_text = ""
+        for key in textsToExtract:
+            text = textsToExtract[key]
+            # try accession number first
+            outputs[accession][key] = {}
+            keyword = accession
+            context = extract_context(text, keyword, window=500)
+            # 4.1: Using a HuggingFace model (question-answering)
+            location = infer_fromQAModel(context, question=f"Where is the mtDNA sample {keyword} from?")
+            qa_result = {
+                "source": key,
+                "sample_id": keyword,
+                "predicted_location": location,
+                "context_snippet": context
+            }
+            outputs[keyword][key]["QAModel"] = qa_result
+            # 4.2: Infer from haplogroup
+            haplo_result = classify_mtDNA_sample_from_haplo(context)
+            outputs[keyword][key]["haplogroup"] = haplo_result
+            # try isolate
+            keyword = isolate
+            outputs[isolate][key] = {}
+            context = extract_context(text, keyword, window=500)
+            # 4.1.1: Using a HuggingFace model (question-answering)
+            location = infer_fromQAModel(context, question=f"Where is the mtDNA sample {keyword} from?")
+            qa_result = {
+                "source": key,
+                "sample_id": keyword,
+                "predicted_location": location,
+                "context_snippet": context
+            }
+            outputs[keyword][key]["QAModel"] = qa_result
+            # 4.2.1: Infer from haplogroup
+            haplo_result = classify_mtDNA_sample_from_haplo(context)
+            outputs[keyword][key]["haplogroup"] = haplo_result
+            # add long text
+            long_text += text + ". \n"
+        # 4.3: UpgradeClassify
+        # try sample_id as accession number
+        sample_id = accession
+        if sample_id:
+            filtered_context = filter_context_for_sample(sample_id.upper(), long_text, window_size=1)
+            locations = infer_location_for_sample(sample_id.upper(), filtered_context)
+            if locations != "No clear location found in top matches":
+                outputs[sample_id]["upgradeClassifier"] = {}
+                outputs[sample_id]["upgradeClassifier"]["upgradeClassifier"] = {
+                    "source": "From these sources combined: " + ", ".join(list(textsToExtract.keys())),
+                    "sample_id": sample_id,
+                    "predicted_location": ", ".join(locations),
+                    "context_snippet": "First 1000 words: \n" + filtered_context[:1000]
+                }
+        # try sample_id as isolate name
+        sample_id = isolate
+        if sample_id:
+            filtered_context = filter_context_for_sample(sample_id.upper(), long_text, window_size=1)
+            locations = infer_location_for_sample(sample_id.upper(), filtered_context)
+            if locations != "No clear location found in top matches":
+                outputs[sample_id]["upgradeClassifier"] = {}
+                outputs[sample_id]["upgradeClassifier"]["upgradeClassifier"] = {
+                    "source": "From these sources combined: " + ", ".join(list(textsToExtract.keys())),
+                    "sample_id": sample_id,
+                    "predicted_location": ", ".join(locations),
+                    "context_snippet": "First 1000 words: \n" + filtered_context[:1000]
+                }
+     return outputs, label, explain
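+# Minimal end-to-end usage sketch (the accession below is illustrative):
+# outputs, label, explain = classify_sample_location("ON792208")
+# `outputs` is a nested dict keyed by accession/isolate, then by text source
+# ("NCBI", a DOI link, a supplementary file name, or "upgradeClassifier"),
+# then by method ("NCBI", "QAModel", "haplogroup", "upgradeClassifier");
+# `label`/`explain` carry the ancient/modern flag and its justification.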