import os
import sys
import time
import json
import joblib
import math
import itertools 
import argparse
import multiprocessing as mp
from typing import List
from pathlib import Path

import jinja2
import requests
import pandas as pd
from dotenv import load_dotenv
from serpapi import GoogleSearch
import tiktoken
from openai import OpenAI
from tqdm import tqdm
from loguru import logger

from model import llm
from data import get_leads, format_search_results
from utils import (parse_json_garbage, split_dataframe, merge_results, 
                   combine_results, split_dict, format_df, 
                   clean_quotes, compose_query)
from batch import postprocess_result

load_dotenv()
ORGANIZATION_ID = os.getenv('OPENAI_ORGANIZATION_ID')
SERP_API_KEY = os.getenv('SERP_APIKEY')
SERPER_API_KEY = os.getenv('SERPER_API_KEY')

try:
    logger.remove(0)
    logger.add(sys.stderr, level="INFO")
except ValueError:
    pass

def get_serp( query: str, google_domain: str, gl: str, lr: str, top_k: int = 20, hl: str = "zh-tw", location: str = 'Taiwan', provider: str = 'serp') -> dict:
    """Query a SERP provider ('serp' for SerpAPI, 'serper' for serper.dev) and
    return the raw search result dict, normalized to contain 'organic_results'.
    """
    results = []

    if provider == 'serp':
        search = GoogleSearch({
            "q": query, 
            'google_domain': google_domain,
            'gl': gl,
            'lr': lr,
            "api_key": SERP_API_KEY
        })
        result = search.get_dict()
        # print(result['organic_results'][0])
        # return result['organic_results'][0]
        return result
    elif provider == 'serper':
        try:
            payload = json.dumps({
                "q": query,
                "location": location,
                "gl": gl,
                "hl": hl,
                "num": top_k,
                "autocorrect": False
            })
            response = requests.request(
                "POST", 
                "https://google.serper.dev/search", 
                headers = { 'X-API-KEY': SERPER_API_KEY, 'Content-Type': 'application/json'}, 
                data = payload
            )
        except Exception as e:
            logger.error(f"# SERPER request error: e -> {e}, query -> {query}")
            raise Exception(f"SERPER error -> {e}, query -> {query}")
        result = response.json()
        if 'searchParameters' in result:
            result['search_parameters'] = result.pop('searchParameters')
        if 'knowledgeGraph' in result:
            result['knowledge_graph'] = result.pop('knowledgeGraph')
        if 'organic' in result:
            result['organic_results'] = result.pop('organic')
        return result
    else:
        raise Exception(f"Unknown provider: {provider}")
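
# Example (illustrative sketch, not executed on import): assuming SERPER_API_KEY is set in .env,
# a single store lookup could look like the following; the query string is normally produced by
# compose_query(address, business_name):
#
#     result = get_serp("台東市洛陽街204號 達米娜魚料理食堂", "google.com.tw", "tw", "lang_zh-TW", provider="serper")
#     result["organic_results"][0]["title"]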

def get_condensed_result(result: dict):
    """Condense a SERP result into a JSON string of {title, snippet} pairs.
    Argument
        result: dict with an 'organic_results' key
    Return
        condensed_result: str, a JSON-encoded list like [{"title": "...", "snippet": "..."}, ...]
    """
    filtered_results = [ 
        {"title": r.get('title',""), 'snippet': r.get('snippet',"")} for r in result['organic_results']
    ]
    
    condensed_result = json.dumps(filtered_results, ensure_ascii=False)
    # print( condensed_results )
    return condensed_result

def get_googlemap_results(result: dict):
    """Get a store's google map results (in knowledge_graph)
    Argument
        result: dict
            - 'knowledge_graph'
                - 'title', 'thumbnail', 'type', 'entity_type', 'kgmid', 'knowledge_graph_search_link', 'serpapi_knowledge_graph_search_link', 'tabs', 'place_id', 'directions', 'local_map', 'rating', 'review_count', '服務項目', '地址', '地址_links', 'raw_hours', 'hours', '電話號碼', '電話號碼_links', 'popular_times', 'user_reviews', 'reviews_from_the_web', 'unclaimed_listing', '個人資料', '其他人也搜尋了以下項目', '其他人也搜尋了以下項目_link', '其他人也搜尋了以下項目_stick'
    Return
        googlemap_result: str
    """
    googlemap_result = "## Google map results\n"
    if 'knowledge_graph' in result:
        if 'user_reviews' in result['knowledge_graph']:
            user_review = "\t".join([ _.get('summary', '') for _ in result['knowledge_graph']['user_reviews']])
            store_name = result['knowledge_graph']['title']
            googlemap_result += ( f"### store name: {store_name}\n")
            googlemap_result += ( f"\t- 顧客評價: {user_review}\n")
        if '其他人也搜尋了以下項目' in result['knowledge_graph']:
            similar_store_types = "\t".join([ str(_.get('extensions', '')) for _ in result['knowledge_graph']['其他人也搜尋了以下項目']])
            googlemap_result += ( f"\t- 類似店面類型: {similar_store_types}\n")
        if '暫停營業' in result['knowledge_graph']:
            store_status = '暫停營業' if result['knowledge_graph']['暫停營業'] else '營業中'
            googlemap_result += ( f"\t- 營業狀態: {store_status}\n")
        if '電話號碼' in result['knowledge_graph']:
            phone_number = result['knowledge_graph']['電話號碼']
            googlemap_result += ( f"\t- 電話號碼: {phone_number}\n")
        if 'type' in result['knowledge_graph']:
            store_type = result['knowledge_graph']['type']
            googlemap_result += ( f"\t- 餐飲屬性: {store_type}\n")

    else:
        googlemap_result += ("empty\n")
    return clean_quotes(googlemap_result)

def get_organic_result(result: dict) -> str:
    """Get a store's organic search results
    Argument
        result: dict
    Return
        organic_result: str
    """
    organic_result = "## Search results\n"
    # filtered_results = [ 
    #     {"title": r.get('title',""), 'snippet': r.get('snippet',"")} for r in result['organic_results']
    # ]
    for r in result['organic_results']:
        organic_result += ( f"### {r.get('title','')}: {r.get('snippet','')}\n")
    return clean_quotes(organic_result)
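
# Example (illustrative sketch): downstream steps build the LLM evidence by concatenating both
# views of a single SERP response, mirroring what extract_results does below:
#
#     res = get_serp(query, "google.com.tw", "tw", "lang_zh-TW", provider="serper")
#     evidence = get_googlemap_results(res) + "\n" + get_organic_result(res)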

def compose_classification(  user_content, config: dict) -> str:
    """
    Argument
        user_content: str
        config: dict
            classes: list
            backup_classes: list
            provider: e.g. 'google', 'openai'
            model: e.g. 'gemini-1.5-flash', 'gpt-3.5-turbo-0125', 'gpt-4-0125-preview'
    Return
        response: str
    Example
        system_prompt = 
            As a helpful and rigorous retail analyst, given the provided information about a store, 
            your task is two-fold. First, classify provided evidence below into the mostly relevant category from the following: {classes}. 
            Second, if no relevant information has been found, classify the evidence into the mostly relevant supercategory from the following: {backup_classes}.
            It's very important to omit unrelated pieces of evidence and not to make up any assumptions.
            Please think step by step, and you must output in json format. An example output json is like {{"category": "..."}}
            If no relevant piece of information can ever be found at all, simply output json with empty string "".
            I'll tip you and guarantee a place in heaven if you do a great job, completely according to my instructions.
    """

    classes = config['classes']
    if isinstance(classes, list):
        classes = ", ".join([ f"`{x}`" for x in classes])
    elif isinstance(classes, str):
        pass
    else:
        raise Exception(f"Incorrect classes type: {type(classes)}")
    template = jinja2.Environment().from_string(config['classification_prompt'])
    system_prompt = template.render( classes=classes, backup_classes=config['backup_classes'])
    response = llm( 
        provider = config['provider'], 
        model = config['model'], 
        system_prompt = system_prompt,
        user_content = user_content,
    )
    return response
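
# Example (illustrative sketch): the classification config is assumed to look roughly like this,
# where classification_prompt is a jinja2 template that references {{ classes }} and
# {{ backup_classes }} (see the example prompt in the docstring above); the class names shown
# are taken from the conformed category list used by compose_regularization below:
#
#     classification_config = {
#         "provider": "google",
#         "model": "gemini-1.5-flash",
#         "classes": ["小吃店", "燒烤", "咖啡廳(泡沫紅茶)"],
#         "backup_classes": ["傳統餐廳", "西餐廳(含美式,義式,墨式)"],
#         "classification_prompt": "... classify the evidence into {{ classes }}, falling back to {{ backup_classes }} ...",
#     }
#     compose_classification("`evidence`: `...`", classification_config)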

def classify_results( 
        analysis_results: pd.DataFrame, 
        config: dict,
        input_column: str = 'evidence', 
        output_column: str = 'classified_category',
    ):
    """Classify the results
    Argument
        analysis_results: dataframe
        config: dict
            classes: list,
            backup_classes: list,
            provider: str, 
            model: str,
        input_column: str
        output_column: str
    Return 
        analysis_results: dataframe
    """
    classified_results = analysis_results.copy()
    labels, empty_indices = [], []
    for idx, evidence in zip( analysis_results['index'], analysis_results[input_column]):
        try:
            user_content = f'''`evidence`: `{evidence}`''' 
            pred_cls = compose_classification( user_content, config)
            label = parse_json_garbage(pred_cls)['category']
            labels.append(label)
        except Exception as e:
            logger.error(f"# CLASSIFICATION error: e -> {e}, user_content -> {user_content}, evidence: {evidence}")
            labels.append("")
            empty_indices.append(idx)

    classified_results[output_column] = labels
    return { 
        "classified_results": classified_results, 
        "empty_indices": empty_indices
    }
            
def classify_results_mp( extracted_results: pd.DataFrame, classified_file_path: str, config: dict, n_processes: int = 4):
    """
    Argument
        extracted_results:
        classified_file_path:
        config: dict
            classes: list
            backup_classes: list
            provider: str
            model: str, 
        n_processes: int
    Return
        classified_results: dataframe
    Reference
        200 records, 4 processes, 122.4695s
    """
    st = time.time()
    # classified_file_path = "data/classified_result.joblib"
    if not os.path.exists(classified_file_path):
        split_data = split_dataframe(extracted_results)
        with mp.Pool(n_processes) as pool:
            classified_results = pool.starmap( 
                classify_results,
                [ ( 
                    d, config, 'evidence', 'classified_category'
                ) for d in split_data]
            ) 
            classified_results = merge_results( classified_results, dataframe_columns=['classified_results'], list_columns=['empty_indices'])
            try:
                with open( classified_file_path, "wb") as f:
                    joblib.dump( classified_results, f)
            except FileNotFoundError as e:
                logger.error(f"# CLASSIFICATION error: e -> {e}")
                with open( f"./{Path(classified_file_path).name}.joblib", "wb") as f:
                    joblib.dump( classified_results, f)
            
    else:
        with open( classified_file_path, "rb") as f:
            classified_results = joblib.load(f)
    logger.info( f"total time: {time.time() - st}")
    return classified_results

def compose_filter( query, search_results, config: dict):
    """Filter the search results based on the query (store name and address)
    Argument
        query: str
        search_results: str
        system_prompt: str
        config: dict
            provider: default to be "google"
            model: default to be "gemini-1.5-flash" 
    Return
        response: str
    """
    system_prompt = f'''As a helpful and rigorous retail analyst, given the provided query and a list of search results for the query, execute this task step by step. The search results contain a list of entries, some of which may be totally irrelevant to our query.
Steps: 
First, use store name and address to identify relevant and irrelevant information from search results.
Second, look through the list of search results; keep relevant ones and drop irrelevant ones.
Third, filter the results and only output relevant ones. Original numbering must be kept.

Output in json format such as {{ "relevant_results": [ "result ...", "result ...", "result ..." ], "irrelevant_results": ["result ..."]}}. It's very important to omit unrelated results. Do not make up any assumption.
        '''
    user_content = f"## query: `{query}`\n## search_results: {search_results}"
    response = llm( 
        provider = config['provider'], 
        model = config['model'], 
        system_prompt = system_prompt, 
        user_content = user_content
    )
    return response

def filter_results(  results: pd.DataFrame, config: dict):
    """Filter the results
    Argument
        results: dataframe
        config: dict
            provider
            model:
    Return 
        analysis_results: dataframe
    """
    results = results.copy()
    relevant_results, empty_indices = [], []
    for i, d in tqdm(enumerate(results.itertuples())):
        idx = d.index # d[1]
        search_results = d.search_results
        # evidence = d.googlemap_results +"\n" + d.search_results
        # business_id = d.business_id # d[2]
        business_name = d.business_name # d[3]
        address = d.address # d[7]
        query, filtered_results = "", ""
        try:
            query = compose_query( address, business_name, use_exclude=False)
            filtered_results = compose_filter( query = query, search_results=search_results, config=config)
            relevant_result = parse_json_garbage(filtered_results)['relevant_results']
            relevant_result = "### 搜尋結果: \n" + "\n".join([ "- " + r for r in relevant_result])
            relevant_results.append(relevant_result)
        except Exception as e:
            logger.error(f"# FILTER error (add to empty_indices): e -> {e}, query -> {query}, filtered_results: {filtered_results}")
            relevant_results.append(search_results)
            empty_indices.append(idx)

    results.loc[ :, "search_results"] = relevant_results
    return { 
        "filtered_results": results, 
        "empty_indices": empty_indices
    }

def filter_results_mp( data: pd.DataFrame, filtered_file_path: str, config: dict, n_processes: int = 4):
    """Filter results in parallel
    Argument
        data: dataframe
        filtered_file_path: str
        config: dict
            provider: str
            model: str
        n_processes: int
    Return
        filtered_results: dataframe
    """
    st = time.time()
    # crawl_file_path = "data/crawled_results.joblib"
    if not os.path.exists(filtered_file_path):
        split_data = split_dataframe( data )
        with mp.Pool(n_processes) as pool:
            filtered_results = pool.starmap( 
                filter_results, 
                [ (d, config) for d in split_data]
            )
            filtered_results = merge_results( filtered_results, dataframe_columns=['filtered_results'], list_columns=['empty_indices'])
            # with open( filtered_file_path, "wb") as f:
            #     joblib.dump( filtered_results, f)
            filtered_results['filtered_results'].to_csv( filtered_file_path, index=False)
    else:
        # with open( filtered_file_path, "rb") as f:
        #     filtered_results = joblib.load(f)
        filtered_results = { 'filtered_results': pd.read_csv( filtered_file_path)}
    logger.debug( f"total time: {time.time() - st}")
    return filtered_results

def crawl_results( data: pd.DataFrame, serp_provider: str = 'serp', google_domain: str = 'google.com.tw', gl: str = 'tw', lr: str = 'lang_zh-TW'):
    """
    Argument
        data: dataframe
        serp_provider: str, 'serp' or 'serper'
        google_domain: str
        gl: str
        lr: str
    Return
        {
            `crawled_results`: df
            `empty_indices`: list
        }
        df format:
        [
            {'title': '達米娜魚料理食堂',
            'snippet': 'Zhenzhen · 台東縣台東市洛陽街204號 · 08 934 1662 · 其他美食 · 外送・提供廁所・免費Wifi · ・休息中・將於20:00 開始營業 · NT$120 · 座位數15 · 現金.'},
            ...
            {'title': '台東美食推薦》25間台東市美食小吃/特色餐廳/早餐伴手禮',
            'snippet': '好漁日鬼頭刀專屬料理MAHI MAHI TODAY如店名所說,是間專賣鬼頭刀料理的餐廳,台灣主要盛產鬼頭刀的地方就位於台東的成功新港漁港,所以推薦大家來台東 ...'},
            {'title': '類似的店', 'snippet': "['餐廳']\t['早午餐']\t['餐廳']"},
            {'status': '暫停營業'},
            {'telephone_number': '08 934 1662'}
        ]
    Reference
        200 records, 4 processes, 171.3649s
    """
    # serp_results = []
    # condensed_results = []
    crawled_results = []
    empty_indices = []
    for i, d in tqdm(enumerate(data.itertuples())):
        # NOTE: positional access assumes the leads dataframe puts the address in column 1,
        # the business_id in column 2 and the business_name in column 4 of each row tuple.
        idx = d[0]
        address = d[1]
        business_id = d[2]
        business_name = d[4]
        query = compose_query(address, business_name)
        try:
            res = get_serp( query, google_domain, gl, lr, provider=serp_provider)
            # serp_results.append(res)
        except Exception as e:
            logger.warning( f"# SERP error (will add to empty indices): e = {e}, i = {i}, idx = {idx}, query = {query}")
            empty_indices.append(i)
            continue
        try:
            # cond_res = get_condensed_result(res)
            googlemap_res = get_googlemap_results(res)
            search_res = get_organic_result(res)
            # condensed_results.append(cond_res)
        except Exception as e:
            logger.warning(f"# get googlemap & organic results error (will add to empty indices): e = {e}, i = {i}, idx = {idx}, res = {res}")
            empty_indices.append(i)
            continue
        
        crawled_results.append( { 
            "index": idx, 
            "business_id": business_id, 
            "business_name": business_name, 
            "serp": res,
            # "evidence": cond_res, 
            "googlemap_results": googlemap_res,
            "search_results": search_res,
            "address": address,
        } )
    crawled_results = pd.DataFrame(crawled_results)

    return {
        "crawled_results": crawled_results,
        "empty_indices": empty_indices
    }

def crawl_results_mp( data: pd.DataFrame, crawl_file_path: str, serp_provider: str, n_processes: int = 4):
    """Crawl SERP results for all leads in parallel and cache them to `crawl_file_path`."""
    st = time.time()
    # crawl_file_path = "data/crawled_results.joblib"
    if not os.path.exists(crawl_file_path):
        split_data = split_dataframe( data )
        with mp.Pool(n_processes) as pool:
            crawled_results = pool.starmap( 
                crawl_results, 
                [( d, serp_provider) for d in split_data]
            )
            crawled_results = merge_results( crawled_results, dataframe_columns=['crawled_results'], list_columns=['empty_indices'])
            try:
                with open( crawl_file_path, "wb") as f:
                    joblib.dump( crawled_results, f)
            except FileNotFoundError as e:
                logger.error(f"# CRAWL error: e = {e}")
                with open( f"./{Path(crawl_file_path).name}.joblib", "wb") as f:
                    joblib.dump( crawled_results, f)
    else:
        with open( crawl_file_path, "rb") as f:
            crawled_results = joblib.load(f)
    logger.debug( f"total time: {time.time() - st}")
    return crawled_results

def compose_extraction( query, search_results, config: dict):
    """
    Argument
        query: str
        search_results: str
        config: dict
            system_prompt: str
            classes: list. e.g. `小吃店`,`日式料理(含居酒屋,串燒)`,`火(鍋/爐)`,`東南亞料理(不含日韓)`,`海鮮熱炒`,`特色餐廳(含雞、鵝、牛、羊肉)`,`釣蝦場`,`傳統餐廳`,`燒烤`,`韓式料理(含火鍋,烤肉)`,`PUB(Live Band)`,`PUB(一般,含Lounge)`,`PUB(電音\舞場)`,`五星級飯店`,`自助KTV(含連鎖,庭園自助)`,`西餐廳(含美式,義式,墨式)`,`咖啡廳(泡沫紅茶)`,`飯店(星級/旅館,不含五星級)`,`運動休閒館(含球類練習場,飛鏢等)`,`西餐廳(餐酒館、酒吧、飛鏢吧、pub、lounge bar)`,`西餐廳(土耳其、漢堡、薯條、法式、歐式、印度)`,`早餐`
            provider: "openai"
            model: "gpt-4-0125-preview" or 'gpt-3.5-turbo-0125'
    Return
        response: str

    Example
        classes = ", ".join([ "`"+x+"`" for x in classes if x!='早餐' ])+ " or " + "`早餐`"
        traits = "Gathering, Chill, Enjoying Together, Drinking Freely, Winery, Wine Cellar, Wine Storage, Relaxing, Unwinding, Lyrical, Romantic, Pleasant, Stress Relief, Wine and Dine, Light Drinking Gatherings, Birthday Celebrations, Socializing, Parties, Networking, After Work Relaxation with a Drink, Relaxing Places Suitable for Drinking, Every Dish Goes Well with Beer, Shared Dishes, Dining Together, Atmosphere Suitable for Celebratory Drinking, Places Suitable for Light Drinking Gatherings with Friends, Small Shops Suitable for Relaxing and Light Drinking"
        system_prompt = f'''
            As a helpful and rigorous retail analyst, given the provided query and a list of search results for the query,  your task is to first use store name and address to identify relevant information. 
            After that, from the relevant information, extract `store_name`, `address`, `description`, `category`, `provide_alcohol` and `phone_number` from the found relevant information. 
            Note that `category` can only be {classes}. 
            According to our experience,`provide_alcohol` can be inferred based on whether a store is suitable for scenarios such as {traits}. 
            `description` is a summary of key piece of evidence and reasons that lead you decide `category` and `provide_alcohol` .

            It's very important to omit unrelated results. Do not make up any assumption.
            Please think step by step, and output a single json that starts with `{{` and ends with `}}`. An example output json is like {{"store_name": "...", "address": "...", "description": "... products, service or highlights ...", "category": "...", "phone_number": "...", "provide_alcohol": true or false}}
            If no relevant information has been found, simply output json with empty values.
        '''
    """
    classes = ", ".join([ "`"+x+"`" for x in config['classes'] if x!='早餐' ])+ " or " + "`早餐`"
    traits = config['traits']
    system_prompt = config['extraction_prompt']
    jenv = jinja2.Environment()
    template = jenv.from_string(system_prompt)
    system_prompt = template.render( classes=classes, traits=traits)
    user_content = f"`query`: `{query}`\n`search_results`: {search_results}"
    response = llm( 
        provider = config['provider'], 
        model = config['model'], 
        system_prompt = system_prompt, 
        user_content = user_content
    )
    return response
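
# Example (illustrative sketch): the extraction config is assumed to carry the full category list,
# a traits description and a jinja2 extraction_prompt referencing {{ classes }} and {{ traits }},
# along the lines of the prompt shown in the docstring above:
#
#     extraction_config = {
#         "provider": "openai",
#         "model": "gpt-4-0125-preview",
#         "classes": ["小吃店", "日式料理(含居酒屋,串燒)", "燒烤"],
#         "traits": "Gathering, Chill, Enjoying Together, Drinking Freely, ...",
#         "extraction_prompt": "... `category` can only be {{ classes }} ... scenarios such as {{ traits }} ...",
#     }
#     compose_extraction(query="台東市洛陽街204號 達米娜魚料理食堂", search_results=evidence, config=extraction_config)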

def extract_results( data: pd.DataFrame, config: dict):
    """
    Argument
        data: a dataframe 
            - "index", "business_id", "business_name", "serp", "googlemap_results", "search_results", "address"
            # - `evidence`, `result`
        config: dict
            classes: list
            provider: str
            model: str
    Return
        extracted_results: dataframe of `extracted_evidence`
    """
    extracted_results, empty_indices, ext_res = [], [], []
    for i, d in tqdm(enumerate(data.itertuples())):
        idx = d.index # d[1]
        # evidence = d.evidence
        # evidence = d.formatted_evidence
        evidence = d.googlemap_results +"\n" + d.search_results
        business_id = d.business_id # d[2]
        business_name = d.business_name # d[3]
        address = d.address # d[7]
        ana_res = None
        query = compose_query( address, business_name, use_exclude=False)
        try:
            ext_res = compose_extraction(
                query = query, 
                search_results = evidence, 
                config = config
            )
            ext_res = parse_json_garbage(ext_res)
        except Exception as e:
            logger.error(f"# ANALYSIS error (add to empty indices): e = {e}, i = {i}, q = {query}, ext_res = {ext_res}")
            empty_indices.append(i)
            continue
        
        extracted_results.append( { 
            "index": idx, 
            "business_id": business_id, 
            "business_name": business_name, 
            "evidence": evidence, 
            ** ext_res
        } )
    extracted_results = pd.DataFrame(extracted_results)

    return {
        "extracted_results": extracted_results, 
        "empty_indices": empty_indices
    }

def extract_results_mp( crawled_results, extracted_file_path, config: dict, n_processes: int = 4):
    """
    Argument
        crawled_results: dataframe
        extracted_file_path
        config:
            classes: list
            model: str
            provider: str
    Return 
    Reference
        200 records, 4 processes, 502.2691s
    """
    st = time.time()
    # args.extracted_file_path = "data/extracted_results.joblib"
    if not os.path.exists(extracted_file_path):
        split_data = split_dataframe( crawled_results)
        with mp.Pool(n_processes) as pool:
            extracted_results = pool.starmap( extract_results, [ (x, config) for x in split_data])
            extracted_results = merge_results( extracted_results, dataframe_columns=['extracted_results'], list_columns=['empty_indices'])
            try:
                with open( extracted_file_path, "wb") as f:
                    joblib.dump( extracted_results, f)
            except FileNotFoundError as e:
                logger.error(f"# EXTRACT error: e = {e}")
                with open( f"./{Path(extracted_file_path).name}.joblib", "wb") as f:
                    joblib.dump( extracted_results, f)
    else:
        with open( extracted_file_path, "rb") as f:
            extracted_results = joblib.load(f)
    logger.info( f"total time: {time.time() - st}")
    return extracted_results

def compose_regularization( category: str, config: dict):
    """
    Argument
        category: str
        config: dict
            provider: str
            model: str
    Return
        response: str
    """
    system_prompt = f"""
    As a helpful and factual assistant, your task is to classify the provided raw cuisine category into a conformed category. The definition of each conformed category is shown below (in the format of `category`: `... definition ...`):
- `小吃店`:小吃、擔仔麵、小吃攤、街邊小店、傳統小吃、麵食、麵攤、炒飯、餃子館、鯊魚煙、黑白切、牛肉麵、銅板美食、小點心、簡餐、色小菜、開放空間攤販
- `日式料理(含居酒屋,串燒)`:居酒屋、酒場、水產、清酒、生魚片、壽司、日式啤酒、日式料理、代烤服務、日本餐飲場所、日本傳統食物、日式定食
- `火(鍋/爐)`:麻辣鍋、薑母鴨、鴨味仔、鍋物、湯底、滋補、冬令補、涮涮鍋、個人鍋、冬天圍爐、羊肉爐、鴛鴦鍋、炭火爐、氣火爐、燒酒雞、蒸氣海鮮鍋
- `東南亞料理(不含日韓)`:印尼、越式、泰式、沙嗲、海南雞、河粉、馬來西亞料理、新加坡料理、寮國料理、緬甸料理、南洋風味、印度料理、越南春捲、泰式綠咖哩、異國風情裝潢、滇緬料理
- `海鮮熱炒`:海鮮、現撈、活海鮮、生猛、大排檔、活魚活蝦、生猛海鮮、快炒、海產、台式海鮮、下酒菜
- `特色餐廳(含雞、鵝、牛、羊肉)`:烤鴨、燒鵝、甕仔雞、甕缸雞、桶仔雞、牛雜、蒙古烤肉、鵝肉城、金山鴨肉、生牛肉、全羊宴、活鱉、烤雞店、鵝肉餐廳、溫體牛、現宰羊肉、鹹水鵝、土羊肉
- `傳統餐廳`:江浙、台菜、合菜、桌菜、粵菜、中式、川菜、港式、上海菜、砂鍋魚頭、東北菜、北京烤鴨、一鴨三吃、婚宴、辦桌、老字號、宴會廳、台灣料理
- `燒烤`:燒烤、串燒、串串、烤魚、鮮蚵、炭烤、直火、碳火、和牛、戶外生火、烤肉、路邊燒烤
- `韓式料理(含火鍋,烤肉)`:韓國泡菜、韓式年糕、首爾、燒酒、韓式炸雞、春川辣炒雞、韓式炸醬麵、海鮮煎餅、烤三層肉、烤五花、烤韓牛、醬料和飯、石鍋拌飯、韓式風格、韓式清酒、啤酒、銅盤烤肉、韓流
- `PUB(Live Band)`:音樂餐廳、樂團表演、現場表演、LIVE表演、樂團駐唱、定期表演、有舞台場地、樂隊、專人駐唱
- `PUB(一般,含Lounge)`:酒吧、bar、lounge、飛鏢、調酒、運動酒吧、音樂酒吧、沙發聊天、女公關、互動調酒師、公關服務
- `PUB(電音\舞場)`:夜店、舞池電音、藝人、包廂低消制、電子音樂表演、DJ、派對狂歡
- `五星級飯店`:高級飯店、奢華酒店、連鎖五星級飯店、國際集團飯店、米其林飯店、高檔住宿
- `自助KTV(含連鎖,庭園自助)`:卡拉OK、唱歌、歌坊、歡唱吧、自行點歌、自助唱歌、唱歌包廂、慶生聯誼包廂
- `西餐廳(含美式,義式,墨式)`:牛排、餐酒、歐式、義式、西餐、義大利麵、凱薩沙拉、紅酒、白酒、調酒、墨西哥式料理、阿根廷式料理、漢堡、比薩
- `咖啡廳(泡沫紅茶)`:泡沫紅茶店、咖啡店、café、coffee、輕食、軟性飲料、簡餐、茶街
- `飯店(星級/旅館,不含五星級)`:飯店、酒店、商務旅館、平價住宿
- `運動休閒館(含球類練習場,飛鏢等)`:撞球、高爾夫、運動、保齡球、娛樂、高爾夫練習場、大魯閣棒球場、籃球、羽毛球、PHOENIX鳳凰、羽球館、看球賽
- `釣蝦場`:釣蝦、蝦寶、投幣卡拉OK、釣竿和餌料、蝦子現場烹煮食用、泰國蝦、現烤蝦子、包廂唱歌、現釣現烤、自備或租用釣竿。

Note that you must choose from the above categories. Other ones are strongly prohibited. 

Output in json format such as {{"category": "..."}}.

    """
    user_content = category
    response = llm( 
        provider = config['provider'], 
        model = config['model'], 
        system_prompt = system_prompt, 
        user_content = user_content
    )
    return response
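
# Example (illustrative sketch, hypothetical input category): mapping a free-form category onto
# the conformed list, using the same provider/model pairing assumed elsewhere in this module:
#
#     raw = compose_regularization("日式拉麵店", {"provider": "google", "model": "gemini-1.5-flash"})
#     parse_json_garbage(raw)["category"]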

def regularize_results( results: pd.DataFrame, provider, model):
    """Regularize the categories
    Argument
        results: dataframe
        provider: str
        model: str
    Return
        a dict of 
            - regularized_results: dataframe
            - empty_indices: list
    """
    results = results.copy()
    regular_categories, empty_indices = [], []
    for i, d in tqdm(enumerate(results.itertuples())):
        idx = d.index # d[1]
        category = d.category
        if pd.isna(category) or len(category)==0:
            regular_categories.append("")
            continue
        try:
            query = category
            regularized_result = compose_regularization( category, config={"provider": provider, "model": model})
            regular_category = parse_json_garbage(regularized_result)['category']
            regular_categories.append(regular_category)
        except Exception as e:
            logger.error(f"# REGULARIZATION error (add to empty_indices): e -> {e}, query -> {query}, category: {category}")
            regular_categories.append(category)
            empty_indices.append(idx)

    results.loc[ :, "category"] = regular_categories
    return { 
        "regularized_results": results, 
        "empty_indices": empty_indices
    }

def regularize_results_mp( data: pd.DataFrame, regularized_file_path, provider, model, n_processes: int = 4):
    """Regularize categories in parallel
    Argument
        data: dataframe
        regularized_file_path: str
        provider: str
        model: str
        n_processes: int
    Return
        regularized_results: dataframe
    """
    st = time.time()
    if not os.path.exists(regularized_file_path):
        split_data = split_dataframe( data )
        with mp.Pool(n_processes) as pool:
            regularized_results = pool.starmap( 
                regularize_results, 
                [ ( 
                    d, provider, model
                ) for d in split_data]
            )
            regularized_results = merge_results( regularized_results, dataframe_columns=['regularized_results'], list_columns=['empty_indices'])
            # with open( filtered_file_path, "wb") as f:
            #     joblib.dump( filtered_results, f)
            regularized_results['regularized_results'].to_csv( regularized_file_path, index=False)
    else:
        # with open( filtered_file_path, "rb") as f:
        #     filtered_results = joblib.load(f)
        regularized_results = { 'regularized_results': pd.read_csv( regularized_file_path)}
    logger.debug( f"total time: {time.time() - st}")
    return regularized_results
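
# End-to-end sketch (illustrative, not executed on import). The file paths and the config dict
# names (llm_config, extraction_config, classification_config) are assumptions; the ordering
# follows the *_mp helpers defined above:
#
#     leads = get_leads("data/leads.csv")  # hypothetical input path
#     crawled = crawl_results_mp(leads, "data/crawled_results.joblib", serp_provider="serper")
#     filtered = filter_results_mp(crawled["crawled_results"], "data/filtered_results.csv", llm_config)
#     extracted = extract_results_mp(filtered["filtered_results"], "data/extracted_results.joblib", extraction_config)
#     regularized = regularize_results_mp(extracted["extracted_results"], "data/regularized_results.csv",
#                                         provider="google", model="gemini-1.5-flash")
#     classified = classify_results_mp(regularized["regularized_results"], "data/classified_results.joblib",
#                                      classification_config)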