# (removed: scraped-page residue — file-size banner, commit hash, and line-number gutter)
"""
Util functions for openai api
"""
import json
import os
from thefuzz import process


def get_lab_member_info(name: str):
    """Return the first lab member whose name contains *name* (case-insensitive), as a JSON string.

    Looks up database/original_documents/members.json relative to the current
    working directory. Returns "{}" (empty JSON object) when no member matches.
    """
    path = os.path.join(os.getcwd(), 'database/original_documents/members.json')
    with open(path, 'r') as handle:
        members = json.load(handle)

    # members.json groups people under top-level category keys; scan every group.
    for group in members.values():
        for member in group:
            if name.lower() in member['name'].lower():
                return json.dumps(member)

    return json.dumps({})


def get_lab_member_detailed_info(name: str, detailed_info: str):
    """Return one detail of the first member matching *name*, as a JSON string.

    The detail is chosen by keyword in *detailed_info*: "link"/"homepage"
    selects the member's links, "photo"/"pic"/"picture" selects the photo,
    anything else falls back to the description. Returns "{}" when no
    member's name contains *name* (case-insensitive).
    """
    path = os.path.join(os.getcwd(), 'database/original_documents/members.json')
    with open(path, 'r') as handle:
        members = json.load(handle)

    wanted = detailed_info.lower()
    for group in members.values():
        for member in group:
            if name.lower() not in member['name'].lower():
                continue
            if "link" in wanted or "homepage" in wanted:
                return json.dumps(member['links'])
            if "photo" in wanted or "pic" in wanted or "picture" in wanted:
                return json.dumps(member['photo'])
            return json.dumps(member["description"])

    return json.dumps({})

def get_publication_by_year(year: str):
    """Return publications matching *year*, merged into one JSON object string.

    Fixes: the original re-evaluated int(year) on every publication and raised
    an unguarded ValueError for non-numeric input (these helpers are driven by
    model-generated arguments); the parse is now hoisted and guarded, returning
    "{}" for unparseable years.

    NOTE: multiple publications from the same year are merged with dict.update,
    so later entries overwrite earlier ones and callers effectively see only
    the last match — preserved for backward compatibility with existing callers.
    """
    database_addr = os.path.join(os.getcwd(), 'database/original_documents/publications.json')
    with open(database_addr, 'r') as fin:
        all_pub_info = json.load(fin)

    # Parse once; tolerate bad input instead of crashing the tool call.
    try:
        wanted = int(year)
    except (TypeError, ValueError):
        return json.dumps({})

    data = {}
    for field in all_pub_info:
        for pub in all_pub_info[field]:
            if pub['year'] == wanted:
                data.update(pub)
    return json.dumps(data)


def get_pub_info(name: str):
    """Return the first publication whose title contains *name* (case-insensitive), as JSON.

    Fixes an inconsistency: get_publication_by_year reads this same
    publications.json as a dict of lists, while this function iterated it as a
    flat list — on the dict layout, `i` would be a key string and `i['title']`
    would raise TypeError. Both layouts are now accepted.
    Returns "{}" when nothing matches.
    """
    database_addr = os.path.join(os.getcwd(), 'database/original_documents/publications.json')
    with open(database_addr, 'r') as fin:
        all_pub_info = json.load(fin)

    # Normalize either file layout into a single stream of publication dicts.
    if isinstance(all_pub_info, dict):
        entries = (pub for group in all_pub_info.values() for pub in group)
    else:
        entries = iter(all_pub_info)

    for pub in entries:
        if name.lower() in pub['title'].lower():
            return json.dumps(pub)

    return json.dumps({})

def get_pub_by_name(name: str):
    """Return publications with an author matching *name*, merged into one JSON object string.

    Fixes an inconsistency: get_publication_by_year reads this same
    publications.json as a dict of lists, while this function iterated it as a
    flat list — on the dict layout, `i` would be a key string and `i['authors']`
    would raise TypeError. Both layouts are now accepted.

    NOTE: multiple matching publications are merged with dict.update, so later
    entries overwrite earlier ones — preserved for backward compatibility.
    """
    database_addr = os.path.join(os.getcwd(), 'database/original_documents/publications.json')
    with open(database_addr, 'r') as fin:
        all_pub_info = json.load(fin)

    # Normalize either file layout into a single stream of publication dicts.
    if isinstance(all_pub_info, dict):
        entries = (pub for group in all_pub_info.values() for pub in group)
    else:
        entries = iter(all_pub_info)

    data = {}
    for pub in entries:
        for author in pub['authors']:
            if name.lower() in author.lower():
                data.update(pub)
                break  # one match per publication is enough

    return json.dumps(data)

def get_fuzz_name(name: str):
    """Fuzzy-match *name* against all member names; return the best match or None.

    Uses thefuzz.process.extractOne with score_cutoff=50; returns None when no
    candidate clears the cutoff.

    BUG FIX: the original initialized ``choices = {}`` (a dict) and then called
    ``choices.add(...)``, which raises AttributeError on the very first member.
    A set supports .add() and also de-duplicates names.
    """
    database_addr = os.path.join(os.getcwd(), 'database/original_documents/members.json')
    with open(database_addr, 'r') as fin:
        all_members_info = json.load(fin)

    choices = set()
    for field in all_members_info:
        for member in all_members_info[field]:
            choices.add(member['name'])

    best = process.extractOne(name, choices, score_cutoff=50)
    return best[0] if best else None


def semantic_search(query: str):
    """Placeholder for semantic search; not implemented yet — always returns None."""
    return None