import json
import streamlit as st
from google.oauth2 import service_account
from google.cloud import language_v1

# Function to link an entity to Google Search via its Knowledge Graph ID (kgmid)
def query_knowledge_graph(entity_id):
    try:
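        # The kgmid parameter scopes the Google Search results to this specific Knowledge Graph entity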
        google_search_link = f"https://www.google.com/search?kgmid={entity_id}"
        st.markdown(f'[Open in Google Search]({google_search_link})', unsafe_allow_html=True)
    except Exception as e:
        st.write(f"An error occurred: {e}")


# Function to count entities with 'mid' that contains '/g/' or '/m/' in their metadata
def count_entities(entities):
    count = 0
    for entity in entities:
        metadata = entity.metadata
        if 'mid' in metadata and ('/g/' in metadata['mid'] or '/m/' in metadata['mid']):
            count += 1
    return count

# Sidebar content
st.sidebar.title("About This Tool")
st.sidebar.markdown("This tool leverages Google's NLP technology for entity analysis.")
st.sidebar.markdown("### Step-by-Step Guide")
st.sidebar.markdown("""
1. **Open the Tool**: Navigate to the URL where the tool is hosted.
2. **User Input**: Enter the text you want to analyze.
3. **Analyze**: Click the 'Analyze' button.
4. **View Results**: See the identified entities and their details.
""")

# Header and intro
st.title("Google Cloud NLP Entity Analyzer")
st.write("This tool analyzes text to identify entities such as people, locations, organizations, and events")
st.write("Entity salience scores are always relative to the analysed text.")

def sample_analyze_entities(text_content):
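    # Assumes the service-account key is stored as a JSON string under the "google_nlp"
    # key in Streamlit secrets (e.g. in .streamlit/secrets.toml)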
    service_account_info = json.loads(st.secrets["google_nlp"])
    credentials = service_account.Credentials.from_service_account_info(
        service_account_info, scopes=["https://www.googleapis.com/auth/cloud-platform"]
    )
    
    client = language_v1.LanguageServiceClient(credentials=credentials)
    document = {"content": text_content, "type_": language_v1.Document.Type.PLAIN_TEXT, "language": "en"}
    encoding_type = language_v1.EncodingType.UTF8

    response = client.analyze_entities(request={"document": document, "encoding_type": encoding_type})
    
    # Count the entities with 'mid' and either '/g/' or '/m/' in their metadata
    entity_count = count_entities(response.entities)

    # Build the summary heading once instead of repeating it in every branch
    if entity_count == 0:
        kg_summary = "but found no Google Entities"
    elif entity_count == 1:
        kg_summary = "and found 1 Google Entity"
    else:
        kg_summary = f"and found {entity_count} Google Entities"

    st.markdown(f"# We found {len(response.entities)} entities - {kg_summary}")
    st.write("---")


    for i, entity in enumerate(response.entities):
        st.write(f"Entity {i+1} of {len(response.entities)}")
        st.write(f"Name: {entity.name}")
        st.write(f"Type: {language_v1.Entity.Type(entity.type_).name}")
        st.write(f"Salience Score: {entity.salience}")

        if entity.metadata:
            st.write("Metadata:")
            st.write(entity.metadata)
            
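            # MIDs beginning with '/m/' (Freebase-era IDs) or '/g/' (Google-generated IDs) map to Knowledge Graph entries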
            if 'mid' in entity.metadata and ('/g/' in entity.metadata['mid'] or '/m/' in entity.metadata['mid']):
                entity_id = entity.metadata['mid']
                query_knowledge_graph(entity_id)

        if entity.mentions:
            mention_count = len(entity.mentions)
            plural = "s" if mention_count > 1 else ""
            st.write(f"Mentions: {mention_count} mention{plural}")
            st.write("Raw Array:")
            st.write(entity.mentions)
            # st.write(', '.join([mention.text.content for mention in entity.mentions]))


        st.write("---")

# User input for text analysis
user_input = st.text_area("Enter text to analyze")
#user_input = st.text_area("Enter text to analyze", max_chars=5000)

if st.button("Analyze"):
    if user_input:
        sample_analyze_entities(user_input)
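    else:
        # Optional guard (not part of the original flow): prompt the user when the text box is empty
        st.warning("Please enter some text to analyze.")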