import json
from google.oauth2 import service_account
from google.cloud import language_v1
import streamlit as st

# Header and intro
st.write("## Introduction to the Knowledge Graph API")
st.write("---")
st.write("""
The Google Knowledge Graph API reveals entity information related to a keyword, that Google knows about.
This information can be very useful for SEO – discovering related topics and what Google believes is relevant.
It can also help when trying to claim/win a Knowledge Graph box on search results.
The API requires a high level of technical understanding, so this tool creates a simple public interface, with the ability to export data into spreadsheets.
""")

def sample_analyze_entities(text_content):
    # Parse the JSON string to a dictionary
    service_account_info = json.loads(st.secrets["google_nlp"])

    # Create credentials
    credentials = service_account.Credentials.from_service_account_info(
        service_account_info, scopes=["https://www.googleapis.com/auth/cloud-platform"]
    )

    # Initialize the LanguageServiceClient with the credentials
    client = language_v1.LanguageServiceClient(credentials=credentials)

    # Build the analyze-entities request: a plain-text English document,
    # with UTF-8 encoding so mention offsets are reported consistently
    type_ = language_v1.Document.Type.PLAIN_TEXT
    language = "en"
    document = {"content": text_content, "type_": type_, "language": language}
    encoding_type = language_v1.EncodingType.UTF8

    response = client.analyze_entities(request={"document": document, "encoding_type": encoding_type})

    # Create an empty list to hold the results
    entities_list = []

    for entity in response.entities:
        # Create a dictionary to hold individual entity details
        entity_details = {
            "Name": entity.name,
            "Type": language_v1.Entity.Type(entity.type_).name,
            "Salience Score": entity.salience,
            "Metadata": [],
            "Mentions": []
        }

        for metadata_name, metadata_value in entity.metadata.items():
            entity_details["Metadata"].append({metadata_name: metadata_value})

        for mention in entity.mentions:
            entity_details["Mentions"].append({
                "Text": mention.text.content,
                "Type": language_v1.EntityMention.Type(mention.type_).name
            })

        # Append the dictionary to the list
        entities_list.append(entity_details)

    # Use Streamlit to display the results
    st.write("### Analyzed Entities")
    for entity in entities_list:
        st.write(f"**Name**: {entity['Name']}")
        st.write(f"**Type**: {entity['Type']}")
        st.write(f"**Salience Score**: {entity['Salience Score']}")

        if entity["Metadata"]:
            st.write("**Metadata**: ")
            st.json(entity["Metadata"])

        if entity["Mentions"]:
            st.write("**Mentions**: ")
            st.json(entity["Mentions"])


    st.write(f"### Language of the text: {response.language}")

# User input for text analysis
user_input = st.text_area("Enter text to analyze")
if st.button("Analyze"):
    # Guard against empty input, which would make the API call fail
    if user_input.strip():
        sample_analyze_entities(user_input)
    else:
        st.warning("Please enter some text to analyze.")
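
# To run locally (assuming this file is saved as app.py):
#   streamlit run app.py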