blazingbunny committed
Commit 3fec030 · 1 Parent(s): 309b488

Update app.py

Files changed (1): app.py (+14 -5)
app.py CHANGED
@@ -3,6 +3,15 @@ import streamlit as st
 from google.oauth2 import service_account
 from google.cloud import language_v1
 
+# Function to count entities with 'mid' and '/g/' in their metadata
+def count_entities(entities):
+    count = 0
+    for entity in entities:
+        metadata = entity.metadata
+        if 'mid' in metadata and '/g/' in metadata['mid']:
+            count += 1
+    return count
+
 # Sidebar content
 st.sidebar.title("About This Tool")
 st.sidebar.markdown("This tool leverages Google's NLP technology for entity analysis.")
@@ -12,17 +21,14 @@ st.sidebar.markdown("""
 2. **User Input**: Enter the text you want to analyze.
 3. **Analyze**: Click the 'Analyze' button.
 4. **View Results**: See the identified entities and their details.
-
-To do:
-https://www.linkedin.com/pulse/seo-content-writing-how-optimize-entity-salience-emmanuel-dan-awoh/
 """)
 
 # Header and intro
 st.title("Google Cloud NLP Entity Analyzer")
 st.write("This tool analyzes text to identify entities such as people, locations, organizations, and events")
 st.write("Entity salience scores are always relative to the analysed text. In natural language processing, a salience score is always a prediction of what a human would consider to be the most important entities in the same text. A number of textual features contribute to the salience score.")
+
 def sample_analyze_entities(text_content):
-    # Assuming service_account_info is set in your Streamlit secrets
     service_account_info = json.loads(st.secrets["google_nlp"])
     credentials = service_account.Credentials.from_service_account_info(
         service_account_info, scopes=["https://www.googleapis.com/auth/cloud-platform"]
@@ -33,8 +39,11 @@ def sample_analyze_entities(text_content):
     encoding_type = language_v1.EncodingType.UTF8
 
     response = client.analyze_entities(request={"document": document, "encoding_type": encoding_type})
+
+    # Count the entities with 'mid' and '/g/' in their metadata
+    entity_count = count_entities(response.entities)
 
-    st.write(f"### We found {len(response.entities)} entities")
+    st.write(f"We found {len(response.entities)} entities - {entity_count} meet your criteria")
     st.write("---")
 
     for i, entity in enumerate(response.entities):
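
For context, here is a minimal standalone sketch of what the new count_entities filter does, using hand-made stand-ins for the language_v1 entity objects. The SimpleNamespace objects and MID strings below are illustrative only and are not part of the commit; entities whose metadata carries a 'mid' containing '/g/' appear to be the ones tied to a Google Knowledge Graph machine ID (as opposed to legacy '/m/'-style IDs).

```python
# Sketch only: exercises count_entities from this commit against fake entities.
# SimpleNamespace stands in for a language_v1 Entity; the MIDs below are made up.
from types import SimpleNamespace

def count_entities(entities):
    count = 0
    for entity in entities:
        metadata = entity.metadata
        if 'mid' in metadata and '/g/' in metadata['mid']:
            count += 1
    return count

fake_entities = [
    SimpleNamespace(metadata={"mid": "/g/11b2example"}),  # counted: '/g/' MID present
    SimpleNamespace(metadata={"mid": "/m/0example"}),     # skipped: '/m/' MID only
    SimpleNamespace(metadata={}),                         # skipped: no 'mid' key
]

print(count_entities(fake_entities))  # -> 1
```

On a real analyze_entities response, entity.metadata is a protobuf map that supports the same `in` and `[]` checks, which is why the helper can be passed response.entities directly.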