import streamlit as st
import requests
import json

secret_key = st.secrets["secret_key"]
wl_key = st.secrets["wl_key"]
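# NOTE: st.secrets reads these values from .streamlit/secrets.toml (or the
# hosting platform's secrets settings); `secret_key` is loaded here but is not
# used elsewhere in this script.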

def call_api(url, keyword, wl_key, description_narrative, text):
    """Send the page (or raw text) and target query to the WordLift quality-rating endpoint."""
    api_url = "https://api.wordlift.io/quality-rating/score"

    payload = {
        "url": url,
        "keyword": keyword,
        "wl_key": wl_key,
        "description_narrative": description_narrative,
        "text": text  # passed in explicitly instead of relying on a global variable
    }

    headers = {
        "Content-Type": "application/json",
        "User-Agent": "insomnia/8.2.0",
        "Authorization": "Key " + wl_key
    }

    response = requests.post(api_url, json=payload, headers=headers)
    return response.json()  # assuming the API responds with JSON

# User inputs
url = st.text_input("Enter the URL of the webpage:")
query = st.text_input("Enter the query the content aims to rank for:")
narrative = st.text_area("Enter the descriptive narrative of the searcher:")
text = st.text_input("Enter the text to be analyzed (as an alternative to the URL):")

# Button to execute analysis
if st.button("Analyze"):
    if (url or text) and query and narrative and wl_key:
        response = call_api(url, query, wl_key, narrative, text)
        
        # Display JSON response
        st.json(response)

        try:
            # The "analyze" field may arrive as a JSON-encoded string; parse it if so
            analyze_data = response["analyze"]
            if isinstance(analyze_data, str):
                analyze_data = json.loads(analyze_data)

            # Extract the M (relevance) and T (trustworthiness) scores from the first result
            M = analyze_data[0]["M"]
            T = analyze_data[0]["T"]
            
            # Display traffic light system
            if M == 2 and T == 2:
                st.markdown("<h4 style='text-align: center; color: green;'>🟒 Content is highly relevant and trustworthy</h4>", unsafe_allow_html=True)
            elif M == 0 and T == 2:
                st.markdown("<h4 style='text-align: center; color: orange;'>🟑 Content is irrelevant even if the webpage is trustworthy</h4>", unsafe_allow_html=True)
            elif M == 1 or T == 1:
                st.markdown("<h4 style='text-align: center; color: orange;'>🟑 Content is partly relevant/helpful</h4>", unsafe_allow_html=True)
            else:
                st.markdown("<h4 style='text-align: center; color: red;'>πŸ”΄ Content is not relevant</h4>", unsafe_allow_html=True)
            
        except (KeyError, IndexError, ValueError) as e:
            st.error(f"Error extracting analysis results: {str(e)}")
            st.error("Please check the API response format and adapt the code accordingly.")
    else:
        st.warning("Please provide either a URL or text, along with the query and searcher narrative.")