import os os.environ["STREAMLIT_GLOBAL_CONFIG"] = "/data/.streamlit/config.toml" import time import uuid import random import urllib.parse # To parse URL parameters from functools import lru_cache import streamlit as st import numpy as np import pandas as pd import duckdb import hdbscan # Database file path DB_PATH = 'steampolis.duckdb' # Initialize database tables if they don't exist def initialize_database(): try: init_con = duckdb.connect(database=DB_PATH, read_only=False) init_con.execute(""" CREATE TABLE IF NOT EXISTS topics ( id TEXT PRIMARY KEY, name TEXT NOT NULL, description TEXT, created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ) """) init_con.execute(""" CREATE TABLE IF NOT EXISTS users ( id TEXT PRIMARY KEY, username TEXT NOT NULL UNIQUE, created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ) """) init_con.execute(""" CREATE TABLE IF NOT EXISTS comments ( id TEXT PRIMARY KEY, topic_id TEXT NOT NULL, user_id TEXT NOT NULL, content TEXT NOT NULL, created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, FOREIGN KEY (topic_id) REFERENCES topics(id), FOREIGN KEY (user_id) REFERENCES users(id) ) """) init_con.execute(""" CREATE TABLE IF NOT EXISTS votes ( id TEXT PRIMARY KEY, user_id TEXT NOT NULL, comment_id TEXT NOT NULL, vote_type TEXT NOT NULL CHECK (vote_type IN ('agree', 'disagree', 'neutral')), created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, FOREIGN KEY (user_id) REFERENCES users(id), FOREIGN KEY (comment_id) REFERENCES comments(id), UNIQUE (user_id, comment_id) ) """) init_con.execute(""" CREATE TABLE IF NOT EXISTS user_comment_collections ( id TEXT PRIMARY KEY, user_id TEXT NOT NULL, comment_id TEXT NOT NULL, collected_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, FOREIGN KEY (user_id) REFERENCES users(id), FOREIGN KEY (comment_id) REFERENCES comments(id), UNIQUE (user_id, comment_id) ) """) init_con.execute(""" CREATE TABLE IF NOT EXISTS user_progress ( id TEXT PRIMARY KEY, user_id TEXT NOT NULL, topic_id TEXT NOT NULL, last_comment_id_viewed TEXT, last_viewed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, FOREIGN KEY (user_id) REFERENCES users(id), FOREIGN KEY (topic_id) REFERENCES topics(id), FOREIGN KEY (last_comment_id_viewed) REFERENCES comments(id), UNIQUE (user_id, topic_id) ) """) # Create system user if it doesn't exist try: init_con.execute(""" INSERT INTO users (id, username) VALUES ('system', 'System') ON CONFLICT (id) DO NOTHING """) except Exception as e: print(f"Warning: Could not create system user: {e}") except Exception as e: st.error(f"Database initialization failed: {e}") finally: if 'init_con' in locals() and init_con: init_con.close() def get_ttl_hash(seconds=360): """Return the same value withing `seconds` time period""" return round(time.time() / seconds) # Helper function to get the R matrix from user voting data # This matrix represents user-comment interactions (votes) # Users are rows, comments are columns. # Values: 1 for 'agree', 0 for 'neutral', -1 for 'disagree', NaN for unvoted. # Requires pandas and numpy. def get_r_matrix_from_votes(): local_con = None try: local_con = duckdb.connect(database=DB_PATH, read_only=True) # Read-only is sufficient # Fetch all vote data # fetchdf requires pandas votes_df = local_con.execute(""" SELECT user_id, comment_id, vote_type FROM votes """).fetchdf() if votes_df.empty: # Return empty matrix and mappings if no votes exist # pd.DataFrame requires pandas return pd.DataFrame(), {}, {} # Map vote types to numerical values vote_mapping = {'agree': 1, 'neutral': 0, 'disagree': -1} votes_df['vote_value'] = votes_df['vote_type'].map(vote_mapping) # Create the R matrix using pivot_table # This automatically handles missing user-comment pairs by filling with NaN # pivot_table requires pandas r_matrix = votes_df.pivot_table( index='user_id', columns='comment_id', values='vote_value' ) # Create mappings from user/comment IDs to matrix indices (optional but useful) user_id_to_index = {user_id: i for i, user_id in enumerate(r_matrix.index)} comment_id_to_index = {comment_id: i for i, comment_id in enumerate(r_matrix.columns)} return r_matrix, user_id_to_index, comment_id_to_index except Exception as e: # st.error is not available here, just print or log print(f"Error generating R matrix: {e}") # Return empty results in case of error # pd.DataFrame requires pandas return pd.DataFrame(), {}, {} finally: if local_con: local_con.close() # Custom Hamming-like distance function handling NaNs for clustering # Assumes numpy is imported as np def hamming_distance_with_nan(u1, u2): """ Calculates a Hamming-like distance between two vectors (user vote profiles) ignoring positions where either value is NaN. Args: u1 (np.ndarray or pd.Series): First vector. u2 (np.ndarray or pd.Series): Second vector. Returns: float: The proportion of differing elements among non-NaN positions. Returns 0.0 if vectors are identical (including all NaN), 1.0 if different but no common non-NaN positions. """ u1 = np.asarray(u1) u2 = np.asarray(u2) # Find positions where both are not NaN both_not_nan_mask = ~np.isnan(u1) & ~np.isnan(u2) # If no common non-NaN values if not np.any(both_not_nan_mask): # If vectors are identical (e.g., both all NaN), distance is 0. # If different vectors with no common non-NaN, distance is 1 (max difference). if np.array_equal(u1, u2, equal_nan=True): return 0.0 else: return 1.0 # Filter to only positions where both are not NaN u1_filtered = u1[both_not_nan_mask] u2_filtered = u2[both_not_nan_mask] # Calculate proportion of differing elements among common non-NaN positions diff_count = np.sum(u1_filtered != u2_filtered) total_count = len(u1_filtered) return diff_count / total_count # Function to get clusters using HDBSCAN with the custom Hamming distance # Assumes pandas is imported as pd, numpy as np, and hdbscan is imported def get_clusters_from_r_matrix(r_matrix): """ Performs HDBSCAN clustering on the R matrix using a custom Hamming-like distance that handles NaN values. Args: r_matrix (pd.DataFrame): The user-comment vote matrix from get_r_matrix_from_votes. Index should be user_id, columns comment_id. Returns: np.ndarray: An array of cluster labels for each user in the r_matrix index. -1 indicates noise. Returns empty array if clustering fails or r_matrix is empty. """ # Check if r_matrix is empty if r_matrix.empty: print("R matrix is empty, cannot perform clustering.") return np.array([]) try: # Instantiate HDBSCAN with the custom metric # Using default parameters for min_cluster_size and min_samples # These might need tuning based on data characteristics and desired cluster granularity # allow_single_cluster=True prevents an error if all points form one cluster clusterer = hdbscan.HDBSCAN(metric=hamming_distance_with_nan, allow_single_cluster=True) # Fit the model directly to the DataFrame values # HDBSCAN fit expects a numpy array or similar structure clusterer.fit(r_matrix.values) # Return the cluster labels return clusterer.labels_ except Exception as e: # In a Streamlit app context, st.error would be better, but not available here. # Print to console/logs. print(f"Error during HDBSCAN clustering: {e}") return np.array([]) # Return empty array on error def get_cluster_labels(): r_matrix, user_id_to_index, _ = get_r_matrix_from_votes() cluster_labels = get_clusters_from_r_matrix(r_matrix) if len(cluster_labels) == 0: cluster_labels = [0] * len(user_id_to_index) return cluster_labels, user_id_to_index # Function to get the cluster label for a specific user @lru_cache() def get_user_cluster_label(user_id, ttl_hash=None): """ Gets the HDBSCAN cluster label for a specific user and a list of users sharing the same cluster. Args: user_id (str): The ID of the user. Returns: tuple: A tuple containing: - int or None: The cluster label (an integer, -1 for noise) if the user is found in the clustering result, otherwise None. - list[str]: A list of user IDs (including the input user_id if found) that belong to the same cluster. Returns an empty list if the user is not found or has no cluster label. """ # get_cluster_labels is already cached, so calling it repeatedly is fine cluster_labels, user_id_to_index = get_cluster_labels() # Create a reverse mapping from index to user_id for easier lookup index_to_user_id = {index: uid for uid, index in user_id_to_index.items()} target_cluster_label = None same_cluster_users = [] # Check if the user_id exists in the mapping if user_id in user_id_to_index: user_index = user_id_to_index[user_id] # Ensure the index is within the bounds of the cluster_labels array if 0 <= user_index < len(cluster_labels): target_cluster_label = int(cluster_labels[user_index]) # Get the target label # Find all users with the same cluster label for index, current_user_id in index_to_user_id.items(): # Ensure the index is valid for cluster_labels if 0 <= index < len(cluster_labels): current_user_label = int(cluster_labels[index]) if current_user_label == target_cluster_label: same_cluster_users.append(current_user_id) else: # This case should ideally not happen if index_to_user_id is consistent print(f"Warning: Index {index} from index_to_user_id out of bounds for cluster labels array length {len(cluster_labels)}") else: # This case should ideally not happen if user_id_to_index is consistent print(f"Warning: User index {user_index} out of bounds for cluster labels array length {len(cluster_labels)}") # Return None and empty list as user couldn't be processed return None, [] else: # User not found in the R matrix used for clustering (e.g., new user with no votes) # print(f"User ID {user_id} not found in clustering data.") # Optional: for debugging # Return None and empty list as user is not part of the current clustering result return None, [] # Return the target user's label and the list of users in that cluster return target_cluster_label, same_cluster_users # Helper function to get top k most polarized comments for a list of users def get_top_k_polarized_comments_for_users(user_ids, k=5): """ Retrieves the top k comments most agreed or disagreed upon (most polarized) by a given list of users. Args: user_ids (list[str]): A list of user IDs. k (int): The number of top comments to retrieve. Returns: list[tuple]: A list of tuples, where each tuple contains (comment_id, comment_content, average_vote_score), ordered by the absolute value of the average score descending. Returns an empty list if no votes are found for these users or on error. """ if not user_ids: # print("Warning: get_top_k_polarized_comments_for_users called with empty user_ids list.") # Optional debug return [] # Cannot query without user IDs local_con = None try: local_con = duckdb.connect(database=DB_PATH, read_only=True) # Use parameterized query for the list of user IDs # DuckDB's Python API handles lists for IN clauses query = """ SELECT v.comment_id, c.content, AVG(CASE WHEN v.vote_type = 'agree' THEN 1.0 WHEN v.vote_type = 'neutral' THEN 0.0 WHEN v.vote_type = 'disagree' THEN -1.0 ELSE NULL -- Should not happen with current data END) as average_vote_score FROM votes v JOIN comments c ON v.comment_id = c.id WHERE v.user_id IN (?) GROUP BY v.comment_id, c.content HAVING COUNT(v.user_id) > 0 -- Ensure at least one user from the list voted on this comment ORDER BY ABS(average_vote_score) DESC LIMIT ? """ # Pass the list of user_ids and k as parameters result = local_con.execute(query, [user_ids, k]).fetchall() return result except Exception as e: # st.error is not available here, just print or log print(f"Error getting top k polarized comments for users {user_ids}: {e}") return [] # Return empty list on error finally: if local_con: local_con.close() @lru_cache() def estimate_group_voting_diversity(user_ids, topic_id): """ Estimates the diversity of voting within a group of users for a specific topic. Diversity is measured by the average variance of numerical vote scores (-1, 0, 1) across comments that at least two users in the group have voted on. Args: user_ids (list[str]): A list of user IDs belonging to the group. topic_id (str): The ID of the topic. Returns: float: A diversity score between 0.0 and 1.0. 0.0 indicates no diversity (all users voted the same way on all shared comments), 1.0 indicates maximum possible diversity (e.g., half agree, half disagree on shared comments). Returns 0.0 if the group has less than 2 users or if no comments were voted on by at least two users in the group. """ if not user_ids or len(user_ids) < 2: return 0.0 local_con = None try: local_con = duckdb.connect(database=DB_PATH, read_only=True) # Get all votes for the given topic by the specified users # Join with comments to filter by topic_id query = """ SELECT v.comment_id, v.user_id, v.vote_type FROM votes v JOIN comments c ON v.comment_id = c.id WHERE c.topic_id = ? AND v.user_id IN (?) """ # DuckDB's Python API handles lists for IN clauses results = local_con.execute(query, [topic_id, user_ids]).fetchall() if not results: return 0.0 # No votes found for this group on this topic # Map vote types to numerical scores vote_map = {'agree': 1.0, 'neutral': 0.0, 'disagree': -1.0} # Group votes by comment ID votes_by_comment = {} for comment_id, user_id, vote_type in results: if comment_id not in votes_by_comment: votes_by_comment[comment_id] = [] # Append the numerical vote score votes_by_comment[comment_id].append(vote_map.get(vote_type, 0.0)) # Default to 0.0 for unknown types # Calculate variance for comments voted on by at least two users in the group variances = [] for comment_id, comment_votes in votes_by_comment.items(): # Ensure the comment was voted on by at least two users from the input list if len(comment_votes) >= 2: # Use numpy to calculate variance variances.append(np.var(comment_votes)) if not variances: return 0.0 # No comments voted on by at least two users in the group # The maximum possible variance for values in [-1, 0, 1] is 1.0 # (e.g., half votes are 1, half are -1). # The average variance is already in the range [0, 1]. average_variance = np.mean(variances) return average_variance except Exception as e: # st.error is not available here, just print or log print(f"Error estimating group voting diversity for topic {topic_id} and users {user_ids}: {e}") return 0.0 # Return 0.0 on error finally: if local_con: local_con.close() # Helper function to name a group of users based on their participation and voting diversity def name_user_group(user_ids, topic_id): """ Generates a descriptive name and description for a group of users within a specific topic based on their participation level and voting diversity. Args: user_ids (list[str]): A list of user IDs belonging to the group. topic_id (str): The ID of the topic. Returns: tuple[str, str]: A tuple containing the name and description for the group. Returns ("Silent Gathering", "This group has no members.") or ("Unengaged Group", "No members of this group have voted on this topic.") or ("Isolated Voices", "This topic has no voters yet.") or ("Mysterious Gathering", "An error occurred while trying to name this group.") in edge cases or on error. """ # Handle empty user list if not user_ids: return "Silent Gathering", "This group has no members." local_con = None try: local_con = duckdb.connect(database=DB_PATH, read_only=True) # 1. Get total unique users who voted in the topic total_voters_result = local_con.execute(""" SELECT COUNT(DISTINCT user_id) FROM votes v JOIN comments c ON v.comment_id = c.id WHERE c.topic_id = ? """, [topic_id]).fetchone() total_voters_in_topic = total_voters_result[0] if total_voters_result else 0 # 2. Get unique users from the input list who voted in the topic # Filter user_ids to only those present in the votes table for this topic # DuckDB IN clause handles lists directly group_voters_result = local_con.execute(""" SELECT COUNT(DISTINCT user_id) FROM votes v JOIN comments c ON v.comment_id = c.id WHERE c.topic_id = ? AND v.user_id IN (?) """, [topic_id, user_ids]).fetchone() group_voters_count = group_voters_result[0] if group_voters_result else 0 # Handle case where no one in the group has voted on this topic if group_voters_count == 0: return "Unengaged Group", "No members of this group have voted on this topic." # Handle case where topic has no voters but the group somehow has voters (shouldn't happen if queries are correct) if total_voters_in_topic == 0: # This case is unlikely if group_voters_count > 0, but for safety return "Isolated Voices", "This topic has no voters yet." # 3. Calculate significance (proportion of group voters among all topic voters) significance_proportion = group_voters_count / total_voters_in_topic # 4. Get diversity score for the group diversity_score = estimate_group_voting_diversity(user_ids, topic_id) # 5. Determine name and description based on significance and diversity # Define thresholds (can be tuned) SIG_LOW_THRESHOLD = 0.1 SIG_MED_THRESHOLD = 0.5 # High if > MED, Med if > LOW and <= MED, Low if <= LOW DIV_LOW_THRESHOLD = 0.2 DIV_MED_THRESHOLD = 0.5 # High if > MED, Med if > LOW and <= MED, Low if <= LOW significance_level = "low" if significance_proportion > SIG_MED_THRESHOLD: significance_level = "high" elif significance_proportion > SIG_LOW_THRESHOLD: significance_level = "medium" diversity_level = "low" if diversity_score > DIV_MED_THRESHOLD: diversity_level = "high" elif diversity_score > DIV_LOW_THRESHOLD: diversity_level = "medium" # Assign names and descriptions based on levels if significance_level == "high": if diversity_level == "low": return "Likeheart Village", "A large group where opinions converge." elif diversity_level == "medium": return "Harmonious Assembly", "A significant gathering with mostly aligned views." else: # high diversity return "Vibrant Forum", "A large, active group with diverse perspectives." elif significance_level == "medium": if diversity_level == "low": return "Quiet Consensus", "A moderately sized group with little disagreement." elif diversity_level == "medium": return "Mixed Opinions", "A balanced group with varied viewpoints." else: # high diversity return "Lively Discussion", "A moderately sized group with strong, differing opinions." else: # low significance if diversity_level == "low": return "Echo Chamber Nook", "A small corner where similar thoughts resonate." elif diversity_level == "medium": return "Scattered Thoughts", "A small group with somewhat varied, isolated views." else: # high diversity return "Whispering Gallery", "A small group where many different ideas are quietly shared." except Exception as e: print(f"Error naming user group for topic {topic_id} and users {user_ids}: {e}") return "Mysterious Gathering", "An error occurred while trying to name this group." # Default name and description on error finally: if local_con: local_con.close() # Helper function to get a random unvoted comment def get_random_unvoted_comment(user_id, topic_id): new_area_comments = st.session_state.get("_new_area_comments", []) if len(new_area_comments) != 0: value = new_area_comments.pop() st.session_state._new_area_comments = new_area_comments return value[0], value[1] local_con = None try: local_con = duckdb.connect(database=DB_PATH, read_only=False) # First, check if there are any comments at all in the topic comment_count = local_con.execute(""" SELECT COUNT(*) FROM comments WHERE topic_id = ? """, [topic_id]).fetchone()[0] if comment_count == 0: return None, "Share your insight!" # Attempt to get a random comment that the user has NOT voted on result = local_con.execute(""" SELECT c.id, c.content FROM comments c WHERE c.topic_id = ? AND NOT EXISTS ( SELECT 1 FROM votes v WHERE v.comment_id = c.id AND v.user_id = ? ) ORDER BY RANDOM() LIMIT 1 """, [topic_id, user_id]).fetchone() if result: # Check for cluster change and set message flag current_label, current_users = get_user_cluster_label(user_id, topic_id) current_users_set = set(current_users) previous_label = st.session_state.get('_previous_cluster_label') previous_users_set = st.session_state.get('_previous_cluster_users_set', set()) # Check if cluster label has changed AND the set of users in the new cluster is different # This indicates the user has moved to a different group of commenters if current_label is not None and previous_label is not None and current_label != previous_label: if current_users_set != previous_users_set: # Set a flag in session state to display the message later in the main rendering logic st.session_state._show_new_area_message = True new_area_comments = get_top_k_polarized_comments_for_users(current_users_set, k=5) st.session_state._new_area_comments = new_area_comments # print(f"DEBUG: Cluster changed for user {user_id} in topic {topic_id}: {previous_label} -> {current_label}") # print(f"DEBUG: Previous users count: {len(previous_users_set)}, Current users count: {len(current_users_set)}") st.session_state._previous_cluster_label = current_label st.session_state._previous_cluster_users_set = current_users_set # Found an unvoted comment return result[0], result[1] else: # No unvoted comments found for this user in this topic return None, "No new thoughts for now..." except Exception as e: st.error(f"Error getting random unvoted comment: {e}") return None, f"Error loading comments: {str(e)}" finally: if local_con: local_con.close() # Helper function to find or create a user def find_or_create_user(username): local_con = None try: local_con = duckdb.connect(database=DB_PATH, read_only=False) user_result = local_con.execute("SELECT id FROM users WHERE username = ?", [username]).fetchone() if user_result: return user_result[0] else: user_id = str(uuid.uuid4()) local_con.execute("INSERT INTO users (id, username) VALUES (?, ?)", [user_id, username]) return user_id except Exception as e: st.error(f"Error finding or creating user: {e}") return None finally: if local_con: local_con.close() # Helper function to update user progress def update_user_progress(user_id, topic_id, comment_id): local_con = None try: local_con = duckdb.connect(database=DB_PATH, read_only=False) progress_id = str(uuid.uuid4()) local_con.execute(""" INSERT INTO user_progress (id, user_id, topic_id, last_comment_id_viewed) VALUES (?, ?, ?, ?) ON CONFLICT (user_id, topic_id) DO UPDATE SET last_comment_id_viewed = EXCLUDED.last_comment_id_viewed """, [progress_id, user_id, topic_id, comment_id]) except Exception as e: st.error(f"Error updating user progress: {e}") finally: if local_con: local_con.close() # Helper function to handle comment submission UI and logic def share_wisdom(prompt, allow_skip=False): st.markdown(prompt) new_comment_text = st.text_area(f"Your Insight {'that different from others above (Empty to skip)' if allow_skip else ''}", key="new_comment_input") if st.button("Share Your Wisdom"): if new_comment_text: user_email = st.session_state.get('user_email', '') user_id = find_or_create_user(user_email) # Ensure user exists if user_id: local_con = None try: local_con = duckdb.connect(database=DB_PATH, read_only=False) comment_id = str(uuid.uuid4()) local_con.execute(""" INSERT INTO comments (id, topic_id, user_id, content) VALUES (?, ?, ?, ?) """, [comment_id, topic_id, user_id, new_comment_text]) # Append new comment to history st.session_state.comment_history += f"\n\nšŸ’¬ {new_comment_text}" # Get next comment (could be the one just submitted) next_comment_id, next_comment_content = get_random_unvoted_comment(user_id, topic_id) st.session_state.current_comment_id = next_comment_id st.session_state.current_comment_content = next_comment_content # Update progress update_user_progress(user_id, topic_id, next_comment_id) st.session_state.new_comment_input = "" # Clear input box st.rerun() # Rerun to update UI except Exception as e: st.error(f"Error sharing information: {e}") finally: if local_con: local_con.close() else: st.error("Could not find or create user.") elif allow_skip: return else: st.warning("Please enter your thought.") # --- Page Functions --- def home_page(): st.title("Welcome to SteamPolis") st.markdown("Choose an option:") if st.button("Create New Topic (Quest)"): st.session_state.page = 'create_topic' st.rerun() st.markdown("---") st.markdown("Or join an existing topic (quest):") topic_input = st.text_input("Enter Topic ID or URL") if st.button("Join Topic"): topic_id = topic_input.strip() if topic_id.startswith('http'): # Handle full URL parsed_url = urllib.parse.urlparse(topic_id) query_params = urllib.parse.parse_qs(parsed_url.query) topic_id = query_params.get('topic', [None])[0] if topic_id: st.session_state.page = 'view_topic' st.session_state.current_topic_id = topic_id # Attempt to load email from session state (mimics browser state) # If email exists, handle email submission logic immediately on view page load st.rerun() else: st.warning("Please enter a valid Topic ID or URL.") def create_topic_page(): st.title("Create a New Topic") new_topic_name = st.text_input("Topic Name (Imagine you are the king, how would you share your concern)") new_topic_description = st.text_area('Description (Begin with "I want to figure out...", imagine you are the king, what would you want to know)', height=150) new_topic_seed_comments = st.text_area("Initial Comments (separate by new line, imagine there are civilians what will they answer)", height=200) creator_email = st.text_input("Enter your Email (required for creation)") if st.button("Create Topic"): if not creator_email: st.error("Email is required to create a topic.") return topic_id = str(uuid.uuid4())[:8] user_id = find_or_create_user(creator_email) if user_id: local_con = None try: local_con = duckdb.connect(database=DB_PATH, read_only=False) local_con.execute("INSERT INTO topics (id, name, description) VALUES (?, ?, ?)", [topic_id, new_topic_name, new_topic_description]) seed_comments = [c.strip() for c in new_topic_seed_comments.split('\n') if c.strip()] for comment in seed_comments: comment_id = str(uuid.uuid4()) local_con.execute("INSERT INTO comments (id, topic_id, user_id, content) VALUES (?, ?, ?, ?)", [comment_id, topic_id, 'system', comment]) # Get the first comment to display after creation comment_to_display_id, comment_to_display_content = get_random_unvoted_comment(user_id, topic_id) # Set initial progress for creator update_user_progress(user_id, topic_id, comment_to_display_id) st.session_state.page = 'view_topic' st.session_state.current_topic_id = topic_id st.session_state.user_email = creator_email # Store email in session state st.session_state.current_comment_id = comment_to_display_id st.session_state.current_comment_content = comment_to_display_content st.session_state.comment_history = "" st.success(f"Topic '{new_topic_name}' created!") st.rerun() except Exception as e: st.error(f"Error creating topic: {e}") finally: if local_con: local_con.close() else: st.error("Could not find or create user.") if st.button("Back to Home"): st.session_state.page = 'home' st.rerun() def view_topic_page(): topic_id = st.session_state.get('current_topic_id') user_email = st.session_state.get('user_email', '') current_comment_id = st.session_state.get('current_comment_id') current_comment_content = st.session_state.get('current_comment_content', "Loading comments...") comment_history = st.session_state.get('comment_history', "") show_new_area_message = st.session_state.get('_show_new_area_message', True) if not topic_id: st.warning("No topic selected. Returning to home.") st.session_state.page = 'home' st.rerun() return local_con = None topic_name = "Loading..." topic_description = "Loading..." try: local_con = duckdb.connect(database=DB_PATH, read_only=True) topic_result = local_con.execute("SELECT name, description FROM topics WHERE id = ?", [topic_id]).fetchone() if topic_result: topic_name, topic_description = topic_result else: st.error(f"Topic ID '{topic_id}' not found.") st.session_state.page = 'home' st.rerun() return except Exception as e: st.error(f"Error loading topic details: {e}") if local_con: local_con.close() st.session_state.page = 'home' st.rerun() return finally: if local_con: local_con.close() # Include functional information st.markdown(f"**Shareable Quest Scroll ID:** `{topic_id}`") # Construct shareable link using current app URL app_url = st.query_params.get('base', ['http://localhost:8501/'])[0] # Get base URL if available shareable_link = f"{app_url}?topic={topic_id}" if app_url else f"?topic={topic_id}" st.markdown(f"**Shareable Scroll Link:** `{shareable_link}`") st.title("Seeker Quest") # Check if user email is available in session state. # user_email is already retrieved from st.session_state at the start of view_topic_page. if user_email: # Get the user ID. find_or_create_user handles the DB connection internally. user_id = find_or_create_user(user_email) if user_id: # Check if user has any progress recorded for this specific topic. # This indicates they have viewed comments or interacted before. local_con = None progress_exists = False try: local_con = duckdb.connect(database=DB_PATH, read_only=True) # Query the user_progress table for a record matching user_id and topic_id result = local_con.execute(""" SELECT 1 FROM user_progress WHERE user_id = ? AND topic_id = ? LIMIT 1 """, [user_id, topic_id]).fetchone() progress_exists = result is not None except Exception as e: # Log error but don't stop the app. Assume no progress on error. st.error(f"Error checking user progress for greeting: {e}") # progress_exists remains False finally: if local_con: local_con.close() # Display the appropriate greeting based on progress if progress_exists: # Acknowledge return and remind of quest st.markdown("Welcome back, Seeker. Your journey through the whispers of Aethelgard continues.") st.markdown(f"You pause to recall the heart of the Emperor's concern regarding **{topic_name}**: `{topic_description}`.") # Introduce the next comment st.markdown("As you press onward, you encounter another soul willing to share their thoughts on this vital matter.") else: # Introduce the setting and the Emperor's concern st.markdown("Welcome, Seeker, to the ancient Kingdom of Aethelgard, a realm of digital whispers and forgotten wisdom.") st.markdown("For centuries, Aethelgard has stood, preserving the echoes of an age long past. But now, a matter of great weight troubles the Emperor's thoughts.") st.markdown(f"The Emperor seeks clarity on a crucial topic: **`{topic_name}`**.") # Explain the quest and the user's role st.markdown("You, among a select few, have been summoned for a vital quest: to traverse the kingdom, gather insights, and illuminate this matter for the throne.") st.markdown(f"At a recent royal gathering, the Emperor revealed the heart of their concern, the very essence of your mission: `{topic_description}`") # Transition to the task st.markdown("Your journey begins now. The path leads to the first village, where the voices of the realm await your ear.") # --- Email Prompt --- if not user_email: st.subheader("Enter your Email to view comments and progress") view_user_email_input = st.text_input("Your Email", key="view_email_input") if st.button("Submit Email", key="submit_view_email"): if view_user_email_input: st.session_state.user_email = view_user_email_input user_id = find_or_create_user(view_user_email_input) if user_id: comment_to_display_id, comment_to_display_content = get_random_unvoted_comment(user_id, topic_id) st.session_state.current_comment_id = comment_to_display_id st.session_state.current_comment_content = comment_to_display_content update_user_progress(user_id, topic_id, comment_to_display_id) st.session_state.comment_history = "" # Reset history on new email submission st.rerun() else: st.error("Could not find or create user with that email.") else: st.warning("Please enter your email.") return # Stop rendering the rest until email is submitted # --- Comment Display and Voting --- # Define introductory phrases for encountering a new perspective intro_phrases = [ "A new whisper reaches your ear", "You ponder a fresh perspective", "Another voice shares their view", "A thought emerges from the crowd", "The wind carries a new idea", "Someone offers an insight", "You overhear a comment", "A different angle appears", "The village elder shares", "A traveler murmurs", ] # Randomly select a phrase random_phrase = random.choice(intro_phrases) st.markdown(comment_history) if current_comment_id: # Only show voting if there's a comment to vote on # Display comment history and the current comment with the random intro if show_new_area_message == True: _, user_ids = get_user_cluster_label(user_id) new_area_name, desc = name_user_group(user_ids, topic_id) st.markdown(f"You've collected {len(comment_history.splitlines())} insights so far.") st.markdown(f"And yet a new place you have arrived: `{new_area_name}`. {desc}") st.session_state._show_new_area_message = False st.markdown(f"[Collected new insight, {random_phrase}]:\n* {current_comment_content}") # Handle vote logic def handle_vote(vote_type, comment_id, topic_id, user_id): # Add JavaScript to scroll to the bottom anchor after the page reloads # This script will be included in the next render cycle triggered by st.rerun() # Ensure an element with id="bottom" exists in the rendered page, # typically placed after the content you want to scroll to (e.g., comment history). local_con = None try: local_con = duckdb.connect(database=DB_PATH, read_only=False) # Use INSERT OR REPLACE INTO or ON CONFLICT DO UPDATE to handle repeat votes # The UNIQUE constraint on (user_id, comment_id) in the votes table # allows us to update the existing vote if one already exists for this user/comment pair. # We generate a new UUID for the 'id' column, but it will only be used # if this is a new insert. If it's an update, the existing 'id' is kept. vote_id = str(uuid.uuid4()) # Generate a new UUID for the potential insert local_con.execute(""" INSERT INTO votes (id, user_id, comment_id, vote_type) VALUES (?, ?, ?, ?) ON CONFLICT (user_id, comment_id) DO UPDATE SET vote_type = excluded.vote_type, -- Update vote_type with the new value created_at = current_localtimestamp(); -- Update timestamp to reflect the latest vote """, [vote_id, user_id, comment_id, vote_type]) # Append voted comment to history # Note: This appends the comment regardless of whether it was a new vote or an update. # The history is a simple log, not a reflection of vote changes. vote_text = "šŸ‘" if vote_type == "agree" else "šŸ‘Ž" if vote_type == "disagree" else "😐" comment_history = st.session_state.comment_history.split("\n\n") if len(comment_history) > 10: comment_history = ["..."] + comment_history[-10:] st.session_state.comment_history = "\n\n".join(comment_history) st.session_state.comment_history += f"\n\n{vote_text} {current_comment_content}" # Check vote count and trigger special event # Initialize vote_count if it doesn't exist if 'vote_count' not in st.session_state: st.session_state.vote_count = 0 # Increment vote count only if it was a new vote or a change? # The current logic increments on every button click. Let's keep that for now # as it drives the special event trigger based on interaction frequency. st.session_state.vote_count += 1 # Check if it's time for a potential special event (every 5 votes/interactions) if st.session_state.vote_count % 5 == 0: st.session_state.vote_count = 0 # Reset count after triggering # 30% chance to trigger the special sharing event if random.random() < 0.3: prompts = [ "An elder approaches you, seeking your perspective on the Emperor's concern. What wisdom do you share?", "A letter arrives from the Emperor's office, requesting your personal insight on the matter. What counsel do you offer?", "As you walk through the streets, people gather, eager to hear your thoughts on the Emperor's dilemma. What advice do you give?" ] # Pass the current topic_id to share_wisdom if needed, though it's not currently used there. share_wisdom(random.choice(prompts), allow_skip=True) # Get next comment # This should always get the next unvoted comment for the user in this topic. next_comment_id, next_comment_content = get_random_unvoted_comment(user_id, topic_id) st.session_state.current_comment_id = next_comment_id st.session_state.current_comment_content = next_comment_content # Update progress # Update the user's progress to the next comment they should see. update_user_progress(user_id, topic_id, next_comment_id) st.rerun() # Rerun to update UI except Exception as e: st.error(f"Error processing vote: {e}") finally: if local_con: local_con.close() col1, col2, col3, col4 = st.columns(4) user_id = find_or_create_user(user_email) # Ensure user exists col1.markdown("*Personally I...*") if col2.button("Agree"): handle_vote("agree", current_comment_id, topic_id, user_id) if col3.button("Neutral"): handle_vote("neutral", current_comment_id, topic_id, user_id) if col4.button("Disagree"): handle_vote("disagree", current_comment_id, topic_id, user_id) else: st.info("No more comments to vote on in this topic." if "No more comments" in current_comment_content else current_comment_content) st.markdown("") # --- Comment Submission --- with st.expander("Offer Your Counsel to the Emperor", expanded=False): share_wisdom("Having heard the thoughts of others, what wisdom do you wish to share regarding the Emperor's concern?") st.markdown("---") if st.button("Pack all insights and Return to Capital"): st.session_state.page = 'home' st.rerun() # Initialize session state for navigation and data if 'page' not in st.session_state: st.session_state.page = 'home' if 'current_topic_id' not in st.session_state: st.session_state.current_topic_id = None if 'user_email' not in st.session_state: st.session_state.user_email = '' # Mimics browser state if 'current_comment_id' not in st.session_state: st.session_state.current_comment_id = None if 'current_comment_content' not in st.session_state: st.session_state.current_comment_content = "Loading comments..." if 'comment_history' not in st.session_state: st.session_state.comment_history = "" if 'processed_url_params' not in st.session_state: st.session_state.processed_url_params = False # Add flag initialization # Initialize the database on first run initialize_database() # Handle initial load from URL query parameters # Process only once per session load using the flag query_params = st.query_params # Check for 'topic' param and if it hasn't been processed yet if 'topic' in query_params and not st.session_state.processed_url_params: topic_id_from_url = query_params.get('topic') # Use .get for safety if topic_id_from_url: # Check if topic_id is actually retrieved st.session_state.page = 'view_topic' st.session_state.current_topic_id = topic_id_from_url st.session_state.processed_url_params = True # Mark as processed # The view_topic_page will handle loading user/comment based on session_state.user_email st.rerun() # Rerun to apply the page change # Render the appropriate page based on session state if st.session_state.page == 'home': home_page() elif st.session_state.page == 'create_topic': create_topic_page() elif st.session_state.page == 'view_topic': view_topic_page()