File size: 3,328 Bytes
53e65b7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
from google_auth_oauthlib.flow import InstalledAppFlow
from google.oauth2.credentials import Credentials
from google.auth.transport.requests import Request
from googleapiclient.discovery import build
import os.path
import pickle
from typing import Dict

class SheetsHandler:
    """Thin wrapper around the Google Sheets v4 API for blog-post tracking.

    Runs the OAuth2 installed-app flow on first use and caches the resulting
    credentials in a local pickle file so later runs skip the browser prompt.
    """

    # Default tracking spreadsheet used when get_previous_posts() is called
    # without an explicit id (preserves the previously hard-coded value).
    DEFAULT_SPREADSHEET_ID = '1CL0L4V288SEygm0BieMRM8t8h7MbcV9bYyzkDc0zInU'

    def __init__(self, credentials_path: str, token_path: str = 'token.pickle'):
        """Authenticate against Google Sheets and build the API client.

        Args:
            credentials_path: Path to the OAuth client-secrets JSON file.
            token_path: Where cached credentials are pickled between runs
                (default matches the original hard-coded 'token.pickle').
        """
        self.SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
        self.creds = None

        # Load a previously cached token if one exists.
        # NOTE(review): pickle.load on a local cache file is the standard
        # google-auth quickstart pattern, but is only safe if token_path
        # is a trusted location.
        if os.path.exists(token_path):
            with open(token_path, 'rb') as token:
                self.creds = pickle.load(token)

        # If no valid credentials are available, refresh or re-run the flow.
        if not self.creds or not self.creds.valid:
            refreshed = False
            if self.creds and self.creds.expired and self.creds.refresh_token:
                try:
                    self.creds.refresh(Request())
                    refreshed = True
                except Exception as e:
                    # A revoked or expired refresh token raises here; fall
                    # back to the interactive flow instead of crashing.
                    print(f"Token refresh failed, re-authenticating: {e}")
            if not refreshed:
                flow = InstalledAppFlow.from_client_secrets_file(
                    credentials_path, self.SCOPES)
                self.creds = flow.run_local_server(
                    port=8085,
                    success_message='The authentication flow has completed. You may close this window.',
                    open_browser=True
                )

            # Save the (new or refreshed) credentials for the next run.
            with open(token_path, 'wb') as token:
                pickle.dump(self.creds, token)

        self.service = build('sheets', 'v4', credentials=self.creds)

    def get_previous_posts(self, spreadsheet_id: str = DEFAULT_SPREADSHEET_ID,
                           range_name: str = 'Sheet1!A:D') -> list:
        """Fetch previously published posts from the tracking sheet.

        Args:
            spreadsheet_id: Sheet to read; defaults to the tracking sheet.
            range_name: A1-notation range holding title/keywords/summary/url.

        Returns:
            A list of dicts with keys 'title', 'keywords', 'summary', 'url'.
            The header row and any row with fewer than four columns are
            skipped. Returns [] on any API error (deliberately best-effort
            so a read failure does not abort the caller's run).
        """
        try:
            result = self.service.spreadsheets().values().get(
                spreadsheetId=spreadsheet_id,
                range=range_name
            ).execute()
            rows = result.get('values', [])
            # rows[1:] skips the header; short rows are dropped.
            return [
                {
                    'title': row[0],
                    'keywords': row[1],
                    'summary': row[2],
                    'url': row[3],
                }
                for row in rows[1:]
                if len(row) >= 4
            ]
        except Exception as e:
            # Broad catch is intentional: googleapiclient raises several
            # error types and this method must never propagate.
            print(f"Error getting previous posts: {e}")
            return []

    def mark_cluster_complete(self, sheet_id: str, sheet_name: str, row_number: int) -> None:
        """Write 'yes' into column E of the given row to mark it done.

        Args:
            sheet_id: Spreadsheet id containing the cluster sheet.
            sheet_name: Tab name within the spreadsheet.
            row_number: 1-based row number of the cluster to mark.
        """
        range_name = f"{sheet_name}!E{row_number}"
        body = {
            'values': [['yes']]
        }
        self.service.spreadsheets().values().update(
            spreadsheetId=sheet_id,
            range=range_name,
            valueInputOption='RAW',
            body=body
        ).execute()

    def log_completed_post(self, sheet_id: str, metadata: Dict,
                           base_url: str = 'https://yourblog.com') -> None:
        """Append a completed post's metadata as a new row in the log sheet.

        Args:
            sheet_id: Spreadsheet id of the log sheet.
            metadata: Must contain 'title', 'keywords', 'meta_description'
                and 'slug' keys (raises KeyError otherwise).
            base_url: Site root used to build the post URL; default keeps
                the previously hard-coded 'https://yourblog.com'.
        """
        range_name = 'Sheet1!A:D'
        body = {
            'values': [[
                metadata['title'],
                metadata['keywords'],
                metadata['meta_description'],
                f"{base_url}/{metadata['slug']}"
            ]]
        }
        self.service.spreadsheets().values().append(
            spreadsheetId=sheet_id,
            range=range_name,
            valueInputOption='RAW',
            insertDataOption='INSERT_ROWS',
            body=body
        ).execute()