File size: 959 Bytes
ea1968a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
# import pandas as pd
# from datasets import Dataset
import nltk
from nltk import pos_tag
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from collections import Counter

# Download required NLTK data
# NOTE(review): these run at import time and hit the network on first use;
# nltk.download is a no-op (cached) once the corpora are already present.
nltk.download('punkt')                          # tokenizer models for word_tokenize
nltk.download('averaged_perceptron_tagger')     # POS model for pos_tag
nltk.download('stopwords')                      # stopword lists for stopwords.words



# Preprocessing
# English stopword set, built once at module level so preprocess() gets
# O(1) membership tests instead of scanning a list per token.
stop_words = set(stopwords.words('english'))

def preprocess(text):
    """Lowercase and tokenize *text*, dropping stopwords and non-alphanumeric tokens.

    Returns a list of cleaned token strings.
    """
    kept = []
    for tok in word_tokenize(text.lower()):
        # isalnum() filters out punctuation-only tokens.
        if tok.isalnum() and tok not in stop_words:
            kept.append(tok)
    return kept

def get_keywords(text, top_n=5):
    """Return the *top_n* most frequent keyword tokens found in *text*.

    A keyword is any preprocessed token whose POS tag marks it as a
    noun (NN*), verb (VB*), or adjective (JJ*). Ties are broken by
    Counter.most_common's insertion order.
    """
    tagged = pos_tag(preprocess(text))

    # Keep only nouns, verbs, and adjectives.
    candidates = [tok for tok, tag in tagged if tag.startswith(('NN', 'VB', 'JJ'))]

    # Rank by word occurrence count and take the top N.
    counts = Counter(candidates)
    return [tok for tok, _ in counts.most_common(top_n)]