kernel_id (int64, 24.2k to 23.3M) | prompt (string, lengths 8 to 1.85M) | completion (string, lengths 1 to 182k) | comp_name (string, lengths 5 to 57) |
---|---|---|---|
12,714,797 |
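# Build word n-grams from lowercased, stopword-filtered tokens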
def generate_ngrams(text, n_gram=1):
    tokens = [token for token in text.lower().split(' ') if token != '' and token not in STOPWORDS]
    ngrams = zip(*[tokens[i:] for i in range(n_gram)])
    return [' '.join(ngram) for ngram in ngrams]
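# Count n-gram frequencies separately for disaster and non-disaster tweets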
N = 100
disaster_unigrams = defaultdict(int)
nondisaster_unigrams = defaultdict(int)
for tweet in df_train[DISASTER_TWEETS]['text']:
    for word in generate_ngrams(tweet):
        disaster_unigrams[word] += 1
for tweet in df_train[~DISASTER_TWEETS]['text']:
    for word in generate_ngrams(tweet):
        nondisaster_unigrams[word] += 1
df_disaster_unigrams = pd.DataFrame(sorted(disaster_unigrams.items(), key=lambda x: x[1])[::-1])
df_nondisaster_unigrams = pd.DataFrame(sorted(nondisaster_unigrams.items(), key=lambda x: x[1])[::-1])
disaster_bigrams = defaultdict(int)
nondisaster_bigrams = defaultdict(int)
for tweet in df_train[DISASTER_TWEETS]['text']:
    for word in generate_ngrams(tweet, n_gram=2):
        disaster_bigrams[word] += 1
for tweet in df_train[~DISASTER_TWEETS]['text']:
    for word in generate_ngrams(tweet, n_gram=2):
        nondisaster_bigrams[word] += 1
df_disaster_bigrams = pd.DataFrame(sorted(disaster_bigrams.items(), key=lambda x: x[1])[::-1])
df_nondisaster_bigrams = pd.DataFrame(sorted(nondisaster_bigrams.items(), key=lambda x: x[1])[::-1])
disaster_trigrams = defaultdict(int)
nondisaster_trigrams = defaultdict(int)
for tweet in df_train[DISASTER_TWEETS]['text']:
    for word in generate_ngrams(tweet, n_gram=3):
        disaster_trigrams[word] += 1
for tweet in df_train[~DISASTER_TWEETS]['text']:
    for word in generate_ngrams(tweet, n_gram=3):
        nondisaster_trigrams[word] += 1
df_disaster_trigrams = pd.DataFrame(sorted(disaster_trigrams.items(), key=lambda x: x[1])[::-1])
df_nondisaster_trigrams = pd.DataFrame(sorted(nondisaster_trigrams.items(), key=lambda x: x[1])[::-1])<choose_model_class>
|
model.compile(optimizer="nadam", loss="categorical_crossentropy", metrics=["accuracy"])
|
Digit Recognizer
|
12,714,797 |
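# Extend NLTK's English stopwords with corpus-specific filler words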
sw = stopwords.words('english')
stw = sw + ['lot','frog','ppl','tldr','time','nan','thing', 'subject', 're', 'edu', 'use','good','really','quite','nice','well','little','need','keep','make','important','take','get','very','course','instructor','example']
ps = PorterStemmer()
lemmatizer = nltk.stem.WordNetLemmatizer()<feature_engineering>
|
epochs = 100
batch_size = 64
|
Digit Recognizer
|
12,714,797 |
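# Lowercase and tokenize the text, keeping a rejoined copy in 'com_'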
def lower(df):
    df['com_token'] = df['text'].str.lower().str.split()
    df["com_"] = df["com_token"].apply(' '.join)
    return df<feature_engineering>
|
datagen = ImageDataGenerator(
    featurewise_center=False,
    samplewise_center=False,
    featurewise_std_normalization=False,
    samplewise_std_normalization=False,
    zca_whitening=False,
    rotation_range=10,
    zoom_range=0.1,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=False,
    vertical_flip=False)
datagen.fit(X_train)
|
Digit Recognizer
|
12,714,797 |
df_train = lower(df_train)
df_train["Orig_comment"] = df_train["text"]
df_train["text"] = df_train["com_"]<categorify>
|
early_stopping_cb = keras.callbacks.EarlyStopping(patience=20, restore_best_weights=True)
model_checkpoint_cb = keras.callbacks.ModelCheckpoint("best_mnist_model.h5", save_best_only=True)
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
                                            patience=5,
                                            verbose=1,
                                            factor=0.5,
                                            min_lr=0.00001)
history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),
                              epochs=epochs, validation_data=(X_val, Y_val),
                              verbose=2, steps_per_epoch=X_train.shape[0] // batch_size,
                              callbacks=[early_stopping_cb, model_checkpoint_cb, learning_rate_reduction])
|
Digit Recognizer
|
12,714,797 |
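# Expand contractions and scrub tweet-specific artifacts ahead of tokenization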
def decontracted(tweet):
tweet = re.sub(r"won't", "will not", tweet)
tweet = re.sub(r"can't", "can not", tweet)
tweet = re.sub(r"he\ ’ s", "he is", tweet)
tweet = re.sub(r"i\ ’ m", "he is", tweet)
tweet=re.sub("(<.*?>)","",tweet)
tweet=re.sub("(\\W|\\d)"," ",tweet)
tweet = re.sub(r"n't", " not", tweet)
tweet = re.sub(r"'re", " are", tweet)
tweet = re.sub(r"'s", " is", tweet)
tweet = re.sub(r"'d", " would", tweet)
tweet = re.sub(r"'ll", " will", tweet)
tweet = re.sub(r"'t", " not", tweet)
tweet = re.sub(r"'ve", " have", tweet)
tweet = re.sub(r"'m", " am", tweet)
tweet = re.sub(r"'didnt", " did not", tweet)
tweet = re.sub(r"\x89Û_", "", tweet)
tweet = re.sub(r"\x89ÛÒ", "", tweet)
tweet = re.sub(r"\x89ÛÓ", "", tweet)
tweet = re.sub(r"\x89ÛÏWhen", "When", tweet)
tweet = re.sub(r"\x89ÛÏ", "", tweet)
tweet = re.sub(r"China\x89Ûªs", "China's", tweet)
tweet = re.sub(r"let\x89Ûªs", "let's", tweet)
tweet = re.sub(r"\x89Û÷", "", tweet)
tweet = re.sub(r"\x89Ûª", "", tweet)
tweet = re.sub(r"\x89Û\x9d", "", tweet)
tweet = re.sub(r"å_", "", tweet)
tweet = re.sub(r"\x89Û¢", "", tweet)
tweet = re.sub(r"\x89Û¢åÊ", "", tweet)
tweet = re.sub(r"fromåÊwounds", "from wounds", tweet)
tweet = re.sub(r"åÊ", "", tweet)
tweet = re.sub(r"åÈ", "", tweet)
tweet = re.sub(r"JapÌ_n", "Japan", tweet)
tweet = re.sub(r"Ì©", "e", tweet)
tweet = re.sub(r"å¨", "", tweet)
tweet = re.sub(r"Surṳ", "Suruc", tweet)
tweet = re.sub(r"åÇ", "", tweet)
tweet = re.sub(r"å£3million", "3 million", tweet)
tweet = re.sub(r"åÀ", "", tweet)
tweet = re.sub(r"he's", "he is", tweet)
tweet = re.sub(r"there's", "there is", tweet)
tweet = re.sub(r"We're", "We are", tweet)
tweet = re.sub(r"That's", "That is", tweet)
tweet = re.sub(r"won't", "will not", tweet)
tweet = re.sub(r"they're", "they are", tweet)
tweet = re.sub(r"Can't", "Cannot", tweet)
tweet = re.sub(r"wasn't", "was not", tweet)
tweet = re.sub(r"don\x89Ûªt", "do not", tweet)
tweet = re.sub(r"aren't", "are not", tweet)
tweet = re.sub(r"isn't", "is not", tweet)
tweet = re.sub(r"What's", "What is", tweet)
tweet = re.sub(r"haven't", "have not", tweet)
tweet = re.sub(r"hasn't", "has not", tweet)
tweet = re.sub(r"There's", "There is", tweet)
tweet = re.sub(r"He's", "He is", tweet)
tweet = re.sub(r"It's", "It is", tweet)
tweet = re.sub(r"You're", "You are", tweet)
tweet = re.sub(r"I'M", "I am", tweet)
tweet = re.sub(r"shouldn't", "should not", tweet)
tweet = re.sub(r"wouldn't", "would not", tweet)
tweet = re.sub(r"i'm", "I am", tweet)
tweet = re.sub(r"I\x89Ûªm", "I am", tweet)
tweet = re.sub(r"I'm", "I am", tweet)
tweet = re.sub(r"Isn't", "is not", tweet)
tweet = re.sub(r"Here's", "Here is", tweet)
tweet = re.sub(r"you've", "you have", tweet)
tweet = re.sub(r"you\x89Ûªve", "you have", tweet)
tweet = re.sub(r"we're", "we are", tweet)
tweet = re.sub(r"what's", "what is", tweet)
tweet = re.sub(r"couldn't", "could not", tweet)
tweet = re.sub(r"we've", "we have", tweet)
tweet = re.sub(r"it\x89Ûªs", "it is", tweet)
tweet = re.sub(r"doesn\x89Ûªt", "does not", tweet)
tweet = re.sub(r"It\x89Ûªs", "It is", tweet)
tweet = re.sub(r"Here\x89Ûªs", "Here is", tweet)
tweet = re.sub(r"who's", "who is", tweet)
tweet = re.sub(r"I\x89Ûªve", "I have", tweet)
tweet = re.sub(r"y'all", "you all", tweet)
tweet = re.sub(r"can\x89Ûªt", "cannot", tweet)
tweet = re.sub(r"would've", "would have", tweet)
tweet = re.sub(r"it'll", "it will", tweet)
tweet = re.sub(r"we'll", "we will", tweet)
tweet = re.sub(r"wouldn\x89Ûªt", "would not", tweet)
tweet = re.sub(r"We've", "We have", tweet)
tweet = re.sub(r"he'll", "he will", tweet)
tweet = re.sub(r"Y'all", "You all", tweet)
tweet = re.sub(r"Weren't", "Were not", tweet)
tweet = re.sub(r"Didn't", "Did not", tweet)
tweet = re.sub(r"they'll", "they will", tweet)
tweet = re.sub(r"they'd", "they would", tweet)
tweet = re.sub(r"DON'T", "DO NOT", tweet)
tweet = re.sub(r"That\x89Ûªs", "That is", tweet)
tweet = re.sub(r"they've", "they have", tweet)
tweet = re.sub(r"i'd", "I would", tweet)
tweet = re.sub(r"should've", "should have", tweet)
tweet = re.sub(r"You\x89Ûªre", "You are", tweet)
tweet = re.sub(r"where's", "where is", tweet)
tweet = re.sub(r"Don\x89Ûªt", "Do not", tweet)
tweet = re.sub(r"we'd", "we would", tweet)
tweet = re.sub(r"i'll", "I will", tweet)
tweet = re.sub(r"weren't", "were not", tweet)
tweet = re.sub(r"They're", "They are", tweet)
tweet = re.sub(r"Can\x89Ûªt", "Cannot", tweet)
tweet = re.sub(r"you\x89Ûªll", "you will", tweet)
tweet = re.sub(r"I\x89Ûªd", "I would", tweet)
tweet = re.sub(r"let's", "let us", tweet)
tweet = re.sub(r"it's", "it is", tweet)
tweet = re.sub(r"can't", "cannot", tweet)
tweet = re.sub(r"don't", "do not", tweet)
tweet = re.sub(r"you're", "you are", tweet)
tweet = re.sub(r"i've", "I have", tweet)
tweet = re.sub(r"that's", "that is", tweet)
tweet = re.sub(r"i'll", "I will", tweet)
tweet = re.sub(r"doesn't", "does not", tweet)
tweet = re.sub(r"i'd", "I would", tweet)
tweet = re.sub(r"didn't", "did not", tweet)
tweet = re.sub(r"ain't", "am not", tweet)
tweet = re.sub(r"you'll", "you will", tweet)
tweet = re.sub(r"I've", "I have", tweet)
tweet = re.sub(r"Don't", "do not", tweet)
tweet = re.sub(r"I'll", "I will", tweet)
tweet = re.sub(r"I'd", "I would", tweet)
tweet = re.sub(r"Let's", "Let us", tweet)
tweet = re.sub(r"you'd", "You would", tweet)
tweet = re.sub(r"It's", "It is", tweet)
tweet = re.sub(r"Ain't", "am not", tweet)
tweet = re.sub(r"Haven't", "Have not", tweet)
tweet = re.sub(r"Could've", "Could have", tweet)
tweet = re.sub(r"youve", "you have", tweet)
tweet = re.sub(r"donå«t", "do not", tweet)
tweet = re.sub(r">", ">", tweet)
tweet = re.sub(r"<", "<", tweet)
tweet = re.sub(r"&", "&", tweet)
tweet = re.sub(r"w/e", "whatever", tweet)
tweet = re.sub(r"w/", "with", tweet)
tweet = re.sub(r"USAgov", "USA government", tweet)
tweet = re.sub(r"recentlu", "recently", tweet)
tweet = re.sub(r"Ph0tos", "Photos", tweet)
tweet = re.sub(r"amirite", "am I right", tweet)
tweet = re.sub(r"exp0sed", "exposed", tweet)
tweet = re.sub(r"<3", "love", tweet)
tweet = re.sub(r"amageddon", "armageddon", tweet)
tweet = re.sub(r"Trfc", "Traffic", tweet)
tweet = re.sub(r"8/5/2015", "2015-08-05", tweet)
tweet = re.sub(r"WindStorm", "Wind Storm", tweet)
tweet = re.sub(r"8/6/2015", "2015-08-06", tweet)
tweet = re.sub(r"10:38PM", "10:38 PM", tweet)
tweet = re.sub(r"10:30pm", "10:30 PM", tweet)
tweet = re.sub(r"16yr", "16 year", tweet)
tweet = re.sub(r"lmao", "laughing my ass off", tweet)
tweet = re.sub(r"TRAUMATISED", "traumatized", tweet)
tweet = re.sub(r"IranDeal", "Iran Deal", tweet)
tweet = re.sub(r"ArianaGrande", "Ariana Grande", tweet)
tweet = re.sub(r"camilacabello97", "camila cabello", tweet)
tweet = re.sub(r"RondaRousey", "Ronda Rousey", tweet)
tweet = re.sub(r"MTVHottest", "MTV Hottest", tweet)
tweet = re.sub(r"TrapMusic", "Trap Music", tweet)
tweet = re.sub(r"ProphetMuhammad", "Prophet Muhammad", tweet)
tweet = re.sub(r"PantherAttack", "Panther Attack", tweet)
tweet = re.sub(r"StrategicPatience", "Strategic Patience", tweet)
tweet = re.sub(r"socialnews", "social news", tweet)
tweet = re.sub(r"NASAHurricane", "NASA Hurricane", tweet)
tweet = re.sub(r"onlinecommunities", "online communities", tweet)
tweet = re.sub(r"humanconsumption", "human consumption", tweet)
tweet = re.sub(r"Typhoon-Devastated", "Typhoon Devastated", tweet)
tweet = re.sub(r"Meat-Loving", "Meat Loving", tweet)
tweet = re.sub(r"facialabuse", "facial abuse", tweet)
tweet = re.sub(r"LakeCounty", "Lake County", tweet)
tweet = re.sub(r"BeingAuthor", "Being Author", tweet)
tweet = re.sub(r"withheavenly", "with heavenly", tweet)
tweet = re.sub(r"thankU", "thank you", tweet)
tweet = re.sub(r"iTunesMusic", "iTunes Music", tweet)
tweet = re.sub(r"OffensiveContent", "Offensive Content", tweet)
tweet = re.sub(r"WorstSummerJob", "Worst Summer Job", tweet)
tweet = re.sub(r"HarryBeCareful", "Harry Be Careful", tweet)
tweet = re.sub(r"NASASolarSystem", "NASA Solar System", tweet)
tweet = re.sub(r"animalrescue", "animal rescue", tweet)
tweet = re.sub(r"KurtSchlichter", "Kurt Schlichter", tweet)
tweet = re.sub(r"aRmageddon", "armageddon", tweet)
tweet = re.sub(r"Throwingknifes", "Throwing knives", tweet)
tweet = re.sub(r"GodsLove", "God's Love", tweet)
tweet = re.sub(r"bookboost", "book boost", tweet)
tweet = re.sub(r"ibooklove", "I book love", tweet)
tweet = re.sub(r"NestleIndia", "Nestle India", tweet)
tweet = re.sub(r"realDonaldTrump", "Donald Trump", tweet)
tweet = re.sub(r"DavidVonderhaar", "David Vonderhaar", tweet)
tweet = re.sub(r"CecilTheLion", "Cecil The Lion", tweet)
tweet = re.sub(r"weathernetwork", "weather network", tweet)
tweet = re.sub(r"withBioterrorism&use", "with Bioterrorism & use", tweet)
tweet = re.sub(r"Hostage&2", "Hostage & 2", tweet)
tweet = re.sub(r"GOPDebate", "GOP Debate", tweet)
tweet = re.sub(r"RickPerry", "Rick Perry", tweet)
tweet = re.sub(r"frontpage", "front page", tweet)
tweet = re.sub(r"NewsInTweets", "News In Tweets", tweet)
tweet = re.sub(r"ViralSpell", "Viral Spell", tweet)
tweet = re.sub(r"til_now", "until now", tweet)
tweet = re.sub(r"volcanoinRussia", "volcano in Russia", tweet)
tweet = re.sub(r"ZippedNews", "Zipped News", tweet)
tweet = re.sub(r"MicheleBachman", "Michele Bachman", tweet)
tweet = re.sub(r"53inch", "53 inch", tweet)
tweet = re.sub(r"KerrickTrial", "Kerrick Trial", tweet)
tweet = re.sub(r"abstorm", "Alberta Storm", tweet)
tweet = re.sub(r"Beyhive", "Beyonce hive", tweet)
tweet = re.sub(r"IDFire", "Idaho Fire", tweet)
tweet = re.sub(r"DETECTADO", "Detected", tweet)
tweet = re.sub(r"RockyFire", "Rocky Fire", tweet)
tweet = re.sub(r"Listen/Buy", "Listen / Buy", tweet)
tweet = re.sub(r"NickCannon", "Nick Cannon", tweet)
tweet = re.sub(r"FaroeIslands", "Faroe Islands", tweet)
tweet = re.sub(r"yycstorm", "Calgary Storm", tweet)
tweet = re.sub(r"IDPs:", "Internally Displaced People :", tweet)
tweet = re.sub(r"ArtistsUnited", "Artists United", tweet)
tweet = re.sub(r"ClaytonBryant", "Clayton Bryant", tweet)
tweet = re.sub(r"jimmyfallon", "jimmy fallon", tweet)
tweet = re.sub(r"justinbieber", "justin bieber", tweet)
tweet = re.sub(r"UTC2015", "UTC 2015", tweet)
tweet = re.sub(r"Time2015", "Time 2015", tweet)
tweet = re.sub(r"djicemoon", "dj icemoon", tweet)
tweet = re.sub(r"LivingSafely", "Living Safely", tweet)
tweet = re.sub(r"FIFA16", "Fifa 2016", tweet)
tweet = re.sub(r"thisiswhywecanthavenicethings", "this is why we cannot have nice things", tweet)
tweet = re.sub(r"bbcnews", "bbc news", tweet)
tweet = re.sub(r"UndergroundRailraod", "Underground Railraod", tweet)
tweet = re.sub(r"c4news", "c4 news", tweet)
tweet = re.sub(r"OBLITERATION", "obliteration", tweet)
tweet = re.sub(r"MUDSLIDE", "mudslide", tweet)
tweet = re.sub(r"NoSurrender", "No Surrender", tweet)
tweet = re.sub(r"NotExplained", "Not Explained", tweet)
tweet = re.sub(r"greatbritishbakeoff", "great british bake off", tweet)
tweet = re.sub(r"LondonFire", "London Fire", tweet)
tweet = re.sub(r"KOTAWeather", "KOTA Weather", tweet)
tweet = re.sub(r"LuchaUnderground", "Lucha Underground", tweet)
tweet = re.sub(r"KOIN6News", "KOIN 6 News", tweet)
tweet = re.sub(r"LiveOnK2", "Live On K2", tweet)
tweet = re.sub(r"9NewsGoldCoast", "9 News Gold Coast", tweet)
tweet = re.sub(r"nikeplus", "nike plus", tweet)
tweet = re.sub(r"david_cameron", "David Cameron", tweet)
tweet = re.sub(r"peterjukes", "Peter Jukes", tweet)
tweet = re.sub(r"JamesMelville", "James Melville", tweet)
tweet = re.sub(r"megynkelly", "Megyn Kelly", tweet)
tweet = re.sub(r"cnewslive", "C News Live", tweet)
tweet = re.sub(r"JamaicaObserver", "Jamaica Observer", tweet)
tweet = re.sub(r"TweetLikeItsSeptember11th2001", "Tweet like it is september 11th 2001", tweet)
tweet = re.sub(r"cbplawyers", "cbp lawyers", tweet)
tweet = re.sub(r"fewmoretweets", "few more tweets", tweet)
tweet = re.sub(r"BlackLivesMatter", "Black Lives Matter", tweet)
tweet = re.sub(r"cjoyner", "Chris Joyner", tweet)
tweet = re.sub(r"ENGvAUS", "England vs Australia", tweet)
tweet = re.sub(r"ScottWalker", "Scott Walker", tweet)
tweet = re.sub(r"MikeParrActor", "Michael Parr", tweet)
tweet = re.sub(r"4PlayThursdays", "Foreplay Thursdays", tweet)
tweet = re.sub(r"TGF2015", "Tontitown Grape Festival", tweet)
tweet = re.sub(r"realmandyrain", "Mandy Rain", tweet)
tweet = re.sub(r"GraysonDolan", "Grayson Dolan", tweet)
tweet = re.sub(r"ApolloBrown", "Apollo Brown", tweet)
tweet = re.sub(r"saddlebrooke", "Saddlebrooke", tweet)
tweet = re.sub(r"TontitownGrape", "Tontitown Grape", tweet)
tweet = re.sub(r"AbbsWinston", "Abbs Winston", tweet)
tweet = re.sub(r"ShaunKing", "Shaun King", tweet)
tweet = re.sub(r"MeekMill", "Meek Mill", tweet)
tweet = re.sub(r"TornadoGiveaway", "Tornado Giveaway", tweet)
tweet = re.sub(r"GRupdates", "GR updates", tweet)
tweet = re.sub(r"SouthDowns", "South Downs", tweet)
tweet = re.sub(r"braininjury", "brain injury", tweet)
tweet = re.sub(r"auspol", "Australian politics", tweet)
tweet = re.sub(r"PlannedParenthood", "Planned Parenthood", tweet)
tweet = re.sub(r"calgaryweather", "Calgary Weather", tweet)
tweet = re.sub(r"weallheartonedirection", "we all heart one direction", tweet)
tweet = re.sub(r"edsheeran", "Ed Sheeran", tweet)
tweet = re.sub(r"TrueHeroes", "True Heroes", tweet)
tweet = re.sub(r"S3XLEAK", "sex leak", tweet)
tweet = re.sub(r"ComplexMag", "Complex Magazine", tweet)
tweet = re.sub(r"TheAdvocateMag", "The Advocate Magazine", tweet)
tweet = re.sub(r"CityofCalgary", "City of Calgary", tweet)
tweet = re.sub(r"EbolaOutbreak", "Ebola Outbreak", tweet)
tweet = re.sub(r"SummerFate", "Summer Fate", tweet)
tweet = re.sub(r"RAmag", "Royal Academy Magazine", tweet)
tweet = re.sub(r"offers2go", "offers to go", tweet)
tweet = re.sub(r"foodscare", "food scare", tweet)
tweet = re.sub(r"MNPDNashville", "Metropolitan Nashville Police Department", tweet)
tweet = re.sub(r"TfLBusAlerts", "TfL Bus Alerts", tweet)
tweet = re.sub(r"GamerGate", "Gamer Gate", tweet)
tweet = re.sub(r"IHHen", "Humanitarian Relief", tweet)
tweet = re.sub(r"spinningbot", "spinning bot", tweet)
tweet = re.sub(r"ModiMinistry", "Modi Ministry", tweet)
tweet = re.sub(r"TAXIWAYS", "taxi ways", tweet)
tweet = re.sub(r"Calum5SOS", "Calum Hood", tweet)
tweet = re.sub(r"po_st", "po.st", tweet)
tweet = re.sub(r"scoopit", "scoop.it", tweet)
tweet = re.sub(r"UltimaLucha", "Ultima Lucha", tweet)
tweet = re.sub(r"JonathanFerrell", "Jonathan Ferrell", tweet)
tweet = re.sub(r"aria_ahrary", "Aria Ahrary", tweet)
tweet = re.sub(r"rapidcity", "Rapid City", tweet)
tweet = re.sub(r"OutBid", "outbid", tweet)
tweet = re.sub(r"lavenderpoetrycafe", "lavender poetry cafe", tweet)
tweet = re.sub(r"EudryLantiqua", "Eudry Lantiqua", tweet)
tweet = re.sub(r"15PM", "15 PM", tweet)
tweet = re.sub(r"OriginalFunko", "Funko", tweet)
tweet = re.sub(r"rightwaystan", "Richard Tan", tweet)
tweet = re.sub(r"CindyNoonan", "Cindy Noonan", tweet)
tweet = re.sub(r"RT_America", "RT America", tweet)
tweet = re.sub(r"narendramodi", "Narendra Modi", tweet)
tweet = re.sub(r"BakeOffFriends", "Bake Off Friends", tweet)
tweet = re.sub(r"TeamHendrick", "Hendrick Motorsports", tweet)
tweet = re.sub(r"alexbelloli", "Alex Belloli", tweet)
tweet = re.sub(r"itsjustinstuart", "Justin Stuart", tweet)
tweet = re.sub(r"gunsense", "gun sense", tweet)
tweet = re.sub(r"DebateQuestionsWeWantToHear", "debate questions we want to hear", tweet)
tweet = re.sub(r"RoyalCarribean", "Royal Carribean", tweet)
tweet = re.sub(r"samanthaturne19", "Samantha Turner", tweet)
tweet = re.sub(r"JonVoyage", "Jon Stewart", tweet)
tweet = re.sub(r"renew911health", "renew 911 health", tweet)
tweet = re.sub(r"SuryaRay", "Surya Ray", tweet)
tweet = re.sub(r"pattonoswalt", "Patton Oswalt", tweet)
tweet = re.sub(r"minhazmerchant", "Minhaz Merchant", tweet)
tweet = re.sub(r"TLVFaces", "Israel Diaspora Coalition", tweet)
tweet = re.sub(r"pmarca", "Marc Andreessen", tweet)
tweet = re.sub(r"pdx911", "Portland Police", tweet)
tweet = re.sub(r"jamaicaplain", "Jamaica Plain", tweet)
tweet = re.sub(r"Japton", "Arkansas", tweet)
tweet = re.sub(r"RouteComplex", "Route Complex", tweet)
tweet = re.sub(r"INSubcontinent", "Indian Subcontinent", tweet)
tweet = re.sub(r"NJTurnpike", "New Jersey Turnpike", tweet)
tweet = re.sub(r"Politifiact", "PolitiFact", tweet)
tweet = re.sub(r"Hiroshima70", "Hiroshima", tweet)
tweet = re.sub(r"GMMBC", "Greater Mt Moriah Baptist Church", tweet)
tweet = re.sub(r"versethe", "verse the", tweet)
tweet = re.sub(r"TubeStrike", "Tube Strike", tweet)
tweet = re.sub(r"MissionHills", "Mission Hills", tweet)
tweet = re.sub(r"ProtectDenaliWolves", "Protect Denali Wolves", tweet)
tweet = re.sub(r"NANKANA", "Nankana", tweet)
tweet = re.sub(r"SAHIB", "Sahib", tweet)
tweet = re.sub(r"PAKPATTAN", "Pakpattan", tweet)
tweet = re.sub(r"Newz_Sacramento", "News Sacramento", tweet)
tweet = re.sub(r"gofundme", "go fund me", tweet)
tweet = re.sub(r"pmharper", "Stephen Harper", tweet)
tweet = re.sub(r"IvanBerroa", "Ivan Berroa", tweet)
tweet = re.sub(r"LosDelSonido", "Los Del Sonido", tweet)
tweet = re.sub(r"bancodeseries", "banco de series", tweet)
tweet = re.sub(r"timkaine", "Tim Kaine", tweet)
tweet = re.sub(r"IdentityTheft", "Identity Theft", tweet)
tweet = re.sub(r"AllLivesMatter", "All Lives Matter", tweet)
tweet = re.sub(r"mishacollins", "Misha Collins", tweet)
tweet = re.sub(r"BillNeelyNBC", "Bill Neely", tweet)
tweet = re.sub(r"BeClearOnCancer", "be clear on cancer", tweet)
tweet = re.sub(r"Kowing", "Knowing", tweet)
tweet = re.sub(r"ScreamQueens", "Scream Queens", tweet)
tweet = re.sub(r"AskCharley", "Ask Charley", tweet)
tweet = re.sub(r"BlizzHeroes", "Heroes of the Storm", tweet)
tweet = re.sub(r"BradleyBrad47", "Bradley Brad", tweet)
tweet = re.sub(r"HannaPH", "Typhoon Hanna", tweet)
tweet = re.sub(r"meinlcymbals", "MEINL Cymbals", tweet)
tweet = re.sub(r"Ptbo", "Peterborough", tweet)
tweet = re.sub(r"cnnbrk", "CNN Breaking News", tweet)
tweet = re.sub(r"IndianNews", "Indian News", tweet)
tweet = re.sub(r"savebees", "save bees", tweet)
tweet = re.sub(r"GreenHarvard", "Green Harvard", tweet)
tweet = re.sub(r"StandwithPP", "Stand with planned parenthood", tweet)
tweet = re.sub(r"hermancranston", "Herman Cranston", tweet)
tweet = re.sub(r"WMUR9", "WMUR-TV", tweet)
tweet = re.sub(r"RockBottomRadFM", "Rock Bottom Radio", tweet)
tweet = re.sub(r"ameenshaikh3", "Ameen Shaikh", tweet)
tweet = re.sub(r"ProSyn", "Project Syndicate", tweet)
tweet = re.sub(r"Daesh", "ISIS", tweet)
tweet = re.sub(r"s2g", "swear to god", tweet)
tweet = re.sub(r"listenlive", "listen live", tweet)
tweet = re.sub(r"CDCgov", "Centers for Disease Control and Prevention", tweet)
tweet = re.sub(r"FoxNew", "Fox News", tweet)
tweet = re.sub(r"CBSBigBrother", "Big Brother", tweet)
tweet = re.sub(r"JulieDiCaro", "Julie DiCaro", tweet)
tweet = re.sub(r"theadvocatemag", "The Advocate Magazine", tweet)
tweet = re.sub(r"RohnertParkDPS", "Rohnert Park Police Department", tweet)
tweet = re.sub(r"THISIZBWRIGHT", "Bonnie Wright", tweet)
tweet = re.sub(r"Popularmmos", "Popular MMOs", tweet)
tweet = re.sub(r"WildHorses", "Wild Horses", tweet)
tweet = re.sub(r"FantasticFour", "Fantastic Four", tweet)
tweet = re.sub(r"HORNDALE", "Horndale", tweet)
tweet = re.sub(r"PINER", "Piner", tweet)
tweet = re.sub(r"BathAndNorthEastSomerset", "Bath and North East Somerset", tweet)
tweet = re.sub(r"thatswhatfriendsarefor", "that is what friends are for", tweet)
tweet = re.sub(r"residualincome", "residual income", tweet)
tweet = re.sub(r"YahooNewsDigest", "Yahoo News Digest", tweet)
tweet = re.sub(r"MalaysiaAirlines", "Malaysia Airlines", tweet)
tweet = re.sub(r"AmazonDeals", "Amazon Deals", tweet)
tweet = re.sub(r"MissCharleyWebb", "Charley Webb", tweet)
tweet = re.sub(r"shoalstraffic", "shoals traffic", tweet)
tweet = re.sub(r"GeorgeFoster72", "George Foster", tweet)
tweet = re.sub(r"pop2015", "pop 2015", tweet)
tweet = re.sub(r"_PokemonCards_", "Pokemon Cards", tweet)
tweet = re.sub(r"DianneG", "Dianne Gallagher", tweet)
tweet = re.sub(r"KashmirConflict", "Kashmir Conflict", tweet)
tweet = re.sub(r"BritishBakeOff", "British Bake Off", tweet)
tweet = re.sub(r"FreeKashmir", "Free Kashmir", tweet)
tweet = re.sub(r"mattmosley", "Matt Mosley", tweet)
tweet = re.sub(r"BishopFred", "Bishop Fred", tweet)
tweet = re.sub(r"EndConflict", "End Conflict", tweet)
tweet = re.sub(r"EndOccupation", "End Occupation", tweet)
tweet = re.sub(r"UNHEALED", "unhealed", tweet)
tweet = re.sub(r"CharlesDagnall", "Charles Dagnall", tweet)
tweet = re.sub(r"Latestnews", "Latest news", tweet)
tweet = re.sub(r"KindleCountdown", "Kindle Countdown", tweet)
tweet = re.sub(r"NoMoreHandouts", "No More Handouts", tweet)
tweet = re.sub(r"datingtips", "dating tips", tweet)
tweet = re.sub(r"charlesadler", "Charles Adler", tweet)
tweet = re.sub(r"twia", "Texas Windstorm Insurance Association", tweet)
tweet = re.sub(r"txlege", "Texas Legislature", tweet)
tweet = re.sub(r"WindstormInsurer", "Windstorm Insurer", tweet)
tweet = re.sub(r"Newss", "News", tweet)
tweet = re.sub(r"hempoil", "hemp oil", tweet)
tweet = re.sub(r"CommoditiesAre", "Commodities are", tweet)
tweet = re.sub(r"tubestrike", "tube strike", tweet)
tweet = re.sub(r"JoeNBC", "Joe Scarborough", tweet)
tweet = re.sub(r"LiteraryCakes", "Literary Cakes", tweet)
tweet = re.sub(r"TI5", "The International 5", tweet)
tweet = re.sub(r"thehill", "the hill", tweet)
tweet = re.sub(r"3others", "3 others", tweet)
tweet = re.sub(r"stighefootball", "Sam Tighe", tweet)
tweet = re.sub(r"whatstheimportantvideo", "what is the important video", tweet)
tweet = re.sub(r"ClaudioMeloni", "Claudio Meloni", tweet)
tweet = re.sub(r"DukeSkywalker", "Duke Skywalker", tweet)
tweet = re.sub(r"carsonmwr", "Fort Carson", tweet)
tweet = re.sub(r"offdishduty", "off dish duty", tweet)
tweet = re.sub(r"andword", "and word", tweet)
tweet = re.sub(r"rhodeisland", "Rhode Island", tweet)
tweet = re.sub(r"easternoregon", "Eastern Oregon", tweet)
tweet = re.sub(r"WAwildfire", "Washington Wildfire", tweet)
tweet = re.sub(r"fingerrockfire", "Finger Rock Fire", tweet)
tweet = re.sub(r"57am", "57 am", tweet)
tweet = re.sub(r"fingerrockfire", "Finger Rock Fire", tweet)
tweet = re.sub(r"JacobHoggard", "Jacob Hoggard", tweet)
tweet = re.sub(r"newnewnew", "new new new", tweet)
tweet = re.sub(r"under50", "under 50", tweet)
tweet = re.sub(r"getitbeforeitsgone", "get it before it is gone", tweet)
tweet = re.sub(r"freshoutofthebox", "fresh out of the box", tweet)
tweet = re.sub(r"amwriting", "am writing", tweet)
tweet = re.sub(r"Bokoharm", "Boko Haram", tweet)
tweet = re.sub(r"Nowlike", "Now like", tweet)
tweet = re.sub(r"seasonfrom", "season from", tweet)
tweet = re.sub(r"epicente", "epicenter", tweet)
tweet = re.sub(r"epicenterr", "epicenter", tweet)
tweet = re.sub(r"sicklife", "sick life", tweet)
tweet = re.sub(r"yycweather", "Calgary Weather", tweet)
tweet = re.sub(r"calgarysun", "Calgary Sun", tweet)
tweet = re.sub(r"approachng", "approaching", tweet)
tweet = re.sub(r"evng", "evening", tweet)
tweet = re.sub(r"Sumthng", "something", tweet)
tweet = re.sub(r"EllenPompeo", "Ellen Pompeo", tweet)
tweet = re.sub(r"shondarhimes", "Shonda Rhimes", tweet)
tweet = re.sub(r"ABCNetwork", "ABC Network", tweet)
tweet = re.sub(r"SushmaSwaraj", "Sushma Swaraj", tweet)
tweet = re.sub(r"pray4japan", "Pray for Japan", tweet)
tweet = re.sub(r"hope4japan", "Hope for Japan", tweet)
tweet = re.sub(r"Illusionimagess", "Illusion images", tweet)
tweet = re.sub(r"SummerUnderTheStars", "Summer Under The Stars", tweet)
tweet = re.sub(r"ShallWeDance", "Shall We Dance", tweet)
tweet = re.sub(r"TCMParty", "TCM Party", tweet)
tweet = re.sub(r"marijuananews", "marijuana news", tweet)
tweet = re.sub(r"onbeingwithKristaTippett", "on being with Krista Tippett", tweet)
tweet = re.sub(r"Beingtweets", "Being tweets", tweet)
tweet = re.sub(r"newauthors", "new authors", tweet)
tweet = re.sub(r"remedyyyy", "remedy", tweet)
tweet = re.sub(r"44PM", "44 PM", tweet)
tweet = re.sub(r"HeadlinesApp", "Headlines App", tweet)
tweet = re.sub(r"40PM", "40 PM", tweet)
tweet = re.sub(r"myswc", "Severe Weather Center", tweet)
tweet = re.sub(r"ithats", "that is", tweet)
tweet = re.sub(r"icouldsitinthismomentforever", "I could sit in this moment forever", tweet)
tweet = re.sub(r"FatLoss", "Fat Loss", tweet)
tweet = re.sub(r"02PM", "02 PM", tweet)
tweet = re.sub(r"MetroFmTalk", "Metro Fm Talk", tweet)
tweet = re.sub(r"Bstrd", "bastard", tweet)
tweet = re.sub(r"bldy", "bloody", tweet)
tweet = re.sub(r"MetrofmTalk", "Metro Fm Talk", tweet)
tweet = re.sub(r"terrorismturn", "terrorism turn", tweet)
tweet = re.sub(r"BBCNewsAsia", "BBC News Asia", tweet)
tweet = re.sub(r"BehindTheScenes", "Behind The Scenes", tweet)
tweet = re.sub(r"GeorgeTakei", "George Takei", tweet)
tweet = re.sub(r"WomensWeeklyMag", "Womens Weekly Magazine", tweet)
tweet = re.sub(r"SurvivorsGuidetoEarth", "Survivors Guide to Earth", tweet)
tweet = re.sub(r"incubusband", "incubus band", tweet)
tweet = re.sub(r"Babypicturethis", "Baby picture this", tweet)
tweet = re.sub(r"BombEffects", "Bomb Effects", tweet)
tweet = re.sub(r"win10", "Windows 10", tweet)
tweet = re.sub(r"idkidk", "I do not know I do not know", tweet)
tweet = re.sub(r"TheWalkingDead", "The Walking Dead", tweet)
tweet = re.sub(r"amyschumer", "Amy Schumer", tweet)
tweet = re.sub(r"crewlist", "crew list", tweet)
tweet = re.sub(r"Erdogans", "Erdogan", tweet)
tweet = re.sub(r"BBCLive", "BBC Live", tweet)
tweet = re.sub(r"TonyAbbottMHR", "Tony Abbott", tweet)
tweet = re.sub(r"paulmyerscough", "Paul Myerscough", tweet)
tweet = re.sub(r"georgegallagher", "George Gallagher", tweet)
tweet = re.sub(r"JimmieJohnson", "Jimmie Johnson", tweet)
tweet = re.sub(r"pctool", "pc tool", tweet)
tweet = re.sub(r"DoingHashtagsRight", "Doing Hashtags Right", tweet)
tweet = re.sub(r"ThrowbackThursday", "Throwback Thursday", tweet)
tweet = re.sub(r"SnowBackSunday", "Snowback Sunday", tweet)
tweet = re.sub(r"LakeEffect", "Lake Effect", tweet)
tweet = re.sub(r"RTphotographyUK", "Richard Thomas Photography UK", tweet)
tweet = re.sub(r"BigBang_CBS", "Big Bang CBS", tweet)
tweet = re.sub(r"writerslife", "writers life", tweet)
tweet = re.sub(r"NaturalBirth", "Natural Birth", tweet)
tweet = re.sub(r"UnusualWords", "Unusual Words", tweet)
tweet = re.sub(r"wizkhalifa", "Wiz Khalifa", tweet)
tweet = re.sub(r"acreativedc", "a creative DC", tweet)
tweet = re.sub(r"vscodc", "vsco DC", tweet)
tweet = re.sub(r"VSCOcam", "vsco camera", tweet)
tweet = re.sub(r"TheBEACHDC", "The beach DC", tweet)
tweet = re.sub(r"buildingmuseum", "building museum", tweet)
tweet = re.sub(r"WorldOil", "World Oil", tweet)
tweet = re.sub(r"redwedding", "red wedding", tweet)
tweet = re.sub(r"AmazingRaceCanada", "Amazing Race Canada", tweet)
tweet = re.sub(r"WakeUpAmerica", "Wake Up America", tweet)
tweet = re.sub(r"\\Allahuakbar\", "Allahu Akbar", tweet)
tweet = re.sub(r"bleased", "blessed", tweet)
tweet = re.sub(r"nigeriantribune", "Nigerian Tribune", tweet)
tweet = re.sub(r"HIDEO_KOJIMA_EN", "Hideo Kojima", tweet)
tweet = re.sub(r"FusionFestival", "Fusion Festival", tweet)
tweet = re.sub(r"50Mixed", "50 Mixed", tweet)
tweet = re.sub(r"NoAgenda", "No Agenda", tweet)
tweet = re.sub(r"WhiteGenocide", "White Genocide", tweet)
tweet = re.sub(r"dirtylying", "dirty lying", tweet)
tweet = re.sub(r"SyrianRefugees", "Syrian Refugees", tweet)
tweet = re.sub(r"changetheworld", "change the world", tweet)
tweet = re.sub(r"Ebolacase", "Ebola case", tweet)
tweet = re.sub(r"mcgtech", "mcg technologies", tweet)
tweet = re.sub(r"withweapons", "with weapons", tweet)
tweet = re.sub(r"advancedwarfare", "advanced warfare", tweet)
tweet = re.sub(r"letsFootball", "let us Football", tweet)
tweet = re.sub(r"LateNiteMix", "late night mix", tweet)
tweet = re.sub(r"PhilCollinsFeed", "Phil Collins", tweet)
tweet = re.sub(r"RudyHavenstein", "Rudy Havenstein", tweet)
tweet = re.sub(r"22PM", "22 PM", tweet)
tweet = re.sub(r"54am", "54 AM", tweet)
tweet = re.sub(r"38am", "38 AM", tweet)
tweet = re.sub(r"OldFolkExplainStuff", "Old Folk Explain Stuff", tweet)
tweet = re.sub(r"BlacklivesMatter", "Black Lives Matter", tweet)
tweet = re.sub(r"InsaneLimits", "Insane Limits", tweet)
tweet = re.sub(r"youcantsitwithus", "you cannot sit with us", tweet)
tweet = re.sub(r"2k15", "2015", tweet)
tweet = re.sub(r"TheIran", "Iran", tweet)
tweet = re.sub(r"JimmyFallon", "Jimmy Fallon", tweet)
tweet = re.sub(r"AlbertBrooks", "Albert Brooks", tweet)
tweet = re.sub(r"defense_news", "defense news", tweet)
tweet = re.sub(r"nuclearrcSA", "Nuclear Risk Control Self Assessment", tweet)
tweet = re.sub(r"Auspol", "Australia Politics", tweet)
tweet = re.sub(r"NuclearPower", "Nuclear Power", tweet)
tweet = re.sub(r"WhiteTerrorism", "White Terrorism", tweet)
tweet = re.sub(r"truthfrequencyradio", "Truth Frequency Radio", tweet)
tweet = re.sub(r"ErasureIsNotEquality", "Erasure is not equality", tweet)
tweet = re.sub(r"ProBonoNews", "Pro Bono News", tweet)
tweet = re.sub(r"JakartaPost", "Jakarta Post", tweet)
tweet = re.sub(r"toopainful", "too painful", tweet)
tweet = re.sub(r"melindahaunton", "Melinda Haunton", tweet)
tweet = re.sub(r"NoNukes", "No Nukes", tweet)
tweet = re.sub(r"curryspcworld", "Currys PC World", tweet)
tweet = re.sub(r"ineedcake", "I need cake", tweet)
tweet = re.sub(r"blackforestgateau", "black forest gateau", tweet)
tweet = re.sub(r"BBCOne", "BBC One", tweet)
tweet = re.sub(r"AlexxPage", "Alex Page", tweet)
tweet = re.sub(r"jonathanserrie", "Jonathan Serrie", tweet)
tweet = re.sub(r"SocialJerkBlog", "Social Jerk Blog", tweet)
tweet = re.sub(r"ChelseaVPeretti", "Chelsea Peretti", tweet)
tweet = re.sub(r"irongiant", "iron giant", tweet)
tweet = re.sub(r"RonFunches", "Ron Funches", tweet)
tweet = re.sub(r"TimCook", "Tim Cook", tweet)
tweet = re.sub(r"sebastianstanisaliveandwell", "Sebastian Stan is alive and well", tweet)
tweet = re.sub(r"Madsummer", "Mad summer", tweet)
tweet = re.sub(r"NowYouKnow", "Now you know", tweet)
tweet = re.sub(r"concertphotography", "concert photography", tweet)
tweet = re.sub(r"TomLandry", "Tom Landry", tweet)
tweet = re.sub(r"showgirldayoff", "show girl day off", tweet)
tweet = re.sub(r"Yougslavia", "Yugoslavia", tweet)
tweet = re.sub(r"QuantumDataInformatics", "Quantum Data Informatics", tweet)
tweet = re.sub(r"FromTheDesk", "From The Desk", tweet)
tweet = re.sub(r"TheaterTrial", "Theater Trial", tweet)
tweet = re.sub(r"CatoInstitute", "Cato Institute", tweet)
tweet = re.sub(r"EmekaGift", "Emeka Gift", tweet)
tweet = re.sub(r"LetsBe_Rational", "Let us be rational", tweet)
tweet = re.sub(r"Cynicalreality", "Cynical reality", tweet)
tweet = re.sub(r"FredOlsenCruise", "Fred Olsen Cruise", tweet)
tweet = re.sub(r"NotSorry", "not sorry", tweet)
tweet = re.sub(r"UseYourWords", "use your words", tweet)
tweet = re.sub(r"WordoftheDay", "word of the day", tweet)
tweet = re.sub(r"Dictionarycom", "Dictionary.com", tweet)
tweet = re.sub(r"TheBrooklynLife", "The Brooklyn Life", tweet)
tweet = re.sub(r"jokethey", "joke they", tweet)
tweet = re.sub(r"nflweek1picks", "NFL week 1 picks", tweet)
tweet = re.sub(r"uiseful", "useful", tweet)
tweet = re.sub(r"JusticeDotOrg", "The American Association for Justice", tweet)
tweet = re.sub(r"autoaccidents", "auto accidents", tweet)
tweet = re.sub(r"SteveGursten", "Steve Gursten", tweet)
tweet = re.sub(r"MichiganAutoLaw", "Michigan Auto Law", tweet)
tweet = re.sub(r"birdgang", "bird gang", tweet)
tweet = re.sub(r"nflnetwork", "NFL Network", tweet)
tweet = re.sub(r"NYDNSports", "NY Daily News Sports", tweet)
tweet = re.sub(r"RVacchianoNYDN", "Ralph Vacchiano NY Daily News", tweet)
tweet = re.sub(r"EdmontonEsks", "Edmonton Eskimos", tweet)
tweet = re.sub(r"david_brelsford", "David Brelsford", tweet)
tweet = re.sub(r"TOI_India", "The Times of India", tweet)
tweet = re.sub(r"hegot", "he got", tweet)
tweet = re.sub(r"SkinsOn9", "Skins on 9", tweet)
tweet = re.sub(r"sothathappened", "so that happened", tweet)
tweet = re.sub(r"LCOutOfDoors", "LC Out Of Doors", tweet)
tweet = re.sub(r"NationFirst", "Nation First", tweet)
tweet = re.sub(r"IndiaToday", "India Today", tweet)
tweet = re.sub(r"HLPS", "helps", tweet)
tweet = re.sub(r"HOSTAGESTHROSW", "hostages throw", tweet)
tweet = re.sub(r"SNCTIONS", "sanctions", tweet)
tweet = re.sub(r"BidTime", "Bid Time", tweet)
tweet = re.sub(r"crunchysensible", "crunchy sensible", tweet)
tweet = re.sub(r"RandomActsOfRomance", "Random acts of romance", tweet)
tweet = re.sub(r"MomentsAtHill", "Moments at hill", tweet)
tweet = re.sub(r"eatshit", "eat shit", tweet)
tweet = re.sub(r"liveleakfun", "live leak fun", tweet)
tweet = re.sub(r"SahelNews", "Sahel News", tweet)
tweet = re.sub(r"abc7newsbayarea", "ABC 7 News Bay Area", tweet)
tweet = re.sub(r"facilitiesmanagement", "facilities management", tweet)
tweet = re.sub(r"facilitydude", "facility dude", tweet)
tweet = re.sub(r"CampLogistics", "Camp logistics", tweet)
tweet = re.sub(r"alaskapublic", "Alaska public", tweet)
tweet = re.sub(r"MarketResearch", "Market Research", tweet)
tweet = re.sub(r"AccuracyEsports", "Accuracy Esports", tweet)
tweet = re.sub(r"TheBodyShopAust", "The Body Shop Australia", tweet)
tweet = re.sub(r"yychail", "Calgary hail", tweet)
tweet = re.sub(r"yyctraffic", "Calgary traffic", tweet)
tweet = re.sub(r"eliotschool", "eliot school", tweet)
tweet = re.sub(r"TheBrokenCity", "The Broken City", tweet)
tweet = re.sub(r"OldsFireDept", "Olds Fire Department", tweet)
tweet = re.sub(r"RiverComplex", "River Complex", tweet)
tweet = re.sub(r"fieldworksmells", "field work smells", tweet)
tweet = re.sub(r"IranElection", "Iran Election", tweet)
tweet = re.sub(r"glowng", "glowing", tweet)
tweet = re.sub(r"kindlng", "kindling", tweet)
tweet = re.sub(r"riggd", "rigged", tweet)
tweet = re.sub(r"slownewsday", "slow news day", tweet)
tweet = re.sub(r"MyanmarFlood", "Myanmar Flood", tweet)
tweet = re.sub(r"abc7chicago", "ABC 7 Chicago", tweet)
tweet = re.sub(r"copolitics", "Colorado Politics", tweet)
tweet = re.sub(r"AdilGhumro", "Adil Ghumro", tweet)
tweet = re.sub(r"netbots", "net bots", tweet)
tweet = re.sub(r"byebyeroad", "bye bye road", tweet)
tweet = re.sub(r"massiveflooding", "massive flooding", tweet)
tweet = re.sub(r"EndofUS", "End of United States", tweet)
tweet = re.sub(r"35PM", "35 PM", tweet)
tweet = re.sub(r"greektheatrela", "Greek Theatre Los Angeles", tweet)
tweet = re.sub(r"76mins", "76 minutes", tweet)
tweet = re.sub(r"publicsafetyfirst", "public safety first", tweet)
tweet = re.sub(r"livesmatter", "lives matter", tweet)
tweet = re.sub(r"myhometown", "my hometown", tweet)
tweet = re.sub(r"tankerfire", "tanker fire", tweet)
tweet = re.sub(r"MEMORIALDAY", "memorial day", tweet)
tweet = re.sub(r"MEMORIAL_DAY", "memorial day", tweet)
tweet = re.sub(r"instaxbooty", "instagram booty", tweet)
tweet = re.sub(r"Jerusalem_Post", "Jerusalem Post", tweet)
tweet = re.sub(r"WayneRooney_INA", "Wayne Rooney", tweet)
tweet = re.sub(r"VirtualReality", "Virtual Reality", tweet)
tweet = re.sub(r"OculusRift", "Oculus Rift", tweet)
tweet = re.sub(r"OwenJones84", "Owen Jones", tweet)
tweet = re.sub(r"jeremycorbyn", "Jeremy Corbyn", tweet)
tweet = re.sub(r"paulrogers002", "Paul Rogers", tweet)
tweet = re.sub(r"mortalkombatx", "Mortal Kombat X", tweet)
tweet = re.sub(r"mortalkombat", "Mortal Kombat", tweet)
tweet = re.sub(r"FilipeCoelho92", "Filipe Coelho", tweet)
tweet = re.sub(r"OnlyQuakeNews", "Only Quake News", tweet)
tweet = re.sub(r"kostumes", "costumes", tweet)
tweet = re.sub(r"YEEESSSS", "yes", tweet)
tweet = re.sub(r"ToshikazuKatayama", "Toshikazu Katayama", tweet)
tweet = re.sub(r"IntlDevelopment", "Intl Development", tweet)
tweet = re.sub(r"ExtremeWeather", "Extreme Weather", tweet)
tweet = re.sub(r"WereNotGruberVoters", "We are not gruber voters", tweet)
tweet = re.sub(r"NewsThousands", "News Thousands", tweet)
tweet = re.sub(r"EdmundAdamus", "Edmund Adamus", tweet)
tweet = re.sub(r"EyewitnessWV", "Eye witness WV", tweet)
tweet = re.sub(r"PhiladelphiaMuseu", "Philadelphia Museum", tweet)
tweet = re.sub(r"DublinComicCon", "Dublin Comic Con", tweet)
tweet = re.sub(r"NicholasBrendon", "Nicholas Brendon", tweet)
tweet = re.sub(r"Alltheway80s", "All the way 80s", tweet)
tweet = re.sub(r"FromTheField", "From the field", tweet)
tweet = re.sub(r"NorthIowa", "North Iowa", tweet)
tweet = re.sub(r"WillowFire", "Willow Fire", tweet)
tweet = re.sub(r"MadRiverComplex", "Mad River Complex", tweet)
tweet = re.sub(r"feelingmanly", "feeling manly", tweet)
tweet = re.sub(r"stillnotoverit", "still not over it", tweet)
tweet = re.sub(r"FortitudeValley", "Fortitude Valley", tweet)
tweet = re.sub(r"CoastpowerlineTramTr", "Coast powerline", tweet)
tweet = re.sub(r"ServicesGold", "Services Gold", tweet)
tweet = re.sub(r"NewsbrokenEmergency", "News broken emergency", tweet)
tweet = re.sub(r"Evaucation", "evacuation", tweet)
tweet = re.sub(r"leaveevacuateexitbe", "leave evacuate exit be", tweet)
tweet = re.sub(r"P_EOPLE", "PEOPLE", tweet)
tweet = re.sub(r"Tubestrike", "tube strike", tweet)
tweet = re.sub(r"CLASS_SICK", "CLASS SICK", tweet)
tweet = re.sub(r"localplumber", "local plumber", tweet)
tweet = re.sub(r"awesomejobsiri", "awesome job siri", tweet)
tweet = re.sub(r"PayForItHow", "Pay for it how", tweet)
tweet = re.sub(r"ThisIsAfrica", "This is Africa", tweet)
tweet = re.sub(r"crimeairnetwork", "crime air network", tweet)
tweet = re.sub(r"KimAcheson", "Kim Acheson", tweet)
tweet = re.sub(r"cityofcalgary", "City of Calgary", tweet)
tweet = re.sub(r"prosyndicate", "pro syndicate", tweet)
tweet = re.sub(r"660NEWS", "660 NEWS", tweet)
tweet = re.sub(r"BusInsMagazine", "Business Insurance Magazine", tweet)
tweet = re.sub(r"wfocus", "focus", tweet)
tweet = re.sub(r"ShastaDam", "Shasta Dam", tweet)
tweet = re.sub(r"go2MarkFranco", "Mark Franco", tweet)
tweet = re.sub(r"StephGHinojosa", "Steph Hinojosa", tweet)
tweet = re.sub(r"Nashgrier", "Nash Grier", tweet)
tweet = re.sub(r"NashNewVideo", "Nash new video", tweet)
tweet = re.sub(r"IWouldntGetElectedBecause", "I would not get elected because", tweet)
tweet = re.sub(r"SHGames", "Sledgehammer Games", tweet)
tweet = re.sub(r"bedhair", "bed hair", tweet)
tweet = re.sub(r"JoelHeyman", "Joel Heyman", tweet)
tweet = re.sub(r"viaYouTube", "via YouTube", tweet)
tweet = tweet.strip()
return tweet<categorify>
|
model = keras.models.load_model("best_mnist_model.h5")
model.evaluate(X_train, Y_train)
|
Digit Recognizer
|
12,714,797 |
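# Delete all punctuation characters using a translation table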
def remove_punct(text):
    table = str.maketrans('', '', string.punctuation)
    return text.translate(table)<feature_engineering>
|
results = model.predict(test)
results = np.argmax(results, axis=1)
results = pd.Series(results, name="Label")
|
Digit Recognizer
|
12,714,797 |
df_train['text'] = df_train['text'].apply(reduce_lengthening)
df_train['text'] = df_train['text'].apply(decontracted)
df_train['text'] = df_train['text'].apply(remove_punct)<drop_column>
|
submission = pd.concat([pd.Series(range(1,28001), name="ImageId"), results], axis=1)
submission.to_csv("cnn_mnist_with_aug.csv", index=False)
|
Digit Recognizer
|
11,966,240 |
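# Strip URLs (http/https and www forms) from the text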
def remove_URL(text):
    url = re.compile(r'https?://\S+|www\.\S+')
    return url.sub(r'', text)<feature_engineering>
|
%matplotlib inline
np.random.seed(42)
|
Digit Recognizer
|
11,966,240 |
df_train['text'] = df_train['text'].apply(remove_URL)<choose_model_class>
|
train_data = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test_data = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
submission = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
|
Digit Recognizer
|
11,966,240 |
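# nlpaug augmenter that substitutes words with GloVe-embedding neighbours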
aug_w2v = naw.WordEmbsAug(
    model_type='glove', model_path='../input/glove-global-vectors-for-word-representation/glove.6B.100d.txt',
    action="substitute")
<normalization>
|
train_data=".. /input/digit-recognizer/train.csv"
test_data = ".. /input/digit-recognizer/test.csv"
|
Digit Recognizer
|
11,966,240 |
aug_w2v.aug_p = 0.2
print("Augmented Text:")
for ii in range(5):
    augmented_text = aug_w2v.augment(text)
    print(augmented_text)<split>
|
Xtrain, Xtest, ytrain, ytest = train_test_split(raw_data[:,1:], raw_data[:,0], test_size=0.2,
                                                stratify=raw_data[:,0],
                                                random_state=42)
|
Digit Recognizer
|
11,966,240 |
train, valid = train_test_split(df_train, test_size=0.15)
print('Shape of train', train.shape)
print("Shape of Validation ", valid.shape)<categorify>
|
Xtrain = Xtrain/255.
Xtest = Xtest/255.
|
Digit Recognizer
|
11,966,240 |
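# Oversample disaster tweets (target==1) with embedding-based augmentation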
def augment_text(df, samples=300, pr=0.2):
    aug_w2v.aug_p = pr
    new_text = []
    df_n = df[df.target==1].reset_index(drop=True)
    for i in tqdm(np.random.randint(0, len(df_n), samples)):
        text = df_n.iloc[i]['text']
        augmented_text = aug_w2v.augment(text)
        new_text.append(augmented_text)
    new = pd.DataFrame({'text': new_text, 'target': 1})
    df = shuffle(df.append(new).reset_index(drop=True))
    return df
<categorify>
|
ytrain = to_categorical(ytrain, num_classes=10)
ytest = to_categorical(ytest, num_classes=10)
|
Digit Recognizer
|
11,966,240 |
train = augment_text(train, samples=400)
tweet = train.append(valid).reset_index(drop=True)<concatenate>
|
Xtrain = Xtrain.astype("float32").reshape(-1,28,28,1)
Xtest = Xtest.astype("float32").reshape(-1,28,28,1)
|
Digit Recognizer
|
11,966,240 |
df = pd.concat([tweet, df_test])<remove_duplicates>
|
train_gen = ImageDataGenerator(rotation_range=20,
                               width_shift_range=0.1,
                               height_shift_range=0.1,
                               zoom_range=0.1,
                               shear_range=0.2)
|
Digit Recognizer
|
11,966,240 |
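# Tokenize tweets, keeping lowercase alphabetic tokens that are not stopwords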
def create_corpus(df):
    corpus = []
    for tweet in tqdm(df['text']):
        words = [word.lower() for word in word_tokenize(tweet) if word.isalpha() and word not in stop]
        corpus.append(words)
    return corpus
<statistical_test>
|
def model():
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Conv2D(16, (3,3), activation="relu", padding="SAME", input_shape=(28,28,1)))
    model.add(BatchNormalization())
    model.add(tf.keras.layers.Conv2D(32, (3,3), activation="relu", padding="SAME"))
    model.add(BatchNormalization())
    model.add(tf.keras.layers.MaxPool2D(strides=(2,2)))
    model.add(Dropout(0.25))
    model.add(tf.keras.layers.Conv2D(32, (3,3), padding='SAME', activation='relu'))
    model.add(BatchNormalization())
    model.add(tf.keras.layers.Conv2D(64, (3,3), padding='SAME', activation='relu'))
    model.add(BatchNormalization())
    model.add(tf.keras.layers.MaxPool2D(strides=(2,2)))
    model.add(Dropout(0.25))
    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(512, activation="relu"))
    model.add(Dropout(0.25))
    model.add(tf.keras.layers.Dense(256, activation="relu"))
    model.add(Dropout(0.5))
    model.add(tf.keras.layers.Dense(10, activation="softmax"))
    return model
|
Digit Recognizer
|
11,966,240 |
corpus = create_corpus(df)<load_from_csv>
|
annealer = LearningRateScheduler(lambda x: 1e-3 * 0.9 ** x)
|
Digit Recognizer
|
11,966,240 |
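# Load the 100-d GloVe vectors into a word -> vector dictionary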
embedding_dict = {}
with open('../input/glove-global-vectors-for-word-representation/glove.6B.100d.txt', 'r') as f:
    for line in f:
        values = line.split()
        word = values[0]
        vectors = np.asarray(values[1:], 'float32')
        embedding_dict[word] = vectors<categorify>
|
model.compile(optimizer=Adam(lr=1e-4), loss='categorical_crossentropy',
              metrics=['accuracy'])
|
Digit Recognizer
|
11,966,240 |
MAX_LEN = 50
tokenizer_obj = Tokenizer()
tokenizer_obj.fit_on_texts(corpus)
sequences = tokenizer_obj.texts_to_sequences(corpus)
tweet_pad = pad_sequences(sequences, maxlen=MAX_LEN, truncating='post', padding='post')<count_unique_values>
|
class MyCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epochs, logs={}):
        if logs.get('accuracy') > 0.99:
            print()
            print("Stopping Training")
            self.model.stop_training = True
|
Digit Recognizer
|
11,966,240 |
word_index = tokenizer_obj.word_index
print('Number of unique words:', len(word_index))<define_variables>
|
history = model.fit_generator(train_gen.flow(Xtrain, ytrain, batch_size=16),
                              validation_data=(Xtest[:1000,:], ytest[:1000,:]),
                              epochs=30, callbacks=[annealer], steps_per_epoch=500)
|
Digit Recognizer
|
11,966,240 |
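# Map each vocabulary index to its GloVe vector; out-of-vocabulary rows stay zero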
num_words = len(word_index) + 1
embedding_matrix = np.zeros((num_words, 100))
for word, i in tqdm(word_index.items()):
    if i > num_words:
        continue
    emb_vec = embedding_dict.get(word)
    if emb_vec is not None:
        embedding_matrix[i] = emb_vec
<choose_model_class>
|
model.evaluate(Xtest,ytest )
|
Digit Recognizer
|
11,966,240 |
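# SimpleRNN binary classifier on frozen 100-d GloVe embeddings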
model = Sequential()
embedding = Embedding(num_words, 100, embeddings_initializer=Constant(embedding_matrix),
                      input_length=MAX_LEN, trainable=False)
model.add(embedding)
model.add(SimpleRNN(100))
model.add(Dense(1, activation='sigmoid'))
optimizer = Adam(learning_rate=1e-5)
model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
model.summary()
<split>
|
testing = np.loadtxt(test_data, skiprows=1, dtype='int', delimiter=',')
test = testing.astype("float32").reshape(-1, 28, 28, 1)/255.
|
Digit Recognizer
|
11,966,240 |
train_df = tweet_pad[:tweet.shape[0]]
test_df = tweet_pad[tweet.shape[0]:]<prepare_x_and_y>
|
ypred = np.argmax(model.predict(test), axis=-1)
|
Digit Recognizer
|
11,966,240 |
<train_model><EOS>
|
submission = pd.concat([pd.Series(range(1,28001), name="ImageId"), results], axis=1)
submission.to_csv("MNIST_digit_recog.csv", index=False)
|
Digit Recognizer
|
12,482,444 |
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<predict_on_test>
|
%matplotlib inline
np.random.seed(2)
sns.set(style='white', context='notebook', palette='deep')
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
|
Digit Recognizer
|
12,482,444 |
y_pre = model.predict(X_test)
y_pre = np.round(y_pre).astype(int).reshape(1142)<compute_test_metric>
|
train = pd.read_csv('../input/digit-recognizer/train.csv')
test = pd.read_csv('../input/digit-recognizer/test.csv')
|
Digit Recognizer
|
12,482,444 |
print(roc_auc_score(y_test, y_pre))<define_variables>
|
y_train = train.label
x_train = train.drop('label', axis=1)
|
Digit Recognizer
|
12,482,444 |
scores_model = []<compute_test_metric>
|
train.isnull().values.any()
|
Digit Recognizer
|
12,482,444 |
scores_model.append({'Model': 'SimpleRNN', 'AUC_Score': roc_auc_score(y_test, y_pre)})<choose_model_class>
|
x_train = x_train/255.0
test = test/255.0
|
Digit Recognizer
|
12,482,444 |
model = Sequential()
embedding = Embedding(num_words, 100, embeddings_initializer=Constant(embedding_matrix),
                      input_length=MAX_LEN, trainable=False)
model.add(embedding)
model.add(SpatialDropout1D(0.2))
model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(1, activation='sigmoid'))
optimizer = Adam(learning_rate=1e-5)
model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
<train_model>
|
y_train = to_categorical(y_train, num_classes=10)
|
Digit Recognizer
|
12,482,444 |
history = model.fit(X_train, y_train, batch_size=4, epochs=10, validation_data=(X_test, y_test), verbose=2)<predict_on_test>
|
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size = 0.1, random_state=2)
|
Digit Recognizer
|
12,482,444 |
y_pre = model.predict(X_test)
y_pre = np.round(y_pre).astype(int).reshape(1142)<compute_test_metric>
|
model = Sequential()
model.add(Conv2D(64,kernel_size=5,padding = 'Same',activation='relu',input_shape=(28,28,1)))
model.add(Conv2D(64,kernel_size=5,padding = 'Same',activation='relu'))
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.40))
model.add(Conv2D(64,kernel_size=3,padding = 'Same',activation='relu'))
model.add(Conv2D(64,kernel_size=3,padding = 'Same',activation='relu'))
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.40))
model.add(Flatten())
model.add(Dense(128, activation = "relu"))
model.add(Dropout(0.40))
model.add(Dense(10, activation = "softmax"))
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"] )
|
Digit Recognizer
|
12,482,444 |
print(roc_auc_score(y_test, y_pre))<compute_test_metric>
|
epochs = 30
batch_size = 100
|
Digit Recognizer
|
12,482,444 |
scores_model.append({'Model': 'LSTM', 'AUC_Score': roc_auc_score(y_test, y_pre)})<choose_model_class>
|
datagen = ImageDataGenerator(
    featurewise_center=False,
    samplewise_center=False,
    featurewise_std_normalization=False,
    samplewise_std_normalization=False,
    zca_whitening=False,
    rotation_range=10,
    zoom_range=0.1,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=False,
    vertical_flip=False)
datagen.fit(x_train)
|
Digit Recognizer
|
12,482,444 |
model = Sequential()
embedding = Embedding(num_words, 100, embeddings_initializer=Constant(embedding_matrix),
                      input_length=MAX_LEN, trainable=False)
model.add(embedding)
model.add(SpatialDropout1D(0.2))
model.add(GRU(300))
model.add(Dense(1, activation='sigmoid'))
optimizer = Adam(learning_rate=1e-5)
model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
model.summary()<train_model>
|
LR_reduction = ReduceLROnPlateau(monitor='val_accuracy',
                                 patience=2,
                                 verbose=1,
                                 factor=0.5,
                                 min_lr=0.00001)
|
Digit Recognizer
|
12,482,444 |
history = model.fit(X_train, y_train, batch_size=8, epochs=10, validation_data=(X_test, y_test), verbose=2)<predict_on_test>
|
model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                    epochs=epochs, validation_data=(x_val, y_val),
                    steps_per_epoch=x_train.shape[0] // batch_size, callbacks=[LR_reduction])
|
Digit Recognizer
|
12,482,444 |
<choose_model_class><EOS>
|
results = model.predict(test)
results = np.argmax(results, axis=1)
results = pd.Series(results, name="Label")
submission = pd.concat([pd.Series(range(1,28001), name="ImageId"), results], axis=1)
submission.to_csv("submission.csv", index=False)
|
Digit Recognizer
|
12,477,197 |
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<train_model>
|
import os
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam
from torch.utils.data import DataLoader
from torchvision import transforms
import numpy as np
import matplotlib.pyplot as plt
from torch.nn import CrossEntropyLoss
from tqdm import tqdm
from collections import Counter
|
Digit Recognizer
|
12,477,197 |
history = model.fit(X_train, y_train, batch_size=4, epochs=5, validation_data=(X_test, y_test), verbose=2)<compute_train_metric>
|
root = "/kaggle/input/digit-recognizer"
train_data = np.loadtxt(os.path.join(root,"train.csv"),delimiter=",",skiprows=1)
test_data = np.loadtxt(os.path.join(root,"test.csv"),delimiter=",",skiprows=1 )
|
Digit Recognizer
|
12,477,197 |
y_pre = model.predict(X_test)
y_pre = np.round(y_pre).astype(int).reshape(1142)
print(roc_auc_score(y_test, y_pre))<compute_test_metric>
|
!nvidia-smi
|
Digit Recognizer
|
12,477,197 |
scores_model.append({'Model': 'Bidirectional-LSTM', 'AUC_Score': roc_auc_score(y_test, y_pre)})<load_from_url>
|
class Dataset:
    def __init__(self, data, targets, transform=None):
        self.data = data
        self.targets = targets
        self.transform = transform
    def __len__(self):
        return len(self.data)
    def __getitem__(self, idx):
        if self.transform is None:
            return self.data[idx], self.targets[idx]
        else:
            return self.transform(self.data[idx]), self.targets[idx]
|
Digit Recognizer
|
12,477,197 |
!wget --quiet https://raw.githubusercontent.com/tensorflow/models/master/official/nlp/bert/tokenization.py<import_modules>
|
transform = transforms.Compose([transforms.ToPILImage(),
                                transforms.RandomResizedCrop(size=28, scale=(0.9,1.0), ratio=(0.9,1.15)),
                                transforms.RandomAffine(degrees=12, translate=(1/7,1/7), shear=12),
                                transforms.RandomRotation(degrees=12),
                                transforms.ToTensor()])
x_train = train_data[:,1:].reshape(-1,28,28).astype(np.uint8)
y_train = torch.LongTensor(train_data[:,0])
train_dataset = Dataset(x_train, y_train, transform)
|
Digit Recognizer
|
12,477,197 |
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import ModelCheckpoint
import tensorflow_hub as hub
import tokenization<categorify>
|
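# Ensemble member: three conv blocks (two Conv-ReLU-BatchNorm pairs + MaxPool each) feeding a three-layer FC head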
class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.layer1 = self.get_conv_block(1, 64)
        self.layer2 = self.get_conv_block(64, 128, paddings=(0,1))
        self.layer3 = self.get_conv_block(128, 256)
        self.fc1 = nn.Linear(256*3*3, 2048)
        self.fc2 = nn.Linear(2048, 512)
        self.fc3 = nn.Linear(512, 10)
    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x).flatten(start_dim=1)
        x = F.dropout(F.relu(self.fc1(x)), 0.7, training=self.training)
        out = self.fc3(F.relu(self.fc2(x)))
        return out
    def get_conv_block(self, in_chan, out_chan, strides=(1,1), paddings=(1,1)):
        return nn.Sequential(
            nn.Conv2d(in_chan, out_chan, 3, strides[0], paddings[0]),
            nn.ReLU(),
            nn.BatchNorm2d(out_chan),
            nn.Conv2d(out_chan, out_chan, 3, strides[0], paddings[1]),
            nn.ReLU(),
            nn.BatchNorm2d(out_chan),
            nn.MaxPool2d(2)
        )
|
Digit Recognizer
|
12,477,197 |
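# Encode raw texts into BERT inputs: token ids, padding masks, and segment ids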
def bert_encode(texts, tokenizer, max_len=512):
    all_tokens = []
    all_masks = []
    all_segments = []
    for text in texts:
        text = tokenizer.tokenize(text)
        text = text[:max_len-2]
        input_sequence = ["[CLS]"] + text + ["[SEP]"]
        pad_len = max_len - len(input_sequence)
        tokens = tokenizer.convert_tokens_to_ids(input_sequence)
        tokens += [0] * pad_len
        pad_masks = [1] * len(input_sequence) + [0] * pad_len
        segment_ids = [0] * max_len
        all_tokens.append(tokens)
        all_masks.append(pad_masks)
        all_segments.append(segment_ids)
    return np.array(all_tokens), np.array(all_masks), np.array(all_segments)<choose_model_class>
|
device = torch.device("cuda")if torch.cuda.is_available() else torch.device("cpu")
random.seed(1234)
np.random.seed(1234)
torch.random.manual_seed(1234)
epochs = 80
batch_size = 512
trainloader = DataLoader(train_dataset,batch_size,shuffle=True,pin_memory=True)
def train(dataloader,net,optimizer,loss_fn,epochs=50):
for n in range(epochs):
with tqdm(dataloader,desc=f"{n+1}/{epochs} epochs")as t:
running_loss = 0.0
running_correct = 0
running_total = 0
for x,y in t:
out = net(x.to(device))
pred = out.max(dim=1)[1]
loss = loss_fn(out,y.to(device))
                optimizer.zero_grad()  # use the optimizer passed into train(), not the global opt
                loss.backward()
                optimizer.step()
running_loss += loss.item() *x.size(0)
running_correct +=(pred==y.to(device)).sum().item()
running_total += x.size(0)
t.set_postfix({"train_loss":running_loss/running_total,"train_acc":running_correct/running_total})
models = []
for i in range(5):
print("Training {} model".format(str(i+1)+["st","nd","rd","th","th"][i]))
cnn = CNN()
cnn.to(device)
opt = Adam(cnn.parameters() ,lr=1e-4)
loss_fn = CrossEntropyLoss()
train(trainloader,cnn,opt,loss_fn,epochs)
models.append(cnn )
|
Digit Recognizer
|
12,477,197 |
def build_model(bert_layer, max_len=512):
input_word_ids = Input(shape=(max_len,), dtype=tf.int32, name="input_word_ids")
input_mask = Input(shape=(max_len,), dtype=tf.int32, name="input_mask")
segment_ids = Input(shape=(max_len,), dtype=tf.int32, name="segment_ids")
_, sequence_output = bert_layer([input_word_ids, input_mask, segment_ids])
clf_output = sequence_output[:, 0, :]
out = Dense(1, activation='sigmoid' )(clf_output)
model = Model(inputs=[input_word_ids, input_mask, segment_ids], outputs=out)
model.compile(Adam(lr=1e-5), loss='binary_crossentropy', metrics=['accuracy'])
return model<choose_model_class>
|
train_dataset.transform = transforms.ToTensor()
trainloader = DataLoader(train_dataset,batch_size=512,shuffle=False,pin_memory=True)
train_preds = []
for cnn in models:
    cnn.eval()  # switch every ensemble member to eval mode, not just the last-trained one
    train_pred = []
with torch.no_grad() :
for x,_ in trainloader:
out = cnn(x.to(device))
pred = out.max(dim=1)[1]
train_pred.append(pred.detach().cpu().numpy())
train_preds.append(np.concatenate(train_pred))
train_preds = list(zip(*train_preds))
train_pred = np.array(list(map(lambda x: Counter(x ).most_common(1)[0][0],train_preds)))
train_acc =(train_pred == y_train.numpy() ).astype("float" ).mean()
print("The training accurary is {}".format(train_acc))
|
Digit Recognizer
|
12,477,197 |
%%time
module_url = "https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/1"
bert_layer = hub.KerasLayer(module_url, trainable=True )<load_from_csv>
|
x_test = test_data.reshape(-1,1,28,28)
x_test = torch.Tensor(x_test)/255.
test_preds = []
for cnn in models:
    cnn.eval()  # inference mode for each ensemble member
    test_pred = []
with torch.no_grad() :
for i in range(0,len(x_test),batch_size):
out = cnn(x_test[i:i+batch_size].to(device))
pred = out.max(dim=1)[1]
test_pred.append(pred.detach().cpu().numpy())
test_preds.append(np.concatenate(test_pred))
test_preds = list(zip(*test_preds))
test_pred = list(map(lambda x: Counter(x ).most_common(1)[0][0],test_preds))
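The per-sample majority vote above, shown in isolation as a sketch with made-up predictions from three hypothetical models.
from collections import Counter
import numpy as np

model_preds = [np.array([3, 7, 1]), np.array([3, 2, 1]), np.array([5, 7, 1])]
votes = list(zip(*model_preds))  # one tuple of votes per sample: (3, 3, 5), (7, 2, 7), (1, 1, 1)
final = [Counter(v).most_common(1)[0][0] for v in votes]
print(final)  # [3, 7, 1]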
|
Digit Recognizer
|
12,477,197 |
train = pd.read_csv("../input/nlp-getting-started/train.csv")
test = pd.read_csv("../input/nlp-getting-started/test.csv")
submission = pd.read_csv("../input/nlp-getting-started/sample_submission.csv")
<feature_engineering>
|
imageid = pd.Series(np.arange(len(test_pred)))+1
df = pd.DataFrame({"ImageId":imageid,"Label":test_pred})
df.to_csv("/kaggle/working/test_pred.csv",index=False )
|
Digit Recognizer
|
12,506,433 |
vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
do_lower_case = bert_layer.resolved_object.do_lower_case.numpy()
tokenizer = tokenization.FullTokenizer(vocab_file, do_lower_case )<categorify>
|
%matplotlib inline
|
Digit Recognizer
|
12,506,433 |
train_input = bert_encode(train.text.values, tokenizer, max_len=160)
test_input = bert_encode(test.text.values, tokenizer, max_len=160)
train_labels = train.target.values<train_model>
|
train_data = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
train_data.head()
|
Digit Recognizer
|
12,506,433 |
checkpoint = ModelCheckpoint('model.h5', monitor='val_loss', save_best_only=True)
train_history = model.fit(
train_input, train_labels,
validation_split=0.2,
epochs=3,
callbacks=[checkpoint],
batch_size=16
)<predict_on_test>
|
test_data = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
test_data.head()
|
Digit Recognizer
|
12,506,433 |
model.load_weights('model.h5')
test_pred = model.predict(test_input )<save_to_csv>
|
from sklearn.model_selection import train_test_split
|
Digit Recognizer
|
12,506,433 |
submission['target'] = test_pred.round().astype(int)
submission.to_csv('submission.csv', index=False )<load_from_csv>
|
from sklearn.model_selection import train_test_split
|
Digit Recognizer
|
12,506,433 |
train = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
print(train.shape, test.shape)
train.sample(10, random_state=26 )<feature_engineering>
|
y = train_data['label']
|
Digit Recognizer
|
12,506,433 |
def preprocess(df):
df_new = df.copy(deep=True)
    df_new['text'] = df.apply(lambda row: re.sub(r'@[A-Za-z0-9_]+', '', row['text']).lower(), axis=1)  # strip whole @handles; '[A-z]' also matched punctuation
df_new['text_w_kword'] = df_new.apply(lambda row: 'keyword: ' + str(row['keyword'])+ '.'+ str(row['text']), axis=1)
return df_new
train_prep = preprocess(train)
test_prep = preprocess(test)
train_prep[40:60]<split>
|
df_train = train_data.drop(['label'], axis=1 )
|
Digit Recognizer
|
12,506,433 |
X_train, X_valid, y_train, y_valid = train_test_split(train_prep['text_w_kword'],
train_prep['target'],
test_size=0.1,
random_state=1 )<load_pretrained>
|
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import sklearn.metrics as metrics
|
Digit Recognizer
|
12,506,433 |
tokenizer = DistilBertTokenizerFast.from_pretrained('/kaggle/input/huggingface-bert-variants/distilbert-base-uncased/distilbert-base-uncased/')
train_encodings = tokenizer(list(X_train), truncation=True, padding='max_length', max_length=100)
valid_encodings = tokenizer(list(X_valid), truncation=True, padding='max_length', max_length=100 )<data_type_conversions>
|
df_train = df_train/255
df_train[0:5]
|
Digit Recognizer
|
12,506,433 |
train_dataset = tf.data.Dataset.from_tensor_slices((
dict(train_encodings),
y_train.values.astype('float32' ).reshape(( -1,1))
))
valid_dataset = tf.data.Dataset.from_tensor_slices((
dict(valid_encodings),
y_valid.values.astype('float32' ).reshape(( -1,1))
))
train_dataset<choose_model_class>
|
y = to_categorical(y, num_classes = 10)
y.shape
|
Digit Recognizer
|
12,506,433 |
es = EarlyStopping(monitor='val_loss',
verbose=1,
patience=4,
restore_best_weights=True )<choose_model_class>
|
X_train, X_test, y_train, y_test = train_test_split(df_train, y, test_size = 0.1, random_state=42, stratify=y )
|
Digit Recognizer
|
12,506,433 |
batch_size = 64
num_epochs = 15
num_train_steps =(X_train.shape[0] // batch_size)* num_epochs
lr_scheduler = PolynomialDecay(
initial_learning_rate=5e-5,
end_learning_rate=1e-5,
decay_steps=num_train_steps
)
new_opt = Adam(learning_rate=lr_scheduler )<compute_test_metric>
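A sketch of what the PolynomialDecay schedule above yields: with the default power of 1 it decays the learning rate linearly from the initial to the end value over decay_steps.
import tensorflow as tf
from tensorflow.keras.optimizers.schedules import PolynomialDecay

sched = PolynomialDecay(initial_learning_rate=5e-5,
                        end_learning_rate=1e-5,
                        decay_steps=100)
for step in [0, 50, 100]:
    print(step, float(sched(step)))  # 5e-5, 3e-5, 1e-5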
|
input_shape =(28,28,1)
X_input = Input(input_shape)
x = Conv2D(64,(3,3),strides=(1,1),name='layer_conv1',padding='same' )(X_input)
x = BatchNormalization()(x)
x = Activation('relu' )(x)
x = MaxPooling2D(( 2,2),name='maxPool1' )(x)
x = Conv2D(32,(3,3),strides=(1,1),name='layer_conv2',padding='same' )(x)
x = BatchNormalization()(x)
x = Activation('relu' )(x)
x = MaxPooling2D(( 2,2),name='maxPool2' )(x)
x = Conv2D(32,(3,3),strides=(1,1),name='conv3',padding='same' )(x)
x = BatchNormalization()(x)
x = Activation('relu' )(x)
x = MaxPooling2D(( 2,2), name='maxPool3' )(x)
x = Flatten()(x)
x = Dense(64,activation ='relu',name='fc0' )(x)
x = Dropout(0.25 )(x)
x = Dense(32,activation ='relu',name='fc1' )(x)
x = Dropout(0.25 )(x)
x = Dense(10,activation ='softmax',name='fc2' )(x)
conv_model = Model(inputs=X_input, outputs=x, name='Predict')
conv_model.summary()
|
Digit Recognizer
|
12,506,433 |
def f1_score(true, pred):
ground_positives = K.sum(true, axis=0)+ K.epsilon()
pred_positives = K.sum(pred, axis=0)+ K.epsilon()
true_positives = K.sum(true * pred, axis=0)+ K.epsilon()
precision = true_positives / pred_positives
recall = true_positives / ground_positives
f1 = 2 *(precision * recall)/(precision + recall + K.epsilon())
return f1<train_model>
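A numpy cross-check of the soft-F1 formula above (a sketch; hard 0/1 predictions and one-hot truths stand in for model outputs).
import numpy as np

true = np.array([[1, 0], [0, 1], [1, 0]], dtype=float)
pred = np.array([[1, 0], [1, 0], [1, 0]], dtype=float)
tp = (true * pred).sum(axis=0)
precision = tp / pred.sum(axis=0).clip(min=1e-7)
recall = tp / true.sum(axis=0).clip(min=1e-7)
f1 = 2 * precision * recall / (precision + recall + 1e-7)
print(f1)  # class 0: 0.8 (precision 2/3, recall 1); class 1: 0.0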
|
conv_model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])
conv_model.fit(X_train, y_train, epochs=10, batch_size=100, validation_data=(X_test,y_test))
|
Digit Recognizer
|
12,506,433 |
model = TFDistilBertForSequenceClassification.from_pretrained('/kaggle/input/huggingface-bert-variants/distilbert-base-uncased/distilbert-base-uncased/',
num_labels=2)
model.compile(
optimizer=new_opt,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
)
history = model.fit(train_dataset.batch(batch_size),
validation_data=valid_dataset.batch(batch_size),
epochs=num_epochs,
callbacks=[es] )<categorify>
|
sgd = SGD(lr=0.0005, momentum=0.5, decay=0.0, nesterov=False)
conv_model.compile(optimizer=sgd,loss='categorical_crossentropy',metrics=['accuracy'])
conv_model.fit(X_train, y_train, epochs=30, validation_data=(X_test, y_test))
|
Digit Recognizer
|
12,506,433 |
test_encodings = tokenizer(list(test_prep['text_w_kword']), truncation=True, padding='max_length', max_length=100)
test_dataset = tf.data.Dataset.from_tensor_slices((
dict(test_encodings)
))<predict_on_test>
|
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
datagen.fit(X_train )
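A self-contained sketch of pulling one augmented batch from an ImageDataGenerator configured like the one above; the random arrays stand in for X_train.
import numpy as np
from tensorflow.keras.preprocessing.image import ImageDataGenerator

x = np.random.rand(32, 28, 28, 1)
y = np.random.randint(0, 10, size=(32,))
gen = ImageDataGenerator(rotation_range=10, zoom_range=0.1,
                         width_shift_range=0.1, height_shift_range=0.1)
xb, yb = next(gen.flow(x, y, batch_size=8))
print(xb.shape, yb.shape)  # (8, 28, 28, 1) (8,)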
|
Digit Recognizer
|
12,506,433 |
test_preds = model.predict(test_dataset.batch(1))<prepare_output>
|
hist = conv_model.fit_generator(datagen.flow(X_train,y_train),
epochs = 1, validation_data =(X_test,y_test),
verbose = 2)
|
Digit Recognizer
|
12,506,433 |
class_preds = np.argmax(test_preds.logits, axis=1)
class_preds<predict_on_test>
|
data_generator = ImageDataGenerator(rotation_range=1,
                                    zoom_range=0.1,
                                    width_shift_range=0.05,
                                    height_shift_range=0.05)  # no rescale: X_train was already divided by 255 above
data_generator.fit(X_train)
|
Digit Recognizer
|
12,506,433 |
valid_preds = model.predict(valid_dataset.batch(batch_size))<prepare_output>
|
df_test = test_data/255
df_test[0:5]
|
Digit Recognizer
|
12,506,433 |
valid_class_preds = np.argmax(valid_preds.logits, axis=1 )<compute_test_metric>
|
results = conv_model.predict(df_test)
results = np.argmax(results,axis=1)
my_submission = pd.DataFrame({'ImageId': list(range(1, len(results)+1)) , 'Label': results})
my_submission.to_csv('submission.csv', index=False )
|
Digit Recognizer
|
12,506,433 |
<create_dataframe><EOS>
|
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("cnn_mnist_datagen.csv",index=False)
print("Your submission was successfully saved!")
|
Digit Recognizer
|
12,403,998 |
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<save_to_csv>
|
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Conv2D , MaxPool2D , Flatten , Dropout , BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import LearningRateScheduler
|
Digit Recognizer
|
12,403,998 |
df_submission.to_csv('submission.csv', index=False )<import_modules>
|
train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv" )
|
Digit Recognizer
|
12,403,998 |
print(tf.__version__ )<import_modules>
|
train_Y = np.array(train["label"])
train_Y = np_utils.to_categorical(train_Y, num_classes = 10 )
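One-hot encoding as done above, in isolation (a sketch using the same np_utils helper imported earlier).
import numpy as np
from keras.utils import np_utils

labels = np.array([0, 3, 9])
print(np_utils.to_categorical(labels, num_classes=10))  # each row has a single 1 at the label index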
|
Digit Recognizer
|
12,403,998 |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tensorflow import keras
import cv2
import PIL
import os
import pathlib
import shutil
from IPython.display import Image, display
import plotly.graph_objects as go
from sklearn.metrics import cohen_kappa_score
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential, Model,load_model
from tensorflow.keras.applications import vgg16
from tensorflow.keras.applications import resnet50
from tensorflow.keras.applications import xception
from tensorflow.keras.applications import inception_v3
from tensorflow.keras.applications import inception_resnet_v2
from tensorflow.keras.applications import resnet_v2
from tensorflow.keras.applications import nasnet
from tensorflow.keras.preprocessing.image import ImageDataGenerator,load_img, img_to_array
from tensorflow.keras import Input
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Flatten,BatchNormalization,Activation
from tensorflow.keras.layers import GlobalAveragePooling2D, Concatenate
from tensorflow.keras.optimizers import Adam, SGD, RMSprop
from tensorflow.keras.callbacks import ModelCheckpoint, Callback, EarlyStopping, LearningRateScheduler
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing import image_dataset_from_directory
from tensorflow.keras.layers.experimental import preprocessing
import gc
import skimage.io
<load_from_csv>
|
train_X = np.array(train.iloc[:, 1:])
test_X = np.array(test )
|
Digit Recognizer
|
12,403,998 |
train_dir = '../input/dog-breed-dataset-with-subdirectories-by-class/data/train'
test_dir = '../input/dog-breed-dataset-with-subdirectories-by-class/data/test'
train_labels = pd.read_csv('../input/dog-breed-identification/labels.csv', index_col = 'id')
submission=pd.read_csv('../input/dog-breed-identification/sample_submission.csv')<categorify>
|
train_X, valid_X, train_Y, valid_Y = train_test_split(train_X, train_Y, shuffle=True, test_size = 0.1, random_state=2, stratify=train_Y )
|
Digit Recognizer
|
12,403,998 |
target, dog_breeds = pd.factorize(train_labels['breed'], sort = True)
train_labels['target'] = target
print(dog_breeds )<count_values>
|
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
datagen.fit(train_X )
|
Digit Recognizer
|
12,403,998 |
train_labels['breed'].value_counts()<define_variables>
|
nets = 5
model = [0] *nets
for i in range(nets):
model[i] = Sequential()
model[i].add(Conv2D(32, kernel_size = 3, activation='relu', input_shape =(28, 28, 1)))
model[i].add(BatchNormalization())
model[i].add(Conv2D(32, kernel_size = 3, activation='relu'))
model[i].add(BatchNormalization())
model[i].add(Conv2D(32, kernel_size = 5, strides=2, padding='same', activation='relu'))
model[i].add(BatchNormalization())
model[i].add(Dropout(0.4))
model[i].add(Conv2D(64, kernel_size = 3, activation='relu'))
model[i].add(BatchNormalization())
model[i].add(Conv2D(64, kernel_size = 3, activation='relu'))
model[i].add(BatchNormalization())
model[i].add(Conv2D(64, kernel_size = 5, strides=2, padding='same', activation='relu'))
model[i].add(BatchNormalization())
model[i].add(Dropout(0.4))
model[i].add(Conv2D(128, kernel_size = 4, activation='relu'))
model[i].add(BatchNormalization())
model[i].add(Flatten())
model[i].add(Dropout(0.4))
model[i].add(Dense(10, activation='softmax'))
model[i].compile(optimizer="rmsprop", loss="categorical_crossentropy", metrics=["accuracy"])
|
Digit Recognizer
|
12,403,998 |
N_EPOCHS = 50
BATCH_SIZE = 128
IMG_HEIGHT = 331
IMG_WIDTH = 331<create_dataframe>
|
annealer = LearningRateScheduler(lambda x: 1e-3 * 0.95 ** x)
history = [0] * nets
epochs = 45
for j in range(nets):
print("CNN ",j+1)
history[j] = model[j].fit_generator(datagen.flow(train_X, train_Y, batch_size=64),
epochs = epochs, steps_per_epoch = train_X.shape[0]//64,
validation_data =(valid_X, valid_Y), callbacks=[annealer], verbose=1)
print("CNN {0:d} DONE".format(j+1))
|
Digit Recognizer
|
12,403,998 |
train_ds = image_dataset_from_directory(
directory = train_dir,
labels = 'inferred',
label_mode='int',
batch_size=BATCH_SIZE,
image_size=(IMG_HEIGHT, IMG_WIDTH),
shuffle = True,
seed=1234,
validation_split=0.1,
subset="training",
)<define_variables>
|
results = np.zeros(( test_X.shape[0],10))
for j in range(nets):
results = results + model[j].predict(test_X)
results = np.argmax(results,axis = 1)
results = pd.Series(results,name="Label")
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("submission.csv",index=False )
|
Digit Recognizer
|
13,307,296 |
class_names = train_ds.class_names
print(len(class_names))
print(class_names )<create_dataframe>
|
df_train=pd.read_csv("../input/digit-recognizer/train.csv")
df_train.head()
|
Digit Recognizer
|
13,307,296 |
val_ds = image_dataset_from_directory(
directory = train_dir,
labels = 'inferred',
label_mode='int',
batch_size=BATCH_SIZE,
image_size=(IMG_HEIGHT, IMG_WIDTH),
shuffle = True,
seed=1234,
validation_split=0.1,
subset="validation",
)<create_dataframe>
|
from sklearn.utils import shuffle
df_train = shuffle(df_train)
|
Digit Recognizer
|
13,307,296 |
test_ds = image_dataset_from_directory(
directory = test_dir,
label_mode= None,
batch_size=BATCH_SIZE,
image_size=(IMG_HEIGHT, IMG_WIDTH),
shuffle = False,
seed=1234
)<drop_column>
|
X=df_train.drop(["label"],axis=1)
y=df_train["label"]
|
Digit Recognizer
|
13,307,296 |
del class_names<data_type_conversions>
|
y.value_counts(normalize=True )
|
Digit Recognizer
|
13,307,296 |
plt.figure(figsize=(20, 20))
for images, labels in train_ds.take(1):
for i in range(16):
ax = plt.subplot(4, 4, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
plt.title(dog_breeds[labels[i]])
plt.axis("off" )<data_type_conversions>
|
from tensorflow.keras.layers import Conv2D,Dense,Reshape,MaxPool2D,Dropout
from tensorflow.keras.models import Sequential
|
Digit Recognizer
|
13,307,296 |
plt.figure(figsize=(20, 20))
for images, labels in val_ds.take(1):
for i in range(16):
ax = plt.subplot(4, 4, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
plt.title(dog_breeds[labels[i]])
plt.axis("off" )<data_type_conversions>
|
from tensorflow.keras.layers import Flatten
|
Digit Recognizer
|
13,307,296 |
plt.figure(figsize=(20, 20))
for images in test_ds.take(1):
for i in range(16):
ax = plt.subplot(4, 4, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
plt.axis("off" )<load_pretrained>
|
import tensorflow as tf
|
Digit Recognizer
|
13,307,296 |
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.prefetch(buffer_size=AUTOTUNE)
test_ds = test_ds.prefetch(buffer_size=AUTOTUNE)
<choose_model_class>
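A minimal sketch of the prefetch pattern above on a toy tf.data pipeline: prefetching overlaps data preparation with model execution.
import tensorflow as tf

AUTOTUNE = tf.data.AUTOTUNE
ds = tf.data.Dataset.range(10).batch(2).prefetch(buffer_size=AUTOTUNE)
for batch in ds.take(2):
    print(batch.numpy())  # [0 1] then [2 3]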
|
model=Sequential()
model.add(Reshape(( 28,28,1),input_shape=(784,)))
model.add(Conv2D(64,(3,3),activation="relu",kernel_initializer="he_uniform"))
model.add(Dropout(0.5))
model.add(MaxPool2D(pool_size=(2,2),strides=2))
model.add(Conv2D(64,(5,5),activation="relu",kernel_initializer="he_uniform"))
model.add(Dropout(0.5))
model.add(MaxPool2D(pool_size=(2,2),strides=2))
model.add(Flatten())
model.add(Dense(64,activation="relu",kernel_initializer="he_uniform"))
model.add(Dropout(0.3))
model.add(Dense(10,activation="softmax"))
model.compile(optimizer="adam",loss="sparse_categorical_crossentropy",metrics=["accuracy"])
model.summary()
|
Digit Recognizer
|
13,307,296 |
data_augmentation = Sequential(
[
preprocessing.RandomFlip("horizontal"),
preprocessing.RandomRotation(0.1),
preprocessing.RandomZoom(0.1),
]
)<choose_model_class>
|
model.fit(tf.cast(X,tf.float32)/255.0,tf.cast(y,tf.float32),validation_split=0.3,batch_size=100,verbose=2,epochs=100)
|
Digit Recognizer
|
13,307,296 |
base_model_1 = xception.Xception(weights='imagenet', include_top=False, input_shape=(IMG_HEIGHT, IMG_WIDTH,3))
base_model_2 = inception_v3.InceptionV3(weights='imagenet', include_top=False, input_shape=(IMG_HEIGHT, IMG_WIDTH,3))
base_model_3 = inception_resnet_v2.InceptionResNetV2(weights='imagenet', include_top=False, input_shape=(IMG_HEIGHT, IMG_WIDTH,3))
base_model_5 = nasnet.NASNetLarge(weights='imagenet', include_top=False, input_shape=(IMG_HEIGHT, IMG_WIDTH,3))
base_model_1.trainable = False
base_model_2.trainable = False
base_model_3.trainable = False
base_model_5.trainable = False
inputs = Input(shape=(IMG_HEIGHT, IMG_WIDTH, 3))
aug_inputs = data_augmentation(inputs)
x1 = xception.preprocess_input(aug_inputs)
x1 = base_model_1(x1, training=False)
x1 = GlobalAveragePooling2D()(x1)
x2 = inception_v3.preprocess_input(aug_inputs)
x2 = base_model_2(x2, training=False)
x2 = GlobalAveragePooling2D()(x2)
x3 = inception_resnet_v2.preprocess_input(aug_inputs)
x3 = base_model_3(x3, training=False)
x3 = GlobalAveragePooling2D()(x3)
x5 = nasnet.preprocess_input(aug_inputs)
x5 = base_model_5(x5, training=False)
x5 = GlobalAveragePooling2D()(x5)
x = Concatenate()([x1, x2, x3, x5])
x = Dropout (.7 )(x)
outputs = Dense(120, activation='softmax' )(x)
model = Model(inputs, outputs)
display(model.summary() )<choose_model_class>
|
df_test=pd.read_csv("../input/digit-recognizer/test.csv")
df_test.head()
|
Digit Recognizer
|
13,307,296 |
optimizer = Adam(learning_rate=0.001)
model.compile(loss="sparse_categorical_crossentropy", metrics=['accuracy'], optimizer=optimizer )<choose_model_class>
|
submit=pd.DataFrame(columns=["ImageId","Label"] )
|
Digit Recognizer
|
13,307,296 |
EarlyStop_callback = EarlyStopping(min_delta=0.001, patience=10, restore_best_weights=True)
<train_model>
|
submit["ImageId"]=df_test.index.values
submit["ImageId"]=submit["ImageId"]+1
submit
|
Digit Recognizer
|
13,307,296 |
history = model.fit(
train_ds,
epochs=N_EPOCHS,
validation_data=val_ds,
callbacks=[EarlyStop_callback]
)<predict_on_test>
|
import numpy as np
|
Digit Recognizer
|
13,307,296 |
wrong_pred_images = np.array([])
actual_labels = np.array([])
predicted_labels = np.array([])
batch = 1
for images, labels in val_ds:
batch_predictions_probs = model.predict_on_batch(images)
batch_predictions = np.argmax(batch_predictions_probs, axis=1)
mask =(batch_predictions != labels.numpy())
print("No of wrong predictions on batch {}: {}".format(batch, mask.sum()))
wrong_pred_indices = np.arange(len(batch_predictions)) [mask]
print(wrong_pred_indices)
if len(wrong_pred_images)== 0:
wrong_pred_images = images.numpy() [wrong_pred_indices]
actual_labels = labels.numpy() [wrong_pred_indices]
predicted_labels = batch_predictions[wrong_pred_indices]
else:
wrong_pred_images = np.append(wrong_pred_images, images.numpy() [wrong_pred_indices], axis = 0)
actual_labels = np.append(actual_labels, labels.numpy() [wrong_pred_indices], axis = 0)
predicted_labels = np.append(predicted_labels, batch_predictions[wrong_pred_indices], axis = 0)
batch = batch + 1
print(wrong_pred_images.shape)
print(actual_labels.shape)
print(predicted_labels.shape )<predict_on_test>
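The masking idea above in miniature: a sketch that selects misclassified indices with a boolean mask over made-up labels.
import numpy as np

preds = np.array([2, 0, 1, 1])
labels = np.array([2, 1, 1, 0])
mask = preds != labels
print(np.arange(len(preds))[mask])  # indices of wrong predictions: [1 3]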
|
output=model.predict(tf.cast(df_test,tf.float32)/255.0)
output
|
Digit Recognizer
|
13,307,296 |
predictions = model.predict(
test_ds,
batch_size = BATCH_SIZE,
verbose=1
)<prepare_output>
|
prediction=np.argmax(output,axis=1)
len(prediction )
|
Digit Recognizer
|
13,307,296 |
submission.loc[:, dog_breeds] = predictions
submission.head()<save_to_csv>
|
submit["Label"]=prediction
submit
|
Digit Recognizer
|
13,307,296 |
submission.to_csv('submission.csv', index=False)
<import_modules>
|
submit.to_csv("digit_submission.csv", index=False)  # give the submission file a .csv extension
|
Digit Recognizer
|
13,330,495 |
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tensorflow import keras
from keras.preprocessing.image import ImageDataGenerator as Imgen
from keras.models import Sequential,load_model
from keras.layers import Conv2D,MaxPooling2D,Flatten,Dense,GlobalAveragePooling2D,Dropout
from keras.preprocessing import image
import cv2
import pickle<load_from_csv>
|
import os
import numpy as np
import glob
import shutil
import pandas as pd
import matplotlib.pyplot as plt
|
Digit Recognizer
|
13,330,495 |
labels = pd.read_csv("../input/dog-breed-identification/labels.csv")
sample_sub = pd.read_csv("../input/dog-breed-identification/sample_submission.csv")
labels.head()<feature_engineering>
|
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras import regularizers
from tensorflow.keras.layers import BatchNormalization
|
Digit Recognizer
|
13,330,495 |
def addjpg(id):
return id+".jpg"<feature_engineering>
|
train_full = pd.read_csv("../input/digit-recognizer/train.csv")
test = pd.read_csv("../input/digit-recognizer/test.csv")
train_full.head()
|
Digit Recognizer
|
13,330,495 |
labels['id'] = labels['id'].apply(addjpg)
sample_sub['id'] = sample_sub['id'].apply(addjpg )<choose_model_class>
|
train = train_full.sample(frac=0.8, random_state=0)
val = train_full.drop(train.index )
|
Digit Recognizer
|
13,330,495 |
data = Imgen(preprocessing_function=keras.applications.nasnet.preprocess_input,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
validation_split=0.1
)<prepare_x_and_y>
|
X_full =(train_full.iloc[:,1:].values ).astype('float32')
y_full = train_full.iloc[:,0].values.astype('int32')
X_train =(train.iloc[:,1:].values ).astype('float32')
y_train = train.iloc[:,0].values.astype('int32')
X_val =(val.iloc[:,1:].values ).astype('float32')
y_val = val.iloc[:,0].values.astype('int32')
X_test = test.values.astype('float32' )
|
Digit Recognizer
|
13,330,495 |
train_ds = data.flow_from_dataframe(
labels,
    directory = '../input/dog-breed-identification/train',
x_col = 'id',
y_col = 'breed',
subset="training",
color_mode="rgb",
target_size =(331,331),
class_mode="categorical",
batch_size=32,
shuffle=True,
seed=123,
)
val_ds = data.flow_from_dataframe(
labels,
    directory = '../input/dog-breed-identification/train',
x_col = 'id',
y_col = 'breed',
subset="validation",
color_mode="rgb",
target_size =(331,331),
class_mode="categorical",
batch_size=32,
shuffle=True,
seed=123,
)<prepare_x_and_y>
|
BATCH_SIZE = 100
|
Digit Recognizer
|
13,330,495 |
x,y = next(train_ds)
x.shape<import_modules>
|
image_gen_train = ImageDataGenerator(
rescale=1./255,
rotation_range=20,
horizontal_flip=False,
fill_mode='nearest')
train_data_gen = image_gen_train.flow(X_train, y_train, batch_size=BATCH_SIZE,
shuffle=True)
full_data_gen = image_gen_train.flow(X_full, y_full, batch_size=BATCH_SIZE,
shuffle=True )
|
Digit Recognizer
|