QUESTION 3
import numpy as np

A = np.array([2,7,4])
B = np.array([3,9,8])
cross = np.cross(A,B)
print(cross)
[20 -4 -3]
Apache-2.0
PRELIM_EXAM.ipynb
Singko25/Linear-Algebra-58020
from google.colab import drive
drive.mount('/content/drive')

import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.datasets as dset
import torchvision.transforms as T
from torch.utils.data import TensorDataset
from torch.utils.data import DataLoader
from torch.utils.data import sampler

import numpy as np
import pandas as pd
from PIL import Image
from sklearn import preprocessing, metrics, model_selection

USE_GPU = True
dtype = torch.float  # we will be using float throughout this tutorial

if USE_GPU and torch.cuda.is_available():
    device = torch.device('cuda')
else:
    device = torch.device('cpu')

# Constant to control how frequently we print train loss
print_every = 100
print('using device:', device)

train_mean = 0.0
train_std = 0.0
val_mean = 0.0
val_std = 0.0

district_list = ['villages_towns_Sanxia', 'villages_towns_Sanzhi', 'villages_towns_Sanchong',
                 'villages_towns_Zhonghe', 'villages_towns_Zhongshan', 'villages_towns_Zhongzheng',
                 'villages_towns_Wugu', 'villages_towns_Xinyi', 'villages_towns_Neihu',
                 'villages_towns_Bali', 'villages_towns_Beitou', 'villages_towns_Nangang',
                 'villages_towns_Tucheng', 'villages_towns_Shilin', 'villages_towns_Datong',
                 'villages_towns_Daan', 'villages_towns_Wenshan', 'villages_towns_Xindian',
                 'villages_towns_Xinzhuang', 'villages_towns_Songshan', 'villages_towns_Banqiao',
                 'villages_towns_Linkou', 'villages_towns_Shulin', 'villages_towns_Yonghe',
                 'villages_towns_Xizhi', 'villages_towns_Taishan', 'villages_towns_Tamsui',
                 'villages_towns_Shenkeng', 'villages_towns_Ruifang', 'villages_towns_Wanhua',
                 'villages_towns_Wanli', 'villages_towns_Luzhou', 'villages_towns_Gongliao',
                 'villages_towns_Jinshan', 'villages_towns_Shuangxi', 'villages_towns_Yingge']

building_material = ['building_materials_RC', 'building_materials_RB', 'building_materials_brick',
                     'building_materials_steel', 'building_materials_SRC', 'building_materials_PRX',
                     'building_materials_other_material']

FILE = '/content/drive/MyDrive/SC201_Final_Project/Data/final_data_taipei.csv'

# standardize data
# train_mean = 154170.694
# train_std = 79570.40139
data = pd.read_csv(FILE)
data = data[data.unit_price != 0]
data = data[data.unit_price != 2211457]
print(data.count())

sd = {}
mean = {}
columns = ['zoning', 'total_floors', 'floors_area', 'unit_price', 'unit_berth_price',
           'total_berth_price', 'main_building_area', 'auxiliary_building_area',
           'balcony_area', 'building_age']
for column in columns:
    print(column)
    sd_each = data[column].std()
    # sd[column] = sd_each
    mean_each = data[column].mean()
    # mean[column] = mean_each
    data[column] = (data[column] - mean_each)/sd_each

data.to_csv('/content/drive/MyDrive/SC201_Final_Project/Data/new_data_taipei.csv', encoding="utf_8_sig", index=False)
_____no_output_____
MIT
Mean & SD.ipynb
sharlenechen0113/Real-Estate-Price-Prediction
Install Python. Then start the notebook server from a terminal with `jupyter notebook --no-browser` and, in your favorite browser, type http://localhost:8888 (or the port that is assigned).

Basic usage of jupyter notebooks:
- create a new document by clicking the New Notebook button
- start typing code in the shaded textbox
- execute the code
x = 0.1
N = 3
a = 1
b = 0
c = -1
print('f(' + str(x) + ') = ' + str(a*x**2 + b*x + c))

a = 1
b = 1
print(a*b, a*(b+1), a*(b+2), a*(b+3))
a = 2
print(a*b, a*(b+1), a*(b+2), a*(b+3))
a = 3
print(a*b, a*(b+1), a*(b+2), a*(b+3))
a = 4
print(a*b, a*(b+1), a*(b+2), a*(b+3))
(1, 2, 3, 4) (2, 4, 6, 8) (3, 6, 9, 12) (4, 8, 12, 16)
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Fibonacci Series
N = 0
a_1 = 1
a_2 = 0
x = 1
if N > 0:
    print('x_' + str(0) + ' = ' + str(x))
for i in range(1, N):
    x = a_1 + a_2
    print('x_' + str(i) + ' = ' + str(x))
    a_2 = a_1
    a_1 = x

l = -1
r = 1
delta = 0.1
steps = (r-l)/delta + 1
print '-'*20
print('| '),
print('x'),
print('| '),
print('3*x**2 + 2*x + 3'),
print('| ')
for i in range(0, int(steps)):
    x = l + i*delta
    print '-'*20
    print('| '),
    print(x),
    print('| '),
    print(3*x**2 + 2*x + 3),
    print('| ')

def f(x):
    return 3*x**2 + 2*x + 3

l = -1
r = 1
delta = 0.1
steps = (r-l)/delta
for i in range(0, int(steps)):
    x = l + i*delta
    print x,
    print f(x)

def f(r, T, S_0):
    return S_0*(1+r)**T

interest_rate = 0.12
T = 10
S_0 = 100
l = 1
r = T
delta = 1
steps = (r-l)/delta
for i in range(0, int(steps)):
    T = l + i*delta
    print T,
    print f(interest_rate, T, S_0)
1 112.0 2 125.44 3 140.4928 4 157.351936 5 176.23416832 6 197.382268518 7 221.068140741 8 247.596317629 9 277.307875745
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Arrays, lists
a = [1, 2, 5, 7, 5, 3.2, 7]
names = ['Ali', 'Veli', 'Fatma', 'Asli']
#for s in names:
#    print(s)
print(names[3])
print(len(names))

for i in range(len(names)-1, -1, -1):
    print(names[i])

for i in range(len(names)):
    print(names[len(names)-1-i])  # note the -1: names[len(names)] would be out of range

for n in reversed(names):
    print(n)
Asli Fatma Veli Ali
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Average and standard deviation
#x = [0.1, 3, -2.1, 5, 12, 3, 17]
x = [1, -1, 0]
s1 = 0.0
for a in x:
    s1 += a
mean = s1/len(x)

s2 = 0.0
for a in x:
    s2 += (a-mean)**2
variance = s2/len(x)

print('mean = '),
print(mean)
print('variance = '),
print(variance)
mean = 0.0 variance = 0.666666666667
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Find the minimum in an array
a = [2, 5, 1.2, 0, -4, 3]
mn = a[0]
for i in range(1, len(a)):
    if a[i] < mn:
        mn = a[i]
print(mn)

a.sort()
a.append(-7)
v = a.pop()
a.reverse()
v = a.pop(0)
a.sort          # note: without parentheses this only references the method, it does not call it
a = 5
a.bit_length    # same here: add () to actually call it
_____no_output_____
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Homework: Value counts given an array of integers
a = [5, 3, 1, 1, 6, 3, 2]
ua = []
for j in a:
    found = False
    for i in ua:
        if j == i:
            found = True
            break
    if not found:
        ua.append(j)
print(ua)

for i in ua:
    s = 0
    for j in a:
        if i == j:
            s = s + 1
    print(i, s)

a = [5, 3, 1, 1, 6, 3, 2]
ca = []
sum = 0
for j in a:
    ca.append(sum)
    sum += j
ca.append(sum)
print(ca)
#ca = [0, 5, 8, 9, 10, 16, 19, 21]

a = [3, 6, 7, 2]
#oa = [3, 6, 7, 2, 2, 7, 6, 3]
oa = a
for i in reversed(a):
    oa.append(i)
oa

#oa = a
#for i in range()    # incomplete fragment in the original, left commented out

a = [3, 4, 6]
oa = list(a)
oa = a
print(a)
for i in range(1, len(a)+1):
    # print(a[-i])
    oa.append(a[-i])
oa

a + list(reversed(a))

a0 = 0
a = [2, 6, 3, 1, 4, 8, 3, 5, 5]
prev = a0
Inc = []
Dec = []
for i in a:
    Inc.append(i > prev)
    Dec.append(i < prev)
    prev = i
print(Inc)
print(Dec)
#Inc = [True, True, False, False, True, True, False, True, False]
#Dec = [False, False, True, True, False, False, True, False, False]
[True, True, False, False, True, True, False, True, False] [False, False, True, True, False, False, True, False, False]
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Generate random walk in an array
import random

N = 10
mu = 0
sig = 1
x = 0
a = [x]
for i in range(N):
    w = random.gauss(mu, sig)
    x = x + w
    a.append(x)
print(a)
len(a)
[0, 0.07173293905450501, -0.3652340160453349, -0.07610430577230803, -1.4172015782500376, -0.31469586619290335, -1.4458834127459201, -0.7189045208807692, 0.9895551731951309, 0.1012103597338051, -1.0353093339238497]
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
List Comprehension
N = 100
mu = 0
sig = 1
a = [random.gauss(mu, sig) for i in range(N)]
for i in range(len(a)-1):
    a[i+1] = a[i] + a[i+1]

%matplotlib inline
import matplotlib.pylab as plt
plt.plot(a)
plt.show()
/Users/cemgil/anaconda/envs/py27/lib/python2.7/site-packages/matplotlib/font_manager.py:273: UserWarning: Matplotlib is building the font cache using fc-list. This may take a moment. warnings.warn('Matplotlib is building the font cache using fc-list. This may take a moment.')
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Moving Average
# Window Length
W = 20
y = []
for i in range(len(a)):
    s = 0
    n = 0
    for j in range(W):
        if i-j < 0:
            break
        s = s + a[i-j]
        n = n + 1
    y.append(s/n)

plt.plot(a)
plt.plot(y)
plt.show()
_____no_output_____
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Moving average, second version
# Window Length
W = 20
y = []
s = 0
n = 0
for i in range(len(a)):
    s = s + a[i]
    if i >= W:
        s = s - a[i-W]
    else:
        n = n + 1
    y.append(s/n)

plt.plot(a)
plt.plot(y)
plt.show()

def mean(a):
    s = 0
    for x in a:
        s = s + x
    return s/float(len(a))

def var(a):
    mu = mean(a)
    s = 0
    for i in range(len(a)):
        s = s + (a[i]-mu)**2
    return float(s)/len(a)

a = [3, 4, 1, 2]
print(mean(a))
print(var(a))
2.5 1.25
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Mean and Variance, online calculation
def mean(a):
    mu = 0.0
    for i in range(len(a)):
        mu = i/(i+1.0)*mu + 1.0/(i+1.0)*a[i]
    return mu

a = [3, 4, 1, 2]
#print(a)
print(mean(a))
2.5
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Implement the recursive formula for the variance
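The cell below is scratch work from class and does not actually implement the recursive variance, so here is one possible solution sketch: a single-pass (Welford-style) recursion in the same spirit as the recursive mean above. The function name `online_mean_var` is ours.

```python
def online_mean_var(a):
    # Online (single-pass) recursion for the mean and the population
    # variance, analogous to the recursive mean implemented above.
    mu = 0.0
    M2 = 0.0
    for i in range(len(a)):
        delta = a[i] - mu
        mu = mu + delta/(i + 1.0)        # recursive mean update
        M2 = M2 + delta*(a[i] - mu)      # accumulated squared deviation
    return mu, M2/len(a)

a = [3, 4, 1, 2]
print(online_mean_var(a))   # (2.5, 1.25), matching mean(a) and var(a) above
```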
for i in range(1, len(a)+1):
    print(i)

a = [i**2 for i in range(10)]
a

st = 'if'
if st == 'if':
    print('if')
elif st == 'elif':
    print('elif')
else:
    print('not elif')

if x < 10 and x > 3:
    for i in range(10):
        if i % 2:
            continue
        print i

x
del x

from math import exp
import math as m
import math   # also imported plainly, since math.sin and math.pi are used below
x = 3
exp(x)
m.sin(x)

if x == 3:
    print 'x'

i = 0
while i < 10:
    i += 1
    print(i*10)

%matplotlib inline
import matplotlib.pylab as plt
x = [z/10. for z in range(-20, 21)]
x2 = [z**2 for z in x]
x3 = [z**3 for z in x]
sinx = [math.sin(z) for z in x]
plt.plot(x, x)
plt.plot(x, x2)
plt.plot(x, sinx)
plt.show()

import numpy as np
x = [z for z in np.arange(-2, 2, 0.1)]
x

x = [2, 3]
y = (2, 3)
x[0] = 3

y = (z for z in range(5))
for i in y:
    print(i)

import random
math.pi*(2*random.random()-1)

def unif():
    return 2*random.random() - 1

N = 100
# Generate N points in the square and store in a list
points = [[unif(), unif()] for i in range(N)]
# For each point check if it is in the circle and count the total number
# plot points in the circle as blue
# plot points outside as red
count = 0
px_in = []
py_in = []
px_out = []
py_out = []
for x in points:
    if x[0]**2 + x[1]**2 < 1:
        count += 1
        px_in.append(x[0])
        py_in.append(x[1])
    else:
        px_out.append(x[0])
        py_out.append(x[1])
print(4.0*count/N)
plt.plot(px_in, py_in, '.b')
plt.plot(px_out, py_out, '.r')
plt.show()

def unif():
    return 2*random.random() - 1

N = 1000000
count = 0
for i in range(N):
    x = [unif(), unif(), unif()]
    if x[0]**2 + x[1]**2 + x[2]**2 < 1:
        count += 1
print(8*float(count)/N)
print(4.0/3.0*math.pi)

for x in points:
    print(x)

y = [3, 4, 5]
x = ['a', 'b', 'd']
for u, v in zip(x, y):
    print(u, v)

import sys
sys.stdout

a = [3, 4, 5]
a = map(lambda x: x**2, a)
reduce(lambda a, b: a*b, a)

ln = 'twinkle tinkle little star'
ln.split()

lst = [1, 3, 4]
lst.insert(2, 4)
lst

import sys
N = int(raw_input().strip())
lst = []
for line in sys.stdin:
    tok = line.split()
    if tok[0] == 'insert':
        i = int(tok[1])
        val = int(tok[2])
        lst.insert(i, val)
    elif tok[0] == 'print':
        print(lst)
    elif tok[0] == 'remove':
        val = int(tok[1])
        lst.remove(val)
    elif tok[0] == 'append':
        val = int(tok[1])
        lst.append(val)
    elif tok[0] == 'sort':
        lst.sort()
    elif tok[0] == 'pop':
        lst.pop()
    elif tok[0] == 'reverse':
        lst.reverse()
    else:
        print('none')

p = raw_input('Enter Price ')
C = raw_input('Enter Capital ')
print 'Number of Items'
print float(C)/int(p)

c = {'A': 3, 'B': 7, 'C': [2,3], 'D': 'Adana'}
#print(c)
c['D']
_____no_output_____
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Catalog
def fun(x, par):
    print(x, par['volatility'])

params = {'volatility': 0.1, 'interest_rate': 0.08}
sig = params['volatility']
r = params['interest_rate']
fun(3, params)

plate = {'Istanbul': 34}
city = 'Istanbul'
print 'the number plate for', city, 'is', plate[city]

plate = {'Istanbul': 34, 'Adana': '01', 'Ankara': '06', 'Izmir': 35, 'Hannover': 'H'}
cities = ['Adana', 'Ankara', 'Istanbul', 'Izmir', 'Hannover']
for city in cities:
    print('the number plate for', city, 'is', plate[city])

for i in plate.keys():
    print i

plate.has_key('Balikesir')
plate['Eskisehir'] = 26
for i in sorted(plate.keys()):
    print i, plate[i]

students = {273: {'Name': 'Ali', 'Surname': 'Yasar', 'Gender': 'M'}}
students[395] = {'Name': 'Ayse', 'Surname': 'Oz', 'Gender': 'F'}
students[398] = {'Name': 'Ayse', 'Surname': 'Atik', 'Gender': 'F'}
students[112] = {'Name': 'Ahmet', 'Surname': 'Uz', 'Gender': 'M'}
students[450] = {'Name': 'Veli', 'Surname': 'Gez', 'Gender': 'M'}
students[451] = {'Name': 'Taylan', 'Surname': 'Cemgil', 'Gender': 'U'}

for i in students:
    if students[i]['Gender'] is 'F':
        print students[i]['Name']

counts = {'M': 0, 'F': 0}
for i in students:
    G = students[i]['Gender']
    if counts.has_key(G):
        counts[G] += 1
    else:
        counts[G] = 1
counts

counts = {}
for i in students:
    G = students[i]['Name']
    if counts.has_key(G):
        counts[G] += 1
    else:
        counts[G] = 1
counts
_____no_output_____
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Tuples (Immutable Arrays, no change possible after creation)
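A quick illustrative snippet (ours, not from the lecture) showing what "no change possible" means in practice: item assignment on a tuple raises a `TypeError`, while converting to a list makes the data mutable again.

```python
t = ('Ankara', '06')
try:
    t[1] = '07'            # tuples do not support item assignment
except TypeError as e:
    print('tuple is immutable:', e)

l = list(t)                # a list copy can be modified
l[1] = '07'
print(l)
```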
a = ('Ankara', '06')
a.count('Istanbul')

%matplotlib inline
import numpy as np
import matplotlib.pylab as plt
x = np.arange(-2, 2, 0.1)
plt.plot(x, x)
plt.plot(x, x**2)
plt.plot(x, np.sin(x))
plt.show()
_____no_output_____
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Numpy arrays versus matrices
A = np.random.rand(3,5)
x = np.random.rand(5,1)
print(A.dot(x))

A = np.mat(A)
x = np.mat(x)
print(A*x)

a = np.mat(np.random.rand(3,1))
b = np.mat(np.random.rand(3,1))
print(a)
print(b)
a.T*b

N = 1000
D = 3
X = np.random.rand(N, D)
mu = X.mean(axis=0, keepdims=True)
#print(mu)
print((X - mu).T.dot(X - mu)/(N-1.))
np.cov(X.T)

print(np.mat(np.arange(1,11)).T*np.mat(np.arange(1,11)))
[[ 1 2 3 4 5 6 7 8 9 10] [ 2 4 6 8 10 12 14 16 18 20] [ 3 6 9 12 15 18 21 24 27 30] [ 4 8 12 16 20 24 28 32 36 40] [ 5 10 15 20 25 30 35 40 45 50] [ 6 12 18 24 30 36 42 48 54 60] [ 7 14 21 28 35 42 49 56 63 70] [ 8 16 24 32 40 48 56 64 72 80] [ 9 18 27 36 45 54 63 72 81 90] [ 10 20 30 40 50 60 70 80 90 100]]
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Black-Scholes with Monte Carlo: Call and Put pricing. Use a catalog and numpy, and avoid using for loops.
import numpy as np

def European(Param, S0=1., T=1., Strike=1., N=10000):
    '''
    Price_Call, Price_Put = European(Param, S0, T, Strike, N)

    Param: Market parameters, a catalog with fields
      Param['InterestRate'] : Yearly risk free interest rate
      Param['Volatility']   :
    S0     : Initial asset price
    T      : Time Period (in Years)
    Strike : Strike Price
    N      : Number of Monte Carlo Samples
    '''
    W = np.sqrt(T)*np.random.standard_normal(N)
    ST = S0*np.exp(T*(Param['InterestRate'] - 0.5*Param['Volatility']**2) + Param['Volatility']*W)
    CT = np.maximum(ST - Strike, 0)
    PT = np.maximum(Strike - ST, 0)
    Price_C = CT.mean()*np.exp(-Param['InterestRate']*T)
    Price_P = PT.mean()*np.exp(-Param['InterestRate']*T)
    return Price_C, Price_P

def Lookback(Param, S0=1., T=1., Strike=1., Steps=12, N=10000):
    '''
    Price_Call, Price_Put = Lookback(Param, S0, T, Strike, Steps, N)

    Param: Market parameters, a catalog with fields
      Param['InterestRate'] : Yearly risk free interest rate
      Param['Volatility']   :
    S0     : Initial asset price
    T      : Time Period (in Years)
    Strike : Strike Price
    Steps  : Number of steps to monitor the stock price
    N      : Number of Monte Carlo Samples
    '''
    Tstep = T/Steps
    Smax = S0*np.ones(N)
    Smin = S0*np.ones(N)
    St = S0*np.ones(N)
    for t in range(Steps):
        W = np.sqrt(Tstep)*np.random.standard_normal(N)
        St = St*np.exp(Tstep*(Param['InterestRate'] - 0.5*Param['Volatility']**2) + Param['Volatility']*W)
        Smax = np.maximum(St, Smax)
        Smin = np.minimum(St, Smin)
    CT = np.maximum(Smax - Strike, 0)
    PT = np.maximum(Strike - Smin, 0)
    Price_C = CT.mean()*np.exp(-Param['InterestRate']*T)
    Price_P = PT.mean()*np.exp(-Param['InterestRate']*T)
    return Price_C, Price_P

def Asian(Param, S0=1., T=1., Strike=1., Steps=12, N=10000):
    '''
    Price_Call, Price_Put = Asian(Param, S0, T, Strike, Steps, N)

    Param: Market parameters, a catalog with fields
      Param['InterestRate'] : Yearly risk free interest rate
      Param['Volatility']   :
    S0     : Initial asset price
    T      : Time Period (in Years)
    Strike : Strike Price
    Steps  : Number of steps to monitor the stock price
    N      : Number of Monte Carlo Samples
    '''
    Tstep = T/Steps
    Smean = np.zeros(N)
    St = S0*np.ones(N)
    for t in range(Steps):
        W = np.sqrt(Tstep)*np.random.standard_normal(N)
        St = St*np.exp(Tstep*(Param['InterestRate'] - 0.5*Param['Volatility']**2) + Param['Volatility']*W)
        i = t + 1
        Smean = (i-1)*Smean/i + St/i
    CT = np.maximum(Smean - Strike, 0)
    PT = np.maximum(Strike - Smean, 0)
    Price_C = CT.mean()*np.exp(-Param['InterestRate']*T)
    Price_P = PT.mean()*np.exp(-Param['InterestRate']*T)
    return Price_C, Price_P

def FloatingLookback(Param, S0=1., T=1., Steps=12, N=10000):
    '''
    Price_Call, Price_Put = FloatingLookback(Param, S0, T, Steps, N)

    Param: Market parameters, a catalog with fields
      Param['InterestRate'] : Yearly risk free interest rate
      Param['Volatility']   :
    S0    : Initial asset price
    T     : Time Period (in Years)
    Steps : Number of steps to monitor the stock price
    N     : Number of Monte Carlo Samples
    '''
    Tstep = T/Steps
    Smax = S0*np.ones(N)
    Smin = S0*np.ones(N)
    St = S0*np.ones(N)
    for t in range(Steps):
        W = np.sqrt(Tstep)*np.random.standard_normal(N)
        St = St*np.exp(Tstep*(Param['InterestRate'] - 0.5*Param['Volatility']**2) + Param['Volatility']*W)
        Smax = np.maximum(St, Smax)
        Smin = np.minimum(St, Smin)
    CT = np.maximum(St - Smin, 0)
    PT = np.maximum(Smax - St, 0)
    Price_C = CT.mean()*np.exp(-Param['InterestRate']*T)
    Price_P = PT.mean()*np.exp(-Param['InterestRate']*T)
    return Price_C, Price_P

Param = {'Volatility': 0.25, 'InterestRate': 0.11}

Price_C, Price_P = European(Param, S0=100, T=1.0, Strike=100)
print 'European\nCall= ', Price_C, '\n', 'Put = ', Price_P

Price_C, Price_P = Asian(Param, S0=100, T=1.0, Strike=100, Steps=1000)
print 'Asian\nCall= ', Price_C, '\n', 'Put = ', Price_P

Price_C, Price_P = Lookback(Param, S0=100, T=1.0, Strike=100, Steps=1000)
print 'Lookback\nCall= ', Price_C, '\n', 'Put = ', Price_P

Price_C, Price_P = FloatingLookback(Param, S0=100, T=1.0, Steps=1000)
print 'FloatingLookback\nCall= ', Price_C, '\n', 'Put = ', Price_P
European Call= 15.5726197769 Put = 5.13556380233 Asian Call= 8.16477817074 Put = 3.17271035914 Lookback Call= 25.6819276647 Put = 12.5838549789 FloatingLookback Call= 23.0385882044 Put = 15.3296952253
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Next week Assignment: Consolidate all pricing methods into a single function, avoiding code repetition.
def OptionPricer(type_of_option, Params):
    '''
    Price_Call, Price_Put = OptionPricer(type_of_option, Param, S0, T, Strike, Steps, N)

    type_of_option = 'European', 'Asian', 'Lookback', 'FloatingLookback'

    Param: Parameter catalog with fields
      Param['InterestRate'] : Yearly risk free interest rate (default: 0.11)
      Param['Volatility']   : scalar or array of length steps (default: 0.11)
      Param['S0']     : Initial asset price (default: 1)
      Param['T']      : Time Period (in Years) (default: 1)
      Param['Strike'] : Strike Price (default: 1)
      Param['Steps']  : Number of steps to monitor the stock price (default: 12)
      Param['N']      : Number of Monte Carlo Samples (default: 1000)
    '''

# Some test cases
par = {'Volatility': [0.01, 0.01, 0.01, 0.03, 0.03], 'InterestRate': 0.11}
OptionPricer('Asian', par)
_____no_output_____
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
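Before moving on, here is one hedged sketch of how the consolidation requested above could look; it is not the official solution. The idea is a single simulation loop that tracks the running maximum, minimum and mean of the path, from which all four payoffs can be formed. Defaults follow the docstring; letting `Steps` default to the length of a vector volatility is our own assumption so that the test case with five volatilities runs.

```python
import numpy as np

def OptionPricer(type_of_option, Param):
    # Sketch of a consolidated pricer (an assumption, not the course solution).
    r = Param.get('InterestRate', 0.11)
    sig = np.atleast_1d(np.asarray(Param.get('Volatility', 0.11), dtype=float))
    S0 = Param.get('S0', 1.0)
    T = Param.get('T', 1.0)
    K = Param.get('Strike', 1.0)
    Steps = Param.get('Steps', sig.size if sig.size > 1 else 12)
    N = Param.get('N', 1000)
    if sig.size == 1:
        sig = np.repeat(sig, Steps)
    Tstep = float(T)/Steps
    St = S0*np.ones(N)
    Smax, Smin, Smean = St.copy(), St.copy(), np.zeros(N)
    for t in range(Steps):
        W = np.sqrt(Tstep)*np.random.standard_normal(N)
        St = St*np.exp(Tstep*(r - 0.5*sig[t]**2) + sig[t]*W)
        Smax = np.maximum(St, Smax)
        Smin = np.minimum(St, Smin)
        Smean = t*Smean/(t + 1.0) + St/(t + 1.0)   # running mean of the path
    payoffs = {'European':         (St - K,    K - St),
               'Asian':            (Smean - K, K - Smean),
               'Lookback':         (Smax - K,  K - Smin),
               'FloatingLookback': (St - Smin, Smax - St)}
    CT, PT = payoffs[type_of_option]
    disc = np.exp(-r*T)
    return np.maximum(CT, 0).mean()*disc, np.maximum(PT, 0).mean()*disc

par = {'Volatility': [0.01, 0.01, 0.01, 0.03, 0.03], 'InterestRate': 0.11}
print(OptionPricer('Asian', par))
```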
Next week: Kalman Filtering (Learn Numpy and matplotlib)
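As a preview, here is a minimal linear Kalman filter sketch in numpy built around the same rotation-matrix dynamics used in the cell below. The observation model, noise covariances and initial state are illustrative assumptions, not course material.

```python
import numpy as np

th = 0.5
A = np.array([[np.cos(th), np.sin(th)], [-np.sin(th), np.cos(th)]])  # state transition
H = np.array([[1.0, 0.0]])    # observe only the first state component (assumption)
Q = 0.01*np.eye(2)            # process noise covariance (assumption)
R = np.array([[0.1]])         # measurement noise covariance (assumption)

x_true = np.array([[1.0], [0.0]])   # simulated true state
x = np.array([[1.0], [0.0]])        # filter estimate
P = np.eye(2)                       # estimate covariance

for t in range(10):
    # simulate the system and a noisy scalar measurement
    x_true = A.dot(x_true) + 0.1*np.random.randn(2, 1)
    z = H.dot(x_true) + np.sqrt(R)*np.random.randn(1, 1)
    # predict
    x = A.dot(x)
    P = A.dot(P).dot(A.T) + Q
    # update
    S = H.dot(P).dot(H.T) + R
    K = P.dot(H.T).dot(np.linalg.inv(S))
    x = x + K.dot(z - H.dot(x))
    P = (np.eye(2) - K.dot(H)).dot(P)
    print(t, x.ravel())
```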
th = 0.5
A = np.mat([[np.cos(th), np.sin(th)], [-np.sin(th), np.cos(th)]])
x = np.mat([1, 0]).T
x = np.mat([[1], [0]])
for t in range(10):
    x = A*x + 0*np.random.randn(2, 1)
print(x)

name = raw_input("What is your name? ")
print name

def fun(x):
    print x,

x = map(fun, range(1, 10+1))
x = map(fun, range(1, 10+1))

def myfun(x, y):
    return x*y

def myfun2(x, y):
    return x+y

def funn(f, x, y):
    return f(x, y)

print funn(myfun2, 3, 5)

l = [1, 2, 3, 4]
def power(x):
    return 2**x
r = sum(map(power, l))
print(r)

s = 'insert 0 5'
u = s.split(' ')
print(u)

l = [1, 2, 3]
l.pop    # note: missing parentheses, this only references the method

N = int(raw_input())
l = []
for i in range(N):
    s = raw_input()
    items = s.split(' ')
    cmd = items[0]
    if cmd == 'insert':
        pos = int(items[1])
        num = int(items[2])
        l.insert(pos, num)
    if cmd == 'print':
        print l
    if cmd == 'remove':
        num = int(items[1])
        l.remove(num)
    if cmd == 'append':
        num = int(items[1])
        l.append(num)
    if cmd == 'sort':
        l.sort()
    if cmd == 'pop':
        l.pop()
    if cmd == 'reverse':
        l.reverse()

N = 2
l = '1 2'.split(' ')
l2 = [int(i) for i in l[0:N+1]]
c = tuple(l2)
print hash(c)

48*21
_____no_output_____
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Importing classes from the `tcalc` module
from TCalc.tcalc import eyepiece, telescope, barlow_lens, focal_reducer
_____no_output_____
MIT
docs/tutorials/TCalc_tutorial.ipynb
Bhavesh012/Telescope-Calculator
To quickly access the docstring, run `help(classname)`
help(eyepiece)
Help on class eyepiece in module TCalc.tcalc: class eyepiece(builtins.object) | eyepiece(f_e, fov_e=50) | | Class representing a single eyepiece | Args: | f_e: focal length of the eyepiece (mm) | fov_e: field of view of the eyepiece (deg). Defaults to 50 degrees. | | Methods defined here: | | __init__(self, f_e, fov_e=50) | Initialize self. See help(type(self)) for accurate signature. | | ---------------------------------------------------------------------- | Data descriptors defined here: | | __dict__ | dictionary for instance variables (if defined) | | __weakref__ | list of weak references to the object (if defined)
MIT
docs/tutorials/TCalc_tutorial.ipynb
Bhavesh012/Telescope-Calculator
As an example, let's estimate the specifications of Celestron's 8 SE telescope.
c8 = telescope(D_o=203.2, f_o=2032, user_D_eye=None, user_age=22)  # adding configuration of 8in scope

omni_40 = eyepiece(40, 52)  # defining 40 mm eyepiece
omni_25 = eyepiece(25, 52)  # defining 25 mm eyepiece

# adding eyepieces to the telescope
c8.add_eyepiece(omni_40, id='omni_40', select=True)
c8.add_eyepiece(omni_25, id='omni_25', select=True)

# listing all the added eyepieces in a table format
c8.list_eyepiece()

# listing overall configuration of the telescope
c8.say_configuration()  # remember this with 25 mm eyepiece

# selecting a different eyepiece
c8.select_eyepiece('omni_40')
c8.say_configuration()

# calling individual functions
c8._compute_focal_ratio()

# adding additional optical parts
reducer = focal_reducer(.5)  # defining focal reducer of 0.5x
barlow = barlow_lens(2)      # defining barlow lens of 2x

c8.add_optic(reducer, 'reducer 1', select=True)  # adding reducer to the telescope
c8.add_optic(barlow, 'barlow 1', select=False)   # adding barlow to the telescope

# if the magnification limits are reached, a warning will be printed
c8.add_optic(reducer, 'reducer 1', select=False)
c8.add_optic(barlow, 'barlow 1', select=True)

# printing configuration again with barlow lens
c8.say_configuration()
The telescope has the following layout:
Aperture diameter: 203.2 mm
Focal length: 2032 mm, corresponding to a focal ratio of 10.0

'barlow 1', a Barlow lens, has been added to the optical path. This increases the focal length by 2
This results in
Focal length: 4064 mm, corresponding to a focal ratio of 20.0

In good atmospheric conditions, the resolution of the telescope (Dawes limit) is 0.6 arcseconds
By wavelength, the resolution is
400 nm (blue): 0.5 arcsec
550 nm (green): 0.7 arcsec
700 nm (red): 0.9 arcsec

The maximum possible magnification factor is 406.4
This means the minimum compatible eyepiece focal length is 10.0 mm
The minimum magnification factor and corresponding maximum eyepiece focal length depend on the diameter of the observer's eye.
For a telescope user with an eye diameter of 7 mm (apropriate for an age around 25 years):
The minimum magnification factor is 29.0
This means the maximum compatible eyepiece focal length is 406.4 mm

The faintest star that can be seen by this telescope is 13.5 mag

The currently selected eyepiece is 'omni_40', which has the following layout:
Focal length: 40 mm
Field of view: 52 degrees

With this eyepiece:
The magnification factor is 101.6. This is compatible with the telescope limits.
The true field of view is 1 degrees
The exit pupil diameter is 2.0 mm
The faintest surface brightness that can be seen by this telescope is 8.00
MIT
docs/tutorials/TCalc_tutorial.ipynb
Bhavesh012/Telescope-Calculator
You can notice that if you use a *2x Barlow lens* on a *40mm eyepiece*, the brightness of the object is decreased by **4 times!** This way you can simulate different scenarios and find out which accessories are optimal for your purpose. This will save you both time and money on costly accessories! For advanced users, the plot functionality provides plots of `resolution performance`, `magnification_limits` and `eyepiece_limits`.
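A quick back-of-the-envelope check of that factor of 4 (our own snippet), assuming surface brightness scales with the square of the exit pupil diameter, where exit pupil = eyepiece focal length / telescope focal ratio:

```python
# The 2x Barlow doubles the focal length, so the focal ratio goes f/10 -> f/20
# and the exit pupil of the 40 mm eyepiece is halved (4.0 mm -> 2.0 mm, matching
# the configuration printout above). Brightness ~ exit_pupil**2, so it drops 4x.
f_e = 40.0                        # eyepiece focal length (mm)
exit_pupil_plain = f_e / 10.0     # without the Barlow
exit_pupil_barlow = f_e / 20.0    # with the 2x Barlow
print((exit_pupil_plain / exit_pupil_barlow)**2)   # -> 4.0
```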
c8.show_resolving_power()
c8.show_magnification_limits()
c8.show_eyepiece_limits()
_____no_output_____
MIT
docs/tutorials/TCalc_tutorial.ipynb
Bhavesh012/Telescope-Calculator
Week 6 - SMM695

Matteo Devigili

June 28th, 2021

[_PySpark_](https://spark.apache.org/docs/latest/api/python/index.html): during this lecture, we will approach Spark through Python

**Agenda**:
1. Introduction to Spark
1. Installing PySpark
1. PySpark Basics
1. PySpark and Pandas
1. PySpark and SQL
1. Load data from your DBMS

Introduction to Spark

**Big Data Challenge**:
* Cost of storing data has dropped
* The need for parallel computation has increased

![IBM Blue Gene\L](https://www.ibm.com/ibm/history/ibm100/images/icp/U225116Q82800V30/us__en_us__ibm100__blue_gene__man_next_blue_gene__620x350.jpg)

**Note**: [IBM Blue Gen\L](https://www.ibm.com/ibm/history/ibm100/us/en/icons/bluegene/)

**What is [Apache Spark](https://spark.apache.org)**?

> "Apache Spark is a unified computing engine and a set of libraries for parallel data processing on computer clusters" [Chambers and Zaharia 2018](references)

**Programming Languages Supported**:

**Spark's philosophy**:
* *Unified*: Spark offers a large variety of data analytics tools
* *Computing Engine*: Spark focuses on computing, not on storage
* *Libraries*: Spark has different libraries to perform several tasks

**Apache Spark Libraries**:
* *Spark SQL*
* *Spark Streaming*
* *Spark MLlib*
* *Spark GraphX*

[Third-party projects](https://spark.apache.org/third-party-projects.html)

**Spark Application**:

| Component | Role |
|----|----|
| *Spark Driver* | Execute user-defined tasks |
| *Cluster Manager* | Manage worker nodes |
| *Executors* | Execute tasks |

**From Python to Spark code and back**:

![The relationship between the SparkSession and Spark's Language API](https://www.oreilly.com/library/view/spark-the-definitive/9781491912201/assets/spdg_0202.png)

Source: _Bill Chambers, Matei Zaharia 2018_ (p. 23)

Installing PySpark

There are several ways to set up PySpark on your local machine. Here, two methods are discussed:

* Pure-python users:
```python
pip install pyspark
```
* Conda users:
```python
conda install pyspark
```

Further info at the [Spark Download page](https://spark.apache.org/downloads.html).

Requirements

Pay attention to the following:

> Spark runs on Java 8/11

Check the Java version running on your machine by typing the following in your terminal:

```python
java -version
```

If you are running a different Java version, install Java 8/11! Check out [Spark Downloading info](https://spark.apache.org/docs/latest/downloading).

PySpark - Basics

Libraries
# to create a spark session object
from pyspark.sql import SparkSession
# functions
import pyspark.sql.functions as F
# data types
from pyspark.sql.types import *
# import datetime
from datetime import date as dt
_____no_output_____
MIT
week-6/sc_6.ipynb
mattDevigili/dms-smm695
* More info on **Functions** at these [link-1](https://spark.apache.org/docs/latest/api/python/pyspark.sql.htmlmodule-pyspark.sql.functions) & [link-2](https://spark.apache.org/docs/2.3.0/api/sql/index.htmlyear)
* More info on **Data Types** at this [link](https://spark.apache.org/docs/latest/sql-ref-datatypes.html)

Opening a Session

The **SparkSession** is a driver process that enables:
* to control our Spark Application
* to execute user-defined manipulations

Check this [link](https://spark.apache.org/docs/latest/api/python/pyspark.sql.htmlpyspark.sql.SparkSession) for further reference.
# to open a Session
spark = SparkSession.builder.appName('last_dance').getOrCreate()
_____no_output_____
MIT
week-6/sc_6.ipynb
mattDevigili/dms-smm695
**Spark UI**

The Spark UI is useful to monitor your application. You have the following tabs:
* *Jobs*: info concerning Spark jobs
* *Stages*: info on individual stages and their tasks
* *Storage*: info on data that is currently in our spark application
* *Environment*: info on configurations and current settings of our application
* *Executors*: info on the executors that run our application
* *SQL*: refers to both SQL and DataFrames
spark
_____no_output_____
MIT
week-6/sc_6.ipynb
mattDevigili/dms-smm695
Create Dataframe

In order to create a dataframe from scratch, we need to:
1. Create a schema, passing:
   * Column names
   * Data types
1. Pass values as an array of tuples
# Here, I define a schema
# .add(field, data_type=None, nullable=True, metadata=None)
schema = StructType().add("id", "integer", True).add("first_name", "string", True).add(
    "last_name", "string", True).add("dob", "date", True)

'''
schema = StructType().add("id", IntegerType(), True).add("first_name", StringType(), True).add(
    "last_name", StringType(), True).add("dob", DateType(), True)
'''

# Then, I can pass some values
df = spark.createDataFrame([(1, 'Michael', "Jordan", dt(1963, 2, 17)),
                            (2, 'Scottie', "Pippen", dt(1965, 9, 25)),
                            (3, 'Dennis', "Rodman", dt(1961, 5, 16))],
                           schema=schema)

# Let's explore the schema structure
df.printSchema()

# We can also leverage functions to create a new column
df = df.withColumn('age', F.year(F.current_date()) - F.year(df.dob))
df.show()
_____no_output_____
MIT
week-6/sc_6.ipynb
mattDevigili/dms-smm695
**Transformations**
* Immutability: once created, data structures can not be changed
* Lazy evaluation: computational instructions will be executed only at the very last moment

**Actions**
* view data
* collect data
* write to output data sources

A small sketch below illustrates the difference between a transformation and an action.

PySpark and Pandas

Load a csv

To load a csv file from your computer, you type:
* Pandas: `db = pd.read_csv('path/to/movies.csv')`
* PySpark: `df = spark.read.csv('path/to/movies.csv', header=True, inferSchema=True)`

Here, we will import a csv directly from GitHub. Data are provided by [FiveThirtyEight](https://github.com/fivethirtyeight)

[](https://fivethirtyeight.com/features/the-dollar-and-cents-case-against-hollywoods-exclusion-of-women/)
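A small sketch of the transformation/action distinction, assuming the movies DataFrame `df` loaded in the next cell (the column names `year`, `domgross` and `budget` come from that csv): transformations only build an execution plan, and an action such as `count()` or `show()` triggers the actual computation.

```python
# Transformation: lazily builds a plan, nothing is computed yet
recent = df.filter(df.year > 2000).withColumn('ratio', df.domgross / df.budget)

# Actions: these materialize the plan on the executors
print(recent.count())
recent.show(3)
```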
# import pandas
import pandas as pd
# import SparkFiles
from pyspark import SparkFiles

# target dataset
url = 'https://raw.githubusercontent.com/fivethirtyeight/data/master/bechdel/movies.csv'

# loading data with pandas
db = pd.read_csv(url)

# loading data with pyspark
spark.sparkContext.addFile(url)
df = spark.read.csv(SparkFiles.get('movies.csv'), header=True, inferSchema=True)
_____no_output_____
MIT
week-6/sc_6.ipynb
mattDevigili/dms-smm695
Inspecting dataframes
# pandas info
db.info()
# pyspark schema
df.printSchema()

# pandas fetch 5
db.head(5)
# pyspark fetch 5
df.show(5)
df.take(5)

# pandas filtering:
db[db.year == 1970]
# pyspark filtering:
df[df.year == 1970].show()

# get columns and data types
print("""
Pandas db.columns:
===================
{}

PySpark df.columns:
===================
{}

Pandas db.dtypes:
===================
{}

PySpark df.dtypes:
===================
{}
""".format(db.columns, df.columns, db.dtypes, df.dtypes), flush=True)
_____no_output_____
MIT
week-6/sc_6.ipynb
mattDevigili/dms-smm695
Columns
# pandas add a column
db['newcol'] = db.domgross/db.intgross
# pyspark add a column
df = df.withColumn('newcol', df.domgross/df.intgross)

# pandas rename columns
db.rename(columns={'newcol': 'dgs/igs'}, inplace=True)
# pyspark rename columns
df = df.withColumnRenamed('newcol', 'dgs/igs')
_____no_output_____
MIT
week-6/sc_6.ipynb
mattDevigili/dms-smm695
Drop
# pandas drop `code' column
db.drop('code', axis=1, inplace=True)
# pyspark drop `code' column
df = df.drop('code')

# pandas dropna()
db.dropna(subset=['domgross'], inplace=True)
# pyspark dropna()
df = df.dropna(subset='domgross')
_____no_output_____
MIT
week-6/sc_6.ipynb
mattDevigili/dms-smm695
Stats
# pandas describe
db.describe()
# pyspark describe
df.describe(['year', 'budget']).show()
_____no_output_____
MIT
week-6/sc_6.ipynb
mattDevigili/dms-smm695
Pyspark and SQL
# pyspark rename 'budget_2013$'
df = df.withColumnRenamed('budget_2013$', 'budget_2013')

# Create a temporary table
df.createOrReplaceTempView('bechdel')

# Run a simple SQL command
sql = spark.sql("""SELECT imdb, year, title, budget FROM bechdel LIMIT(5)""")
sql.show()

# AVG budget differences
sql_avg = spark.sql(
"""
SELECT binary, COUNT(*) AS count,
       format_number(AVG(budget),2) AS avg_budget,
       format_number((SELECT AVG(budget) FROM bechdel),2) AS avg_budget_samp,
       format_number(AVG(budget_2013),2) AS avg_budget2013,
       format_number((SELECT AVG(budget_2013) FROM bechdel),2) AS avg_budget2013_samp
FROM bechdel
GROUP BY binary
"""
)
sql_avg.show()
_____no_output_____
MIT
week-6/sc_6.ipynb
mattDevigili/dms-smm695
Load data from DBMS

To run the following, you need to restart the notebook.
# to create a spark session object
from pyspark.sql import SparkSession
_____no_output_____
MIT
week-6/sc_6.ipynb
mattDevigili/dms-smm695
PostgreSQL

To interact with PostgreSQL you need to:
* Download the *postgresql-42.2.22.jar* file [here](https://jdbc.postgresql.org/download.html)
* Include the path to the downloaded jar file in SparkSession()
# Open a session reading data from PostgreSQL
spark_postgre = SparkSession \
    .builder \
    .appName("last_dance_postgre") \
    .config("spark.jars", "/Users/matteodevigili/py3venv/dms695/share/py4j/postgresql-42.2.22.jar") \
    .getOrCreate()

spark_postgre

# Read data from PostgreSQL running at localhost
df = spark_postgre.read \
    .format("jdbc") \
    .option("url", "jdbc:postgresql://localhost:5432/pagila") \
    .option("dbtable", "film") \
    .option("user", "dms695") \
    .option("password", "smm695") \
    .option("driver", "org.postgresql.Driver") \
    .load()

df.printSchema()

# get some stats
df.describe(['release_year', 'rental_rate', 'rental_duration']).show()

# Create a temporary table
df.createOrReplaceTempView('film')

# Run a simple SQL command
sql = spark_postgre.sql("""SELECT title, release_year, length, rating FROM film LIMIT(1)""")
sql.show()
_____no_output_____
MIT
week-6/sc_6.ipynb
mattDevigili/dms-smm695
MongoDB For further reference check the [Python Guide provided by Mongo](https://docs.mongodb.com/spark-connector/current/python-api/) or the [website for the mongo-spark connector](https://spark-packages.org/package/mongodb/mongo-spark).
# add path to Mongo
spark_mongo = SparkSession \
    .builder \
    .appName("last_dance_mongo") \
    .config("spark.mongodb.input.uri", "mongodb://127.0.0.1/amazon.music") \
    .config("spark.mongodb.output.uri", "mongodb://127.0.0.1/amazon.music") \
    .config('spark.jars.packages', 'org.mongodb.spark:mongo-spark-connector_2.12:3.0.1') \
    .getOrCreate()

spark_mongo

# load data from MongoDB
df = spark_mongo.read.format("mongo").load()
df.printSchema()

# get some stats
df.describe(['overall', 'unixReviewTime']).show()

# Create a temporary table
df.createOrReplaceTempView('music')

# Run a simple SQL command
sql = spark_mongo.sql("""SELECT asin, date, helpful, overall, unixReviewTime FROM music LIMIT(1)""")
sql.show()
_____no_output_____
MIT
week-6/sc_6.ipynb
mattDevigili/dms-smm695
______
__Universidad Tecnológica Nacional, Buenos Aires__\
__Industrial Engineering__\
__Operations Research Chair__\
__Author: Rodrigo Maranzana__
______

Exercise 3

A sales agent works in three cities: A, B and C. To avoid unnecessary travel he stays in the same city all day and spends the night there, moving to another city the next day if he does not have enough work. After working one day in C, the probability of having to keep working there the next day is 0.4, the probability of having to travel to B is 0.4, and the probability of going to A is 0.2. If the agent sleeps one night in B, with 20% probability he will have to keep working in the same city the next day, in 60% of the cases he will travel to C, and he will go to A with probability 0.2. Finally, if the agent works a whole day in A, he will stay in the same city the next day with probability 0.1, go to B with probability 0.3, and go to C with probability 0.6.

* Exercise A: If the agent is in C today, what is the probability that he will also have to work in C four days from now?
* Exercise B: What percentage of days does the agent spend in each of the three cities?

Table of contents
1  Initial data
2  Exercise A
2.1  Alternative solution
3  Exercise B
3.1  Alternative: using a non-square matrix
3.2  Auxiliary calculation: starting directly from the transition matrix

Initial data

We import the required libraries.
import numpy as np
_____no_output_____
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
We load the transition matrix data into a numpy array:
# Transition matrix as a numpy array:
T = np.array([[0.1, 0.3, 0.6],
              [0.2, 0.2, 0.6],
              [0.2, 0.4, 0.4]])

# Print T
print(f'Transition matrix: \n{T}')
Transition matrix: 
[[0.1 0.3 0.6]
 [0.2 0.2 0.6]
 [0.2 0.4 0.4]]
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
Exercise A

First, we compute the transition matrix after 4 days have passed: we raise the matrix to the fourth power using the matrix power method from numpy's linear algebra module.
# Transition matrix at time 4:
T4 = np.linalg.matrix_power(T, 4)

# print the 4-step transition matrix:
print(f'Transition matrix at time 4: \n{T4}\n')
Transition matrix at time 4: 
[[0.1819 0.3189 0.4992]
 [0.1818 0.319  0.4992]
 [0.1818 0.3174 0.5008]]
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
Knowing that $p_0$ places the agent at node C:

$ p_0 = (0, 0, 1) $
# Initial state vector p_0:
p_0 = np.array([0, 0, 1])

# print the initial vector:
print(f'State vector at time 0: \n{p_0}\n')
State vector at time 0: 
[0 0 1]
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
We compute: $ p_0 T^4 = p_4 $
# State at time 4, p_4:
p_4 = np.dot(p_0, T4)

# print p_4:
print(f'State vector at time 4: \n{p_4}\n')
State vector at time 4: 
[0.1818 0.3174 0.5008]
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
Given the vector $ p_4 $, we take the component corresponding to state C.
# Component for node C:
p_4_c = p_4[2]

# print the result:
print(f'Probability of being in C having started in C: \n{p_4_c}\n')
Probability of being in C having started in C: 
0.5008
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
Alternative solution: The result is the same if we note that the component ${T^4}_{cc}$ is the transition probability from node C back to the same node after 4 steps have passed. Let's see how that component is obtained:
# Component cc of the transition matrix at time 4:
T4cc = T4[2,2]
print('\n ** Probability of being in C having started in C: \n %.5f' % T4cc)
** Probability of being in C having started in C: 
 0.50080
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
Exercise B

Given a matrix $A$ obtained from the system of equations that solves $\pi T = \pi$
# Matrix A:
A = np.array([[-0.9, 0.2, 0.2],
              [ 0.3, -0.8, 0.4],
              [ 0.6, 0.6, -0.6],
              [1, 1, 1]])

# Print A:
print(f'Matrix associated with the linear system of equations: \n{A}')
Matrix associated with the linear system of equations: 
[[-0.9  0.2  0.2]
 [ 0.3 -0.8  0.4]
 [ 0.6  0.6 -0.6]
 [ 1.   1.   1. ]]
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
And given a vector $B$ containing the independent terms of the system of equations mentioned above.
# Vector B:
B = np.array([0, 0, 0, 1])

# Print B:
print(f'Vector of independent terms: \n{B}')
Vector of independent terms: 
[0 0 0 1]
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
Since numpy's solver only accepts square linear systems, because of the algorithm it uses [1], we must remove one of the rows (any of them) of the homogeneous matrix and keep the row corresponding to the equation $ \sum_i{\pi_i} = 1$. We do the same for the vector of independent terms B. To do this we use numpy's delete method, indicating the position to remove and the axis it belongs to.

[1] https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.linalg.solve.html
# Copy the original matrix A so it is not modified.
A_s = A.copy()

# Remove the first row of matrix A:
A_s = np.delete(A_s, 0, 0)

# Print:
print(f'Matrix associated with the linear system of equations: \n{A_s}')
print(f'\n -> Dimension: {A_s.shape}')

# Copy the original vector B so it is not modified.
B_s = B.copy()

# Remove the first component of vector B:
B_s = np.delete(B_s, 0, 0)
print(f'\nVector of independent terms: \n{B_s}')
print(f'\n -> Dimension: {B_s.shape}')
Vector of independent terms: 
[0 0 1]

 -> Dimension: (3,)
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
Now that the system is square, we use numpy's solve method to obtain $x$ from the system $Ax = B$
x = np.linalg.solve(A_s, B_s)
print('\n ** Steady-state solution vector: \n %s' % x)
** Steady-state solution vector: 
 [0.18181818 0.31818182 0.5       ]
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
Alternative: using a non-square matrix

As explained above, we cannot use the $solve$ method on non-square matrices. Instead, we can use the least squares method to approximate the solution [2]. This method has no restrictions on the dimensions of the matrix. The derivation of the method is not part of this course; it belongs to Numerical Analysis and Advanced Calculus.

[2] https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.lstsq.html
x_lstsq, _, _, _ = np.linalg.lstsq(A, B, rcond=None)
print('\n ** Steady-state solution vector: \n %s' % x_lstsq)
** Steady-state solution vector: 
 [0.18181818 0.31818182 0.5       ]
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
Auxiliary calculation: starting directly from the transition matrix

In the original solution we used a matrix A associated with the linear system of equations that we solved by hand. Now we will see another way to reach the solution using only the given data and matrix manipulation.

Starting from the original system: $\pi T = \pi$

Rearranging for $\pi$ we obtain:

$(T^T - I) \pi^T = 0 $

We can map this onto the notation used above for consistency:

$A = (T^T - I)$
$X = \pi^T$
$B = 0$

Therefore, we arrive at the same expression $Ax = B$. So, we start by computing: $A = (T^T - I)$
# First compute the transpose of the transition matrix:
Tt = np.transpose(T)
print(f'\nT transposed: \n{Tt}')

# Then compute the matrix A: the transpose of T minus the identity.
A1 = Tt - np.identity(Tt.shape[0])
print(f'\nMatrix A: \n{A1}')
T transposed: 
[[0.1 0.2 0.2]
 [0.3 0.2 0.4]
 [0.6 0.6 0.4]]

Matrix A: 
[[-0.9  0.2  0.2]
 [ 0.3 -0.8  0.4]
 [ 0.6  0.6 -0.6]]
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
We continue with: $B = 0$
# The vector B is a vector of zeros:
B1 = np.zeros(3)
print(f'\nVector B: \n{B1}')
Vector B: 
[0. 0. 0.]
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
From here, we simply apply the method we already know. We append the row corresponding to $\sum_i{\pi_i} = 1$.
# Copy the original matrix A1 so it is not modified.
A1_s = A1.copy()

# Add the probability-sum equation to matrix A
eq_suma_p = np.array([[1, 1, 1]])
A1_s = np.concatenate((A1_s, eq_suma_p), axis=0)

# Print:
print(f'Matrix A: \n{A1_s}')

# Copy the original vector B1 so it is not modified.
B1_s = B1.copy()

# Append 1 to vector B:
B1_s = np.append(B1_s, 1)

# Print:
print(f'\nVector B: \n{B1_s}')
Vector B: 
[0. 0. 0. 1.]
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
We solve by least squares:
# Solve with the least squares method:
x_lstsq, _, _, _ = np.linalg.lstsq(A1_s, B1_s, rcond=None)

# Print the solution:
print(f'\nSteady-state solution vector: {x_lstsq}')
Steady-state solution vector: [0.18181818 0.31818182 0.5       ]
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
Wavefront set inpainting, real phantom

In this notebook we implement a wavefront set inpainting algorithm based on a hallucination network.
%matplotlib inline
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# Import the needed modules
from data.data_factory import generate_realphantom_WFinpaint, DataGenerator_realphantom_WFinpaint
from ellipse.ellipseWF_factory import plot_WF

import matplotlib.pyplot as plt
import numpy.random as rnd
import numpy as np
import odl
/store/kepler/datastore/andrade/GitHub_repos/Joint_CTWF_Recon/WF_inpaint/data/data_factory.py:7: UserWarning: This call to matplotlib.use() has no effect because the backend has already been chosen; matplotlib.use() must be called *before* pylab, matplotlib.pyplot, or matplotlib.backends is imported for the first time.
  matplotlib.use('Agg')
/store/kepler/datastore/andrade/GitHub_repos/Joint_CTWF_Recon/WF_inpaint/ellipse/ellipseWF_factory.py:9: UserWarning: This call to matplotlib.use() has no effect because the backend has already been chosen; matplotlib.use() must be called *before* pylab, matplotlib.pyplot, or matplotlib.backends is imported for the first time.
  matplotlib.use('Agg')
MIT
WF_inpaint/WF_inpaint_realphantom_unet_train.ipynb
arsenal9971/DeeMicrolocalReconstruction
Data generator
batch_size = 1
size = 256
nClasses = 180
lowd = 40

y_arr, x_true_arr = generate_realphantom_WFinpaint(batch_size, size, nClasses, lowd)

plt.figure(figsize=(6,6))
plt.axis('off')
plot_WF(y_arr[0,:,:,0])

plt.figure(figsize=(6,6))
plt.axis('off')
plot_WF(x_true_arr[0,:,:,0])
_____no_output_____
MIT
WF_inpaint/WF_inpaint_realphantom_unet_train.ipynb
arsenal9971/DeeMicrolocalReconstruction
Load the model
# Tensorflow and seed
seed_value = 0
import random
random.seed(seed_value)
import tensorflow as tf
tf.set_random_seed(seed_value)

# Importing relevant keras modules
from tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger
from tensorflow.keras.models import load_model
from shared.shared import create_increasing_dir
import pickle

# Import model and custom losses
from models.unet import UNet
from models.losses import CUSTOM_OBJECTS

# Parameters for the training
learning_rate = 1e-3
loss = 'mae'
batch_size = 50
epoches = 10000
pretrained = 1
path_to_model_dir = './models/unets_realphantom_WFinpaint/training_5'

# Data generator
size = 256
nClasses = 180
lowd = 40
train_gen = DataGenerator_realphantom_WFinpaint(batch_size, size, nClasses, lowd)
val_gen = DataGenerator_realphantom_WFinpaint(batch_size, size, nClasses, lowd)

if pretrained == 0:
    # Create a fresh model
    print("Create a fresh model")
    unet = UNet()
    model = unet.create_model(img_shape=(size, size, 1), loss=loss, learning_rate=learning_rate)
    path_to_training = create_increasing_dir('./models/unets_realphantom_WFinpaint', 'training')
    print("Save training in {}".format(path_to_training))
    path_to_model_dir = path_to_training
else:
    print("Use trained model as initialization:")
    print(path_to_model_dir + "/weights.hdf5")
    model = load_model(path_to_model_dir + "/weights.hdf5", custom_objects=CUSTOM_OBJECTS)
    path_to_training = path_to_model_dir

# Callbacks for saving model
context = {
    "loss": loss,
    "batch_size": batch_size,
    "learning_rate": learning_rate,
    "path_to_model_dir": path_to_model_dir,
}
path_to_context = path_to_training + '/context.log'
with open(path_to_context, 'wb') as dict_items_save:
    pickle.dump(context, dict_items_save)
print("Save training context to {}".format(path_to_context))

# Save architecture
model_json = model.to_json()
path_to_architecture = path_to_training + "/model.json"
with open(path_to_architecture, "w") as json_file:
    json_file.write(model_json)
print("Save model architecture to {}".format(path_to_architecture))

# Checkpoint for trained model
checkpoint = ModelCheckpoint(
    path_to_training + '/weights.hdf5',
    monitor='val_loss', verbose=1, save_best_only=True)
csv_logger = CSVLogger(path_to_training + '/training.log')
callbacks_list = [checkpoint, csv_logger]

model.fit_generator(train_gen, epochs=epoches,
                    steps_per_epoch=5600 // batch_size,
                    callbacks=callbacks_list,
                    validation_data=val_gen,
                    validation_steps=2000 // batch_size)
Epoch 1/10000 111/112 [============================>.] - ETA: 13s - loss: 0.9985 - my_mean_squared_error: 111.1464 - mean_squared_error: 111.1464 - mean_absolute_error: 0.9985 - l2_on_wedge: 107.8654 - my_psnr: -5.8870
MIT
WF_inpaint/WF_inpaint_realphantom_unet_train.ipynb
arsenal9971/DeeMicrolocalReconstruction
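A minimal inference sketch (an addition, not part of the original notebook): once training has produced a checkpoint, the best weights can be reloaded and applied to a freshly generated wavefront-set pair. This assumes `path_to_training`, `CUSTOM_OBJECTS`, `generate_realphantom_WFinpaint`, and the size parameters from the cell above are in scope.

```python
# Hypothetical usage: reload the checkpoint saved by ModelCheckpoint above
# and inpaint one generated wavefront-set sample.
from tensorflow.keras.models import load_model

best_model = load_model(path_to_training + '/weights.hdf5',
                        custom_objects=CUSTOM_OBJECTS)
y_arr, x_true_arr = generate_realphantom_WFinpaint(1, size, nClasses, lowd)
x_pred = best_model.predict(y_arr)  # same (1, size, size, 1) shape as the input
```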
7. Vertical Vibration of Quarter Car Model

This notebook introduces the base excitation system by examining the behavior of a quarter car model. After the completion of this assignment students will be able to:

- excite a system with a sinusoidal input
- understand the difference in transient and steady state solutions
- create a frequency response plot
- define resonance and determine the parameters that cause resonance

![](quarter-car.jpg)
import numpy as np
import matplotlib.pyplot as plt
%matplotlib notebook

from resonance.linear_systems import SimpleQuarterCarSystem

sys = SimpleQuarterCarSystem()
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
The simple quarter car model has a suspension stiffness and damping, along with the sprung car mass in kilograms, and a travel speed parameter in meters per second.
sys.constants
sys.coordinates
sys.speeds
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
A sinusoidal road

The road is described as:

$$y(t) = Y \sin \omega_b t$$

where $Y$ is the amplitude of the sinusoidal road undulations and $\omega_b$ is the frequency of the base excitation, a function of the car's speed. If the distance between the peaks (amplitude 0.01 meters) of the sinusoidal road is 6 meters and the car is traveling at 7.5 m/s, calculate what the frequency will be.
Y = 0.01  # m
v = sys.constants['travel_speed']
bump_distance = 6  # m
wb = v / bump_distance * 2 * np.pi  # rad/s
print(wb)
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
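As a quick visual check (an addition, not from the original notebook), the road profile implied by these numbers can be plotted directly; this assumes `Y` and `wb` from the cell above are still in scope.

```python
# Plot the sinusoidal road surface y(t) = Y*sin(wb*t) for a few seconds of travel.
t_road = np.linspace(0, 5, num=500)
plt.figure()
plt.plot(t_road, Y * np.sin(wb * t_road))
plt.xlabel('time [s]')
plt.ylabel('road height $y(t)$ [m]');
```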
Now with the amplitude and frequency set you can use the `sinusoidal_base_displacing_response()` function to simulate the system.
traj = sys.sinusoidal_base_displacing_response(Y, wb, 20.0)
traj.head()
traj.plot(subplots=True);
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
We've written an animation for you. You can play it with:
sys.animate_configuration(fps=20)
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
**Exercise**

Try different travel speeds and see what kind of behavior you can observe. Make sure to set the `travel_speed` constant and the frequency value for `sinusoidal_base_displacing_response()` to be consistent.

Transmissibility

When designing a car the designer wants the riders to feel comfortable and to be isolated from the road's bumps. There are two important aspects to investigate. The first is called *displacement transmissibility* and is the ratio between the amplitude of the steady state motion and the amplitude of the sinusoidal base displacement. In our case this is:

$$ \frac{X}{Y}(\omega_b) = \frac{\textrm{Steady State Amplitude}}{\textrm{Base Displacement Amplitude}} $$

This can be plotted as a function of the base displacement frequency. A car suspension designer may want this ratio to be an optimal value for rider comfort. Maybe they'd like to make the ratio 1, or even less than one if possible.

**Exercise**

Use the curve fitting technique from the previous notebook to plot $X/Y$ for a range of frequencies. Your code should look something like:

```python
from scipy.optimize import curve_fit

def cosine_func(times, amp, freq, phase_angle):
    return amp * np.cos(freq * times - phase_angle)

frequencies = np.linspace(1.0, 20.0, num=100)

amplitudes = []
for omega in frequencies:
    # your code here

amplitudes = np.array(amplitudes)

fig, ax = plt.subplots(1, 1, sharex=True)
ax.set_xlabel('$\omega_b$ [rad/s]')
ax.set_ylabel('Displacement Transmissibility')
ax.axvline(?, color='black')  # natural frequency
ax.plot(?)
ax.grid();
```
from scipy.optimize import curve_fit

def cosine_func(times, amp, freq, phase_angle):
    return amp * np.cos(freq * times - phase_angle)

frequencies = np.linspace(1.0, 20.0, num=100)

amplitudes = []
for omega in frequencies:
    traj = sys.sinusoidal_base_displacing_response(Y, omega, 20.0)
    # fit a cosine to the latter (steady state) portion of the trajectory
    popt, pcov = curve_fit(cosine_func,
                           traj[10:].index,
                           traj[10:].car_vertical_position,
                           p0=(Y, omega, 0.05))
    amplitudes.append(abs(popt[0]))
amplitudes = np.array(amplitudes)

fig, ax = plt.subplots(1, 1, sharex=True)
ax.set_xlabel('$\omega_b$ [rad/s]')
ax.set_ylabel('Displacement Transmissibility')
# vertical line at the undamped natural frequency sqrt(k/m)
ax.axvline(np.sqrt(sys.constants['suspension_stiffness'] /
                   sys.constants['sprung_mass']), color='black')
ax.plot(frequencies, amplitudes / Y)
ax.grid();

# write your answer here
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
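The fitted curve above can be cross-checked against the closed-form displacement transmissibility of a base-excited single-degree-of-freedom system, $X/Y = \sqrt{(1+(2\zeta r)^2)/((1-r^2)^2+(2\zeta r)^2)}$ with $r = \omega_b/\omega_n$. A sketch of that comparison (an addition, assuming the suspension constants named earlier are available):

```python
# Closed-form displacement transmissibility for comparison with the curve fit.
m = sys.constants['sprung_mass']
k = sys.constants['suspension_stiffness']
c = sys.constants['suspension_damping']
wn = np.sqrt(k / m)        # undamped natural frequency [rad/s]
zeta = c / (2 * m * wn)    # damping ratio
r = frequencies / wn       # frequency ratio
XY = np.sqrt((1 + (2 * zeta * r)**2) / ((1 - r**2)**2 + (2 * zeta * r)**2))
plt.figure()
plt.plot(frequencies, XY)
plt.xlabel('$\omega_b$ [rad/s]')
plt.ylabel('Displacement Transmissibility');
```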
The second thing to investigate is the *force transmissibility*. This is the ratio involving the force applied by the suspension to the sprung car mass. Riders will feel this force when the car travels over bumps, so reducing it is also preferable. The force applied to the car can be computed by creating a measurement, as in the following exercise.

**Exercise**

Create a measurement to calculate the force applied to the car by the suspension. Simulate the system with $Y = 0.01$ m, $v = 10$ m/s, and the distance between bump peaks as $6$ m. Plot the trajectories.

```python
def force_on_car(suspension_damping, suspension_stiffness,
                 car_vertical_position, car_vertical_velocity,
                 travel_speed, time):
    # write this code

sys.add_measurement('force_on_car', force_on_car)

# write code for Y and omega_b, etc.
```
Y = 0.01  # m
bump_distance = 6  # m

def force_on_car(suspension_damping, suspension_stiffness,
                 car_vertical_position, car_vertical_velocity,
                 travel_speed, time):
    wb = travel_speed / bump_distance * 2 * np.pi
    y = Y * np.sin(wb * time)
    yd = Y * wb * np.cos(wb * time)
    return (suspension_damping * (car_vertical_velocity - yd) +
            suspension_stiffness * (car_vertical_position - y))

sys.add_measurement('force_on_car', force_on_car)

v = 10.0
sys.constants['travel_speed'] = v
wb = v / bump_distance * 2 * np.pi  # rad/s

traj = sys.sinusoidal_base_displacing_response(Y, wb, 10.0)
traj[['car_vertical_position', 'car_vertical_velocity', 'force_on_car']].plot(subplots=True)

# write your answer here

sys.animate_configuration(fps=30)
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
Force transmissibility will be visited more in your next homework.

Arbitrary Periodic Forcing (Fourier Series)

Fourier discovered that any periodic function with a period $T$ can be described by an infinite sum of sines and cosines. See the Wikipedia article for more info (https://en.wikipedia.org/wiki/Fourier_series). The key equation is:

$$ F(t) = \frac{a_0}{2} + \sum_{n=1}^\infty (a_n \cos n\omega_T t + b_n \sin n \omega_T t)$$

The terms $a_0, a_n, b_n$ are called the Fourier coefficients and are defined as:

$$ a_0 = \frac{2}{T} \int_0^T F(t) dt$$

$$ a_n = \frac{2}{T} \int_0^T F(t) \cos n \omega_T t dt \quad \textrm{for} \quad n = 1, 2, \ldots $$

$$ b_n = \frac{2}{T} \int_0^T F(t) \sin n \omega_T t dt \quad \textrm{for} \quad n = 1, 2, \ldots $$

Introduction to SymPy

SymPy is a Python package for symbolic computing. It can do many symbolic operations, for instance, integration, differentiation, linear algebra, etc. See http://sympy.org for more details of the features and the documentation. Today we will cover how to do integrals using SymPy and use it to find the Fourier series that represents a sawtooth function.
import sympy as sm
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
The function `init_printing()` enables LaTeX-based rendering of all SymPy objects in the Jupyter notebook.
sm.init_printing()
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
Symbols can be created by using the `symbols()` function.
x, y, z = sm.symbols('x, y, z')
x, y, z
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
The `integrate()` function allows you to do symbolic indefinite or definite integrals. Note that the constants of integration are not included in indefinite integrals.
sm.integrate(x * y, x)
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
The `Integral` class creates an unevaluated integral, whereas the `integrate()` function automatically evaluates the integral.
expr = sm.Integral(x * y, x)
expr
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
To evaluate the unevaluated form you call the `.doit()` method. Note that all unevaluated SymPy objects have this method.
expr.doit()
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
This shows how to create an unevaluated definite integral, store it in a variable, and then evaluate it.
expr = sm.Integral(x * y, (x, 0, 5))
expr
expr.doit()
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
Fourier Coefficients for the Sawtooth Function

Now let's compute the Fourier coefficients for a sawtooth function. The function that describes the sawtooth is:

$$F(t) = \begin{cases} A \left( \frac{4t}{T} - 1 \right) & 0 \leq t \leq T/2 \\ A \left( 3 - \frac{4t}{T} \right) & T/2 \leq t \leq T \end{cases}$$

where:

- $A$ is the amplitude of the sawtooth
- $T$ is the period of the sawtooth
- $\omega_T$ is the frequency of the sawtooth, i.e. $\omega_T = \frac{2\pi}{T}$
- $t$ is time

This is a piecewise function with two parts from $t=0$ to $t=T$.
A, T, wT, t = sm.symbols('A, T, omega_T, t', real=True, positive=True)
A, T, wT, t
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
The first Fourier coefficient $a_0$ describes the average value of the periodic function and is:

$$a_0 = \frac{2}{T} \int_0^T F(t) dt$$

Because $F(t)$ is piecewise, this integral has to be done in two parts:

$$a_0 = a_{01} + a_{02} = \frac{2}{T} \int_0^{T/2} F(t) dt + \frac{2}{T} \int_{T/2}^T F(t) dt$$

These two integrals are evaluated below. Note that $a_0$ evaluates to zero because the average of our function is 0.
ao_1 = 2 / T * sm.Integral(A * (4 * t / T - 1), (t, 0, T / 2))
ao_1
ao_1.doit()

ao_2 = 2 / T * sm.Integral(A * (3 - 4 * t / T), (t, T / 2, T))
ao_2
ao_2.doit()
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
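Both halves in fact integrate to zero individually, which can be confirmed in one line (a small check added here, not in the original notebook):

```python
# Each piece has zero mean over its half period, so the sum is 0 as well.
sm.simplify(ao_1.doit() + ao_2.doit())
```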
But SymPy can also handle piecewise functions directly. The following shows how to define one.
F_1 = A * (4 * t / T - 1)
F_2 = A * (3 - 4 * t / T)
F = sm.Piecewise((F_1, t <= T/2), (F_2, T/2 < t))
F

F_of_t_only = F.xreplace({A: 0.01, T: 2 * sm.pi / wb})
F_of_t_only
sm.plot(F_of_t_only, (t, 0, 2 * np.pi / wb))
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
The integral of the entire piecewise function can be taken in one call.
sm.integrate(F, (t, 0, T))
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
Now the Fourier coefficients $a_n$ and $b_n$ can be computed:

$$a_n = \frac{2}{T}\int_0^T F(t) \cos n\omega_T t \, dt \\
b_n = \frac{2}{T}\int_0^T F(t) \sin n\omega_T t \, dt$$
n = sm.symbols('n', real=True, positive=True)
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
For $a_n$:
an = 2 / T * sm.Integral(F * sm.cos(n * wT * t), (t, 0, T))
an
an.doit()
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
This can be simplified:
an = an.doit().simplify()
an
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
Now substitute $2\pi/T$ for $\omega_T$.
an = an.subs({wT: 2 * sm.pi / T})
an
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
Let's see how this function varies with increasing $n$. We will use a loop but the SymPy expressions will not automatically display because they are inside a loop. So we need to use SymPy's `latex()` function and the IPython display tools. SymPy's `latex()` function transforms the SymPy expression into a string of matching LaTeX commands.
sm.latex(an, mode='inline')
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
The `display()` and `Latex()` functions then turn the LaTeX string into a displayed version.
from IPython.display import display, Latex
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
Now we can see how $a_n$ varies with $n=1,2,\ldots$.
for n_i in range(1, 6):
    ans = an.subs({n: n_i})
    display(Latex('$a_{} = $'.format(n_i) + sm.latex(ans, mode='inline')))
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
For even $n$ values the coefficient is zero and for odd values it varies with the inverse of $n^2$. More precisely:

$$a_n =
\begin{cases}
0 & \textrm{if }n\textrm{ is even} \\
-\frac{8A}{n^2\pi^2} & \textrm{if }n\textrm{ is odd}
\end{cases}$$

SymPy can actually reduce this further if you set the assumption that $n$ is an integer.
n = sm.symbols('n', real=True, positive=True, integer=True)
an = 2 / T * sm.Integral(F * sm.cos(n * wT * t), (t, 0, T))
an = an.doit().simplify()
an.subs({wT: 2 * sm.pi / T})
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
The odd and even versions can be computed by setting the respective assumptions.
n = sm.symbols('n', real=True, positive=True, integer=True, odd=True)
an = 2 / T * sm.Integral(F * sm.cos(n * wT * t), (t, 0, T))
an = an.doit().simplify()
an.subs({wT: 2 * sm.pi / T})
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
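A numerical cross-check of the symbolic result (an addition to the notebook, assuming SciPy is available alongside NumPy): integrating the piecewise sawtooth directly should reproduce $a_n = -8A/(n^2\pi^2)$ for odd $n$.

```python
from scipy.integrate import quad

A_num, T_num = 1.0, 2.0
wT_num = 2 * np.pi / T_num

def F_num(tt):
    # the piecewise sawtooth on one period [0, T]
    return A_num * (4 * tt / T_num - 1) if tt <= T_num / 2 else A_num * (3 - 4 * tt / T_num)

for n_odd in (1, 3, 5):
    an_num = 2 / T_num * quad(lambda tt: F_num(tt) * np.cos(n_odd * wT_num * tt),
                              0, T_num)[0]
    # numeric integral vs the closed-form odd-n coefficient
    print(n_odd, an_num, -8 * A_num / (n_odd * np.pi)**2)
```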
Note that $b_n$ is always zero:
bn = 2 / T * sm.Integral(F * sm.sin(n * wT * t), (t, 0, T))
bn
bn.doit().simplify().subs({wT: 2 * sm.pi / T})
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
Numerical evaluation of the Fourier Series

Now the Fourier coefficients can be used to plot the approximation of the sawtooth forcing function.
import numpy as np
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
The following function plots the actual sawtooth function. It does it all in one line by cleverly using the absolute value and the modulo functions.
def sawtooth(A, T, t):
    return (4 * A / T) * (T / 2 - np.abs(t % T - T / 2)) - A

A = 1
T = 2
t = np.linspace(0, 5, num=500)

plt.figure()
plt.plot(t, sawtooth(A, T, t));
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
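As a quick sanity check (an addition), the one-line formula agrees with the piecewise definition used earlier at a few sample points in $[0, T]$:

```python
for tt in [0.0, 0.5, 1.0, 1.5]:
    piecewise = A * (4 * tt / T - 1) if tt <= T / 2 else A * (3 - 4 * tt / T)
    print(tt, sawtooth(A, T, tt), piecewise)  # the two columns should match
```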
**Exercise**

Write a function that computes the Fourier approximation of the sawtooth function using a finite number of terms $n$. Then plot it for $n=2, 4, 6, 8, 10$ on top of the actual sawtooth function. How many terms of the infinite series are needed to get a good sawtooth?

```python
def sawtooth_approximation(n, A, T, t):
    # code here
    return f

# plot sawtooth
f = sawtooth(A, T, t)

plt.figure()
plt.plot(t, f, color='k', label='true sawtooth')

for n in np.arange(2, 12, 2):
    f_approx = sawtooth_approximation(n, A, T, t)
    plt.plot(t, f_approx, label='n = {}'.format(n))

plt.legend()

# zoom in a bit on the interesting bit
plt.xlim(0, T)
```
def sawtooth_approximation(n, A, T, t):
    # indexing variable 1..n (the even terms vanish via the coefficient formula)
    n = np.arange(1, n+1)[:, np.newaxis]
    # cos coefficients: 0 for even n, -8*A/(pi**2 * n**2) for odd n
    an = A * (8 * (-1)**n - 8) / 2 / np.pi**2 / n**2
    # sawtooth frequency
    wT = 2 * np.pi / T
    # sum of n cos functions
    f = np.sum(an * np.cos(n * wT * t), axis=0)
    return f

# plot sawtooth
f = sawtooth(A, T, t)

plt.figure()
plt.plot(t, f, color='k', label='true sawtooth')

for n in np.arange(2, 12, 2):
    f_approx = sawtooth_approximation(n, A, T, t)
    plt.plot(t, f_approx, label='n = {}'.format(n))

plt.legend()

# zoom in a bit on the interesting bit
plt.xlim(0, T)

# write answer here
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
Below is an interactive plot that shows the same thing as above.
A = 1
T = 2
t = np.linspace(0, 5, num=500)

fig, ax = plt.subplots(1, 1)

f = sawtooth(A, T, t)
saw_tooth_lines = ax.plot(t, f, color='k')

n = 2
f_approx = sawtooth_approximation(n, A, T, t)
approx_lines = ax.plot(t, f_approx)

leg = ax.legend(['true', 'approx, n = {}'.format(n)])

# zoom in a bit on the interesting bit
plt.xlim(0, 2 * T)

def update(n=0):
    f_approx = sawtooth_approximation(n, A, T, t)
    approx_lines[0].set_ydata(f_approx)
    leg.get_texts()[1].set_text('approx, n = {}'.format(n))

from ipywidgets import interact
interact(update, n=(0, 20, 2))
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
Apply the sawtooth to the quarter car

Now that you know the Fourier series coefficients, calculate them for a suitable number of terms and simulate the system with the `sys.periodic_base_displacing_response()` function. Your code should look something like:

```python
def fourier_coeffs(A, T, N):
    # write your code here

a0, an, bn = fourier_coeffs(?)

traj = sys.periodic_base_displacing_response(?)
```
def fourier_coeffs(A, T, N):
    n = np.arange(1, N+1)
    an = A * (8 * (-1)**n - 8) / 2 / np.pi**2 / n**2
    return 0, an, np.zeros_like(an)

a0, an, bn = fourier_coeffs(0.01, 2 * np.pi / wb, 100)

traj = sys.periodic_base_displacing_response(a0, an, bn, wb, 20.0)
traj.plot(subplots=True)

sys.animate_configuration(fps=30)
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
Define the data directory of the preprocessed data
data_directory = "C:/Users/kwokp/OneDrive/Desktop/Study/zzz_application project/Final/data_after_preprocessing.csv"
_____no_output_____
MIT
Codes/2.2 Remove redundant words - SVM,KNN,Kmeans_v2.ipynb
matchlesswei/application_project_nlp_company_description
We divide the data into 3 groups:

* Group 1: full data
* Group 2: data with the four large categories which have more than 1000 companies each
* Group 3: seven categories of data, where the number of companies in each category is the same but small

In the function `selectGroup`, pass 1, 2, or 3 as the input parameter to select the relevant data for an experiment.
# read the data from the directory, then select the group of data we want to process
def selectGroup(directory, group_nr):
    data = pd.read_csv(directory, sep='\t')
    if group_nr == 1:
        return data
    if group_nr == 2:
        df_healthcare_group = data[data['Category'] == 'HEALTHCARE GROUP'].sample(n=1041, replace=False)
        df_business_financial_services = data[data['Category'] == 'BUSINESS & FINANCIAL SERVICES'].sample(n=1041, replace=False)
        df_consumer_service_group = data[data['Category'] == 'CONSUMER SERVICES GROUP'].sample(n=1041, replace=False)
        df_information_technology_group = data[data['Category'] == 'INFORMATION TECHNOLOGY GROUP'].sample(n=1041, replace=False)
        df_clean = pd.concat([df_healthcare_group, df_business_financial_services,
                              df_consumer_service_group, df_information_technology_group])
        return df_clean.sample(frac=1)
    if group_nr == 3:
        df_healthcare_group = data[data['Category'] == 'HEALTHCARE GROUP'].sample(n=219, replace=False)
        df_business_financial_services = data[data['Category'] == 'BUSINESS & FINANCIAL SERVICES'].sample(n=219, replace=False)
        df_consumer_service_group = data[data['Category'] == 'CONSUMER SERVICES GROUP'].sample(n=219, replace=False)
        df_information_technology_group = data[data['Category'] == 'INFORMATION TECHNOLOGY GROUP'].sample(n=219, replace=False)
        df_industry_goods = data[data['Category'] == 'INDUSTRIAL GOODS & MATERIALS GROUP'].sample(n=219, replace=False)
        df_consumer_goods = data[data['Category'] == 'CONSUMER GOODS GROUP'].sample(n=219, replace=False)
        df_energy = data[data['Category'] == 'ENERGY & UTILITIES GROUP'].sample(n=219, replace=False)
        df_clean = pd.concat([df_healthcare_group, df_business_financial_services,
                              df_consumer_service_group, df_information_technology_group,
                              df_industry_goods, df_consumer_goods, df_energy])
        return df_clean.sample(frac=1)

# use the tf-idf method to generate scores for each company
def tf_idf_func(df_document, max_features):
    feature_extraction = TfidfVectorizer(max_features=max_features, stop_words='english')
    score_matrix = feature_extraction.fit_transform(df_document.values)
    return score_matrix, feature_extraction

# get the top_n words
def get_top_keywords(scores_matrix, clusters, labels, n_terms):
    df = pd.DataFrame(scores_matrix.todense()).groupby(clusters).mean()
    for i, r in df.iterrows():
        print('\nCluster {}'.format(i))
        print(','.join([labels[t] for t in np.argsort(r)[-n_terms:]]))

# get the top_n words with the highest tf-idf scores in each category, and count the word occurrences
def get_top_keywords_with_frequence(Top_N, score_matrix, df_data, feature_extraction):
    df = pd.DataFrame(score_matrix.todense())  # tf-idf matrix: one row per company, one column per word
    df['Category'] = df_data['Category']  # assign the category for each line (company) in the score matrix
    dfg = df.groupby(['Category']).mean()  # mean score of each word in each category
    labels = feature_extraction.get_feature_names()
    categories = df_data['Category'].unique()
    col_names = ['Category', 'Top_N', 'Score']
    df_top = pd.DataFrame(columns=col_names)
    Dict = {}
    for i, r in dfg.iterrows():  # i: category, r: row of the average score matrix
        category = i
        top_series = np.argsort(r)[-Top_N:]  # locations of the top_n words
        label_series = top_series.apply(lambda x: labels[x])  # top_n words with the best scores in each category
        top_scores = np.sort(r)[-Top_N:]  # scores corresponding to the top_n words
        df_each = pd.DataFrame({'Category': category, 'Top_N': label_series, 'Score': top_scores})
        df_top = df_top.append(df_each, ignore_index=True)
        for key in label_series:  # count how often each word appears in the top_n
            if key in Dict:
                Dict[key] = Dict[key] + 1
            else:
                Dict[key] = 1
    df_reshape = df_top.pivot(index='Top_N', columns='Category')  # reformat the top-n score matrix
    sortedDict = sorted(Dict.items(), key=lambda x: x[1])  # sort the dictionary by occurrence
    return sortedDict

# convert the top_n words with their occurrences per category into a list of stopwords,
# keeping the words whose occurrence is larger than the given threshold
def get_word_occurence_stopwordslist(max_occurence, dict_list):
    word = []
    occurence = []
    frequent_stopwords = []
    for key, value in dict_list:
        word.append(key)
        occurence.append(value)
        if value > max_occurence:  # occurrence larger than the given threshold
            frequent_stopwords.append(key)  # store in the stopwords list
    return word, occurence, frequent_stopwords

# remove the words from a sentence that are in the stopwords list
def remove_frequent_stopwords(sentences, frequent_stopwords):
    splitted_string = sentences.split()
    remove_stopwords = [w for w in splitted_string if not w in frequent_stopwords]
    return ' '.join(remove_stopwords)

# remove the stopwords from the website content and
# update the tf-idf score matrix for the whole corpus
def remove_frequent_stopwords_and_get_updated_tfidfscore(data, feature_extraction, top_n, frequent_stopwords):
    df_update = data['clean'].apply(lambda x: remove_frequent_stopwords(x, frequent_stopwords))
    score_matrix_update = feature_extraction.fit_transform(df_update.values)
    return score_matrix_update
_____no_output_____
MIT
Codes/2.2 Remove redundant words - SVM,KNN,Kmeans_v2.ipynb
matchlesswei/application_project_nlp_company_description
List the occurrence of words in the top-50 keywords across categories
# visualize top_n words with their occurrence counts
def visulaze_topwords_occurence(top_n, word_list, occurence_list):
    objects = word_list
    y_pos = np.arange(len(word_list))
    performance = occurence_list
    plt.figure(figsize=(10, 24))
    plt.barh(y_pos, performance, align='center', alpha=0.5)
    plt.yticks(y_pos, objects)
    plt.xlabel('Occurence')
    plt.title('Occurence of words in Top ' + str(top_n) + ' Keywords in categories')
    plt.show()

data = selectGroup(data_directory, 1)
score_matrix, feature_extraction = tf_idf_func(data['clean'], 8000)
sortedDict = get_top_keywords_with_frequence(50, score_matrix, data, feature_extraction)
word, occurence, _ = get_word_occurence_stopwordslist(1, sortedDict)
visulaze_topwords_occurence(50, word, occurence)
_____no_output_____
MIT
Codes/2.2 Remove redundant words - SVM,KNN,Kmeans_v2.ipynb
matchlesswei/application_project_nlp_company_description
We remove the redundant words which appear in multiple categories. The main steps are as follows:

1. select the group of data to use for the test
2. generate the TF-IDF score matrix
3. get the top 50 words in each category
4. find the words which appear in more than one category's top-50 words, and set them as stopwords
5. remove these stopwords and update the TF-IDF score matrix
6. count the word occurrences on each company's website
7. plot the number of valid words on each website
8. remove the websites which have fewer than 200 words

We may notice there are quite a few companies whose websites have fewer than 200 words. These websites could be useless. The category distribution after processing is shown in the result of the cell.
# get the data, remove the frequent words which appear in more than one category,
# and update the tf-idf score matrix
data = selectGroup(data_directory, 1)
score_matrix, feature_extraction = tf_idf_func(data['clean'], 8000)
sortedDict = get_top_keywords_with_frequence(50, score_matrix, data, feature_extraction)
_, _, frequent_stopwords = get_word_occurence_stopwordslist(1, sortedDict)
score_matrix_update = remove_frequent_stopwords_and_get_updated_tfidfscore(data, feature_extraction, 10, frequent_stopwords)

# show the top keywords of the remaining words after removing the frequent words
# which appear in more than one category
get_top_keywords(score_matrix_update, data['Category'].values, feature_extraction.get_feature_names(), 10)

# count the non-zero words in the updated tf-idf score matrix and display
# the non-zero word count for each company website
score_value = score_matrix_update.todense()
website_word_count = np.asarray(np.count_nonzero(score_value, axis=1)).reshape(-1)
plt.hist(website_word_count, bins=30)
plt.xlabel('number of words in the whole website')
plt.ylabel('number of websites')
plt.title('Distribution of number of words in the websites')

df_score = pd.DataFrame(score_value)
df_score.columns = feature_extraction.get_feature_names()
df_score['Keep'] = website_word_count > 200
df_score['Category'] = data['Category'].reset_index(drop=True)
df_score_valid = df_score[df_score['Keep']]
df_score_valid['Category'].value_counts()
Cluster BUSINESS & FINANCIAL SERVICES learn,agreement,need,insurance,media,experience,financial,companies,clients,marketing Cluster CONSUMER GOODS GROUP read,sites,address,brand,organic,home,shipping,ingredients,foods,food Cluster CONSUMER SERVICES GROUP experience,media,sites,world,address,parties,people,day,agreement,agree Cluster ENERGY & UTILITIES GROUP llc,basin,electricity,wind,drilling,renewable,fuel,power,oil,solar Cluster HEALTHCARE GROUP dr,treatment,cancer,healthcare,patient,care,health,clinical,patients,medical Cluster INDUSTRIAL GOODS & MATERIALS GROUP industries,process,range,industrial,parts,aerospace,materials,steel,packaging,manufacturing Cluster INFORMATION TECHNOLOGY GROUP application,world,performance,experience,need,learn,enterprise,domain,solution,network
MIT
Codes/2.2 Remove redundant words - SVM,KNN,Kmeans_v2.ipynb
matchlesswei/application_project_nlp_company_description
Split the data 80% for training and 20% for testing
df_final = df_score_valid[df_score_valid.columns.difference(['Keep', 'Category'])]  # remove columns 'Keep' and 'Category'
df_category = df_score_valid['Category'].reset_index(drop=True)

msk = np.random.rand(len(df_final)) < 0.8
train_x = np.nan_to_num(df_final[msk])
test_x = np.nan_to_num(df_final[~msk])
train_y = df_category[msk].to_numpy()
test_y = df_category[~msk].to_numpy()
_____no_output_____
MIT
Codes/2.2 Remove redundant words - SVM,KNN,Kmeans_v2.ipynb
matchlesswei/application_project_nlp_company_description
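An alternative worth noting (a sketch, not the project's original approach): scikit-learn's `train_test_split` with `stratify` keeps the category proportions equal in both sets, which the random-mask split above does not guarantee.

```python
# Stratified 80/20 split; random_state is an arbitrary choice for reproducibility.
from sklearn.model_selection import train_test_split

train_x, test_x, train_y, test_y = train_test_split(
    np.nan_to_num(df_final), df_category.to_numpy(),
    test_size=0.2, stratify=df_category.to_numpy(), random_state=0)
```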
Perform Linear SVM
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score

# use an SVM classifier on the tf-idf vector of each website
def linear_svc_classifier(train_x, train_y, test_x, test_y):
    print("start svm")
    classifier_svm = svm.LinearSVC()
    classifier_svm.fit(train_x, train_y)
    predictions = classifier_svm.predict(test_x)
    print(confusion_matrix(test_y, predictions))
    print(classification_report(test_y, predictions))
    print(accuracy_score(test_y, predictions))
    array = confusion_matrix(test_y, predictions)
    y_true = ["BUSINESS & FINANCIAL SERVICES", "CONSUMER GOODS GROUP", "CONSUMER SERVICES GROUP",
              "ENERGY & UTILITIES GROUP", "HEALTHCARE GROUP",
              "INDUSTRIAL GOODS & MATERIALS GROUP", "INFORMATION TECHNOLOGY GROUP"]
    y_pred = y_true
    df_cm = pd.DataFrame(array, y_true, y_pred)
    df_cm.index.name = 'Actual'
    df_cm.columns.name = 'Predicted'
    plt.figure(figsize=(10, 7))
    #sn.set(font_scale=1.4)  # for label size
    ax = sn.heatmap(df_cm, cmap="Blues", annot=True, fmt='d', annot_kws={"size": 16})  # font size
    bottom, top = ax.get_ylim()
    ax.set_ylim(bottom + 0.5, top - 0.5)
    ax.tick_params(labelsize=10)
    plt.show()
    return confusion_matrix(test_y, predictions), predictions

# note: this assignment shadows sklearn's confusion_matrix function imported above
confusion_matrix, predictions = linear_svc_classifier(train_x, train_y, test_x, test_y)
start svm [[145 4 24 2 3 5 66] [ 4 28 16 0 3 7 5] [ 24 7 115 1 8 2 30] [ 6 0 0 21 0 2 3] [ 8 5 6 1 135 4 15] [ 15 3 6 3 1 45 9] [ 68 4 32 1 6 9 225]] precision recall f1-score support BUSINESS & FINANCIAL SERVICES 0.54 0.58 0.56 249 CONSUMER GOODS GROUP 0.55 0.44 0.49 63 CONSUMER SERVICES GROUP 0.58 0.61 0.60 187 ENERGY & UTILITIES GROUP 0.72 0.66 0.69 32 HEALTHCARE GROUP 0.87 0.78 0.82 174 INDUSTRIAL GOODS & MATERIALS GROUP 0.61 0.55 0.58 82 INFORMATION TECHNOLOGY GROUP 0.64 0.65 0.64 345 accuracy 0.63 1132 macro avg 0.64 0.61 0.62 1132 weighted avg 0.64 0.63 0.63 1132 0.6307420494699647
MIT
Codes/2.2 Remove redundant words - SVM,KNN,Kmeans_v2.ipynb
matchlesswei/application_project_nlp_company_description
Perform KNN with 5 Neighbours
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score

# use a knn classifier on the tf-idf vector of each website
def knn_classifier(x_train, y_train, x_test, y_test):
    print("start knn")
    modelknn = KNeighborsClassifier(n_neighbors=5)
    modelknn.fit(x_train, y_train)
    predictions = modelknn.predict(x_test)
    print(confusion_matrix(y_test, predictions))
    print(classification_report(y_test, predictions))
    print(accuracy_score(y_test, predictions))
    array = confusion_matrix(y_test, predictions)
    y_true = ["INDUSTRIAL GOODS & MATERIALS GROUP", "CONSUMER SERVICES GROUP", "CONSUMER GOODS GROUP",
              "INFORMATION TECHNOLOGY GROUP", "ENERGY & UTILITIES GROUP",
              "BUSINESS & FINANCIAL SERVICES", "HEALTHCARE GROUP"]
    y_pred = y_true
    df_cm = pd.DataFrame(array, y_true, y_pred)
    df_cm.index.name = 'Actual'
    df_cm.columns.name = 'Predicted'
    plt.figure(figsize=(10, 7))
    #sn.set(font_scale=1.4)  # for label size
    ax = sn.heatmap(df_cm, cmap="Greens", annot=True, fmt='d', annot_kws={"size": 16})  # font size
    bottom, top = ax.get_ylim()
    ax.set_ylim(bottom + 0.5, top - 0.5)
    ax.tick_params(labelsize=10)
    plt.show()
    return confusion_matrix(test_y, predictions), predictions

confusion_matrix, predictions = knn_classifier(train_x, train_y, test_x, test_y)
start knn [[153 7 22 4 5 4 54] [ 8 25 19 1 3 2 5] [ 33 17 89 1 12 2 33] [ 10 1 0 16 2 0 3] [ 18 5 8 0 133 3 7] [ 21 4 4 4 2 34 13] [106 6 40 2 8 7 176]] precision recall f1-score support BUSINESS & FINANCIAL SERVICES 0.44 0.61 0.51 249 CONSUMER GOODS GROUP 0.38 0.40 0.39 63 CONSUMER SERVICES GROUP 0.49 0.48 0.48 187 ENERGY & UTILITIES GROUP 0.57 0.50 0.53 32 HEALTHCARE GROUP 0.81 0.76 0.78 174 INDUSTRIAL GOODS & MATERIALS GROUP 0.65 0.41 0.51 82 INFORMATION TECHNOLOGY GROUP 0.60 0.51 0.55 345 accuracy 0.55 1132 macro avg 0.56 0.53 0.54 1132 weighted avg 0.57 0.55 0.56 1132 0.5530035335689046
MIT
Codes/2.2 Remove redundant words - SVM,KNN,Kmeans_v2.ipynb
matchlesswei/application_project_nlp_company_description
Perform K-Means and plot SSE, PCA, and t-SNE
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib.cm as cm
import itertools

# find the optimal number of clusters from 2 up to max_k, plotting the respective SSE
def find_optimal_clusters(data, max_k):
    iters = range(2, max_k + 1, 2)
    sse = []
    for k in iters:
        sse.append(MiniBatchKMeans(n_clusters=k, init_size=512, batch_size=1024,
                                   random_state=20).fit(data).inertia_)
        print('Fit {} clusters'.format(k))
    f, ax = plt.subplots(1, 1)
    ax.plot(iters, sse, marker='o')
    ax.set_xlabel('Cluster Centers')
    ax.set_xticks(iters)
    ax.set_xticklabels(iters)
    ax.set_ylabel('SSE')
    ax.set_title('SSE by Cluster Center Plot')

# plot TSNE and PCA for the clusters
def plot_tsne_pca(data, labels):
    max_label = max(labels + 1)
    max_items = np.random.choice(range(data.shape[0]), size=3000, replace=False)
    pca = PCA(n_components=2).fit_transform(data[max_items, :].todense())
    tsne = TSNE().fit_transform(PCA(n_components=50).fit_transform(data[max_items, :].todense()))
    idx = np.random.choice(range(pca.shape[0]), size=300, replace=False)
    label_subset = labels[max_items]
    label_subset = [cm.hsv(i / max_label) for i in label_subset[idx]]
    f, ax = plt.subplots(1, 2, figsize=(14, 6))
    ax[0].scatter(pca[idx, 0], pca[idx, 1], c=label_subset)
    ax[0].set_title('PCA Cluster Plot')
    ax[1].scatter(tsne[idx, 0], tsne[idx, 1], c=label_subset)
    ax[1].set_title('TSNE Cluster Plot')

# calculate the accuracy between the clustered data and the actual labels
def Calculate_accuracy(clusters, actual_label):
    count = 0
    for index, cluster in enumerate(clusters):
        if cluster == actual_label[index]:
            count += 1
    accuracy = count / len(clusters) * 1.0
    return accuracy

# assign cluster ids to the actual category labels according to a permutation
def assign_clusters(original_label, permu, nr_group):
    if nr_group == 2:
        categories = ["BUSINESS & FINANCIAL SERVICES", "CONSUMER SERVICES GROUP",
                      "HEALTHCARE GROUP", "INFORMATION TECHNOLOGY GROUP"]
    else:
        categories = ["INDUSTRIAL GOODS & MATERIALS GROUP", "CONSUMER SERVICES GROUP",
                      "CONSUMER GOODS GROUP", "INFORMATION TECHNOLOGY GROUP",
                      "ENERGY & UTILITIES GROUP", "BUSINESS & FINANCIAL SERVICES",
                      "HEALTHCARE GROUP"]
    mydict = dict(zip(categories, permu))
    actual_label = np.zeros(len(original_label))
    for index, label in enumerate(original_label):
        actual_label[index] = mydict[label]
    return actual_label

# perform k-means and plot the results
def kmeans_classifier(score_matrix_update, nr_group):
    if nr_group == 2:
        nr_cluster = 4
    else:
        nr_cluster = 7
    find_optimal_clusters(score_matrix_update, nr_cluster)
    clusters = MiniBatchKMeans(n_clusters=nr_cluster, init_size=512, batch_size=1024,
                               random_state=20).fit_predict(score_matrix_update)
    plot_tsne_pca(score_matrix_update, clusters)
    get_top_keywords(score_matrix_update, clusters, feature_extraction.get_feature_names(), 10)
    if nr_group == 2:
        numbers = [0, 1, 2, 3]
    else:
        numbers = [0, 1, 2, 3, 4, 5, 6]
    permu = list(itertools.permutations(numbers))
    best_accuracy = 0
    for i in range(len(permu)):
        actual_label = assign_clusters(data['Category'].values, permu[i], nr_group)
        accuracy = Calculate_accuracy(clusters, actual_label)
        if best_accuracy < accuracy:
            best_accuracy = accuracy
            final_label = actual_label
            category = permu[i]
    print(category)
    #print(final_label)
    print("The Accuracy is " + str(round(best_accuracy, 2)))

kmeans_classifier(score_matrix_update, 1)
Fit 2 clusters Fit 4 clusters Fit 6 clusters Cluster 0 devices,storage,application,performance,networks,infrastructure,enterprise,solution,wireless,network Cluster 1 reserved,click,read,learn,world,copyright,need,home,wordpress,domain Cluster 2 dr,healthcare,treatment,cancer,care,health,patient,medical,clinical,patients Cluster 3 meal,day,fresh,coffee,delicious,cheese,restaurant,foods,ingredients,food Cluster 4 http,window,archive,width,function,document,gform,jquery,px,var Cluster 5 people,group,market,global,years,financial,companies,experience,marketing,clients Cluster 6 address,provided,applicable,sites,websites,law,collect,parties,agree,agreement (4, 6, 3, 1, 0, 5, 2) The Accuracy is 0.34
MIT
Codes/2.2 Remove redundant words - SVM,KNN,Kmeans_v2.ipynb
matchlesswei/application_project_nlp_company_description
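A possible refinement (a sketch under stated assumptions, not the project's code): instead of brute-forcing all 7! label permutations, the optimal cluster-to-category assignment can be found with the Hungarian algorithm via `scipy.optimize.linear_sum_assignment` applied to the contingency table. The variable names below (`clusters_pred`, `true_ids`) are hypothetical.

```python
from scipy.optimize import linear_sum_assignment
from sklearn.metrics import confusion_matrix as cm_func  # aliased to avoid the shadowing above

def best_cluster_accuracy(clusters_pred, true_ids):
    # maximize the matched counts = minimize the negated contingency table
    table = cm_func(true_ids, clusters_pred)
    row_ind, col_ind = linear_sum_assignment(-table)
    return table[row_ind, col_ind].sum() / len(clusters_pred)

# hypothetical usage: integer-encode the categories, then score the clustering
# true_ids = pd.factorize(data['Category'])[0]
# print(best_cluster_accuracy(clusters_pred, true_ids))
```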
Carpet plots
import opengrid as og
from opengrid.library import plotting as og_plot
import pandas as pd
from joule import meta, filter_meta

plt = og.plot_style()
#%matplotlib notebook

for building in meta['RecordNumber'].unique():
    ts = pd.read_pickle('data/Electricity_{}.pkl'.format(building)).sum(axis=1) * 60
    if not ts.empty:
        for i in range(1, 13):
            df_month = ts[ts.index.month == i]
            if len(df_month) > 1:
                og_plot.carpet(df_month, title=building)
                fig = plt.gcf()
                fig.savefig("figures/carpet_electricity_month{}_{}.png".format(i, building))
                plt.show()
_____no_output_____
Apache-2.0
Carpet.ipynb
saroele/jouleboulevard
Zbozinek TD, Perez OD, Wise T, Fanselow M, & Mobbs D
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from theano import scan
import theano.tensor as T
import pymc3 as pm
import theano

import seaborn as sns
import os, sys, subprocess
_____no_output_____
Apache-2.0
modeling/modeling code/Experiment_2_Direct_Associations.ipynb
tzbozinek/2nd-order-occasion-setting
Load Data
data = pd.read_csv(os.path.join('../data/', "2nd_POS_Modeling_Data_Direct_Associations.csv"))
data['DV'] = ((data['DV'].values - 1) / 2) - 1

# pivot so trials are rows and participants are columns; values.T transposes the data,
# so you can make trials the first dimension or participants first
observed_R = data.pivot(columns='ID', index='trialseq', values='DV').values[:, np.newaxis, :]
_____no_output_____
Apache-2.0
modeling/modeling code/Experiment_2_Direct_Associations.ipynb
tzbozinek/2nd-order-occasion-setting
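A small sanity check (an addition, not from the original notebook): after the pivot, `observed_R` should have shape `(n_trials, 1, n_participants)`, with NaN wherever a participant is missing a trial.

```python
# Inspect the pivoted array before handing it to the model.
print(observed_R.shape)
print(np.isnan(observed_R).sum(), "missing entries")
```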