Set up details for PostGIS DB, run in terminal: We are going to use a PostGIS database, which requires you to have an empty database. Enter these steps into the terminal to set up your database. In this example we use "demo" as the name of our database. Feel free to give your database another name, but replace "demo" with the name you have chosen.

Connect to postgres:

    psql -d postgres

Create database:

    postgres=# CREATE DATABASE demo;

Switch to new DB:

    postgres=# \c demo

Add PostGIS extension to new DB:

    demo=# CREATE EXTENSION postgis;

Add table:

    demo=# CREATE TABLE tweets (id serial primary key, tweet_id BIGINT, text varchar(140), date DATE, time TIME, geom geometry(POINT,4326));

Enter your database connection details:
dbname = "demo" user = "user" password = "user" table = "tweets"
Lesson 14/Lesson 14 - Assignment.ipynb
jornvdent/WUR-Geo-Scripting-Course
gpl-3.0
Function which connects to the PostGIS database and inserts data
def insert_into_DB(tweet_id, tweet_text, tweet_date, tweet_time, tweet_lat, tweet_lon):
    try:
        conn = psycopg2.connect(dbname=dbname, user=user, password=password)
        cur = conn.cursor()
        # enter stuff in database
        sql = "INSERT INTO " + str(table) + " (tweet_id, text, date, time, geom) \
               VALUES (" + str(tweet_id) + ", '" + str(tweet_text) + "', '" + str(tweet_date) + "', '" + str(tweet_time) + "', \
               ST_GeomFromText('POINT(" + str(tweet_lon) + " " + str(tweet_lat) + ")', 4326))"
        cur.execute(sql)
        conn.commit()
        conn.close()
    except psycopg2.DatabaseError as e:
        print('Error %s' % e)
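Since the tweet text is user-generated, string concatenation like the above is vulnerable to SQL injection. A safer variant (a sketch of my own, not part of the original assignment) passes the values as query parameters and lets psycopg2 handle quoting:

```python
def insert_into_DB_safe(tweet_id, tweet_text, tweet_date, tweet_time, tweet_lat, tweet_lon):
    # Same insert as above, but with placeholders instead of string concatenation.
    sql = ("INSERT INTO " + table + " (tweet_id, text, date, time, geom) "
           "VALUES (%s, %s, %s, %s, ST_SetSRID(ST_MakePoint(%s, %s), 4326))")
    conn = psycopg2.connect(dbname=dbname, user=user, password=password)
    with conn, conn.cursor() as cur:
        cur.execute(sql, (tweet_id, tweet_text, tweet_date, tweet_time, tweet_lon, tweet_lat))
    conn.close()
```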
Lesson 14/Lesson 14 - Assignment.ipynb
jornvdent/WUR-Geo-Scripting-Course
gpl-3.0
Function to remove the hyperlinks from the text
def remove_link(text):
    pattern = r'(https://)'
    matcher = re.compile(pattern)
    match = matcher.search(text)
    if match is not None:
        # truncate the tweet at the first link, dropping everything after it
        text = text[:match.start(1)]
    return text
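Note that this truncates the tweet at the first `https://` occurrence and drops everything after it. A sketch of an alternative (my own, not from the assignment) that removes each URL in place and keeps the surrounding text:

```python
import re

def remove_links_inplace(text):
    # Strip http:// and https:// URLs wherever they occur, keeping the rest of the text.
    return re.sub(r'https?://\S+', '', text).strip()
```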
Lesson 14/Lesson 14 - Assignment.ipynb
jornvdent/WUR-Geo-Scripting-Course
gpl-3.0
Process JSON Twitter streamed data
# Class to process JSON data coming from the twitter stream API. Extract relevant fields
class MyStreamer(TwythonStreamer):
    def on_success(self, data):
        tweet_lat = 0.0
        tweet_lon = 0.0
        tweet_name = ""
        retweet_count = 0
        if 'id' in data:
            tweet_id = data['id']
        if 'text' in data:
            tweet_text = data['text'].replace("'", "''").replace(';', '')
            tweet_text = remove_link(tweet_text)
        if 'coordinates' in data:
            geo = data['coordinates']
            if geo is not None:
                latlon = geo['coordinates']
                tweet_lon = latlon[0]
                tweet_lat = latlon[1]
        if 'created_at' in data:
            dt = data['created_at']
            tweet_datetime = datetime.strptime(dt, '%a %b %d %H:%M:%S +0000 %Y')
            tweet_date = str(tweet_datetime)[:11]
            tweet_time = str(tweet_datetime)[11:]
        if 'user' in data:
            users = data['user']
            tweet_name = users['screen_name']
        if 'retweet_count' in data:
            retweet_count = data['retweet_count']
        if tweet_lat != 0:
            # call function to write to DB
            insert_into_DB(tweet_id, tweet_text, tweet_date, tweet_time, tweet_lat, tweet_lon)

    def on_error(self, status_code, data):
        print("OOPS FOUTJE: " + str(status_code))
        # self.disconnect
Lesson 14/Lesson 14 - Assignment.ipynb
jornvdent/WUR-Geo-Scripting-Course
gpl-3.0
Main procedure
def main():
    try:
        stream = MyStreamer(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
        print('Connecting to twitter: will take a minute')
    except ValueError:
        print('OOPS! that hurts, something went wrong while making connection with Twitter: ' + str(ValueError))

    # Filter based on bounding box; see twitter api documentation for more info
    try:
        stream.statuses.filter(locations='-0.351468, 51.38494, 0.148271, 51.672343')
    except ValueError:
        print('OOPS! that hurts, something went wrong while getting the stream from Twitter: ' + str(ValueError))

if __name__ == '__main__':
    main()
Lesson 14/Lesson 14 - Assignment.ipynb
jornvdent/WUR-Geo-Scripting-Course
gpl-3.0
Pivot Tables w/ pandas http://nicolas.kruchten.com/content/2015/09/jupyter_pivottablejs/
YouTubeVideo("ZbrRrXiWBKc", width=800, height=600) !pip install pivottablejs df = pd.read_csv("../data/mps.csv") df.head() from pivottablejs import pivot_ui pivot_ui(df) # Province, Party, Average, Age, Heatmap
deliver/01-Tips-and-tricks.ipynb
jbwhit/svds-jupyter
mit
Keyboard shortcuts
# in select mode, shift j/k (to select multiple cells at once)
# split cell with ctrl shift -
first = 1
second = 2
third = 3
deliver/01-Tips-and-tricks.ipynb
jbwhit/svds-jupyter
mit
Floating Table of Contents

Creates a new button on the toolbar that pops up a table of contents that you can navigate by.

In your documentation, if you indent by 4 spaces, you get monospaced code-style code that you can embed in a Markdown cell:

    $ mkdir toc
    $ cd toc
    $ wget https://raw.githubusercontent.com/minrk/ipython_extensions/master/nbextensions/toc.js
    $ wget https://raw.githubusercontent.com/minrk/ipython_extensions/master/nbextensions/toc.css
    $ cd ..
    $ jupyter-nbextension install --user toc
    $ jupyter-nbextension enable toc/toc

You can also get syntax highlighting if you tell it the language that you're including:

```bash
mkdir toc
cd toc
wget https://raw.githubusercontent.com/minrk/ipython_extensions/master/nbextensions/toc.js
wget https://raw.githubusercontent.com/minrk/ipython_extensions/master/nbextensions/toc.css
cd ..
jupyter-nbextension install --user toc
jupyter-nbextension enable toc/toc
```

R: pyRserve, rpy2
import numpy as np
import rpy2
%load_ext rpy2.ipython

X = np.array([0,1,2,3,4])
Y = np.array([3,5,4,6,7])

# The %%R cell magic below must be the first line of its own notebook cell.
%%R -i X,Y -o XYcoef
XYlm = lm(Y~X)
XYcoef = coef(XYlm)
print(summary(XYlm))
par(mfrow=c(2,2))
plot(XYlm)
deliver/01-Tips-and-tricks.ipynb
jbwhit/svds-jupyter
mit
Now let's add some erosive noise to the image and then see the recall
import cv2
import numpy as np
import matplotlib.pyplot as plt

erode_img = sample_image
# kernel is a pixel set like a cross (or any shape) which convolves and erodes
kernel = np.ones((5,5), np.uint8)
erosion = cv2.erode(erode_img, kernel, iterations=1)
plt.figure()
plt.imshow(erosion)
plt.show()

# Now let's try to do some recall
x_eroded = erosion
x_eroded_vector = x_eroded.flatten()
add_individual = np.add(weights, x_eroded_vector)
result = np.array([max(row) for row in add_individual])
# now let's reshape the result to 128 x 128
result.shape = (128, 128)
plt.figure()
plt.imshow(result)
plt.show()

# now let's see the amount of recall error
result = result.flatten()
np.testing.assert_array_almost_equal(result, x_vectors)
print('done 0%')
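Instead of an assertion, which raises on the first mismatch, a small sketch of my own can report the recall error as a fraction of mismatched pixels (assuming `result` and `x_vectors` are the flattened recalled and original images):

```python
# Fraction of pixels where the recalled image deviates from the original.
mismatch = np.mean(~np.isclose(result, x_vectors))
print('recall error: {:.2%}'.format(mismatch))
```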
MAMs discussion.ipynb
shubham0704/ATR-FNN
mit
The example above can also be implemented with try...finally..., and the two are equivalent (in other words, a context manager encapsulates and simplifies the error-handling process):
try:
    f = open("utf8.txt", "r")
    print(f.read())
finally:
    f.close()
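For comparison, the equivalent `with` block (the file is closed automatically even if an exception occurs):

```python
with open("utf8.txt", "r") as f:
    print(f.read())
```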
Tips/2016-03-23-With-Context-Manager.ipynb
rainyear/pytips
mit
Besides file objects, we can also create our own context managers. Much like the iterators introduced in 0x01, any type that defines the __enter__() and __exit__() methods becomes a context manager type. The with statement executes as follows:

1. Execute the expression after with to obtain the context manager; for example, open('utf8.txt', 'r') returns a file object;
2. Load the __exit__() method for later use;
3. Execute __enter__(); its return value is bound to the variable after as (if there is one);
4. Execute the body of the with block;
5. Execute the __exit__() method. If an exception occurred in the with block, its type, value, and traceback are passed to __exit__(); otherwise all three default to None. If __exit__() returns False, the exception is re-raised to the outer scope; if it returns True, the exception is suppressed.

Having understood how the with statement executes, we can write our own context manager. Suppose we need a reference counter, and for some special reason multiple counters must share global state and be able to affect each other, and once a counter is finished with, the initial global state must be restored:
_G = {"counter": 99, "user": "admin"} class Refs(): def __init__(self, name = None): self.name = name self._G = _G self.init = self._G['counter'] def __enter__(self): return self def __exit__(self, *args): self._G["counter"] = self.init return False def acc(self, n = 1): self._G["counter"] += n def dec(self, n = 1): self._G["counter"] -= n def __str__(self): return "COUNTER #{name}: {counter}".format(**self._G, name=self.name) with Refs("ref1") as ref1, Refs("ref2") as ref2: # Python 3.1 加入了多个并列上下文管理器 for _ in range(3): ref1.dec() print(ref1) ref2.acc(2) print(ref2) print(_G)
Tips/2016-03-23-With-Context-Manager.ipynb
rainyear/pytips
mit
The example above is awkward but illustrates the execution order of the with statement well. Defining two methods each time is not very concise, though; as usual, Python provides @contextlib.contextmanager + generator to simplify the process (just as yield simplifies iterators in 0x01):
from contextlib import contextmanager as cm

_G = {"counter": 99, "user": "admin"}

@cm
def ref():
    counter = _G["counter"]
    yield _G
    _G["counter"] = counter

with ref() as r1, ref() as r2:
    for _ in range(3):
        r1["counter"] -= 1
        print("COUNTER #ref1: {}".format(_G["counter"]))
        r2["counter"] += 2
        print("COUNTER #ref2: {}".format(_G["counter"]))

print("*" * 20)
print(_G)
Tips/2016-03-23-With-Context-Manager.ipynb
rainyear/pytips
mit
I converted this into a railroad-diagram form, which is (perhaps) more intuitive: replacement fields in the template are wrapped in {} and split by : into two parts, of which the second part, format_spec, will be discussed separately later. The first part has three usages: empty; a number indicating position; an identifier indicating a keyword argument. This is consistent with the categories of arguments in a function call:
print("{} {}".format("Hello", "World")) # is equal to... print("{0} {1}".format("Hello", "World")) print("{hello} {world}".format(hello="Hello", world="World")) print("{0}{1}{0}".format("H", "e"))
Tips/2016-03-18-String-Format.ipynb
rainyear/pytips
mit
In addition, just as mentioned in 0x05 Function Arguments and Unpacking, unpacking can also be used directly in format():
print("{lang}.{suffix}".format(**{"lang": "Python", "suffix": "py"})) print("{} {}".format(*["Python", "Rocks"]))
Tips/2016-03-18-String-Format.ipynb
rainyear/pytips
mit
Within the template you can also access attributes or values of a variable via .identifier and [key] (note that "{}{}" is equivalent to "{0}{1}"):
data = {'name': 'Python', 'score': 100}
print("Name: {0[name]}, Score: {0[score]}".format(data))  # no quotes needed around the key

langs = ["Python", "Ruby"]
print("{0[0]} vs {0[1]}".format(langs))

print("\n====\nHelp(format):\n {.__doc__}".format(str.format))
Tips/2016-03-18-String-Format.ipynb
rainyear/pytips
mit
Forced conversion: you can force a conversion of the substituted value via ! + r|s|a:

- "{!r}" calls repr() on the variable
- "{!s}" calls str() on the variable
- "{!a}" calls ascii() on the variable

Format: the part after the final : defines the output style. align sets the alignment direction and is usually used together with width, while fill is the padding character (space by default):
for align, text in zip("<^>", ["left", "center", "right"]):
    print("{:{fill}{align}16}".format(text, fill=align, align=align))

print("{:0=10}".format(100))  # = is only allowed for numbers
Tips/2016-03-18-String-Format.ipynb
rainyear/pytips
mit
As you can see, the style settings can themselves contain nested {}, but they must be specified by keyword and can only be nested one level deep. Next come the sign options: +|-|' ' specify whether a number requires an explicit sign (where the space means that positive numbers show no + but keep one leading space):
print("{0:+}\n{1:-}\n{0: }".format(3.14, -3.14))
Tips/2016-03-18-String-Format.ipynb
rainyear/pytips
mit
# indicates whether numbers in special formats (binary, hexadecimal, etc.) should carry a prefix symbol; , likewise indicates whether a thousands separator should be used when displaying numbers; 0 is equivalent to the earlier {:0=}, right-aligned and zero-padded:
print("Binary: {0:b} => {0:#b}".format(3)) print("Large Number: {0:} => {0:,}".format(1.25e6)) print("Padding: {0:16} => {0:016}".format(3))
Tips/2016-03-18-String-Format.ipynb
rainyear/pytips
mit
The last two are the familiar decimal precision .n and the format type; here are just a few examples, see the documentation for details:
from math import pi
print("pi = {pi:.2}, also = {pi:.7}".format(pi=pi))
Tips/2016-03-18-String-Format.ipynb
rainyear/pytips
mit
Integer
for t in "b c d #o #x #X n".split(): print("Type {0:>2} of {1} shows: {1:{t}}".format(t, 97, t=t))
Tips/2016-03-18-String-Format.ipynb
rainyear/pytips
mit
Float
for t, n in zip("eEfFgGn%", [12345, 12345, 1.3, 1.3, 1, 2, 3.14, 0.985]): print("Type {} shows: {:.2{t}}".format(t, n, t=t))
Tips/2016-03-18-String-Format.ipynb
rainyear/pytips
mit
String (default)
try: print("{:s}".format(123)) except: print("{}".format(456))
Tips/2016-03-18-String-Format.ipynb
rainyear/pytips
mit
Task #1 For this first task, we import the csv file into a variable called data. We use a lambda function that allows the importer to convert the timestamp strings into datetime objects:
import numpy as np
import datetime as dt

anewdate = '2014/11/10 17:34:28'
dateConverter = lambda d: dt.datetime.strptime(d, '%Y/%m/%d %H:%M:%S')
data = np.genfromtxt('../../../data/campusDemand.csv', delimiter=",", names=True,
                     dtype=('a255', type(dt), float,), converters={1: dateConverter})
data[0]
data['Point_name']
lectures/Lecture5_Assignment2-2014-ReDo.ipynb
keylime1/courses_12-752
mit
To make sure that the import succeeded, we print the contents of the variable. Also, because we want to make sure the full meter names appear in the printed output, we modify Numpy's printoptions using the method np.set_printoptions:
np.set_printoptions(threshold=8) # make sure all the power meter names will be printed
lectures/Lecture5_Assignment2-2014-ReDo.ipynb
keylime1/courses_12-752
mit
Task #2 To find the unique number of point names, we use the unique function from Numpy, and apply it to the 'Point_name' column in data:
pointNames = np.unique(data['Point_name'])
print("There are {} unique meters.".format(pointNames.shape[0]))
lectures/Lecture5_Assignment2-2014-ReDo.ipynb
keylime1/courses_12-752
mit
Task #3 We now print the contents of the pointNames array:
print(pointNames)

#extractedData = np.extract(data['Point_name']==pointNames[6],data)
plt.plot(data['Time'][np.where(data['Point_name']==pointNames[0])], 'rd')
lectures/Lecture5_Assignment2-2014-ReDo.ipynb
keylime1/courses_12-752
mit
Task #4 To count the number of samples present on each power meter, there are many ways to achieve it. For instance, we can use an iterator to loop over all pointNames and create a list of tuples in the process (this is formally called a List Comprehension). Every tuple will then contain two elements: the meter name, and the number of samples in it (see the sketch below). Task #5 First, we can use another List Comprehension to iterate over the point names and create a new list whose elements are in turn tuples with the indices of the samples corresponding to each meter:
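A minimal sketch (my own completion) of the Task #4 list comprehension described above:

```python
# One (meter, sample_count) tuple per meter.
sample_counts = [(meter, np.sum(data['Point_name'] == meter)) for meter in pointNames]
```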
idx = [np.where(data['Point_name']==meter) for meter in pointNames]
print("idx is now a {0} of {1:d} items.".format(type(idx), len(idx)))
print("Each item in idx is of {0}.".format(type(idx[0])))

[(meter, (data[idxItem]['Time'][-1] - data[idxItem]['Time'][0]).days) for meter, idxItem in zip(pointNames, idx)]
lectures/Lecture5_Assignment2-2014-ReDo.ipynb
keylime1/courses_12-752
mit
And then use yet another list comprehension to calculate the differences between the first and last timestamp:
help(zip)
lectures/Lecture5_Assignment2-2014-ReDo.ipynb
keylime1/courses_12-752
mit
Task #6 For this task, we are going to directly take the difference between any two consecutive datetime objects and display the result in terms of, say, number of seconds elapsed between these timestamps. Before we do this, though, it is useful to plot the timestamps to figure out if there are discontinuities that we can visually see:
fig = plt.figure(figsize=(20,30))  # A 20 inch x 30 inch figure box
### What else?
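One possible completion of the cell above (a sketch; the notebook leaves this as an exercise): plot each meter's timestamps so that gaps show up as discontinuities in an otherwise straight line:

```python
# One subplot per meter: timestamps vs. sample index; gaps appear as jumps.
for i, meter in enumerate(pointNames):
    plt.subplot(len(pointNames), 1, i+1)
    plt.plot(data['Time'][np.where(data['Point_name']==meter)], '.')
    plt.ylabel(str(meter))
plt.show()
```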
lectures/Lecture5_Assignment2-2014-ReDo.ipynb
keylime1/courses_12-752
mit
As you may have seen, gaps were easily identifiable as discontinuities in the lines that were plotted. If no gaps existed, the plot would be a straight line. But now let's get back to solving this using exact numbers... First, you need to know that applying the difference operator (-) on two datetime objects results in a timedelta object. These objects (timedelta) describe time differences in terms of number of days, seconds and microseconds (see the link above for more details). Because of this, we can quickly convert any timedelta object (say dt) into the number of seconds by doing:

<pre>
dt.days*3600*24 + dt.seconds + dt.microseconds/1000000
</pre>

In this case, however, our timestamps do not contain information about the microseconds, so we will skip that part of the conversion. Using this knowledge, we can create a list of lists (a nested list) in a similar manner as we've done before (i.e. using list comprehensions), and in it store the timedeltas in seconds for each meter. In other words, the outer list is a list of the same length as pointNames, and each element is a list of timedeltas for the corresponding meter. One more thing comes in handy for this task: the np.diff function, which takes an array (or a list) and returns the difference between any two consecutive items of the list. Now, in a single line of code we can get the nested list we talked about:
# One possible completion (sketch): np.diff over each meter's timestamps gives
# a list (one entry per meter) of arrays of timedelta objects.
delta_t = [np.diff(data['Time'][i]) for i in idx]
lectures/Lecture5_Assignment2-2014-ReDo.ipynb
keylime1/courses_12-752
mit
Now we need to be able to print out the exact times during which there are gaps. We will define gaps to be any timedelta that is longer than the median timedelta for a meter. We will achieve this as follows (a sketch follows the code cell below):

- first we will create a for loop to iterate over every item in the list delta_t (which means we will iterate over all meters)
- then, inside the loop, we will calculate the median value of delta_t for each meter
- following this, we will find the indices of delta_t where its value is greater than the median
- lastly, we will iterate over all the indices found in the previous step and print out their values
np.set_printoptions(threshold=np.nan)  # in newer NumPy versions use sys.maxsize instead of np.nan
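A sketch of the loop described above (assuming delta_t from the previous task, and converting timedeltas to seconds with the formula given earlier):

```python
for meter, deltas in zip(pointNames, delta_t):
    # timedeltas in seconds (these timestamps carry no microseconds)
    secs = np.array([d.days*24*3600 + d.seconds for d in deltas])
    gaps = np.where(secs > np.median(secs))[0]
    for g in gaps:
        print(meter, secs[g])
```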
lectures/Lecture5_Assignment2-2014-ReDo.ipynb
keylime1/courses_12-752
mit
Task #7 First, we will define a new variable containing the weekday for each of the timestamps.
wd = lambda d: d.weekday()
weekDays = np.array(list(map(wd, data['Time'])))  # list() needed in Python 3, where map returns an iterator

Monday = data[np.where(weekDays==0)]
Tuesday = data[np.where(weekDays==1)]
Wednesday = data[np.where(weekDays==2)]
Thursday = data[np.where(weekDays==3)]
Friday = data[np.where(weekDays==4)]
Saturday = data[np.where(weekDays==5)]
Sunday = data[np.where(weekDays==6)]
lectures/Lecture5_Assignment2-2014-ReDo.ipynb
keylime1/courses_12-752
mit
Then we can do logical indexing to segment the data:
plt.plot(Sunday['Time'][np.where(Sunday['Point_name']==pointNames[0])],
         Sunday['Value'][np.where(Sunday['Point_name']==pointNames[0])], 'rd')
lectures/Lecture5_Assignment2-2014-ReDo.ipynb
keylime1/courses_12-752
mit
Task #8 In this task we basically use two for loops and the subplot functionality of PyPlot to visualize the data contained in the variables we declared above. The main trick is that we need to create a time index that only contains information about the hours, minutes and seconds (i.e. it completely disregards the exact day of the measurement) so that all of the measurements can be displayed within a single 24-hour period.
Days = ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday']
All = [Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday]  # list of the day-indexed arrays defined above

fig = plt.figure(figsize=(20,20))
for i in range(len(pointNames)):   # iterate over meters
    for j in range(7):             # iterate over days of the week
        plt.subplot(7, 7, i*7+j+1)
        # Data from the day being plotted = All[j]
        # Data from the meter being plotted = All[j][All[j]['Point_name']==pointNames[i]]
        time = np.array([t.hour*3600 + t.minute*60 + t.second
                         for t in All[j][All[j]['Point_name']==pointNames[i]]['Time']])
        # plot the power vs the hours in a day
        plt.plot(time/3600., All[j][All[j]['Point_name']==pointNames[i]]['Value'], '.')
        if i==6:
            plt.xlabel('hours in a day')
        if j==0:
            plt.ylabel(pointNames[i].split('-')[0]+'\n'+pointNames[i].split('-')[1])
        if i==0:
            plt.title(Days[j])
fig.tight_layout()
plt.show()
lectures/Lecture5_Assignment2-2014-ReDo.ipynb
keylime1/courses_12-752
mit
Using the Embedding layer Keras makes it easy to use word embeddings. Take a look at the Embedding layer. The Embedding layer can be understood as a lookup table that maps from integer indices (which stand for specific words) to dense vectors (their embeddings). The dimensionality (or width) of the embedding is a parameter you can experiment with to see what works well for your problem, much in the same way you would experiment with the number of neurons in a Dense layer.
# Embed a 1,000 word vocabulary into 5 dimensions.
# One possible solution (sketch):
embedding_layer = tf.keras.layers.Embedding(1000, 5)
courses/machine_learning/deepdive2/text_classification/labs/word_embeddings.ipynb
GoogleCloudPlatform/training-data-analyst
apache-2.0
Create a classification model Use the Keras Sequential API to define the sentiment classification model. In this case it is a "Continuous bag of words" style model. * The TextVectorization layer transforms strings into vocabulary indices. You have already initialized vectorize_layer as a TextVectorization layer and built its vocabulary by calling adapt on text_ds. Now vectorize_layer can be used as the first layer of your end-to-end classification model, feeding transformed strings into the Embedding layer. * The Embedding layer takes the integer-encoded vocabulary and looks up the embedding vector for each word-index. These vectors are learned as the model trains. The vectors add a dimension to the output array. The resulting dimensions are: (batch, sequence, embedding). The GlobalAveragePooling1D layer returns a fixed-length output vector for each example by averaging over the sequence dimension. This allows the model to handle input of variable length, in the simplest way possible. The fixed-length output vector is piped through a fully-connected (Dense) layer with 16 hidden units. The last layer is densely connected with a single output node. Caution: This model doesn't use masking, so the zero-padding is used as part of the input and hence the padding length may affect the output. To fix this, see the masking and padding guide.
embedding_dim = 16

# One possible solution (sketch); assumes vocab_size and vectorize_layer were defined earlier.
model = tf.keras.Sequential([
    vectorize_layer,
    tf.keras.layers.Embedding(vocab_size, embedding_dim, name="embedding"),
    tf.keras.layers.GlobalAveragePooling1D(),
    tf.keras.layers.Dense(16, activation='relu'),
    tf.keras.layers.Dense(1)
])
courses/machine_learning/deepdive2/text_classification/labs/word_embeddings.ipynb
GoogleCloudPlatform/training-data-analyst
apache-2.0
Compile and train the model Create a tf.keras.callbacks.TensorBoard.
# One possible solution (sketch): log training metrics to a local "logs" directory.
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="logs")
courses/machine_learning/deepdive2/text_classification/labs/word_embeddings.ipynb
GoogleCloudPlatform/training-data-analyst
apache-2.0
Compile and train the model using the Adam optimizer and BinaryCrossentropy loss.
# One possible solution (sketch): compile with Adam and BinaryCrossentropy, as described above.
model.compile(optimizer='adam',
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              metrics=['accuracy'])

model.fit(
    train_ds,
    validation_data=val_ds,
    epochs=10,
    callbacks=[tensorboard_callback])
courses/machine_learning/deepdive2/text_classification/labs/word_embeddings.ipynb
GoogleCloudPlatform/training-data-analyst
apache-2.0
Run the following command in Cloud Shell: <code>gcloud beta compute ssh --zone &lt;instance-zone&gt; &lt;notebook-instance-name&gt; --project &lt;project-id&gt; -- -L 8081:localhost:8081</code> Make sure to replace &lt;instance-zone&gt;, &lt;notebook-instance-name&gt; and &lt;project-id&gt;. In Cloud Shell, click Web Preview > Change Port and insert port number 8081. Click Change and Preview to open the TensorBoard. To quit the TensorBoard, click Kernel > Interrupt kernel. Retrieve the trained word embeddings and save them to disk Next, retrieve the word embeddings learned during training. The embeddings are weights of the Embedding layer in the model. The weights matrix is of shape (vocab_size, embedding_dimension). Obtain the weights from the model using get_layer() and get_weights(). The get_vocabulary() function provides the vocabulary to build a metadata file with one token per line.
# One possible solution (sketch), assuming the Embedding layer was named "embedding":
weights = model.get_layer('embedding').get_weights()[0]
vocab = vectorize_layer.get_vocabulary()
courses/machine_learning/deepdive2/text_classification/labs/word_embeddings.ipynb
GoogleCloudPlatform/training-data-analyst
apache-2.0
<a id='step1a'></a> A. Generating the first scene object This is a standard fixed-tilt setup for one hour. Gencumsky could be used too for the whole year. The key here is that we are setting in sceneDict the variable appendRadfile to true.
demo = RadianceObj("tutorial_7", path=testfolder)
demo.setGround(0.62)
epwfile = demo.getEPW(lat=37.5, lon=-77.6)
metdata = demo.readWeatherFile(epwfile, coerce_year=2001)
fullYear = True
timestamp = metdata.datetime.index(pd.to_datetime('2001-06-17 13:0:0 -5'))  # Noon, June 17th
demo.gendaylit(timestamp)
module_type = 'test-moduleA'
mymodule = demo.makeModule(name=module_type, y=1, x=1.7)
sceneDict = {'tilt':10, 'pitch':1.5, 'clearance_height':0.2, 'azimuth':180,
             'nMods': 5, 'nRows': 2, 'appendRadfile':True}
sceneObj1 = demo.makeScene(mymodule, sceneDict)
docs/tutorials/7 - Advanced topics - Multiple SceneObjects Example.ipynb
NREL/bifacial_radiance
bsd-3-clause
Checking the values of the scene object that was just created
print ("SceneObj1 modulefile: %s" % sceneObj1.modulefile) print ("SceneObj1 SceneFile: %s" %sceneObj1.radfiles) print ("SceneObj1 GCR: %s" % round(sceneObj1.gcr,2)) print ("FileLists: \n %s" % demo.getfilelist())
docs/tutorials/7 - Advanced topics - Multiple SceneObjects Example.ipynb
NREL/bifacial_radiance
bsd-3-clause
<a id='step1b'></a> B. Generating the second scene object. Creating a different Scene. Same Module, different values. Notice we are passing a different originx and originy to displace the center of this new sceneObj to that location.
sceneDict2 = {'tilt':30, 'pitch':5, 'clearance_height':1, 'azimuth':180,
              'nMods': 5, 'nRows': 1, 'originx': 0, 'originy': 3.5, 'appendRadfile':True}
module_type2 = 'test-moduleB'
mymodule2 = demo.makeModule(name=module_type2, x=1, y=1.6, numpanels=2, ygap=0.15)
sceneObj2 = demo.makeScene(mymodule2, sceneDict2)

# Checking values for both scenes after creating new SceneObj
print("SceneObj1 modulefile: %s" % sceneObj1.modulefile)
print("SceneObj1 SceneFile: %s" % sceneObj1.radfiles)
print("SceneObj1 GCR: %s" % round(sceneObj1.gcr, 2))
print("\nSceneObj2 modulefile: %s" % sceneObj2.modulefile)
print("SceneObj2 SceneFile: %s" % sceneObj2.radfiles)
print("SceneObj2 GCR: %s" % round(sceneObj2.gcr, 2))

# getfilelist should have info for the rad file created by BOTH scene objects.
print("NEW FileLists: \n %s" % demo.getfilelist())
docs/tutorials/7 - Advanced topics - Multiple SceneObjects Example.ipynb
NREL/bifacial_radiance
bsd-3-clause
<a id='step2'></a> 2. Add a Marker at the Origin (coordinates 0,0) for help with visualization Creating "markers" for the geometry is useful for orienting oneself when doing sanity checks (for example, marking where the (0,0) or (5,0) coordinate is). <div class="alert alert-warning"> Note that if you analyze a module that intersects with the marker, some of the sensors will return wrong values. To perform valid analysis, do so without markers, as they are 'real' objects in your scene. </div>
# NOTE: offsetting translation by 0.1 so the center of the marker (with sides of 0.2) is at the desired coordinate.
name = 'Post1'
text = '! genbox black originMarker 0.2 0.2 1 | xform -t -0.1 -0.1 0'
customObject = demo.makeCustomObject(name, text)
demo.appendtoScene(sceneObj1.radfiles, customObject, '!xform -rz 0')
docs/tutorials/7 - Advanced topics - Multiple SceneObjects Example.ipynb
NREL/bifacial_radiance
bsd-3-clause
<a id='step3'></a> 3. Combine all scene Objects into one OCT file & Visualize Marking this as its own step because this is the step that joins our scene objects 1, 2 and the appended post. Run makeOct to build the scene with both scene objects AND the marker in it, plus the ground and the sky.
octfile = demo.makeOct(demo.getfilelist())
docs/tutorials/7 - Advanced topics - Multiple SceneObjects Example.ipynb
NREL/bifacial_radiance
bsd-3-clause
At this point you should be able to go into a command window (cmd.exe) and check the geometry. Example: rvu -vf views\front.vp -e .01 -pe 0.3 -vp 1 -7.5 12 tutorial_7.oct
## Uncomment the line below to run rvu from the Jupyter notebook instead of your terminal.
## The simulation will pause until you close the rvu window.

#!rvu -vf views\front.vp -e .01 -pe 0.3 -vp 1 -7.5 12 tutorial_7.oct
docs/tutorials/7 - Advanced topics - Multiple SceneObjects Example.ipynb
NREL/bifacial_radiance
bsd-3-clause
It should look something like this:

<a id='step4'></a> 4. Analysis for Each sceneObject

A sceneDict is saved for each scene. When calling the analysis, you should reference the scene object you want.
sceneObj1.sceneDict
sceneObj2.sceneDict

analysis = AnalysisObj(octfile, demo.basename)
frontscan, backscan = analysis.moduleAnalysis(sceneObj1)
frontdict, backdict = analysis.analysis(octfile, "FirstObj", frontscan, backscan)

# compare the back vs front irradiance
print('Annual bifacial ratio First Set of Panels: %0.3f ' % (np.mean(analysis.Wm2Back) / np.mean(analysis.Wm2Front)))
docs/tutorials/7 - Advanced topics - Multiple SceneObjects Example.ipynb
NREL/bifacial_radiance
bsd-3-clause
Let's do a sanity check for the first object: since we didn't pass any desired module, it should grab the center module of the center row (rounding down). For 2 rows and 5 modules, that is row 1, module 3 (indexed at 0): a2.0.a0.PVmodule...
print(frontdict['x'])
print("")
print(frontdict['y'])
print("")
print(frontdict['mattype'])
docs/tutorials/7 - Advanced topics - Multiple SceneObjects Example.ipynb
NREL/bifacial_radiance
bsd-3-clause
Let's analyze a module in sceneobject 2 now. Remember we can specify which module/row we want. We only have one row in this Object though.
analysis2 = AnalysisObj(octfile, demo.basename)
modWanted = 4
rowWanted = 1
sensorsy = 4
frontscan, backscan = analysis2.moduleAnalysis(sceneObj2, modWanted=modWanted, rowWanted=rowWanted, sensorsy=sensorsy)
frontdict2, backdict2 = analysis2.analysis(octfile, "SecondObj", frontscan, backscan)

print('Annual bifacial ratio Second Set of Panels: %0.3f ' % (np.mean(analysis2.Wm2Back) / np.mean(analysis2.Wm2Front)))
docs/tutorials/7 - Advanced topics - Multiple SceneObjects Example.ipynb
NREL/bifacial_radiance
bsd-3-clause
Sanity check for the second object. Since we didn't pass any desired module, it should grab the center module of the center row (rounding down). For 1 row, that is row 0, module 4 (indexed at 0): a3.0.a0.Longi and a3.0.a1.Longi, since it is a 2-UP system.
print ("x coordinate points:" , frontdict2['x']) print ("") print ("y coordinate points:", frontdict2['y']) print ("") print ("Elements intersected at each point: ", frontdict2['mattype'])
docs/tutorials/7 - Advanced topics - Multiple SceneObjects Example.ipynb
NREL/bifacial_radiance
bsd-3-clause
PyTorch The fastai deep learning library uses PyTorch, a Python framework for dynamic neural networks with GPU acceleration, which was released by Facebook's AI team. PyTorch has two overlapping, yet distinct, purposes. As described in the PyTorch documentation: <img src="images/what_is_pytorch.png" alt="pytorch" style="width: 80%"/> The neural network functionality of PyTorch is built on top of the Numpy-like functionality for fast matrix computations on a GPU. Although the neural network purpose receives way more attention, both are very useful. We'll implement a neural net from scratch today using PyTorch. Further learning: If you are curious to learn what dynamic neural networks are, you may want to watch this talk by Soumith Chintala, Facebook AI researcher and core PyTorch contributor. If you want to learn more PyTorch, you can try this introductory tutorial or this tutorial to learn by examples. About GPUs Graphical processing units (GPUs) allow for matrix computations to be done with much greater speed, as long as you have a library such as PyTorch that takes advantage of them. Advances in GPU technology in the last 10-20 years have been a key part of why neural networks are proving so much more powerful now than they did a few decades ago. You may own a computer that has a GPU which can be used. For the many people that either don't have a GPU (or have a GPU which can't be easily accessed by Python), there are a few different options: Don't use a GPU: For the sake of this tutorial, you don't have to use a GPU, although some computations will be slower. The only change needed to the code is to remove .cuda() wherever it appears. Use crestle, through your browser: Crestle is a service that gives you an already set up cloud service with all the popular scientific and deep learning frameworks pre-installed and configured to run on a GPU in the cloud. It is easily accessed through your browser. New users get 10 hours and 1 GB of storage for free. After this, GPU usage is 34 cents per hour. I recommend this option to those who are new to AWS or new to using the console. Set up an AWS instance through your console: You can create an AWS instance with a GPU by following the steps in this fast.ai setup lesson. AWS charges 90 cents per hour for this. Data About The Data Today we will be working with MNIST, a classic data set of hand-written digits. Solutions to this problem are used by banks to automatically recognize the amounts on checks, and by the postal service to automatically recognize zip codes on mail. <img src="images/mnist.png" alt="" style="width: 60%"/> A matrix can represent an image, by creating a grid where each entry corresponds to a different pixel. <img src="images/digit.gif" alt="digit" style="width: 55%"/> (Source: Adam Geitgey) Download Let's download, unzip, and format the data.
path = '../data/'
import os
os.makedirs(path, exist_ok=True)

URL = 'http://deeplearning.net/data/mnist/'
FILENAME = 'mnist.pkl.gz'

def load_mnist(filename):
    return pickle.load(gzip.open(filename, 'rb'), encoding='latin-1')

get_data(URL+FILENAME, path+FILENAME)
((x, y), (x_valid, y_valid), _) = load_mnist(path+FILENAME)
data_science/courses/temp/tutorials/linalg_pytorch.ipynb
jmhsi/justin_tinker
apache-2.0
Normalize Many machine learning algorithms behave better when the data is normalized, that is when the mean is 0 and the standard deviation is 1. We will subtract off the mean and standard deviation from our training set in order to normalize the data:
mean = x.mean()
std = x.std()
x = (x-mean)/std
x.mean(), x.std()
data_science/courses/temp/tutorials/linalg_pytorch.ipynb
jmhsi/justin_tinker
apache-2.0
Look at the data In any sort of data science work, it's important to look at your data, to make sure you understand the format, how it's stored, what type of values it holds, etc. To make it easier to work with, let's reshape it into 2d images from the flattened 1d format. Helper methods
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt

def show(img, title=None):
    plt.imshow(img, interpolation='none', cmap="gray")
    if title is not None:
        plt.title(title)

def plots(ims, figsize=(12,6), rows=2, titles=None):
    f = plt.figure(figsize=figsize)
    cols = len(ims)//rows
    for i in range(len(ims)):
        sp = f.add_subplot(rows, cols, i+1)
        sp.axis('Off')
        if titles is not None:
            sp.set_title(titles[i], fontsize=16)
        plt.imshow(ims[i], interpolation='none', cmap='gray')
data_science/courses/temp/tutorials/linalg_pytorch.ipynb
jmhsi/justin_tinker
apache-2.0
The Most Important Machine Learning Concepts Functions, parameters, and training A function takes inputs and returns outputs. For instance, $f(x) = 3x + 5$ is an example of a function. If we input $2$, the output is $3\times 2 + 5 = 11$, or if we input $-1$, the output is $3\times -1 + 5 = 2$. Functions have parameters. The above function $f$ is $ax + b$, with parameters a and b set to $a=3$ and $b=5$. Machine learning is often about learning the best values for those parameters. For instance, suppose we have the data points on the chart below. What values should we choose for $a$ and $b$? <img src="images/sgd2.gif" alt="" style="width: 70%"/> In the above gif (from the fast.ai Practical Deep Learning for Coders course, intro to SGD notebook), an algorithm called stochastic gradient descent is being used to learn the best parameters to fit the line to the data (note: in the gif, the algorithm is stopping before the absolute best parameters are found). This process is called training or fitting. Most datasets will not be well-represented by a line. We could use a more complicated function, such as $g(x) = ax^2 + bx + c + \sin d$. Now we have 4 parameters to learn: $a$, $b$, $c$, and $d$. This function is more flexible than $f(x) = ax + b$ and will be able to accurately model more datasets. Neural networks take this to an extreme, and are infinitely flexible. They often have thousands, or even hundreds of thousands of parameters. However the core idea is the same as above. The neural network is a function, and we will learn the best parameters for modeling our data. Training & Validation data sets Possibly the most important idea in machine learning is that of having separate training & validation data sets. As motivation, suppose you don't divide up your data, but instead use all of it. And suppose you have lots of parameters: your model may end up fitting the training points themselves rather than the underlying pattern. This is called over-fitting. A validation set helps prevent this problem. <img src="images/overfitting2.png" alt="" style="width: 70%"/> <center> Underfitting and Overfitting </center> The error for the pictured data points is lowest for the model on the far right (the blue curve passes through the red points almost perfectly), yet it's not the best choice. Why is that? If you were to gather some new data points, they most likely would not be on that curve in the graph on the right, but would be closer to the curve in the middle graph. This illustrates how using all our data can lead to overfitting. Neural Net (with torch.nn) Imports
from fastai.metrics import *
from fastai.model import *
from fastai.dataset import *
from fastai.core import *
import torch.nn as nn
data_science/courses/temp/tutorials/linalg_pytorch.ipynb
jmhsi/justin_tinker
apache-2.0
Neural networks We will use fastai's ImageClassifierData, which holds our training and validation sets and will provide batches of that data in a form ready for use by a PyTorch model.
md = ImageClassifierData.from_arrays(path, (x,y), (x_valid, y_valid))
data_science/courses/temp/tutorials/linalg_pytorch.ipynb
jmhsi/justin_tinker
apache-2.0
We will begin with the highest level abstraction: using a neural net defined by PyTorch's Sequential class.
net = nn.Sequential(
    nn.Linear(28*28, 256),
    nn.ReLU(),
    nn.Linear(256, 10)
).cuda()
data_science/courses/temp/tutorials/linalg_pytorch.ipynb
jmhsi/justin_tinker
apache-2.0
Each input is a vector of size $28\times 28$ pixels and our output is of size $10$ (since there are 10 digits: 0, 1, ..., 9). We use the output of the final layer to generate our predictions. Often for classification problems (like MNIST digit classification), the final layer has the same number of outputs as there are classes. In that case, this is 10: one for each digit from 0 to 9. These can be converted to comparative probabilities. For instance, it may be determined that a particular hand-written image is 80% likely to be a 4, 18% likely to be a 9, and 2% likely to be a 3. In our case, we are not interested in viewing the probabilities, and just want to see what the most likely guess is. Layers Sequential defines layers of our network, so let's talk about layers. Neural networks consist of linear layers alternating with non-linear layers. This creates functions which are incredibly flexible. Deeper layers are able to capture more complex patterns. Layer 1 of a convolutional neural network: <img src="images/zeiler1.png" alt="pytorch" style="width: 40%"/> <center> Matthew Zeiler and Rob Fergus </center> Layer 2: <img src="images/zeiler2.png" alt="pytorch" style="width: 90%"/> <center> Matthew Zeiler and Rob Fergus </center> Deeper layers can learn about more complicated shapes (although we are only using 2 layers in our network): <img src="images/zeiler4.png" alt="pytorch" style="width: 90%"/> <center> Matthew Zeiler and Rob Fergus </center> Training the network Next we will set a few inputs for our fit method: - Optimizer: algorithm for finding the minimum. Typically these are variations on stochastic gradient descent, and involve taking a step in what appears to be the right direction based on the change in the function. - Loss: what function is the optimizer trying to minimize? We need to say how we're defining the error. - Metrics: other calculations you want printed out as you train
loss = F.cross_entropy
metrics = [accuracy]
opt = optim.Adam(net.parameters())
data_science/courses/temp/tutorials/linalg_pytorch.ipynb
jmhsi/justin_tinker
apache-2.0
Fitting is the process by which the neural net learns the best parameters for the dataset.
fit(net, md, epochs=1, crit=loss, opt=opt, metrics=metrics)
data_science/courses/temp/tutorials/linalg_pytorch.ipynb
jmhsi/justin_tinker
apache-2.0
GPUs are great at handling lots of data at once (otherwise you don't get the performance benefit). We break the data up into batches; the batch size specifies how many samples from our dataset we want to send to the GPU at a time. The fastai library defaults to a batch size of 64. On each iteration of the training loop, the error on 1 batch of data will be calculated, and the optimizer will update the parameters based on that. An epoch is completed once each data sample has been used once in the training loop. Now that we have the parameters for our model, we can make predictions on our validation set.
preds = predict(net, md.val_dl)
preds = preds.max(1)[1]
data_science/courses/temp/tutorials/linalg_pytorch.ipynb
jmhsi/justin_tinker
apache-2.0
Let's see how some of our predictions look!
plots(x_imgs[:8], titles=preds[:8])
data_science/courses/temp/tutorials/linalg_pytorch.ipynb
jmhsi/justin_tinker
apache-2.0
These predictions are pretty good! Coding the Neural Net ourselves Recall that above we used PyTorch's Sequential to define a neural network with a linear layer, a non-linear layer (ReLU), and then another linear layer.
# Our code from above
net = nn.Sequential(
    nn.Linear(28*28, 256),
    nn.ReLU(),
    nn.Linear(256, 10)
).cuda()
data_science/courses/temp/tutorials/linalg_pytorch.ipynb
jmhsi/justin_tinker
apache-2.0
It turns out that Linear is defined by a matrix multiplication and then an addition. Let's try defining this ourselves. This will allow us to see exactly where matrix multiplication is used (we will dive into how matrix multiplication works in the next section). Just as Numpy has np.matmul for matrix multiplication (in Python 3, this is equivalent to the @ operator), PyTorch has torch.matmul. A PyTorch module class has two key parts: a constructor (which defines the parameters) and a forward method (which describes how to calculate predictions using those parameters). The method forward describes how the neural net converts inputs to outputs. In PyTorch, the optimizer knows to try to optimize any attribute of type Parameter.
def get_weights(*dims):
    return nn.Parameter(torch.randn(*dims)/dims[0])

class SimpleMnist(nn.Module):
    def __init__(self):
        super().__init__()
        self.l1_w = get_weights(28*28, 256)  # Layer 1 weights
        self.l1_b = get_weights(256)         # Layer 1 bias
        self.l2_w = get_weights(256, 10)     # Layer 2 weights
        self.l2_b = get_weights(10)          # Layer 2 bias

    def forward(self, x):
        x = x.view(x.size(0), -1)
        x = torch.matmul(x, self.l1_w) + self.l1_b  # Linear Layer
        x = x * (x > 0).float()                     # Non-linear Layer
        x = torch.matmul(x, self.l2_w) + self.l2_b  # Linear Layer
        return x
data_science/courses/temp/tutorials/linalg_pytorch.ipynb
jmhsi/justin_tinker
apache-2.0
We create our neural net and the optimizer. (We will use the same loss and metrics from above).
net2 = SimpleMnist().cuda()
opt = optim.Adam(net2.parameters())
fit(net2, md, epochs=1, crit=loss, opt=opt, metrics=metrics)
data_science/courses/temp/tutorials/linalg_pytorch.ipynb
jmhsi/justin_tinker
apache-2.0
Now we can check our predictions:
preds = predict(net2, md.val_dl).max(1)[1]
plots(x_imgs[:8], titles=preds[:8])
data_science/courses/temp/tutorials/linalg_pytorch.ipynb
jmhsi/justin_tinker
apache-2.0
What torch.matmul (matrix multiplication) is doing Now let's dig into what we were doing with torch.matmul: matrix multiplication. First, let's start with a simpler building block: broadcasting. Element-wise operations Broadcasting and element-wise operations are supported in the same way by both numpy and pytorch. Operators (+,-,*,/,>,<,==) are usually element-wise. Examples of element-wise operations:
a = np.array([10, 6, -4])
b = np.array([2, 8, 7])

a + b
a < b
data_science/courses/temp/tutorials/linalg_pytorch.ipynb
jmhsi/justin_tinker
apache-2.0
How are we able to do a > 0? 0 is being broadcast to have the same dimensions as a. Remember above when we normalized our dataset by subtracting the mean (a scalar) from the entire data set (a matrix) and dividing by the standard deviation (another scalar)? We were using broadcasting! Other examples of broadcasting with a scalar:
a + 1

m = np.array([[1, 2, 3], [4,5,6], [7,8,9]]); m

m * 2
data_science/courses/temp/tutorials/linalg_pytorch.ipynb
jmhsi/justin_tinker
apache-2.0
Broadcasting a vector to a matrix We can also broadcast a vector to a matrix:
c = np.array([10,20,30]); c

m + c
data_science/courses/temp/tutorials/linalg_pytorch.ipynb
jmhsi/justin_tinker
apache-2.0
Although numpy does this automatically, you can also use the broadcast_to method:
np.broadcast_to(c, (3,3))
c.shape
data_science/courses/temp/tutorials/linalg_pytorch.ipynb
jmhsi/justin_tinker
apache-2.0
The numpy expand_dims method lets us convert the 1-dimensional array c into a 2-dimensional array (although one of those dimensions has value 1).
np.expand_dims(c,0).shape
m + np.expand_dims(c,0)

np.expand_dims(c,1).shape
m + np.expand_dims(c,1)

np.broadcast_to(np.expand_dims(c,1), (3,3))
data_science/courses/temp/tutorials/linalg_pytorch.ipynb
jmhsi/justin_tinker
apache-2.0
Broadcasting Rules

When operating on two arrays, Numpy/PyTorch compares their shapes element-wise. It starts with the trailing dimensions, and works its way forward. Two dimensions are compatible when they are equal, or one of them is 1. Arrays do not need to have the same number of dimensions. For example, if you have a $256 \times 256 \times 3$ array of RGB values, and you want to scale each color in the image by a different value, you can multiply the image by a one-dimensional array with 3 values. Lining up the sizes of the trailing axes of these arrays according to the broadcast rules shows that they are compatible:

    Image  (3d array): 256 x 256 x 3
    Scale  (1d array):             3
    Result (3d array): 256 x 256 x 3

The numpy documentation includes several examples of what dimensions can and can not be broadcast together.

Matrix Multiplication

We are going to use broadcasting to define matrix multiplication.

Matrix-Vector Multiplication
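A quick check of the image-scaling example above (a sketch with random data):

```python
import numpy as np

image = np.random.rand(256, 256, 3)   # RGB image
scale = np.array([0.5, 1.0, 2.0])     # one multiplier per color channel
scaled = image * scale                # scale broadcasts across the trailing axis
scaled.shape                          # (256, 256, 3)
```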
m, c

m @ c  # np.matmul(m, c)
data_science/courses/temp/tutorials/linalg_pytorch.ipynb
jmhsi/justin_tinker
apache-2.0
We get the same answer using torch.matmul:
torch.matmul(torch.from_numpy(m), torch.from_numpy(c))
data_science/courses/temp/tutorials/linalg_pytorch.ipynb
jmhsi/justin_tinker
apache-2.0
The following is NOT matrix multiplication. What is it?
m * c
(m * c).sum(axis=1)

c
np.broadcast_to(c, (3,3))
data_science/courses/temp/tutorials/linalg_pytorch.ipynb
jmhsi/justin_tinker
apache-2.0
From a machine learning perspective, matrix multiplication is a way of creating features by saying how much we want to weight each input column. Different features are different weighted averages of the input columns. The website matrixmultiplication.xyz provides a nice visualization of matrix multiplication. Draw a picture
n = np.array([[10,40],[20,0],[30,-5]]); n

m @ n

(m * n[:,0]).sum(axis=1)
(m * n[:,1]).sum(axis=1)
data_science/courses/temp/tutorials/linalg_pytorch.ipynb
jmhsi/justin_tinker
apache-2.0
Homework: another use of broadcasting If you want to test your understanding of the above tutorial, I encourage you to work through it again, only this time use CIFAR 10, a dataset that consists of 32x32 color images in 10 different categories. Color images have an extra dimension, containing RGB values, compared to black & white images. <img src="images/cifar10.png" alt="" style="width: 70%"/> <center> (source: Cifar 10) </center> Fortunately, broadcasting will make it relatively easy to add this extra dimension (for color RGB), but you will have to make some changes to the code. Other applications of Matrix and Tensor Products Here are some other examples of where matrix multiplication arises. This material is taken from Chapter 1 of my Computational Linear Algebra course. Matrix-Vector Products: The matrix below gives the probabilities of moving from 1 health state to another in 1 year. If the current health states for a group are:

- 85% asymptomatic
- 10% symptomatic
- 5% AIDS
- 0% death

what will be the % in each health state in 1 year? <img src="images/markov_health.jpg" alt="floating point" style="width: 80%"/>(Source: Concepts of Markov Chains) Answer
import numpy as np #Exercise: Use Numpy to compute the answer to the above
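A sketch of one possible answer; the transition probabilities below are transcribed from the figure above, so treat them as illustrative:

```python
import numpy as np

# Rows: current state (asymptomatic, symptomatic, AIDS, death)
# Columns: state in 1 year; values transcribed from the figure.
A = np.array([[0.90, 0.07, 0.02, 0.01],
              [0.00, 0.93, 0.05, 0.02],
              [0.00, 0.00, 0.85, 0.15],
              [0.00, 0.00, 0.00, 1.00]])
x = np.array([0.85, 0.10, 0.05, 0.00])  # current health-state distribution
print(x @ A)                            # distribution after 1 year
```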
data_science/courses/temp/tutorials/linalg_pytorch.ipynb
jmhsi/justin_tinker
apache-2.0
Matrix-Matrix Products <img src="images/shop.png" alt="floating point" style="width: 100%"/>(Source: Several Simple Real-world Applications of Linear Algebra Tools) Answer
#Exercise: Use Numpy to compute the answer to the above
data_science/courses/temp/tutorials/linalg_pytorch.ipynb
jmhsi/justin_tinker
apache-2.0
4.5.1 UV coverage: UV tracks The objective of $\S$ 4.5.1 &#10549; and $\S$ 4.5.2 &#10142; is to give you a glimpse into the process of aperture synthesis. An interferometer measures components of the Fourier Transform of the sky by sampling the visibility function, $\mathcal{V}$. This collection of samples lives in ($u$, $v$, $w$) space, and is often projected onto the so-called $uv$-plane. In $\S$ 4.5.1 &#10549;, we will focus on the way the visibility function is sampled. This sampling is a function of the interferometer's configuration, the direction of the source and the observation time. In $\S$ 4.5.2 &#10142;, we will see how this sampling can be improved by using certain observing techniques. 4.5.1.1 The projected baseline with time: the $uv$ track A projected baseline is obtained from a baseline and a direction in the sky. It corresponds to the baseline as seen from the source, and its ($u$, $v$)-coordinates define the spatial frequency of the source's brightness distribution that the baseline measures. As the Earth rotates, the projected baseline and its corresponding spatial frequency vary slowly in time, generating a path in the $uv$-plane. We will now generate test cases to see what locus the path takes, and how it can be predicted depending on the baseline's geometry. 4.5.1.1.1 Baseline projection as seen from the source Let's generate one baseline from two antennas Ant$_1$ and Ant$_2$.
ant1 = np.array([-500e3, 500e3, 0])   # in m
ant2 = np.array([500e3, -500e3, +10]) # in m
4_Visibility_Space/4_5_1_uv_coverage_uv_tracks.ipynb
KshitijT/fundamentals_of_interferometry
gpl-2.0
Figure 4.5.1: A baseline located at +45$^\circ$ as seen from the sky. This plot is interactive and can be rotated in 3D to see different baseline projections, depending on the position of the source w.r.t. the physical baseline.

On the interactive plot above, we represent a baseline with a +45$^\circ$ orientation, i.e. aligned with the local south-west/north-east direction, as seen from the celestial sphere. By rotating the sphere westward, you can simulate the variation of the projected baseline as seen from a source in apparent motion on the celestial sphere.

4.5.1.1.2 Coordinates of the baseline in the ($u$,$v$,$w$) plane

We will now simulate an observation to study how a projected baseline will change with time. We will position this baseline at a South African latitude. We first need the expression of the physical baseline in a convenient reference frame, attached to the source in the sky. In $\S$ 4.2 &#10142;, we linked the equatorial coordinates of the baseline to the ($u$,$v$,$w$) coordinates through the transformation matrix:

\begin{equation}
\begin{pmatrix}
u\\
v\\
w
\end{pmatrix}
=
\frac{1}{\lambda}
\begin{pmatrix}
\sin H_0 & \cos H_0 & 0\\
-\sin \delta_0 \cos H_0 & \sin\delta_0\sin H_0 & \cos\delta_0\\
\cos \delta_0 \cos H_0 & -\cos\delta_0\sin H_0 & \sin\delta_0
\end{pmatrix}
\begin{pmatrix}
X\\
Y\\
Z
\end{pmatrix}
\end{equation}

<a id="vis:eq:451"></a> <!---\label{vis:eq:451}--->

\begin{equation}
\begin{bmatrix}
X\\
Y\\
Z
\end{bmatrix}
= |\mathbf{b}|
\begin{bmatrix}
\cos L_a \sin \mathcal{E} - \sin L_a \cos \mathcal{E} \cos \mathcal{A}\\
\cos \mathcal{E} \sin \mathcal{A}\\
\sin L_a \sin \mathcal{E} + \cos L_a \cos \mathcal{E} \cos \mathcal{A}
\end{bmatrix}
\end{equation}

Equation 4.5.1

This expresses the components of the baseline $\mathbf{b}$, given by its azimuth and elevation ($\mathcal{A}$, $\mathcal{E}$), in the equatorial ($X$,$Y$,$Z$) system.

4.5.1.1.2 Observation parameters

Let's define an arbitrary set of observation parameters to mimic a real observation.

Latitude of the baseline: $L_a=-30^\circ43'17.34''$
Declination of the observation: $\delta=-74^\circ39'37.481''$
Duration of the observation: $\Delta \text{HA}=[-4^\text{h},4^\text{h}]$
Time steps: 600
Frequency: 1420 MHz
# Observation parameters
c = 3e8                                          # Speed of light
f = 1420e6                                       # Frequency: 1420 MHz
lam = c/f                                        # Wavelength
L = (np.pi/180)*(-30 - 43.0/60 - 17.34/3600)     # Latitude of the baseline
dec = (np.pi/180)*(-74 - 39.0/60 - 37.481/3600)  # Declination of the observation
time_steps = 600                                 # Time steps
h = np.linspace(-4, 4, num=time_steps)*np.pi/12  # Hour angle window
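As a sketch (function and variable names of my own choosing), Equation 4.5.1 and the ($u$,$v$,$w$) transformation above can be combined to trace a $uv$ track for a baseline given by its length, azimuth and elevation:

```python
# Hypothetical helper implementing Equation 4.5.1 plus the (u,v,w) transformation.
def uv_track(b_len, A, E, L, dec, h, lam):
    X = b_len * (np.cos(L)*np.sin(E) - np.sin(L)*np.cos(E)*np.cos(A))
    Y = b_len * np.cos(E)*np.sin(A)
    Z = b_len * (np.sin(L)*np.sin(E) + np.cos(L)*np.cos(E)*np.cos(A))
    u = (np.sin(h)*X + np.cos(h)*Y) / lam
    v = (-np.sin(dec)*np.cos(h)*X + np.sin(dec)*np.sin(h)*Y + np.cos(dec)*Z) / lam
    w = (np.cos(dec)*np.cos(h)*X - np.cos(dec)*np.sin(h)*Y + np.sin(dec)*Z) / lam
    return u, v, w
```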
4_Visibility_Space/4_5_1_uv_coverage_uv_tracks.ipynb
KshitijT/fundamentals_of_interferometry
gpl-2.0
Below we sample our visibility plane on the $uv$-track derived in the first section, i.e. $V(u_t,v_t)$.
plt.subplot(121)
plt.imshow(zz.real, extent=[-1*(np.amax(np.abs(u_60)))-10, np.amax(np.abs(u_60))+10,
                            -1*(np.amax(abs(v_60)))-10, np.amax(abs(v_60))+10])
plt.plot(u_60, v_60, "k")
plt.xlim([-1*(np.amax(np.abs(u_60)))-10, np.amax(np.abs(u_60))+10])
plt.ylim(-1*(np.amax(abs(v_60)))-10, np.amax(abs(v_60))+10)
plt.xlabel("u")
plt.ylabel("v")
plt.title("Real part of visibilities")

plt.subplot(122)
plt.imshow(zz.imag, extent=[-1*(np.amax(np.abs(u_60)))-10, np.amax(np.abs(u_60))+10,
                            -1*(np.amax(abs(v_60)))-10, np.amax(abs(v_60))+10])
plt.plot(u_60, v_60, "k")
plt.xlim([-1*(np.amax(np.abs(u_60)))-10, np.amax(np.abs(u_60))+10])
plt.ylim(-1*(np.amax(abs(v_60)))-10, np.amax(abs(v_60))+10)
plt.xlabel("u")
plt.ylabel("v")
plt.title("Imaginary part of visibilities")

plt.tight_layout()  # keep the two panels and their axis labels from overlapping
4_Visibility_Space/4_5_1_uv_coverage_uv_tracks.ipynb
KshitijT/fundamentals_of_interferometry
gpl-2.0
Figure 4.5.7: Real and imaginary parts of the visibility function. The black curve is the portion of the $uv$ track crossing the visibility. We now plot the sampled visibilities as a function of time-slots, i.e. $V(u_t(t_s),v_t(t_s))$.
plt.subplot(121)
plt.plot(z.real)
plt.xlabel("Timeslots")
plt.ylabel("Jy")
plt.title("Real: sampled visibilities")

plt.subplot(122)
plt.plot(z.imag)
plt.xlabel("Timeslots")
plt.ylabel("Jy")
plt.title("Imag: sampled visibilities")
4_Visibility_Space/4_5_1_uv_coverage_uv_tracks.ipynb
KshitijT/fundamentals_of_interferometry
gpl-2.0
Figure 4.5.8: Real and imaginary parts of the visibility sampled by the black curve in Fig. 4.5.7, plotted as a function of time.
plt.subplot(121)
plt.imshow(abs(zz), extent=[-1*(np.amax(np.abs(u_60)))-10, np.amax(np.abs(u_60))+10,
                            -1*(np.amax(abs(v_60)))-10, np.amax(abs(v_60))+10])
plt.plot(u_60, v_60, "k")
plt.xlim([-1*(np.amax(np.abs(u_60)))-10, np.amax(np.abs(u_60))+10])
plt.ylim(-1*(np.amax(abs(v_60)))-10, np.amax(abs(v_60))+10)
plt.xlabel("u")
plt.ylabel("v")
plt.title("Amplitude of visibilities")

plt.subplot(122)
plt.imshow(np.angle(zz), extent=[-1*(np.amax(np.abs(u_60)))-10, np.amax(np.abs(u_60))+10,
                                 -1*(np.amax(abs(v_60)))-10, np.amax(abs(v_60))+10])
plt.plot(u_60, v_60, "k")
plt.xlim([-1*(np.amax(np.abs(u_60)))-10, np.amax(np.abs(u_60))+10])
plt.ylim(-1*(np.amax(abs(v_60)))-10, np.amax(abs(v_60))+10)
plt.xlabel("u")
plt.ylabel("v")
plt.title("Phase of visibilities")
4_Visibility_Space/4_5_1_uv_coverage_uv_tracks.ipynb
KshitijT/fundamentals_of_interferometry
gpl-2.0
Figure 4.5.9: Amplitude and Phase of the visibility function. The black curve is the portion of the $uv$ track crossing the visibility.
plt.subplot(121)
plt.plot(abs(z))
plt.xlabel("Timeslots")
plt.ylabel("Jy")
plt.title("Abs: sampled visibilities")

plt.subplot(122)
plt.plot(np.angle(z))
plt.xlabel("Timeslots")
plt.ylabel("Radians")
plt.title("Phase: sampled visibilities")
4_Visibility_Space/4_5_1_uv_coverage_uv_tracks.ipynb
KshitijT/fundamentals_of_interferometry
gpl-2.0
Finally starting to understand this problem. So ResourceExhaustedError isn't system memory (or at least not only) but graphics memory. The card (obviously) cannot handle a batch size of 64. But batch size must be a multiple of chunk length, which here is 64, so I have to find a way to reduce the chunk length down to something my system can handle: no more than 8.
arr_lr_c8 = bcolz.carray(arr_lr, chunklen=8, rootdir=path+'trn_resized_72_c8.bc')
arr_lr_c8.flush()
arr_hr_c8 = bcolz.carray(arr_hr, chunklen=8, rootdir=path+'trn_resized_288_c8.bc')
arr_hr_c8.flush()

arr_lr_c8.chunklen, arr_hr_c8.chunklen
FAI02_old/Lesson9/neural_sr_attempt2.ipynb
WNoxchi/Kaukasos
mit
That looks successful, now to redo the whole thing with the _c8 versions:
arr_lr_c8 = bcolz.open(path+'trn_resized_72_c8.bc')
arr_hr_c8 = bcolz.open(path+'trn_resized_288_c8.bc')

inp, outp = get_model(arr_lr_c8)

shp = arr_hr_c8.shape[1:]
vgg_inp = Input(shp)
vgg = VGG16(include_top=False, input_tensor=Lambda(preproc)(vgg_inp))
for l in vgg.layers:
    l.trainable = False
vgg_content = Model(vgg_inp, [get_outp(vgg, o) for o in [1,2,3]])
vgg1 = vgg_content(vgg_inp)
vgg2 = vgg_content(outp)

m_sr = Model([inp, vgg_inp], Lambda(content_fn)(vgg1+vgg2))
m_sr.compile('adam', 'mae')

def train(bs, niter=10):
    targ = np.zeros((bs, 1))
    bc = BcolzArrayIterator(arr_hr_c8, arr_lr_c8, batch_size=bs)
    for i in range(niter):
        hr, lr = next(bc)
        m_sr.train_on_batch([lr[:bs], hr[:bs]], targ)

%time train(8, 18000)  # not sure what exactly the '18000' is for

arr_lr.shape, arr_hr.shape, arr_lr_c8.shape, arr_hr_c8.shape

# 19439//8 = 2429
%time train(8, 2430)
FAI02_old/Lesson9/neural_sr_attempt2.ipynb
WNoxchi/Kaukasos
mit
Dimensionality reduction
We are going to use scikit-learn's PCA algorithm to reduce the dataset down to two dimensions.
from sklearn.decomposition import PCA

RANDOM_STATE = 1234

# Create a PCA object with two components
pca_model = PCA(n_components=2, random_state=RANDOM_STATE)
# Use the fit_transform() method to reduce the dataset to two dimensions;
# `iris` is assumed to be the feature DataFrame loaded earlier in this notebook
iris_2d = pca_model.fit_transform(iris)

import matplotlib.pyplot as plt
%matplotlib inline

# Create a scatterplot of the reduced dataset
plt.scatter(iris_2d[:, 0], iris_2d[:, 1])
# Display the plot
plt.show()
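A quick optional sanity check, not part of the original exercise, is to inspect how much of the original variance the two components retain; a total close to 1.0 means little information was lost in the reduction. This assumes the fitted pca_model from the cell above:

# fraction of the dataset's variance captured by each of the two components
print(pca_model.explained_variance_ratio_)
# total variance retained by the 2-D projection
print(pca_model.explained_variance_ratio_.sum())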
03-Delimitação de grupos de flores.ipynb
ffmmjj/intro_to_data_science_workshop
apache-2.0
How many distinct groups can you identify?
Cluster discovery with K-Means
The problem described above can be framed as a clustering problem. Clustering finds groups of examples that are similar to other examples in the same group but different from examples belonging to other groups.
In this example, we will use scikit-learn's KMeans algorithm to find clusters in the dataset.
One limitation of KMeans is that it needs to receive the expected number of clusters as an argument, so you either need some domain knowledge to guess a reasonable number of groups, or you can test different numbers of clusters and see which one gives the best result (a quick way to compare them numerically is sketched right after the next code cell).
# Create two KMeans models: one with two clusters and one with three clusters,
# then store the cluster labels predicted by each model
from sklearn.cluster import KMeans

# Create a KMeans object expecting two clusters
model2 = KMeans(n_clusters=2, random_state=RANDOM_STATE).fit(iris_2d)
# Infer the cluster identifier of each example in the dataset using predict()
labels2 = model2.predict(iris_2d)

# Create a KMeans object expecting three clusters
model3 = KMeans(n_clusters=3, random_state=RANDOM_STATE).fit(iris_2d)
# Infer the cluster identifier of each example in the dataset using predict()
labels3 = model3.predict(iris_2d)

# Scatterplot of the reduced dataset, colouring each point by the cluster
# it belongs to according to the two-cluster KMeans
plt.scatter(iris_2d[:, 0], iris_2d[:, 1], c=labels2)
plt.show()

# Scatterplot of the reduced dataset, colouring each point by the cluster
# it belongs to according to the three-cluster KMeans
plt.scatter(iris_2d[:, 0], iris_2d[:, 1], c=labels3)
plt.show()
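To make "see which number of clusters gives the best result" concrete, here is a minimal sketch, assuming iris_2d is the reduced dataset from above, that scores candidate cluster counts with scikit-learn's silhouette score; higher values indicate better-separated clusters:

from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

# compare a few candidate cluster counts on the reduced dataset
for k in range(2, 6):
    labels = KMeans(n_clusters=k, random_state=RANDOM_STATE).fit_predict(iris_2d)
    print(k, silhouette_score(iris_2d, labels))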
03-Delimitação de grupos de flores.ipynb
ffmmjj/intro_to_data_science_workshop
apache-2.0
Primitives with Additional Arguments
Some features require more advanced calculations than others. Advanced features usually entail additional arguments to help output the desired value. With custom primitives, you can use primitive arguments to help you create advanced features.
String Count Example
In this example, you will learn how to make custom primitives that take in additional arguments. You will create a primitive to count the number of times a specific string value occurs inside a text.
First, derive a new transform primitive class using TransformPrimitive as a base. The primitive will take in a text column as the input and return a numeric column as the output, so set the input type to a Woodwork ColumnSchema with logical type NaturalLanguage and the return type to a Woodwork ColumnSchema with the semantic tag 'numeric'. The specific string value is the additional argument, so define it as a keyword argument inside __init__. Then, override get_function to return a primitive function that will calculate the feature.
Featuretools' primitives use Woodwork's ColumnSchema to control the input and return types of columns for the primitive. For more information about using the Woodwork typing system in Featuretools, see the Woodwork Typing in Featuretools guide.
from featuretools.primitives import TransformPrimitive
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import NaturalLanguage

class StringCount(TransformPrimitive):
    '''Count the number of times the string value occurs.'''
    name = 'string_count'
    input_types = [ColumnSchema(logical_type=NaturalLanguage)]
    return_type = ColumnSchema(semantic_tags={'numeric'})

    def __init__(self, string=None):
        self.string = string

    def get_function(self):
        def string_count(column):
            assert self.string is not None, "string to count needs to be defined"
            # this is a naive implementation used for clarity
            counts = [text.lower().count(self.string) for text in column]
            return counts
        return string_count
docs/source/guides/advanced_custom_primitives.ipynb
Featuretools/featuretools
bsd-3-clause
Now you have a primitive that is reusable for different string values. For example, you can create features based on the number of times the word "the" appears in a text. Create an instance of the primitive where the string value is "the" and pass the primitive into DFS to generate the features. The feature name will automatically reflect the string value of the primitive.
es = make_ecommerce_entityset()

feature_matrix, features = ft.dfs(
    entityset=es,
    target_dataframe_name="sessions",
    agg_primitives=["sum", "mean", "std"],
    trans_primitives=[StringCount(string="the")],
)
feature_matrix[[
    'STD(log.STRING_COUNT(comments, string=the))',
    'SUM(log.STRING_COUNT(comments, string=the))',
    'MEAN(log.STRING_COUNT(comments, string=the))',
]]
docs/source/guides/advanced_custom_primitives.ipynb
Featuretools/featuretools
bsd-3-clause
Features with Multiple Outputs
Some calculations output more than a single value. With custom primitives, you can make the most of these calculations by creating a feature for each output value.
Case Count Example
In this example, you will learn how to make custom primitives that output multiple features. You will create a primitive that outputs the count of upper case and lower case letters of a text.
First, derive a new transform primitive class using TransformPrimitive as a base. The primitive will take in a text column as the input and return two numeric columns as the output, so set the input type to a Woodwork ColumnSchema with logical type NaturalLanguage and the return type to a Woodwork ColumnSchema with semantic tag 'numeric'. Since this primitive returns two columns, also set number_output_features to two. Then, override get_function to return a primitive function that will calculate the feature and return a list of columns.
import re
import numpy as np

class CaseCount(TransformPrimitive):
    '''Return the count of upper case and lower case letters of a text.'''
    name = 'case_count'
    input_types = [ColumnSchema(logical_type=NaturalLanguage)]
    return_type = ColumnSchema(semantic_tags={'numeric'})
    number_output_features = 2

    def get_function(self):
        def case_count(array):
            # this is a naive implementation used for clarity
            upper = np.array([len(re.findall('[A-Z]', i)) for i in array])
            lower = np.array([len(re.findall('[a-z]', i)) for i in array])
            return upper, lower
        return case_count
docs/source/guides/advanced_custom_primitives.ipynb
Featuretools/featuretools
bsd-3-clause
Now you have a primitive that outputs two columns. One column contains the count for the upper case letters. The other column contains the count for the lower case letters. Pass the primitive into DFS to generate features. By default, the feature name will reflect the index of the output.
feature_matrix, features = ft.dfs(
    entityset=es,
    target_dataframe_name="sessions",
    agg_primitives=[],
    trans_primitives=[CaseCount],
)
feature_matrix[[
    'customers.CASE_COUNT(favorite_quote)[0]',
    'customers.CASE_COUNT(favorite_quote)[1]',
]]
docs/source/guides/advanced_custom_primitives.ipynb
Featuretools/featuretools
bsd-3-clause
Custom Naming for Multiple Outputs
When you create a primitive that outputs multiple features, you can also define custom naming for each of those features.
Hourly Sine and Cosine Example
In this example, you will learn how to apply custom naming for multiple outputs. You will create a primitive that outputs the sine and cosine of the hour.
First, derive a new transform primitive class using TransformPrimitive as a base. The primitive will take in the time index as the input and return two numeric columns as the output. Set the input type to a Woodwork ColumnSchema with a logical type of Datetime and the semantic tag 'time_index'. Next, set the return type to a Woodwork ColumnSchema with semantic tag 'numeric' and set number_output_features to two. Then, override get_function to return a primitive function that will calculate the feature and return a list of columns. Also, override generate_names to return a list of the feature names that you define.
from woodwork.logical_types import Datetime

class HourlySineAndCosine(TransformPrimitive):
    '''Returns the sine and cosine of the hour.'''
    name = 'hourly_sine_and_cosine'
    input_types = [ColumnSchema(logical_type=Datetime, semantic_tags={'time_index'})]
    return_type = ColumnSchema(semantic_tags={'numeric'})
    number_output_features = 2

    def get_function(self):
        def hourly_sine_and_cosine(column):
            sine = np.sin(column.dt.hour)
            cosine = np.cos(column.dt.hour)
            return sine, cosine
        return hourly_sine_and_cosine

    def generate_names(self, base_feature_names):
        name = self.generate_name(base_feature_names)
        return f'{name}[sine]', f'{name}[cosine]'
docs/source/guides/advanced_custom_primitives.ipynb
Featuretools/featuretools
bsd-3-clause
Now you have a primitive that outputs two columns. One column contains the sine of the hour. The other column contains the cosine of the hour. Pass the primitive into DFS to generate features. The feature name will reflect the custom naming you defined.
feature_matrix, features = ft.dfs(
    entityset=es,
    target_dataframe_name="log",
    agg_primitives=[],
    trans_primitives=[HourlySineAndCosine],
)
feature_matrix.head()[[
    'HOURLY_SINE_AND_COSINE(datetime)[sine]',
    'HOURLY_SINE_AND_COSINE(datetime)[cosine]',
]]
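One design note on this primitive: np.sin(column.dt.hour) treats the hour as a plain number, so hours 23 and 0 map to very different values even though they are adjacent in time. If a truly cyclical encoding is the goal, a common variant, which is a suggestion here and not part of this guide's example, scales the hour onto the unit circle first:

import numpy as np

# hypothetical cyclical variant of the primitive function:
# scale hours into [0, 2*pi) so hour 23 and hour 0 land close together
def hourly_sine_and_cosine(column):
    radians = 2 * np.pi * column.dt.hour / 24
    return np.sin(radians), np.cos(radians)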
docs/source/guides/advanced_custom_primitives.ipynb
Featuretools/featuretools
bsd-3-clause
Preliminary Report
Read the following results/report. While you are reading it, think about whether the conclusions are correct, incorrect, misleading or unfounded. Think about what you would change or what additional analyses you would perform.
A. Initial observations based on the plot above
+ Overall, rate of readmissions is trending down with increasing number of discharges
+ With lower number of discharges, there is a greater incidence of excess rate of readmissions (area shaded red)
+ With higher number of discharges, there is a greater incidence of lower rates of readmissions (area shaded green)
B. Statistics
+ In hospitals/facilities with number of discharges < 100, mean excess readmission rate is 1.023 and 63% have excess readmission rate greater than 1
+ In hospitals/facilities with number of discharges > 1000, mean excess readmission rate is 0.978 and 44% have excess readmission rate greater than 1
C. Conclusions
+ There is a significant correlation between hospital capacity (number of discharges) and readmission rates.
+ Smaller hospitals/facilities may be lacking necessary resources to ensure quality care and prevent complications that lead to readmissions.
D. Regulatory policy recommendations
+ Hospitals/facilities with small capacity (< 300) should be required to demonstrate upgraded resource allocation for quality care to continue operation.
+ Directives and incentives should be provided for consolidation of hospitals and facilities to have a smaller number of them with higher capacity and number of discharges.
ANSWERS to Exercise 3:
Question A. Do you agree with the above analysis and recommendations? Why or why not?
At first glance the analysis appears to hold weight, but one problem it does not address is whether there is enough statistical evidence to support the conclusions. So, at this point I can't categorically say that I agree with the conclusions until I conduct a hypothesis test and examine the p-value computed from the sample data. Only after this test can I answer the question.
Question B. Provide support for your arguments and your own recommendations with a statistically sound analysis:
Set up an appropriate hypothesis test. A hypothesis test consists of a NULL HYPOTHESIS and an ALTERNATIVE HYPOTHESIS. The NULL HYPOTHESIS is usually a statement of 'no effect' or 'no difference' and is the statement being tested using the p-value computed from the sample data. If the p-value is less than or equal to the level of significance, the NULL HYPOTHESIS can be rejected, which in turn signifies that there is enough evidence in the data to support the ALTERNATIVE HYPOTHESIS.
For this particular set of data, looking at the scatter plot, it appears that there are many more hospitals with a relatively small number of discharges than hospitals with a large number of discharges. We can loosely equate this to small hospitals vs. large hospitals. Since the original conclusion correlates hospital capacity (number of discharges) with readmission rate, an appropriate null hypothesis should involve these two:
NULL HYPOTHESIS: $H_0: \mu_1 = \mu_2$, where $\mu_1$ is the average rate of readmission of hospitals with < 100 discharges and $\mu_2$ is the average rate of readmission of hospitals with > 1000 discharges. In other words, the null hypothesis states that there is no difference in the average rate of readmissions between hospitals with fewer than 100 discharges and hospitals with more than 1000 discharges.
ALTERNATIVE HYPOTHESIS: $H_a: \mu_1 \neq \mu_2$, where $\mu_1$ is the average rate of readmission of hospitals with < 100 discharges and $\mu_2$ is the average rate of readmission of hospitals with > 1000 discharges. In other words, the alternative hypothesis states that there is a significant difference in average readmission rates between hospitals with fewer than 100 discharges and hospitals with more than 1000 discharges.
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import bokeh.plotting as bkp
import scipy.stats as st
from mpl_toolkits.axes_grid1 import make_axes_locatable

# read in readmissions data provided
hospital_read_df = pd.read_csv('data/cms_hospital_readmissions.csv')

# Set up the hypothesis test.
# Get the two groups of hospitals, one with < 100 discharges and the other with > 1000 discharges.
# Get the hospitals with small discharges first.
# First statement deals with missing data.
clean_hospital_read_df = hospital_read_df[(hospital_read_df['Number of Discharges'] != 'Not Available')]
hosp_with_small_discharges = clean_hospital_read_df[clean_hospital_read_df['Number of Discharges'].astype(int) < 100]
hosp_with_small_discharges = hosp_with_small_discharges[hosp_with_small_discharges['Number of Discharges'].astype(int) != 0]
hosp_with_small_discharges.sort_values(by='Number of Discharges', ascending=False)

# Now get the hospitals with relatively large discharges.
hosp_with_large_discharges = clean_hospital_read_df[clean_hospital_read_df['Number of Discharges'].astype(int) > 1000]
hosp_with_large_discharges = hosp_with_large_discharges[hosp_with_large_discharges['Number of Discharges'].astype(int) != 0]
hosp_with_large_discharges.sort_values(by='Number of Discharges', ascending=False)

# Now calculate the test statistic and p-value (Welch's t-test, unequal variances);
# drop missing ratios so the t-test is well-defined
small_hospitals = hosp_with_small_discharges['Excess Readmission Ratio'].dropna()
large_hospitals = hosp_with_large_discharges['Excess Readmission Ratio'].dropna()
result = st.ttest_ind(small_hospitals, large_hospitals, equal_var=False)
print("Test statistic is equal to: %6.4F, P-value is equal to: %5.14F" % (result[0], result[1]))
springboard-answers-to-exercises/sliderule_dsi_inferential_statistics_exercise_3_answers.ipynb
norsween/data-science
gpl-3.0
Report statistical significance for α = .01: Since the p-value &lt; 0.01, we can reject the null hypothesis, which states that there is no significant difference between the two hospital groups mentioned in the original conclusion.
Discuss statistical significance and practical significance: The hypothesis test has shown that there is a difference between the two groups compared in the preliminary report: hospitals with fewer than 100 discharges and hospitals with more than 1000 discharges. The difference may still not be practically significant, since the samples are quite large, and large sample sizes make hypothesis tests sensitive to even slight differences in the data. The hypothesis test only establishes, with a strong level of confidence, that the two samples are not statistically identical; a quick effect-size check is sketched below, before the plot.
Look at the scatterplot above. What are the advantages and disadvantages of using this plot to convey information? To me, the main advantage of a scatterplot is that the range of the data, i.e., the maximum and minimum values, can be easily determined, and one can easily see the relationship between two variables. One drawback is that you cannot qualitatively judge how significant the differences are, especially where many points overlap.
Construct another plot that conveys the same information in a more direct manner: Below I've constructed a hexagon binning plot that shows the relative counts of data points for each combination of readmission rate and number of discharges.
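As a rough gauge of practical significance, here is a minimal sketch computing Cohen's d for the two groups. It assumes the small_hospitals and large_hospitals series from the t-test cell above; the pooled-standard-deviation formula is the textbook one, not something from the original report:

# minimal Cohen's d sketch, reusing the two series from the hypothesis test
small = small_hospitals.dropna()
large = large_hospitals.dropna()
pooled_sd = np.sqrt(((len(small) - 1) * small.std() ** 2 +
                     (len(large) - 1) * large.std() ** 2) /
                    (len(small) + len(large) - 2))
cohens_d = (small.mean() - large.mean()) / pooled_sd
print("Cohen's d: %.3f" % cohens_d)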
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import bokeh.plotting as bkp
from mpl_toolkits.axes_grid1 import make_axes_locatable

# read in readmissions data provided
hospital_read_df = pd.read_csv('data/cms_hospital_readmissions.csv')

# deal with missing and inconvenient portions of data
clean_hospital_read_df = hospital_read_df[hospital_read_df['Number of Discharges'] != 'Not Available']
clean_hospital_read_df = clean_hospital_read_df.sort_values('Number of Discharges')

# generate a hexagon bin plot for number of discharges vs. excess rate of readmissions;
# cast discharges to int since the column was read as strings ('Not Available' entries)
x = [int(a) for a in clean_hospital_read_df['Number of Discharges'][81:-3]]
y = list(clean_hospital_read_df['Excess Readmission Ratio'][81:-3])

fig, ax = plt.subplots(figsize=(8, 5))
im = ax.hexbin(x, y, gridsize=20)
fig.colorbar(im, ax=ax)
ax.fill_between([0, 350], 1.15, 2, facecolor='red', alpha=.15, interpolate=True)
ax.fill_between([800, 2500], .5, .95, facecolor='green', alpha=.15, interpolate=True)
ax.set_xlabel('Number of discharges', fontsize=10)
ax.set_ylabel('Excess rate of readmissions', fontsize=10)
ax.set_title('Hexagon bin plot of number of discharges vs. excess rate of readmissions', fontsize=12, fontweight='bold')
springboard-answers-to-exercises/sliderule_dsi_inferential_statistics_exercise_3_answers.ipynb
norsween/data-science
gpl-3.0
Either define the entire neural network inside the constructor of the Sequential class, as below,
#
# Network model can be initialized using following syntax in the constructor itself
#
model1 = Sequential([
    Dense(32, input_dim=784),
    Activation("relu"),
    Dense(10),
    Activation("softmax")
])
notebooks/getting_started_with_keras.ipynb
ninadhw/ninadhw.github.io
cc0-1.0
Or add layers to the network one by one, whichever is more convenient.
#
# Layers to the network can be added dynamically
#
model2 = Sequential()
model2.add(Dense(32, input_dim=784))
model2.add(Activation('relu'))
model2.add(Dense(10))
model2.add(Activation('softmax'))
notebooks/getting_started_with_keras.ipynb
ninadhw/ninadhw.github.io
cc0-1.0
The model needs to know what input shape it should expect, i.e. whether the input is a 28x28 image (784 pixels), some numeric text, or features of some other size. For this reason, the first layer in a <span style="color:red;font-weight:bold">Sequential model</span> (and only the first, because following layers can do automatic shape inference from the shape of previous layers) needs to receive information about its input shape, hence the first <span style="color:red;font-weight:bold">model.add</span> call has the extra argument <span style="color:red;font-weight:bold">input_dim</span>. There are several possible ways to do this:
-- pass an <span style="color:red;font-weight:bold">input_shape</span> argument to the first layer. This is a shape tuple (a tuple of integers or None entries, where None indicates that any positive integer may be expected). In <span style="color:red;font-weight:bold">input_shape</span>, the batch dimension is not included. e.g.
input_shape=(784, 10) -&gt; each input sample of the neural network is a 784x10 tensor
input_shape=(784,) -&gt; each input sample is a vector of length 784 (a None entry, as in input_shape=(784, None), leaves that dimension unspecified so any positive size is accepted)
-- pass instead a batch_input_shape argument, where the batch dimension is included. This is useful for specifying a fixed batch size (e.g. with stateful RNNs).
-- some 2D layers, such as Dense, support the specification of their input shape via the argument input_dim, and some 3D temporal layers support the arguments input_dim and input_length.
As such, the following three snippets are strictly equivalent:
model1 = Sequential()
model1.add(Dense(32, input_shape=(784,)))

model2 = Sequential()
model2.add(Dense(32, batch_input_shape=(None, 784)))
# note that batch dimension is "None" here,
# so the model will be able to process batches of any size with each input of length 784.

model3 = Sequential()
model3.add(Dense(32, input_dim=784))
notebooks/getting_started_with_keras.ipynb
ninadhw/ninadhw.github.io
cc0-1.0
Note that <span style="font-weight:bold">input_dim=784 is the same as input_shape=(784,)</span>
The Merge layer
Multiple Sequential instances can be merged into a single output via a Merge layer. The output is a layer that can be added as the first layer in a new Sequential model. For instance, here's a model with two separate input branches getting merged:
Image("keras_examples/keras_merge.png") from keras.layers import Merge left_branch = Sequential() left_branch.add(Dense(32, input_dim=784)) right_branch = Sequential() right_branch.add(Dense(32, input_dim=784)) merged = Merge([left_branch, right_branch], mode='concat') final_model = Sequential() final_model.add(merged) final_model.add(Dense(10, activation='softmax'))
notebooks/getting_started_with_keras.ipynb
ninadhw/ninadhw.github.io
cc0-1.0
Such a two-branch model can then be trained via e.g.:
final_model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
final_model.fit([input_data_1, input_data_2], targets)  # we pass one data array per model input
notebooks/getting_started_with_keras.ipynb
ninadhw/ninadhw.github.io
cc0-1.0
The Merge layer supports a number of pre-defined modes: <ul> <li>sum (default): element-wise sum</li> <li>concat: tensor concatenation. You can specify the concatenation axis via the argument concat_axis.</li> <li>mul: element-wise multiplication</li> <li>ave: tensor average</li> <li>dot: dot product. You can specify which axes to reduce along via the argument dot_axes.</li> <li>cos: cosine proximity between vectors in 2D tensors.</li> </ul> You can also pass a function as the mode argument, allowing for arbitrary transformations:
merged = Merge([left_branch, right_branch], mode=lambda x: x[0] - x[1])
notebooks/getting_started_with_keras.ipynb
ninadhw/ninadhw.github.io
cc0-1.0
Now you know enough to be able to define almost any model with Keras. For complex models that cannot be expressed via Sequential and Merge, you can use the functional API.
Compilation
Before training a model, you need to configure the learning process, which is done via the compile method. It receives three arguments:
<ul>
<li>an optimizer: the type of optimizer to be used, e.g. gradient descent. This could be the string identifier of an existing optimizer (such as rmsprop or adagrad), or an instance of the Optimizer class. <a href="https://keras.io/optimizers" target="_blank">See: optimizers.</a></li>
<li>a loss function: the error function to be optimized, e.g. a squared-error or cross-entropy function. This is the objective that the model will try to minimize. It can be the string identifier of an existing loss function (such as categorical_crossentropy or mse), or it can be an objective function. <a href="https://keras.io/objectives" target="_blank">See: objectives.</a></li>
<li>a list of metrics, to evaluate performance of the network. For any classification problem you will want to set this to metrics=['accuracy']. A metric could be the string identifier of an existing metric or a custom metric function. Custom metric functions should return either a single tensor value or a dict metric_name -> metric_value. <a href="https://keras.io/metrics" target="_blank">See: metrics.</a></li>
</ul>
# for a multi-class classification problem
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# for a binary classification problem
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])

# for a mean squared error regression problem
model.compile(optimizer='rmsprop',
              loss='mse')

# for custom metrics
import keras.backend as K

def mean_pred(y_true, y_pred):
    return K.mean(y_pred)

def false_rates(y_true, y_pred):
    false_neg = ...
    false_pos = ...
    return {
        'false_neg': false_neg,
        'false_pos': false_pos,
    }

model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy', mean_pred, false_rates])
notebooks/getting_started_with_keras.ipynb
ninadhw/ninadhw.github.io
cc0-1.0
Training
Keras models are trained on Numpy arrays of input data and labels. For training a model, you will typically use the fit function. <a href="https://keras.io/models/sequential" target="_blank">Read its documentation here.</a>
# for a single-input model with 2 classes (binary):
model = Sequential()
model.add(Dense(1, input_dim=784, activation='sigmoid'))
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])

# generate dummy data
import numpy as np
data = np.random.random((1000, 784))
labels = np.random.randint(2, size=(1000, 1))

# train the model, iterating on the data in batches
# of 32 samples
model.fit(data, labels, nb_epoch=10, batch_size=32)

# for a multi-input model with 10 classes:
left_branch = Sequential()
left_branch.add(Dense(32, input_dim=784))

right_branch = Sequential()
right_branch.add(Dense(32, input_dim=784))

merged = Merge([left_branch, right_branch], mode='concat')

model = Sequential()
model.add(merged)
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# generate dummy data
import numpy as np
from keras.utils.np_utils import to_categorical
data_1 = np.random.random((1000, 784))
data_2 = np.random.random((1000, 784))

# these are integers between 0 and 9
labels = np.random.randint(10, size=(1000, 1))
# we convert the labels to a binary matrix of size (1000, 10)
# for use with categorical_crossentropy
labels = to_categorical(labels, 10)

# train the model
# note that we are passing a list of Numpy arrays as training data
# since the model has 2 inputs
model.fit([data_1, data_2], labels, nb_epoch=10, batch_size=32)
notebooks/getting_started_with_keras.ipynb
ninadhw/ninadhw.github.io
cc0-1.0