markdown
stringlengths
0
1.02M
code
stringlengths
0
832k
output
stringlengths
0
1.02M
license
stringlengths
3
36
path
stringlengths
6
265
repo_name
stringlengths
6
127
--- ๊ณผ๊ฑฐ์ž‘์—… [1] Rename
# 1) ๊ฒฝ๋กœ์„ค์ • from google.colab import drive drive.mount('/content/gdrive') %cd /content/gdrive/MyDrive/แ„€แ…ขแ†จแ„Žแ…ฆแ„แ…กแ†ทแ„Œแ…ต/dataset # 2) ํ•„์š”ํ•œ ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ ์ž„ํฌํŠธ import os import torch from IPython.display import Image import os import random import shutil from sklearn.model_selection import train_test_split import xml.etree.ElementTree as ET from xml.dom import minidom from tqdm import tqdm from PIL import Image, ImageDraw import numpy as np import matplotlib.pyplot as plt # 3) ์ด๋ฏธ์ง€ ํด๋” ์ ‘๊ทผ ## ์ฃผ์–ด์ง„ ๋””๋ ‰ํ† ๋ฆฌ์— ์žˆ๋Š” ํ•ญ๋ชฉ๋“ค์˜ ์ด๋ฆ„์„ ๋‹ด๊ณ  ์žˆ๋Š” ๋ฆฌ์ŠคํŠธ๋ฅผ ๋ฐ˜ํ™˜ํ•ฉ๋‹ˆ๋‹ค. ## ๋ฆฌ์ŠคํŠธ๋Š” ์ž„์˜์˜ ์ˆœ์„œ๋Œ€๋กœ ๋‚˜์—ด๋ฉ๋‹ˆ๋‹ค. images = os.listdir('origin/images') images.sort() labels = os.listdir('origin/labels') labels.sort() # ํŒŒ์ผ๋ช… ํ™•์ธ print(images[1200:1204]) print(labels[1200:1204]) file_path = 'origin/images' result_path = 'images' file_names = images file2_path = 'origin/labels' result2_path = 'labels' file2_names = labels # 4) ์ฐจ๋ก€๋Œ€๋กœ ํŒŒ์ผ ์ด๋ฆ„์„ ๋ณ€๊ฒฝํ•˜๊ณ  ์ €์žฅ ".jpg" ## os.rename(src, dst) ๋ฉ”์„œ๋“œ๋Š” ํŒŒ์ผ ๋˜๋Š” ๋””๋ ‰ํ† ๋ฆฌ(ํด๋”) src์˜ ์ด๋ฆ„์„ dst๋กœ ๋ณ€๊ฒฝํ•ฉ๋‹ˆ๋‹ค. i = 1 for name in file_names: src = os.path.join(file_path, name) dst = str(i) + '.jpg' dst = os.path.join(result_path, dst) os.rename(src, dst) i += 1 # 4) ์ฐจ๋ก€๋Œ€๋กœ ํŒŒ์ผ ์ด๋ฆ„์„ ๋ณ€๊ฒฝํ•˜๊ณ  ์ €์žฅ ".xml" ## os.rename(src, dst) ๋ฉ”์„œ๋“œ๋Š” ํŒŒ์ผ ๋˜๋Š” ๋””๋ ‰ํ† ๋ฆฌ(ํด๋”) src์˜ ์ด๋ฆ„์„ dst๋กœ ๋ณ€๊ฒฝํ•ฉ๋‹ˆ๋‹ค. i = 1 for name in file2_names: src = os.path.join(file2_path, name) dst = str(i) + '.xml' dst = os.path.join(result2_path, dst) os.rename(src, dst) i += 1
_____no_output_____
MIT
yolov5/YOLOv5_Task1.ipynb
sosodoit/yolov5
[2] XmlToTxt - https://github.com/Isabek/XmlToTxt [3] Divide [4] ์‚ฌ์ง„ ๊ฑฐ๋ฅด๊ธฐ: class 1,2,3,4 ํ•˜๋‚˜๋ผ๋„ ์žˆ์œผ๋ฉด ์‚ด๋ฆฌ๊ณ  ์—†์œผ๋ฉด ์ œ๊ฑฐ.
# 1) ๊ฒฝ๋กœ์„ค์ • %cd /content/gdrive/MyDrive/แ„€แ…ขแ†จแ„Žแ…ฆแ„แ…กแ†ทแ„Œแ…ต/dataset # 2) ํ•„์š”ํ•œ ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ ์ž„ํฌํŠธ import os from glob import glob img_list = glob('labels/train/*.txt') val_img_list = glob('labels/val/*.txt') print(len(img_list)) print(len(val_img_list)) four = [] #์ฒดํฌ๋ฅผ ์œ„ํ•œ ๋ฆฌ์ŠคํŠธ ์ƒ์„ฑ ## ์—ฌ๊ธฐ์„œ val_img_list / img_list ๋‘๊ฐœ๋ฅผ ๋ณ€๊ฒฝํ•˜๋ฉด์„œ ๋‚ด๋ถ€์˜ ๋‹ค๋ฅธ index๋ฅผ ์ œ๊ฑฐํ•ด์ฃผ์–ด์•ผ ํ•œ๋‹ค. for x in img_list: four = [] #์ƒˆ๋กœ์šด ๋ผ๋ฒจ๋ง ๋ฐ์ดํ„ฐ .txt๋ฅผ ๋ถˆ๋ €์„ ๋•Œ ์ดˆ๊ธฐํ™”๋ฅผ ์œ„ํ•จ f = open(x, 'r') lines = f.readlines() for line in lines: #์ฝ์–ด๋“ค์ธ ๋ชจ๋“  ๋ฐ์ดํ„ฐ๋ฅผ ํ•œ ์ค„์”ฉ ๋ถˆ๋Ÿฌ์˜ด # 1,2,3,4 ์—†์œผ๋ฉด ๋ชจ๋‘ ์ œ๊ฑฐํ•˜๊ธฐ ์œ„ํ•ด check. # 1:Vehicle_Car 2:Vehicle_Bus 3:Vehicle_Motorcycle 4:Vehicle_Unknown if (int(line.split(' ')[0]) == 1) or (int(line.split(' ')[0]) == 2) or (int(line.split(' ')[0]) == 3) or (int(line.split(' ')[0]) == 4): four.append(line) # ๋งŒ์•ฝ ๋„ท ์ค‘ ํ•˜๋‚˜๋„ ์—†๋Š” ์‚ฌ์ง„์ด ์žˆ๋‹ค๋ฉด ์ œ๊ฑฐ. if len(four) == 0: print("four ๋‚ด์šฉ ์—†์Œ") os.remove(x.split('.txt')[0].replace('labels','images')+'.jpg') os.remove(x) f.close() #๋‘ ๊ฐœ๊ฐ€ ์กด์žฌํ•œ๋‹ค๋ฉด ๋‹ค์‹œ ๋‘ ๊ฐœ๋งŒ ๋ถˆ๋Ÿฌ์˜ค๋„๋ก ์žฌ์ •์˜. #๋‹ค์‹œ ์“ฐ๋Š”์ด์œ ๋Š” ํ•˜๋‚˜์˜ ์‚ฌ์ง„์— 1,2,3,4 ์ด์™ธ์— ๋‹ค๋ฅธ ๊ฐ’๋„ ์žˆ๋Š” ์‚ฌ์ง„์„ ์ˆ˜์ •ํ•˜๊ธฐ ์œ„ํ•จ. #w๋ฅผ ์ด์šฉํ•ด ์žฌ์ •์˜ํ•˜๋ฉด ์›๋ž˜ txtํŒŒ์ผ์— ์กด์žฌํ•˜๋Š” ๋ชจ๋“  ์ˆ˜์น˜๋ฅผ ์ œ๊ฑฐํ•˜๊ณ  ์ฒ˜์Œ๋ถ€ํ„ฐ ์ ๋Š”๋‹ค. ์ด๋ฅผ ์ด์šฉํ•จ. 
if len(four) >= 1: print("four ์”€: ", len(four)) w = open(x,'w') w.writelines(four) w.close() four = [] count1 = 0 count2 = 0 count3 = 0 count4 = 0 for x in val_img_list: four = [] f = open(x, 'r') lines = f.readlines() for line in lines: if line.split(' ')[0] == '1': four.append(line.replace(line.split(' ')[0], '0')) count1 += 1 elif line.split(' ')[0] == '2': four.append(line.replace(line.split(' ')[0], '1')) count2 += 1 elif line.split(' ')[0] == '3': four.append(line.replace(line.split(' ')[0], '2')) count3 += 1 elif line.split(' ')[0] == '4': four.append(line.replace(line.split(' ')[0], '3')) count4 += 1 f.close() w = open(x,'w') w.writelines(four) w.close() print(' 1:Vehicle_Car %d ๊ฐœ ๋ณ€ํ™˜ ์™„๋ฃŒ ' % count1 ) print(' 2:Vehicle_Bus %d ๊ฐœ ๋ณ€ํ™˜ ์™„๋ฃŒ ' % count2 ) print(' 3:Vehicle_Motorcycle %d ๊ฐœ ๋ณ€ํ™˜ ์™„๋ฃŒ ' % count3 ) print(' 4:Vehicle_Unknown %d ๊ฐœ ๋ณ€ํ™˜ ์™„๋ฃŒ ' % count4 ) ## val_img_list # 1:Vehicle_Car 2155 ๊ฐœ ๋ณ€ํ™˜ ์™„๋ฃŒ # 2:Vehicle_Bus 109 ๊ฐœ ๋ณ€ํ™˜ ์™„๋ฃŒ # 3:Vehicle_Motorcycle 195 ๊ฐœ ๋ณ€ํ™˜ ์™„๋ฃŒ # 4:Vehicle_Unknown 579 ๊ฐœ ๋ณ€ํ™˜ ์™„๋ฃŒ ## img_list (train) # 1:Vehicle_Car 8967 ๊ฐœ ๋ณ€ํ™˜ ์™„๋ฃŒ # 2:Vehicle_Bus 399 ๊ฐœ ๋ณ€ํ™˜ ์™„๋ฃŒ # 3:Vehicle_Motorcycle 790 ๊ฐœ ๋ณ€ํ™˜ ์™„๋ฃŒ # 4:Vehicle_Unknown 2395 ๊ฐœ ๋ณ€ํ™˜ ์™„๋ฃŒ
_____no_output_____
MIT
yolov5/YOLOv5_Task1.ipynb
sosodoit/yolov5
1806554 Ganesh Bhandarkar (DA LAB RECORD) LAB 1
# 1806554 Ganesh Bhandarkar — DA lab record, lab 1.
# Basic R warm-up: printing, vectors, lists, matrices, factors, data frames,
# dynamic typing, and element-wise vector arithmetic / comparisons.

print("Hello World")
date()
print(mean(1:5))

x <- 1
x

# Character vector: single and double quotes are interchangeable in R.
apple <- c('red', 'green', "yellow")
print(apple)
print(class(apple))

# A list may mix types: a numeric vector, a scalar, and a function.
list1 <- list(c(2, 5, 3), 21.3, sin)

# 2x3 character matrix filled row by row.
M = matrix(c('a', 'a', 'b', 'c', 'b', 'a'), nrow = 2, ncol = 3, byrow = TRUE)

# Factors: each distinct value becomes a level.
apple_colors <- c('green', 'green', 'yellow', 'red', 'red', 'red', 'green')
factor_apple <- factor(apple_colors)
print(factor_apple)
print(nlevels(factor_apple))

# Data frame: one row per person.
BMI <- data.frame(gender = c("Male", "Male", "Female"),
                  height = c(152, 171.5, 165),
                  weight = c(81, 93, 78),
                  age = c(42, 38, 26))
print(BMI)

# R variables are dynamically typed: the class follows the current value.
var_x <- "Hello"
cat("The class of var_x is ", class(var_x), "\n")
var_x <- 34.5
cat("Now the class of var_x is ", class(var_x), "\n")
var_x <- 27L
cat(" Next the class of var_x becomes ", class(var_x), "\n")

# Element-wise vector arithmetic.
v <- c(2, 5.5, 6)
t <- c(8, 3, 4)
print(v + t)
print(v - t)
print(v * t)
print(v / t)
print(v %% t)   # Give the remainder of the first vector with the second
print(v %/% t)  # Give the result of division of first vector with second (quotient)
print(v ^ t)    # The first vector raised to the exponent of second vector

# Element-wise comparisons.
v <- c(2, 5.5, 6)
t <- c(8, 3, 4)
print(v > t)
print(v < t)
print(v == t)
print(v <= t)
print(v >= t)
print(v != t)

# Assignment variants: <-, =, and right-assignment ->.
v <- c(2, 5.5, 6)
u = c(18, 13, 14)
print(v)
print(t)
print(u)
c(2, 5.5, 6) -> v
print(v)
print(t)

# Colon operator builds an integer sequence.
v <- 2:8
print(v)
[1] "Hello World"
MIT
DA Lab/1806554_da_lab_records_all.ipynb
ganeshbhandarkar/College-Labs-And-Projects
Batch processing with Argo Workflows. In this notebook we will dive into how you can run batch processing with Argo Workflows and Seldon Core.Dependencies:* Seldon core installed as per the docs with an ingress* Minio running in your cluster to use as local (s3) object storage* Argo Workflows installed in cluster (and argo CLI for commands) Setup Install Seldon Core: Use the notebook to [set-up Seldon Core with Ambassador or Istio Ingress](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html).Note: If running with KIND you need to make sure do follow [these steps](https://github.com/argoproj/argo/issues/2376#issuecomment-595593237) as workaround to the `/.../docker.sock` known issue. Set up Minio in your cluster: Use the notebook to [set-up Minio in your cluster](https://docs.seldon.io/projects/seldon-core/en/latest/examples/minio_setup.html). Copy the Minio Secret to namespace: We need to re-use the minio secret for the batch job, so this can be done by just copying the minio secret created in the `minio-system` namespace. The command below just copies the secret with the name "minio" from the minio-system namespace to the default namespace.
# Copy the "minio" secret from the minio-system namespace into the default
# namespace.  The jq filter keeps only the fields a fresh Secret needs and
# strips server-populated metadata so `kubectl apply` accepts it.
!kubectl get secret minio -n minio-system -o json | jq '{apiVersion,data,kind,metadata,type} | .metadata |= {"annotations", "name"}' | kubectl apply -n default -f -
secret/minio created
Apache-2.0
examples/batch/argo-workflows-batch/README.ipynb
Syakyr/seldon-core
Install Argo Workflows. You can follow the instructions from the official [Argo Workflows Documentation](https://github.com/argoproj/argo#quickstart).You also need to make sure that argo has permissions to create seldon deployments - for this you can just create a default-admin rolebinding as follows:
# Grant the default service account admin rights in the default namespace so
# Argo workflow pods are allowed to create Seldon deployments.
!kubectl create rolebinding default-admin --clusterrole=admin --serviceaccount=default:default
rolebinding.rbac.authorization.k8s.io/default-admin created
Apache-2.0
examples/batch/argo-workflows-batch/README.ipynb
Syakyr/seldon-core
Create some input for our modelWe will create a file that will contain the inputs that will be sent to our model
import os

# Create the assets/ folder.  (The original cell shelled out with
# `mkdir -p assets/`; os.makedirs(..., exist_ok=True) is the portable
# pure-Python equivalent and is a no-op if the folder already exists.)
os.makedirs("assets", exist_ok=True)

# Write 10,000 identical single-row ndarray payloads, one per line — the
# input the batch processor will send to the model.
with open("assets/input-data.txt", "w") as f:
    for _ in range(10000):
        f.write('[[1, 2, 3, 4]]\n')
_____no_output_____
Apache-2.0
examples/batch/argo-workflows-batch/README.ipynb
Syakyr/seldon-core
Check the contents of the file
# Sanity-check the generated batch input: expect 10000 identical rows.
!wc -l assets/input-data.txt
!head assets/input-data.txt
10000 assets/input-data.txt [[1, 2, 3, 4]] [[1, 2, 3, 4]] [[1, 2, 3, 4]] [[1, 2, 3, 4]] [[1, 2, 3, 4]] [[1, 2, 3, 4]] [[1, 2, 3, 4]] [[1, 2, 3, 4]] [[1, 2, 3, 4]] [[1, 2, 3, 4]]
Apache-2.0
examples/batch/argo-workflows-batch/README.ipynb
Syakyr/seldon-core
Upload the file to our minio
# Create the "data" bucket and upload the input file to minio.
!mc mb minio-seldon/data
!mc cp assets/input-data.txt minio-seldon/data/
Bucket created successfully `minio-seldon/data`. ...-data.txt: 146.48 KiB / 146.48 KiB โ”ƒโ–“โ–“โ–“โ–“โ–“โ–“โ–“โ–“โ–“โ–“โ–“โ–“โ–“โ–“โ–“โ–“โ–“โ–“โ–“โ–“โ–“โ–“โ–“โ–“โ–“โ”ƒ 2.14 MiB/s 0s
Apache-2.0
examples/batch/argo-workflows-batch/README.ipynb
Syakyr/seldon-core
Create Argo WorkflowIn order to create our argo workflow we have made it simple so you can leverage the power of the helm charts.Before we dive into the contents of the full helm chart, let's first give it a try with some of the settings.We will run a batch job that will set up a Seldon Deployment with 10 replicas and 100 batch client workers to send requests.
# Render the batch workflow from the helm chart (a Seldon deployment with
# 10 replicas, driven by 100 batch client workers sending ndarray payloads)
# and pipe the manifest straight into `argo submit`.
!helm template seldon-batch-workflow helm-charts/seldon-batch-workflow/ \
    --set workflow.name=seldon-batch-process \
    --set seldonDeployment.name=sklearn \
    --set seldonDeployment.replicas=10 \
    --set seldonDeployment.serverWorkers=1 \
    --set seldonDeployment.serverThreads=10 \
    --set batchWorker.workers=100 \
    --set batchWorker.payloadType=ndarray \
    --set batchWorker.dataType=data \
    | argo submit -
# Follow the workflow's progress and logs.
!argo list
!argo get seldon-batch-process
!argo logs -w seldon-batch-process || argo logs seldon-batch-process # The 2nd command is for argo 2.8+
create-seldon-resource: time="2020-08-06T07:21:48.400Z" level=info msg="Starting Workflow Executor" version=v2.9.3 create-seldon-resource: time="2020-08-06T07:21:48.404Z" level=info msg="Creating a docker executor" create-seldon-resource: time="2020-08-06T07:21:48.404Z" level=info msg="Executor (version: v2.9.3, build_date: 2020-07-18T19:11:19Z) initialized (pod: default/seldon-batch-process-3626514072) with template:\n{\"name\":\"create-seldon-resource-template\",\"arguments\":{},\"inputs\":{},\"outputs\":{},\"metadata\":{},\"resource\":{\"action\":\"create\",\"manifest\":\"apiVersion: machinelearning.seldon.io/v1\\nkind: SeldonDeployment\\nmetadata:\\n name: \\\"sklearn\\\"\\n namespace: default\\n ownerReferences:\\n - apiVersion: argoproj.io/v1alpha1\\n blockOwnerDeletion: true\\n kind: Workflow\\n name: \\\"seldon-batch-process\\\"\\n uid: \\\"401c8bc0-0ff0-4f7b-94ba-347df5c786f9\\\"\\nspec:\\n name: \\\"sklearn\\\"\\n predictors:\\n - componentSpecs:\\n - spec:\\n containers:\\n - name: classifier\\n env:\\n - name: GUNICORN_THREADS\\n value: 10\\n - name: GUNICORN_WORKERS\\n value: 1\\n resources:\\n requests:\\n cpu: 50m\\n memory: 100Mi\\n limits:\\n cpu: 50m\\n memory: 1000Mi\\n graph:\\n children: []\\n implementation: SKLEARN_SERVER\\n modelUri: gs://seldon-models/sklearn/iris\\n name: classifier\\n name: default\\n replicas: 10\\n \\n\"}}" create-seldon-resource: time="2020-08-06T07:21:48.404Z" level=info msg="Loading manifest to /tmp/manifest.yaml" create-seldon-resource: time="2020-08-06T07:21:48.405Z" level=info msg="kubectl create -f /tmp/manifest.yaml -o json" create-seldon-resource: time="2020-08-06T07:21:48.954Z" level=info msg=default/SeldonDeployment.machinelearning.seldon.io/sklearn create-seldon-resource: time="2020-08-06T07:21:48.954Z" level=info msg="No output parameters" wait-seldon-resource: Waiting for deployment "sklearn-default-0-classifier" rollout to finish: 0 of 10 updated replicas are available... 
wait-seldon-resource: Waiting for deployment "sklearn-default-0-classifier" rollout to finish: 1 of 10 updated replicas are available... wait-seldon-resource: Waiting for deployment "sklearn-default-0-classifier" rollout to finish: 2 of 10 updated replicas are available... wait-seldon-resource: Waiting for deployment "sklearn-default-0-classifier" rollout to finish: 3 of 10 updated replicas are available... wait-seldon-resource: Waiting for deployment "sklearn-default-0-classifier" rollout to finish: 4 of 10 updated replicas are available... wait-seldon-resource: Waiting for deployment "sklearn-default-0-classifier" rollout to finish: 5 of 10 updated replicas are available... wait-seldon-resource: Waiting for deployment "sklearn-default-0-classifier" rollout to finish: 6 of 10 updated replicas are available... wait-seldon-resource: Waiting for deployment "sklearn-default-0-classifier" rollout to finish: 7 of 10 updated replicas are available... wait-seldon-resource: Waiting for deployment "sklearn-default-0-classifier" rollout to finish: 8 of 10 updated replicas are available... wait-seldon-resource: Waiting for deployment "sklearn-default-0-classifier" rollout to finish: 9 of 10 updated replicas are available... wait-seldon-resource: deployment "sklearn-default-0-classifier" successfully rolled out download-object-store: Added `minio-local` successfully. download-object-store: `minio-local/data/input-data.txt` -> `/assets/input-data.txt` download-object-store: Total: 0 B, Transferred: 146.48 KiB, Speed: 31.81 MiB/s process-batch-inputs: Elapsed time: 35.089903831481934 upload-object-store: Added `minio-local` successfully. upload-object-store: `/assets/output-data.txt` -> `minio-local/data/output-data-401c8bc0-0ff0-4f7b-94ba-347df5c786f9.txt` upload-object-store: Total: 0 B, Transferred: 2.75 MiB, Speed: 105.34 MiB/s
Apache-2.0
examples/batch/argo-workflows-batch/README.ipynb
Syakyr/seldon-core
Check output in object storeWe can now visualise the output that we obtained in the object store.First we can check that the file is present:
# Fetch the finished workflow as JSON (IPython `!` capture returns the
# command's stdout as a list of lines) and pull out its UID — the batch
# output file in minio is suffixed with it.
import json

wf_arr = !argo get seldon-batch-process -o json
wf = json.loads("".join(wf_arr))
WF_ID = wf["metadata"]["uid"]
print(f"Workflow ID is {WF_ID}")
# Confirm the output object exists in the bucket.
!mc ls minio-seldon/data/output-data-"$WF_ID".txt
[2020-08-06 08:23:07 BST]  2.7MiB output-data-401c8bc0-0ff0-4f7b-94ba-347df5c786f9.txt 
Apache-2.0
examples/batch/argo-workflows-batch/README.ipynb
Syakyr/seldon-core
Now we can output the contents of the file created using the `mc head` command.
# Download the batch output from minio, preview it, then clean up the workflow.
!mc cp minio-seldon/data/output-data-"$WF_ID".txt assets/output-data.txt
!head assets/output-data.txt
!argo delete seldon-batch-process
Workflow 'seldon-batch-process' deleted
Apache-2.0
examples/batch/argo-workflows-batch/README.ipynb
Syakyr/seldon-core
Load data Read the csv file (first row contains the column names), specify the data types.
# Load the speeches CSV (first row holds the column names), pinning every
# column's dtype explicitly.
csv_dir = Path.cwd().parent / "data"
speeches_path = csv_dir / "all_speeches.txt"
column_dtypes = {
    'title': 'string',
    'pages': 'int64',
    'date': 'string',
    'location': 'string',
    'highest_speaker_count': 'int64',
    'content': 'string',
}
df = pd.read_csv(speeches_path, header=0, dtype=column_dtypes)
df.head()
df.dtypes
_____no_output_____
MIT
components/notebooks/Clean_Up_Speeches.ipynb
jfsalcedo10/mda-kuwait
Dates Some dates had the year missing. The year for `Community_College_Plan` has a typo.
# Inspect the dates: flag rows whose date string does not end in a 4-digit
# year, then display only the offending rows.
temp = df[['title', 'date']]
temp['has_year'] = temp.apply(lambda r: r['date'][-4:].isnumeric(), axis=1)
temp.loc[temp.has_year == False, :]
_____no_output_____
MIT
components/notebooks/Clean_Up_Speeches.ipynb
jfsalcedo10/mda-kuwait
Edit the dates that need to be corrected.
# Fix dates with known typos / missing years, echoing each value before and
# after the correction.  (The original repeated the same three-line
# print/assign/print pattern verbatim for each title; the corrections are
# data-driven here.)
_DATE_FIXES = {
    'Community_College_Plan': '9 January 2015',                # year typo: "20105"
    'Recovery_and_Reinvestment_Act_2016': '26 February 2016',  # year was missing
    'Post_Iran_Nuclear_Accord_Presser': '15 July 2015',        # year was missing
}
for title, fixed_date in _DATE_FIXES.items():
    mask = df.title == title
    print(df.loc[mask, 'date'])
    df.loc[mask, 'date'] = fixed_date
    print(df.loc[mask, 'date'], '\n')
78 9 January 20105 Name: date, dtype: string 78 9 January 2015 Name: date, dtype: string 256 26 February Name: date, dtype: string 256 26 February 2016 Name: date, dtype: string 265 15 July Name: date, dtype: string 265 15 July 2015 Name: date, dtype: string
MIT
components/notebooks/Clean_Up_Speeches.ipynb
jfsalcedo10/mda-kuwait
Parse the dates.
# Parse the cleaned date strings into datetimes; the explicit format makes
# parsing unambiguous ("%d %B %Y" = day, full month name, 4-digit year).
df['date'] = pd.to_datetime(df['date'], dayfirst=True, format="%d %B %Y")
df.head()
_____no_output_____
MIT
components/notebooks/Clean_Up_Speeches.ipynb
jfsalcedo10/mda-kuwait
The `date` column now has type `datetime`.
# Confirm the `date` column is now datetime64.
df.dtypes
_____no_output_____
MIT
components/notebooks/Clean_Up_Speeches.ipynb
jfsalcedo10/mda-kuwait
Locations Locations that specify a specific place in the White House can be replaced by `White House, Washington D.C.`.
# Collapse every location inside the White House to one canonical string.
# (case=False is equivalent to flags=re.I for str.contains.)
contains_WH = df.location.str.contains("White House", case=False)
df.loc[contains_WH, "location"] = "White House, Washington D.C."
_____no_output_____
MIT
components/notebooks/Clean_Up_Speeches.ipynb
jfsalcedo10/mda-kuwait
Make a `country`column, values for `White House` can already be filled.
# New `country` column: blank everywhere, except White House rows which are
# already known to be in the USA.
df['country'] = ""
df.loc[contains_WH, 'country'] = "USA"
_____no_output_____
MIT
components/notebooks/Clean_Up_Speeches.ipynb
jfsalcedo10/mda-kuwait
Set country to `USA` for locations that contain state names or state abbreviations. In case it contains the abbreviation, replace it by the full state name.
# US state names and their postal abbreviations (index-aligned).
states_full = ['Alabama','Alaska','Arizona','Arkansas','California','Colorado','Connecticut','Delaware','Florida','Georgia','Hawaii','Idaho','Illinois','Indiana','Iowa','Kansas','Kentucky','Louisiana','Maine','Maryland','Massachusetts','Michigan','Minnesota','Mississippi','Missouri','Montana','Nebraska','Nevada','New Hampshire','New Jersey','New Mexico','New York','North Carolina','North Dakota','Ohio','Oklahoma','Oregon','Pennsylvania','Rhode Island','South Carolina','South Dakota','Tennessee','Texas','Utah','Vermont','Virginia','Washington','West Virginia','Wisconsin','Wyoming']
states_abbr = ['AL','AK','AZ','AR','CA','CO','CT','DE','FL','GA','HI','ID','IL','IN','IA','KS','KY','LA','ME','MD','MA','MI','MN','MS','MO','MT','NE','NV','NH','NJ','NM','NY','NC','ND','OH','OK','OR','PA','RI','SC','SD','TN','TX','UT','VT','VA','WA','WV','WI','WY']

# Any location mentioning a full state name is in the USA.
for state in states_full:
    contains = df.location.str.contains(state, flags=re.I)
    df.loc[contains, 'country'] = "USA"

# Same for ", XX" postal abbreviations; additionally expand the abbreviation
# to the full state name so downstream parsing sees a single spelling.
# (zip replaces the original index-based range(len(...)) loop.)
for abbr, full in zip(states_abbr, states_full):
    contains = df.location.str.contains(r", \b" + abbr + r"\b", flags=re.I)
    df.loc[contains, 'country'] = "USA"
    df['location'] = df.location.str.replace(
        r", \b" + abbr + r"\b", repl=", " + full, flags=re.I, regex=True)

df.loc[df.country == "USA", :]
df.loc[(df.country != "USA") & (df.location != "unknown_location"), :]
_____no_output_____
MIT
components/notebooks/Clean_Up_Speeches.ipynb
jfsalcedo10/mda-kuwait
Change `Washington, D.C.` (and some variations) to `Washington D.C.`.
# Normalise "Washington, D.C." / "Washington, DC" / "Washington, D.C"
# variants to a single spelling, "Washington D.C.".
df['location'] = df.location.str.replace("Washington, D.?C.?", repl="Washington D.C.", flags=re.I, regex=True)
_____no_output_____
MIT
components/notebooks/Clean_Up_Speeches.ipynb
jfsalcedo10/mda-kuwait
If `country=='USA'`: We assume the last substring to be the state, the second to last the city, and everything before that a more specific location. If `country!='USA'`: We assume the last substring to be the country, the second to last the city, and everything before that a more specific location.
# The comma count decides how each location string is split below
# (venue, city, state/country are comma-separated when present).
df['count_commas'] = df['location'].str.count(',')
df[['location', 'country', 'count_commas']].sort_values(by='count_commas')
_____no_output_____
MIT
components/notebooks/Clean_Up_Speeches.ipynb
jfsalcedo10/mda-kuwait
USA No commas
# US locations without commas: either "Atlanta Georgia" (one special case)
# or a bare "Washington D.C.".
no_comma_us = (df.country == "USA") & (df.count_commas == 0)
print(df.loc[no_comma_us, ['title', 'location']].sort_values(by='location'), '\n')

# Fill state/city/specific_location for both patterns by hand.
df.loc[df.title == 'Ebola_CDC',
       ['state', 'city', 'specific_location']] = ['Georgia', 'Atlanta', 'no_specific_location']
df.loc[df.location == 'Washington D.C.',
       ['state', 'city', 'specific_location']] = ['no_state', 'Washington D.C.', 'no_specific_location']
title location 308 Ebola_CDC Atlanta Georgia 258 State_of_the_Union_2012 Washington D.C. 278 Health_Care_Law_Signing Washington D.C. 284 White_House_Correspondents_Dinner_2015 Washington D.C. 285 Paris_Terrorist_Attacks Washington D.C. 288 Brookings_Institute Washington D.C. 293 Go_Presidential_Election_Outcome_2016 Washington D.C. 298 Howard_University_Commencement Washington D.C. 4 Umpqua_Community_College_Shootings Washington D.C. 314 National_Holocaust_Memorial_Museum Washington D.C. 321 Iftar_Dinner_on_Religious_Tolerance Washington D.C. 322 Second_Presidential_Inaugural_Address Washington D.C. 330 White_House_Correspondent_Dinner_2014 Washington D.C. 356 Syria_Speech_to_the_Nation Washington D.C. 370 Final_State_of_the_Union_Address Washington D.C. 371 Lincoln_Memorial Washington D.C. 234 Naturalization_National_Archives Washington D.C. 215 Prayer_Breakfast_2011 Washington D.C. 203 Senate_Floor_Coretta_King Washington D.C. 181 National_Chamber_of_Commerce Washington D.C. 7 Senate_Floor_Immigration_Reform Washington D.C. 13 Holocaust_Days_of_Remembrance Washington D.C. 22 Prayer_Breakfast_2014 Washington D.C. 33 Joint_Session_on_Economy_and_Jobs Washington D.C. 62 White_House_Correspondents_Dinner_2011 Washington D.C. 63 Iran_Deal_American_University Washington D.C. 73 Iraq_War_After_4_Years Washington D.C. 401 Rosa_Parks Washington D.C. 81 State_of_the_Union_2011 Washington D.C. 93 Chicago_Cubs_WH_Visit Washington D.C. 100 Senate_Speech_on_Ohio_Electoral_Vote Washington D.C. 112 Congressional_Black_Caucus Washington D.C. 120 Oval_Office_BP Washington D.C. 139 VOX_Interview Washington D.C. 145 State_of_the_Union_2010 Washington D.C. 148 Hurricane_Sandy_Red_Cross Washington D.C. 88 State_of_the_Union_2013 Washington D.C. 417 Birth_Certificate_Release Washington D.C.
MIT
components/notebooks/Clean_Up_Speeches.ipynb
jfsalcedo10/mda-kuwait
One comma + `Washington D.C.`
# "X, Washington D.C." — one comma: X is the specific venue.
contains_WDC = df.location.str.contains("Washington D.C.", flags=re.I)
select = contains_WDC & (df.count_commas == 1)
print(df.loc[select, 'location'])

# Everything before ", Washington D.C." is the venue.
venue = df.loc[select, 'location'].str.extract(r"(.+), Washington D.C. *", flags=re.I)
df.loc[select, ['state', 'city']] = ['no_state', 'Washington D.C.']
df.loc[select, 'specific_location'] = venue.values
1 Washington Hilton, Washington D.C. 5 Washington Hilton Hotel, Washington D.C. 8 White House, Washington D.C. 11 White House, Washington D.C. 12 White House, Washington D.C. ... 414 White House, Washington D.C. 415 State Department, Washington D.C. 416 Eisenhower Building, Washington D.C. 418 U.S. Capitol Western Front, Washington D.C. 434 White House, Washington D.C. Name: location, Length: 106, dtype: string
MIT
components/notebooks/Clean_Up_Speeches.ipynb
jfsalcedo10/mda-kuwait
One comma + other
# One comma, USA, outside Washington D.C.: assume "City, State".
select = (df.country == "USA") & ~contains_WDC & (df.count_commas == 1)
print(df.loc[select, 'location'])

# Text after the comma is the state, text before it the city.
states = df.loc[select, 'location'].str.extract(r".+?, *(.+)", flags=re.I)
cities = df.loc[select, 'location'].str.extract(r"(.+?), *.+", flags=re.I)
df.loc[select, 'state'] = states.values
df.loc[select, 'city'] = cities.values
df.loc[select, 'specific_location'] = 'no_specific_location'
df.loc[select, ['location', 'country', 'state', 'city', 'specific_location']].sort_values(
    by=['state', 'city', 'specific_location'])
_____no_output_____
MIT
components/notebooks/Clean_Up_Speeches.ipynb
jfsalcedo10/mda-kuwait
Some cities need corrections. Cities that don't have a space in them are all ok, we only need to look at ones with spaces.
# Show one-comma rows whose parsed "city" still contains a space — these
# were mis-split (a venue name sat before the comma) and need manual fixes.
contains = df.loc[select, 'city'].str.contains(" ", flags=re.I)
df.loc[select & contains, ['title', 'location', 'state', 'city', 'specific_location']].sort_values(
    by=['state', 'city', 'specific_location'])

# Manual corrections keyed by title.
# BUGFIX: the original assigned parallel lists through a boolean mask
# (df.loc[df.title.isin(need_corrections), 'state'] = states), which pairs
# the list values with rows in DataFrame order, not list order — wrong
# whenever the frame's row order differs from the list's.  Assigning per
# title is order-independent.
_CORRECTIONS = {
    'Mayors_Conference_2015': ('California', 'San Francisco', 'Hilton San Francisco Union Square Hotel'),
    'White_House_Correspondents_Dinner_First': ('no_state', 'Washington D.C.', 'Washington Hilton Hotel'),
    'Beau_Biden_Eulogy': ('Delaware', 'Wilmington', 'St. Anthony of Padua Church'),
    'Gun_Violence_Denver': ('Colorado', 'Denver', 'Denver Police Academy'),
    'Second_Presidential_Election_Victory_Speech': ('Illinois', 'Chicago', 'McCormick Place'),
    'Joplin_Tornado_Victims_Memorial': ('Missouri', 'Joplin', 'Missouri Southern University'),
    'American_Legion_Conference': ('Minnesota', 'Minneapolis', 'Minneapolis Convention Center'),
    'Iraq_War_Camp_Lejeune': ('North Carolina', 'Jacksonville', 'Camp Lejeune'),
    'Martin_Dempsey_Retirement': ('Virginia', 'Arlington', 'Joint Base Myer-Henderson Hall'),
}
for title, (state, city, venue) in _CORRECTIONS.items():
    df.loc[df.title == title, ['state', 'city', 'specific_location']] = [state, city, venue]

need_corrections = list(_CORRECTIONS)  # kept for the display below
df.loc[df.title.isin(need_corrections), ['location', 'state', 'city', 'specific_location']].sort_values(
    by=['state', 'city', 'specific_location'])
_____no_output_____
MIT
components/notebooks/Clean_Up_Speeches.ipynb
jfsalcedo10/mda-kuwait
Two commas
# Two commas, USA: assume "specific_location, city, state".
select = (df.country == "USA") & (df.count_commas == 2)
print(df.loc[select, 'location'])

loc_col = df.loc[select, 'location']
states = loc_col.str.extract(r".+?,.+?, *(.+)", flags=re.I)
cities = loc_col.str.extract(r".+?, *(.+?),.+", flags=re.I)
venues = loc_col.str.extract(r" *(.+?),.+?,.+", flags=re.I)
df.loc[select, 'state'] = states.values
df.loc[select, 'city'] = cities.values
df.loc[select, 'specific_location'] = venues.values
df.loc[select, ['title', 'location', 'state', 'city', 'specific_location']].sort_values(
    by=['state', 'city', 'specific_location'])
_____no_output_____
MIT
components/notebooks/Clean_Up_Speeches.ipynb
jfsalcedo10/mda-kuwait
Some cities need corrections.
# Manual corrections for mis-split two-comma locations, keyed by title.
# BUGFIX: as with the one-comma corrections, the original's parallel-list
# assignment through a boolean mask pairs values with rows in DataFrame
# order, not list order; assigning per title is order-independent.
_CORRECTIONS2 = {
    'Obama-Romney_-_First_Live_Debate': ('Colorado', 'Denver', 'University of Denver Magness Arena'),
    'NY_NJ_Explosions': ('New York', 'New York', 'Lotte New York Palace Hotel'),
    'Afghanistan_War_Troop_Surge': ('New York', 'West Point', 'Military Academy Eisenhower Hall'),
    'Tucson_Memorial_Address': ('Arizona', 'Tucson', 'University of Arizona McKale Memorial Center'),
    'Armed_Forces_Farewell': ('Virginia', 'Arlington', 'Fort Myer Joint Base Myer-Henderson'),
}
for title, (state, city, venue) in _CORRECTIONS2.items():
    df.loc[df.title == title, ['state', 'city', 'specific_location']] = [state, city, venue]

need_corrections = list(_CORRECTIONS2)  # kept for the display below
df.loc[df.title.isin(need_corrections), ['location', 'state', 'city', 'specific_location']].sort_values(
    by=['state', 'city', 'specific_location'])
_____no_output_____
MIT
components/notebooks/Clean_Up_Speeches.ipynb
jfsalcedo10/mda-kuwait
Result for locations in USA
# Overview of all parsed US locations.
df.loc[df.country=="USA", ['location','country','state','city','specific_location']].sort_values(
    by=['state','city','specific_location'])
_____no_output_____
MIT
components/notebooks/Clean_Up_Speeches.ipynb
jfsalcedo10/mda-kuwait
Not USA, but known location Note: some US locations don't have `country=='USA'` yet Zero commas
# Known locations not yet tagged USA, with no commas — fill every field by
# hand.  (Some of these turn out to be US locations after all.)
select = (df.country != "USA") & (df.location != "unknown_location") & (df.count_commas == 0)
df.loc[select, ['title', 'location']]
titles = df.loc[select, 'title']

_manual = {
    'Joint_Presser_with_President_Benigno_Aquino': ['Philippines', 'no_state', 'Manila', 'no_specific_location'],
    'Benghazi_Remains_Transfer': ['USA', 'no_state', 'Washington D.C.', 'White House'],
    'Berlin_Address': ['Germany', 'no_state', 'Berlin', 'Victory Column'],
    'Miami_Dade_College_Commencement': ['USA', 'Florida', 'Miami', 'James L. Knight International Center'],
    'Strasbourg_Town_Hall': ['France', 'no_state', 'Strasbourg', 'Rhenus Sports Arena'],
    'Hradany_Square_Prague': ['Czech Republic', 'no_state', 'Prague', 'Hradany Square'],
    'Hurricane_Sandy_ERT': ['USA', 'no_state', 'Washington D.C.', 'White House'],
}
for title, values in _manual.items():
    df.loc[df.title == title, ['country', 'state', 'city', 'specific_location']] = values

df.loc[df.title.isin(titles), ['location', 'country', 'state', 'city', 'specific_location']].sort_values(
    by=['country', 'state', 'city', 'specific_location'])
_____no_output_____
MIT
components/notebooks/Clean_Up_Speeches.ipynb
jfsalcedo10/mda-kuwait
One comma
# Known non-US locations with one comma: assume "city, country".
select = (df.country != "USA") & (df.location != "unknown_location") & (df.count_commas == 1)
titles = df.loc[select, 'title']
df.loc[select, ['title', 'location']]

# Text after the comma is the country, text before it the city.
extracted_countries = df.loc[df.title.isin(titles), 'location'].str.extract(r".+?, *(.+)", flags=re.I)
extracted_cities = df.loc[df.title.isin(titles), 'location'].str.extract(r" *(.+?),.+", flags=re.I)
df.loc[df.title.isin(titles), 'country'] = extracted_countries.values
df.loc[df.title.isin(titles), 'state'] = 'no_state'
df.loc[df.title.isin(titles), 'city'] = extracted_cities.values
df.loc[df.title.isin(titles), 'specific_location'] = 'no_specific_location'
_____no_output_____
MIT
components/notebooks/Clean_Up_Speeches.ipynb
jfsalcedo10/mda-kuwait
some corrections
# Manual corrections after the generic "city, country" split.
# BUGFIX: the original tagged both Boston speeches with state 'New York';
# Boston is in Massachusetts (2004 DNC keynote at the FleetCenter, Boston;
# Ted Kennedy's funeral at Our Lady of Perpetual Help Basilica, Boston).
df.loc[df.title=='2004_DNC_Address', ['country','state','city','specific_location']] = ['USA', 'Massachusetts', 'Boston', 'no_specific_location']
df.loc[df.title=='Afghanistan_US_Troops_Bagram', ['country','state','city','specific_location']] = ['Afghanistan', 'no_state', 'Bagram', 'Bagram Air Field']
df.loc[df.title=='Peru_Press_Conference', ['country','state','city','specific_location']] = ['Peru', 'no_state', 'Lima', 'Lima Convention Center']
df.loc[df.title=='Rio_de_Janeiro', ['country','state','city','specific_location']] = ['Brazil', 'no_state', 'Rio de Janeiro', 'Teatro Municipal']
df.loc[df.title=='Bagram_Air_Base_December_2010', ['country','state','city','specific_location']] = ['Afghanistan', 'no_state', 'Bagram', 'Bagram Air Field']
df.loc[df.title=='Ebenezer_Baptist', ['country','state','city','specific_location']] = ['USA', 'Georgia', 'Atlanta', 'Ebenezer Baptist Church']
df.loc[df.title=='British_Parliament', ['country','state','city','specific_location']] = ['England', 'no_state', 'London', 'Westminster Hall']
df.loc[df.title=='Ted_Kennedy_Eulogy', ['country','state','city','specific_location']] = ['USA', 'Massachusetts', 'Boston', 'Our Lady of Perpetual Help Basilica']
df.loc[df.title=='Post_G7_Conference_Presser_2015', ['country','state','city','specific_location']] = ['Germany', 'no_state', 'Krun', 'Elmau Briefing Center']
df.loc[df.title=='Mexico_Address', ['country','state','city','specific_location']] = ['Mexico', 'no_state', 'Mexico City', 'Anthropological Museum']
df.loc[df.title=='Estonia_People', ['country','state','city','specific_location']] = ['Estonia', 'no_state', 'Tallinn', 'Nordea Concert Hall']
df.loc[df.title.isin(titles), ['location','country','state','city','specific_location']].sort_values(
    by=['country','state','city','specific_location'])
_____no_output_____
MIT
components/notebooks/Clean_Up_Speeches.ipynb
jfsalcedo10/mda-kuwait
Two commas
# Known non-US locations with two commas: "specific_location, city, country".
select = (df.country != "USA") & (df.location != "unknown_location") & (df.count_commas == 2)
titles = df.loc[select, 'title']
df.loc[select, ['title', 'location']]

in_titles = df.title.isin(titles)
countries = df.loc[in_titles, 'location'].str.extract(r".+?,.+?, *(.+)", flags=re.I)
cities = df.loc[in_titles, 'location'].str.extract(r".+?, *(.+?),.+", flags=re.I)
venues = df.loc[in_titles, 'location'].str.extract(r" *(.+?),.+?,.+", flags=re.I)
df.loc[in_titles, 'country'] = countries.values
df.loc[in_titles, 'state'] = 'no_state'
df.loc[in_titles, 'city'] = cities.values
df.loc[in_titles, 'specific_location'] = venues.values

# This one didn't split cleanly; fix it directly.
df.loc[df.title == 'YSEALI_Town_Hall', ['country', 'state', 'city', 'specific_location']] = ['Myanmar', 'no_state', 'Yangon', 'Yangon University']

df.loc[in_titles, ['title', 'location', 'country', 'state', 'city', 'specific_location']].sort_values(
    by=['country', 'state', 'city', 'specific_location'])
_____no_output_____
MIT
components/notebooks/Clean_Up_Speeches.ipynb
jfsalcedo10/mda-kuwait
Three commas
# Non-US rows whose location string contains exactly three commas.
three_comma_mask = (
    (df.country != "USA")
    & (df.location != "unknown_location")
    & (df.count_commas == 3)
)
df.loc[three_comma_mask, ['title', 'location']]

# Only one such speech; fill its geography by hand.
df.loc[
    df.title == 'UK_Young_Leaders',
    ['country', 'state', 'city', 'specific_location'],
] = ['England', 'no_state', 'London', 'Lindley Hall, Royal Horticulture Halls']
_____no_output_____
MIT
components/notebooks/Clean_Up_Speeches.ipynb
jfsalcedo10/mda-kuwait
Result for locations not in USA
# Display every cleaned non-US location, ordered geographically.
known_foreign = (df.country != "USA") & (df.location != "unknown_location")
geo_cols = ['country', 'state', 'city', 'specific_location']
df.loc[known_foreign, ['location'] + geo_cols].sort_values(by=geo_cols)
_____no_output_____
MIT
components/notebooks/Clean_Up_Speeches.ipynb
jfsalcedo10/mda-kuwait
Unknown locations
# Count rows whose location could not be determined at all.
n_unknown = (df.location == "unknown_location").sum()
print('There are %i unknown locations.' % n_unknown)
There are 89 unknown locations.
MIT
components/notebooks/Clean_Up_Speeches.ipynb
jfsalcedo10/mda-kuwait
drop location column Make new csv Only known locations
# Persist only the rows with a resolved location, into ../speeches_csv.
output_dir = Path.cwd().parent / "speeches_csv"
known_mask = df.location != "unknown_location"
df.loc[known_mask, :].to_csv(
    output_dir / "speeches_loc_known_cleaned.txt",
    index=False, header=True, mode='w',
)
_____no_output_____
MIT
components/notebooks/Clean_Up_Speeches.ipynb
jfsalcedo10/mda-kuwait
15.077: Problem Set 3Alex Berke (aberke)From Rice, J.A., Mathematical Statistics and Data Analysis (with CD Data Sets), 3rd ed., Duxbury, 2007 (ISBN 978-0-534-39942-9).
# Notebook environment setup for problem set 3.
%config Completer.use_jedi = False # autocomplete
import math
import numpy as np
import pandas as pd
import scipy.special
from scipy import stats
from sklearn.utils import resample  # bootstrap resampling (problem 10.48E)
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
_____no_output_____
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
Problems 10.48In 1970, Congress instituted a lottery for the military draft to support the unpopular war in Vietnam. All 366 possible birth dates were placed in plastic capsules in a rotating drum and were selected one by one. Eligible males born on the first day drawn were first in line to be drafted followed by those born on the second day drawn, etc. The results were criticized by some who claimed that government incompetence at running a fair lottery resulted in a tendency of men born later in the year being more likely to be drafted. Indeed, later investigation revealed that the birthdates were placed in the drum by month and were not thoroughly mixed. The columns of the file 1970lottery are month, month number, day of the year, and draft number.
# Load the 1970 draft-lottery table; the raw file wraps names in single
# quotes, so strip apostrophes from both the header and the Month values.
lottery = pd.read_csv('1970lottery.txt')
lottery = lottery.rename(columns=lambda c: c.replace("'", ""))
lottery['Month'] = lottery['Month'].str.replace("'", "", regex=False)
lottery.head()
_____no_output_____
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
A. Plot draft number versus day number. Do you see any trend?No, a trend is not clear from a simple plot with these two variables.
# Scatter of draft number against day of year; no trend is visually obvious.
_ = lottery.plot.scatter(x='Day_of_year', y='Draft_No',
                         title='Day of year vs draft number')
_____no_output_____
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
B. Calculate the Pearson and rank correlation coefficients. What do they suggest?Both correlation coefficcients suggest there could be a relationship that is not immediately visible.
# Pearson and Spearman coefficients between draft order and birthday.
draft_no, day = lottery['Draft_No'], lottery['Day_of_year']
pearson_r, _ = stats.pearsonr(draft_no, day)
spearman_r, _ = stats.spearmanr(draft_no, day)
print('Pearson correlation coefficient: %0.4f' % pearson_r)
print('Spearman rank correlation coefficient: %0.4f' % spearman_r)
Pearson correlation coefficient: -0.2260 Spearman rank correlation coefficient: -0.2258
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
C. Is the correlation statistically significant? One way to assess this is via a permutation test. Randomly permute the draft numbers and find the correlation of this random permutation with the day numbers. Do this 100 times and see how many of the resulting correlation coefficients exceed the one observed in the data. If you are not satisfied with 100 times, do it 1,000 times.The correlation does appear statistically significant.This is because after 1000 iterations of randomly permuting the draft numbers and computing correlation coefficients between those draft numbers and day numbers, we compute the 95% CI for these correlations (see below) and see that the true correlation coefficients fall outside these confidence intervals.Pearson correlation coefficient 95% CI for randomly permuted draft numbers vs day:(-0.0044, 0.0019)Spearman rank correlation coefficient 95% CI for randomly permuted draft numbers vs day:(-0.0044, 0.0019)True correlation coefficients:Pearson correlation coefficient: -0.2260Spearman rank correlation coefficient: -0.2258
# Permutation test: shuffle the draft numbers to break any real
# association, then record both correlation coefficients against day.
iterations = 1000
pearson_coefficients = []
rank_coefficients = []
for i in range(iterations):
    # A random permutation simulates the null hypothesis of a fair lottery.
    permuted_draft_no = np.random.permutation(lottery['Draft_No'])
    pearson_coefficients += [stats.pearsonr(permuted_draft_no, lottery['Day_of_year'])[0]]
    rank_coefficients += [stats.spearmanr(permuted_draft_no, lottery['Day_of_year'])[0]]
# 95% t-based confidence intervals for the permutation-null coefficients;
# the observed coefficients (~ -0.226) fall far outside them.
print('Pearson correlation coefficient 95% CI for randomly permuted draft numbers vs day:')
print('(%0.4f, %0.4f)' % stats.t.interval(0.95, len(pearson_coefficients)-1, loc=np.mean(pearson_coefficients), scale=stats.sem(pearson_coefficients)))
print('Spearman rank correlation coefficient 95% CI for randomly permuted draft numbers vs day:')
print('(%0.4f, %0.4f)' % stats.t.interval(0.95, len(rank_coefficients)-1, loc=np.mean(rank_coefficients), scale=stats.sem(rank_coefficients)))
# Histograms of the null-distribution coefficients (centered near 0).
fig, ax = plt.subplots(1, 2, figsize=(15, 5), sharey=True)
fig.suptitle('Correlation coefficicents for randomly permuted draft numbers vs day number (%s iterations)' % iterations)
ax[0].hist(pearson_coefficients)
ax[1].hist(rank_coefficients)
ax[0].set_title('Pearson correlation')
ax[1].set_title('Spearman rank correlation')
ax[0].set_xlabel('Pearson correlation coefficient')
_ = ax[1].set_xlabel('Spearman rank correlation coefficient')
_____no_output_____
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
D. Make parallel boxplots of the draft numbers by month. Do you see any pattern? The mean draft numbers are lowest in the (later) months of November and December.
# Parallel boxplots of draft number by month.
# FIX: the original iterated `months.values()`, but `months` is never
# defined anywhere in this notebook, raising NameError. The lottery file
# is in calendar order, so `unique()` yields January..December in order.
month_names = lottery['Month'].unique()
pd.DataFrame(
    {m: lottery[lottery['Month'] == m]['Draft_No'] for m in month_names}
).boxplot(grid=False)
_____no_output_____
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
E. Examine the sampling variability of the two correlation coefficients (Pearson and rank) using the bootstrap (re-sampling pairs with replacement) with 100 (or 1000) bootstrap samples. How does this compare with the permutation approach?The results are drastically different from the results of the permutation approach. The random permutation correlation coefficients were (as expected) centered around a mean of 0. The bootstrap Pearson and Rank correlation coefficient values are both around -0.22.The 95% CIs for the bootstrap vs permutation method values do not overlap.
# Bootstrap: resample (day, draft) PAIRS with replacement, preserving the
# observed association, and record both coefficients per resample.
pearson_coefficients = []
rank_coefficients = []
for i in range(iterations):
    resampled = resample(lottery)
    pearson_coefficients += [stats.pearsonr(resampled['Draft_No'], resampled['Day_of_year'])[0]]
    rank_coefficients += [stats.spearmanr(resampled['Draft_No'], resampled['Day_of_year'])[0]]
# 95% t-based CIs for the bootstrap sampling distribution (~ -0.22,
# far from the permutation-null CIs around 0).
print('Pearson correlation coefficient 95%% CI for (%s) bootstrap samples:' % iterations)
print('(%0.4f, %0.4f)' % stats.t.interval(0.95, len(pearson_coefficients)-1, loc=np.mean(pearson_coefficients), scale=stats.sem(pearson_coefficients)))
print('Spearman rank correlation coefficient 95%% CI for (%s) bootstrap samples:' % iterations)
print('(%0.4f, %0.4f)' % stats.t.interval(0.95, len(rank_coefficients)-1, loc=np.mean(rank_coefficients), scale=stats.sem(rank_coefficients)))
# Histograms of the bootstrap coefficients.
fig, ax = plt.subplots(1, 2, figsize=(15, 5), sharey=True)
fig.suptitle('Correlation coefficicents for draft numbers vs day number (%s) bootstrap samples' % iterations)
ax[0].hist(pearson_coefficients)
ax[1].hist(rank_coefficients)
ax[0].set_title('Pearson correlation')
ax[1].set_title('Spearman rank correlation')
ax[0].set_xlabel('Pearson correlation coefficient')
_ = ax[1].set_xlabel('Spearman rank correlation coefficient')
Pearson correlation coefficient 95% CI for (1000) bootstrap samples: (-0.2292, -0.2231) Spearman rank correlation coefficient 95% CI for (1000) bootstrap samples: (-0.2285, -0.2224)
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
11.15 Suppose that n measurements are to be taken under a treatment condition and another n measurements are to be taken independently under a control condition. It is thought that the standard deviation of a single observation is about 10 under both conditions. How large should n be so that a 95% confidence interval for ฮผX โˆ’ ฮผY has a width of 2? Use the normal distribution rather than the t distribution, since n will turn out to be rather large. find: n control = n treatmentฯƒ = 10|ฮผX โˆ’ ฮผY| = 2ฮฑ = 0.05From Rice 11.2.1: Since we are using the normal distribution, and ฯƒ is assumed, a confidence interval for (ฮผX โˆ’ ฮผY) can be based on:$Z = \frac{(\bar{X} - \bar{Y}) - (ฮผX โˆ’ ฮผY)}{\sigma \sqrt{\frac{1}{n} + \frac{1}{m}}} = \frac{(\bar{X} - \bar{Y}) - (ฮผX โˆ’ ฮผY)}{\sigma \sqrt{\frac{2}{n}}} $The confidence interval is then of form:$ (\bar{X} - \bar{Y}) \pm z_{ฮฑ/2} \sigma \sqrt{\frac{2}{n}} $Since the confidence interval is symmetric around $ (\bar{X} - \bar{Y})$ and its width is 2 we have:$ 2 = 2 (z_{ฮฑ/2}) \sigma \sqrt{\frac{2}{n}} $rearranging terms we have:$ n = ((z_{ฮฑ/2})\sigma \sqrt{2} )^2 = (1.96)^2 (10)^2 2 = 768.32$i.e. n= 768 11.16 Referring to Problem 15, how large should n be so that the test of H0: ฮผX = ฮผY against the one-sided alternative HA : ฮผX > ฮผY has a power of .5 if ฮผX โˆ’ ฮผY = 2 and ฮฑ = .10? find: n control = n treatmentฯƒ = 10ฮฑ = 0.05ฮ” = (ฮผX โˆ’ ฮผY) = 2Based on Rice 11.2.2 this is a problem in solving:$ 1 - \beta = 0.5 = 1 - ฮฆ[z(\alpha) - \frac{ฮ”}{\sigma}\sqrt{\frac{n}{2}}] $Rearranging we have:$ 0.5 = ฮฆ[z(\alpha) - \frac{ฮ”}{\sigma}\sqrt{\frac{n}{2}}] $Since $0.5 = ฮฆ(0)$ this means solving for n where:$0 = z(\alpha) - \frac{ฮ”}{\sigma}\sqrt{\frac{n}{2}}$$ \frac{ฮ”}{\sigma}\sqrt{\frac{n}{2}} = z(\alpha)$$ n = (z(\alpha)\frac{\sigma}{ฮ”} \sqrt{2})^2 = (1.28)^2 (\frac{10}{2})^2 2 = 81.92 $i.e. n = 82 11.39 An experiment was done to test a method for reducing faults on telephone lines (Welch 1987). 
Fourteen matched pairs of areas were used. The following table shows the fault rates for the control areas and for the test areas:
# Fault rates for 14 matched area pairs (Welch 1987); the raw values
# alternate test, control, test, control, ...
raw = ("676 88 206 570 230 605 256 617 280 653 433 2913 337 924 "
       "466 286 497 1098 512 982 794 2346 428 321 452 615 512 519")
values = [int(v) for v in raw.split()]
df = pd.DataFrame({'test': values[0::2], 'control': values[1::2]})
df
_____no_output_____
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
A. Plot the differences versus the control rate and summarize what you see.
# Paired differences (test minus control), plotted against the control rate.
differences = df['test'] - df['control']
fig, ax = plt.subplots()
ax.scatter(df['control'], differences)
ax.set_xlabel('control values')
ax.set_ylabel('difference values')
_ = ax.set_title(' difference vs control values')
_____no_output_____
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
The difference values decrease as the control values increase. This relationship appears linear. B. Calculate the mean difference, its standard deviation, and a confidence interval. Let $D_i$ be the difference value.From Rice Section 11.3.1, a 100(1 โˆ’ ฮฑ)% confidence interval for $ฮผ_D$ is$ \bar{D} \pm t_{n-1}(ฮฑ/2)s_{\bar{D}}$And from class slides โ€œSamplesโ€ (slide 44):$s_{\bar{D}}^2 = \frac{1}{n (n - 1)} \sum{(D_i - \bar{D})^2} $
# Mean paired difference and the standard error of that mean,
# s_Dbar^2 = sum((D_i - Dbar)^2) / (n (n - 1)).
n = len(differences)
mean_D = differences.mean()
sum_sq_dev = ((differences - mean_D) ** 2).sum()
std = np.sqrt(sum_sq_dev / ((n - 1) * n))
print('The mean difference ~ %s' % round(mean_D, 2))
print('The standard deviation ~ %s' % round(std, 2))
The standard deviation ~ 202.53
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
Since $n = 14$ and $t_{13}(0.025) = 2.160$, a 95% confidence interval for the mean difference is$ -461.29 \pm (437.46) = -461.29 \pm (2.160 \times 202.53) = \bar{D} \pm t_{n-1}(ฮฑ/2)s_{\bar{D}}$
# 95% CI: Dbar +/- t_13(0.025) * s_Dbar, with t_13(0.025) = 2.160.
margin = 2.160 * std
print('95%% CI (%s , %s)' % (round(mean_D - margin, 2), round(mean_D + margin, 2)))
95% CI (-898.76 , -23.81)
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
C. Calculate the median difference and a confidence interval and compare to the previous result. Based on Rice section 10.4: We can sort the values, and find the median and find confidence intervals around that median by using a binomial distribution.
# Order the paired differences and report the sample median.
sorted_differences = sorted(differences)
n = len(sorted_differences)
median = np.median(sorted_differences)
print('sorted difference values:', sorted_differences)
print('ฮท: median value = %s' % median)
print('n = %s' % n)
sorted difference values: [-2480, -1552, -601, -587, -470, -375, -373, -364, -361, -163, -7, 107, 180, 588] ฮท: median value = -368.5 n = 14
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
We look to form a confidence interval of the following form: (X(k), X(n−k+1)). The coverage probability of this interval is P(X(k) ≤ η ≤ X(n−k+1)) = 1 − P(η < X(k)) − P(η > X(n−k+1)). The distribution of the number of observations greater than the median is binomial with n trials and probability $\frac{1}{2}$ of success on each trial. Thus, P(j observations are greater than η) = $ \frac{1}{2^n} \binom{n}{j} $. Now I make a cumulative binomial distribution table for n = 14, p = 0.5: P[X ≤ k]
# Cumulative Binomial(n, 1/2) probabilities P[X <= k] for k = 0..n/2-1,
# used to choose order statistics bracketing the median.
scale = 1 / (2 ** n)
binomials = [
    scale * sum(scipy.special.binom(n, j) for j in range(k + 1))
    for k in range(int(n / 2))
]
pd.Series(binomials).rename('P[X โ‰ค k]').to_frame()
_____no_output_____
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
By symmetry, P(X < k) = P(X > n−k+1). We can choose k = 4: P(X < 4) = P(X > 11) = 0.0287. Since $2 \times 0.0287 = 0.0574$, we have about a 94% confidence interval for (X(4), X(11)), which is:
# The ~94% interval (X_(4), X_(11)) in the 1-based order-statistic
# notation; zero-based list indices 4 and 11.
lower, upper = sorted_differences[4], sorted_differences[11]
print('(%s, %s)' % (lower, upper))
(-470, 107)
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
In summary, the median vaue is -368.5 with a 94% CI of (-470, 107).In comparison, the mean value is approximately -461.29 with a 95% CI of (-898.76 , -23.81).The mean value is lower than the median value and it has a wider confidence interval due to its larger standard deviation. The mean value and its standard deviation is clearly effected by the extreme negative values. D . Do you think it is more appropriate to use a t test or a nonparametric method to test whether the apparent difference between test and control could be due to chance? Why? Carry out both tests and compare. A nonparametric test, namely the Signed Rank Test would be better. This is for the following reasons:- The t test assumes the values follow a normal distribution. However, the following plots show this assumption does not hold for the differences values.- The differences data has extreme outliers and the t test is sensitive to outliers while the Signed Rank Test is not.
# Visual normality check for the paired differences: a normal probability
# (Q-Q) plot alongside a histogram. Both show heavy-tailed outliers,
# arguing against the t test's normality assumption.
fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(15,5))
stats.probplot(differences.values, plot=ax0)
ax0.set_title('Normal probability plot: difference values')
ax1.hist(differences)
_ = ax1.set_title('Histogram: difference values')
_____no_output_____
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
Using a t test:The null hypothesis is for no difference, i.e.$H_0: ฮผ_๐ท = 0 $The test statistic is then$ t = \frac{\bar{D} - ฮผ_๐ท}{s_{\bar{D}}} = \frac{\bar{D}}{s_{\bar{D}}} $ which follows a t distribution with n - 1 degrees of freedom.
# Paired t statistic under H0: mu_D = 0, i.e. |Dbar| / s_Dbar
# (t distribution with n - 1 degrees of freedom).
t = abs(mean_D) / std
print('t = %s' % round(t, 3))
t = 2.278
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
From the t distribution table:$ t_{13}(0.025) = 2.160$ and $t_{13}(0.01) = 2.650 $ so the p-value of a two-sided test is less than .05 but not less than 0.02. Using The Signed Rank Test:
# Build the Wilcoxon signed-rank table: rank the differences by absolute
# value, then attach the sign of the original difference to each rank.
test_df = differences.rename('difference').to_frame()
test_df['|difference|'] = np.abs(test_df['difference'])
# Sort by magnitude; the second reset_index gives 0-based positions,
# bumped to 1-based ranks below.
# NOTE(review): ties would need midranks here; this data has no ties.
test_df = test_df.sort_values('|difference|').reset_index().drop('index', axis=1).reset_index().rename( columns={'index':'rank'} )[['difference', '|difference|', 'rank']]
test_df['rank'] = test_df['rank'] + 1
# Negative differences carry negative ranks.
test_df['signed rank'] = test_df.apply(lambda row: row['rank'] * -1 if row['difference'] < 0 else row['rank'], axis=1)
test_df
_____no_output_____
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
We now calculate W+ by summing the positive ranks.
# W+ = sum of the ranks attached to positive differences.
w_plus = sum(r for r in test_df['signed rank'] if r > 0)
print('W+ = %s' % w_plus)
W+ = 17
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
From Table 9 of Appendix B, the two-sided test is significant at α = .05, but not at α = .02. The findings of the t test and the Signed Rank Test are consistent.
# Average target rainfall (inches) per occasion: 13 control and 22 seeded
# cases, each measured over Type I and Type II targets (rows are
# (Type I, Type II) pairs, in chronological order).
_control_rows = [
    (.0080, .0000), (.0046, .0000), (.0549, .0053), (.1313, .0920),
    (.0587, .0220), (.1723, .1133), (.3812, .2880), (.1720, .0000),
    (.1182, .1058), (.1383, .2050), (.0106, .0100), (.2126, .2450),
    (.1435, .1529),
]
_seeded_rows = [
    (.1218, .0200), (.0403, .0163), (.1166, .1560), (.2375, .2885),
    (.1256, .1483), (.1400, .1019), (.2439, .1867), (.0072, .0233),
    (.0707, .1067), (.1036, .1011), (.1632, .2407), (.0788, .0666),
    (.0365, .0133), (.2409, .2897), (.0408, .0425), (.2204, .2191),
    (.1847, .0789), (.3332, .3570), (.0676, .0760), (.1097, .0913),
    (.0952, .0400), (.2095, .1467),
]
control = pd.DataFrame(_control_rows, columns=['Type I', 'Type II'])
seeded = pd.DataFrame(_seeded_rows, columns=['Type I', 'Type II'])
control
seeded
_____no_output_____
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
First we compare the mean values of each sample with boxplots, and check for normality with normal probability plots.
# Q-Q plots for each sample/target combination to assess normality,
# followed by parallel boxplots comparing control vs seeded rainfall.
fig, (ax0, ax1, ax2, ax3) = plt.subplots(1, 4, figsize=(20,5))
fig.suptitle('Normal probability plots of values')
stats.probplot(control['Type I'], plot=ax0)
stats.probplot(control['Type II'], plot=ax1)
ax0.set_title('Control Type I')
ax1.set_title('Control Type II')
stats.probplot(seeded['Type I'], plot=ax2)
stats.probplot(seeded['Type II'], plot=ax3)
ax2.set_title('Seeded Type I')
_ = ax3.set_title('Seeded Type II')
# Side-by-side boxplots of all four samples.
fig, ax = plt.subplots()
ax.boxplot([
    control['Type I'],
    control['Type II'],
    seeded['Type I'],
    seeded['Type II'],
], labels=[
    'Control Type I',
    'Control Type II',
    'Seeded Type I',
    'Seeded Type II',
])
plt.show()
_____no_output_____
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
The boxplots do not show significant differences.The means of the seeded samples overlap with the values of their control counterparts. The seeded samples have longer tails.The normal probability plots show that the control data does not follow a normal distribution and the sample sizes are relatively small. For this reason, a t test may not be appropriate. It would seem safer to use a nonparametric method.We use the Mann-Whitney test to test the null hypothesis that there is no difference in the control and seeded samples.We test Type I vs Type II separately. i.e. we first compare `Control Type I vs Seeded Type I`and then `Control Type II vs Seeded Type II`.n = the number of control samplesm = the number of seeded samples
# Sample sizes: n = control occasions, m = seeded occasions.
n, m = len(control), len(seeded)
print('n = %s' % n)
print('m = %s' % m)
n = 13 m = 22
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
From Rice Section 11.2.3, a test statistic is calculated in the following way. First, we group all m + n observations together and rank them in order of increasing size. Let $n_1$ be the smaller sample size and let R be the sum of the ranks from that sample. Let $R' = n_1(m + n + 1) − R$ and $R^* = min(R, R')$. In our case the smaller sample is the control, so we compute R as the summed ranks of the control and $R' = 13 (22 + 13 + 1) − R$. Test for Type I: See computations below. We can visually see there are no ties to worry about. R* = min(R, R') = 221. From [these Wilcoxon Rank-Sum Tables](https://www.real-statistics.com/statistics-tables/wilcoxon-rank-sum-table-independent-samples/) we see 195 is the critical value for a two-tailed test with α = .2. We therefore do not have evidence to reject the null hypothesis.
# Mann-Whitney / rank-sum computation for Type I targets: pool the 13
# control and 22 seeded values, rank them jointly, and sum the control
# ranks (the smaller sample).
# FIX: pd.concat replaces DataFrame.append, which was removed in
# pandas 2.0; the confusable math-italic identifier (U+1D445) is also
# normalized to a plain ASCII `R` (Python NFKC-folds them to the same
# name, but the lookalike glyph is a maintenance hazard).
type1 = pd.concat([
    pd.DataFrame({'value': control['Type I'], 'sample': 'control'}),
    pd.DataFrame({'value': seeded['Type I'], 'sample': 'seeded'}),
]).sort_values('value').reset_index(drop=True).reset_index().rename(
    columns={'index': 'rank'})
type1['rank'] += 1  # ranks run 1..(n+m)
R = type1[type1['sample'] == 'control']['rank'].sum()
print('R = %s' % R)
# R' = n1(m + n + 1) - R with n1 = 13, the smaller sample size.
R_prime = 13*(22+13+1) - R
print('๐‘…โ€ฒ= ๐‘›1(๐‘š+๐‘›+1)โˆ’๐‘… = %s' % R_prime)
R_star = min(R, R_prime)
print('๐‘…* = ๐‘š๐‘–๐‘›(๐‘…,๐‘…โ€ฒ) = %s' % R_star)
type1
R = 221 ๐‘…โ€ฒ= ๐‘›1(๐‘š+๐‘›+1)โˆ’๐‘… = 247 ๐‘…* = ๐‘š๐‘–๐‘›(๐‘…,๐‘…โ€ฒ) = 221
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
Test for Type II: See computations below. There are 3 tied values of 0. We can assign them each the value (1 + 2 + 3) / 3 = 2. However, since they are all in the same sample (control) this does not make a difference. R* = min(R, R') = 199. From [these Wilcoxon Rank-Sum Tables](https://www.real-statistics.com/statistics-tables/wilcoxon-rank-sum-table-independent-samples/) we see 195 is the critical value for a two-tailed test with α = .2, and 176 is the critical value at a stricter level. We therefore have a suggestion to reject the null hypothesis with 80% confidence for the type II target.
# Mann-Whitney / rank-sum computation for Type II targets (same recipe
# as for Type I). The three tied zeros all sit in the control sample,
# so the arbitrary within-tie ordering does not change the rank sum.
# FIX: pd.concat replaces DataFrame.append (removed in pandas 2.0), and
# the math-italic lookalike identifier is normalized to plain `R`.
type2 = pd.concat([
    pd.DataFrame({'value': control['Type II'], 'sample': 'control'}),
    pd.DataFrame({'value': seeded['Type II'], 'sample': 'seeded'}),
]).sort_values('value').reset_index(drop=True).reset_index().rename(
    columns={'index': 'rank'})
type2['rank'] += 1  # ranks run 1..(n+m)
R = type2[type2['sample'] == 'control']['rank'].sum()
print('R = %s' % R)
# R' = n1(m + n + 1) - R with n1 = 13, the smaller sample size.
R_prime = 13*(22+13+1) - R
print('๐‘…โ€ฒ= ๐‘›1(๐‘š+๐‘›+1)โˆ’๐‘… = %s' % R_prime)
R_star = min(R, R_prime)
print('๐‘…* = ๐‘š๐‘–๐‘›(๐‘…,๐‘…โ€ฒ) = %s' % R_star)
type2
R = 199 ๐‘…โ€ฒ= ๐‘›1(๐‘š+๐‘›+1)โˆ’๐‘… = 269 ๐‘…* = ๐‘š๐‘–๐‘›(๐‘…,๐‘…โ€ฒ) = 199
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
In general, there is weak evidence that seeding has an effect on either type of target. 13.24 Is it advantageous to wear the color red in a sporting contest? According to Hill and Barton (2005): Although other colours are also present in animal displays, it is specifically the presence and intensity of red coloration that correlates with male dominance and testosterone levels. In humans, anger is associated with a reddening of the skin due to increased blood flow, whereas fear is as- sociated with increased pallor in similarly threatening situations. Hence, increased redness during aggressive interactions may reflect relative dominance. Because artificial stimuli can exploit innate responses to natural stimuli, we tested whether wearing red might influence the outcome of physical contests in humans. In the 2004 Olympic Games, contestants in four combat sports (boxing, tae kwon do, Greco-Roman wrestling, and freestyle wrestling) were randomly assigned red or blue outfits (or body protectors). If colour has no effect on the outcome of contests, the number of winners wearing red should be statistically indistinguishable from the number of winners wearing blue. They thus tabulated the colors worn by the winners in these contests:
# Winner counts by outfit colour in the four 2004 Olympic combat sports
# (Hill & Barton 2005), plus marginal row/column totals.
_wins = {
    'Boxing': (148, 120),
    'Freestyle Wrestling': (27, 24),
    'Greco Roman Wrestling': (25, 23),
    'Tae Kwon Do': (45, 35),
}
sports = pd.DataFrame.from_dict(_wins, orient='index', columns=['Red', 'Blue'])
sports.index.name = 'Sport'
sports
sports['Total'] = sports['Red'] + sports['Blue']
sports.loc['Total'] = sports.sum()
sports
_____no_output_____
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
Some supplementary information is given in the file red-blue.txt. a. Let ฯ€R denote the probability that the contestant wearing red wins. Test the null hypothesis that ฯ€R = ยฝ versus the alternative hypothesis that ฯ€R is the same in each sport, but ฯ€R =ฬธ ยฝ .The null and alternative hypothesis setup models a game as a Bernoulli trial where the probabilily of a win (success) is ฯ€R for any sport.Since that is the case we can model the Bernoulli trial over the sum of all the games in all the sports.There were n=447 total games, with r=245 wins by red. The probability of this with the null hypothesis that H0: ฯ€R = ยฝis:$Pr(X=r) = \binom{n}{r}p^r(1 - p)^{n-r}$$Pr(X=245) = \binom{447}{245}0.5^{245}(0.5)^{202} = \binom{447}{245}0.5^{447} = 0.0048 $We can therefore reject the null hypothesis with alpha = 0.01Since n is large, we could also apply the Central limit Theorem and obtain the test statistic and its distribution under the null hypothesis:$ Z = \frac{X - E(X)}{\sqrt{Var(X)}} $ Where Z --> N(0,1) as n --> โˆžIn this case, $E(X) = n (ฯ€_R) = 447 (0.5) = 223.5$$ \sqrt{Var(X)} = \sqrt{n (p)(1 - p)} $$ Z = \frac{245 - 223.5}{\sqrt{447 (0.5)(0.5)}} = 2.03 $$P(Z > 2) = .025 $Since Z is symmetric, this coresponds to alpha = 0.05.Again, we have evidence to reject the null hypothesis for the alternative hypothesis, with alpha = 0.05.
# Exact Binomial(447, 1/2) probability of exactly 245 red wins, and the
# normal-approximation z statistic for the same count.
p_exact = scipy.special.comb(447, 245) * (0.5 ** 447)
print('(447 choose 245) x (0.5)^447 = %s' % p_exact)
z_stat = (245 - 223.5) / np.sqrt(447 * 0.5 * 0.5)
print('Z = %s' % z_stat)
Z = 2.033830210313729
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
b. Test the null hypothesis ฯ€R = ยฝ against the alternative hypothesis that allows ฯ€R to be different in different sports, but not equal to ยฝ.Again, games are modeled as bernoulli trials, but this time the bernoulli trials are independent across sports. Since the minimum n (total games) from each sport is reasonably large, the probability of the outcome can be approximated with a normal distribution.Since the $X^2$ distribution is defined as a sum of squared independent and identically distributed standard normal distributions, we can use the $X^2$ test statistic. The number of degrees of freedom are the different sports.i.e.$X_{k}^2 = \sum_{i=1}^{k} \frac{(x_i - p_i n_i)^2}{n_i p_i (1 - p_i)} $(The denominator is the definition of Var(X) squared)In our case, we are testing the null hypothesis where:$H_0: p_i = 0.5, i=1,2,3,4 $$k = 4$$n_i$ is the total number of games in each sport Tables show the $X^2$ critical value with 4 degrees of freedom is 7.78 for alpha=0.1 and 9.49 for alpha = 0.05. The test statistic value is 8.57.There is evidence to reject the null hypothesis with 90% confidence, but not 95% confidence.
# Chi-squared statistic for H0: pi_R = 1/2 in each sport, summing
# (O - E)^2 / Var over the four sports (4 degrees of freedom).
# FIX: the original summed over the whole `sports` frame, which by now
# also contains the marginal 'Total' row; its (245 - 223.5)^2 / 111.75
# term double-counts the pooled test and inflated the statistic from
# ~4.44 to the previously printed 8.57.
per_sport = sports.drop('Total', axis=0)
t = (((per_sport['Red'] - 0.5 * per_sport['Total']) ** 2)
     / (per_sport['Total'] * 0.5 * 0.5)).sum()
print('chi-squared = %s' % t)
chi-squared = 8.571642380281773
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
C. Are either of these hypothesis tests equivalent to that which would test the null hypothesis π_R = ½ versus the alternative hypothesis π_R ≠ ½ using as data the total numbers of wins summed over all the sports? Yes, (A) was equivalent to this. D. Is there any evidence that wearing red is more favorable in some of the sports than others? From Rice 13.3, we can use a chi-squared test of homogeneity, modeling the outcomes of each of the 4 sports as (I=4) multinomial distributions, each with J=2 cells. We want to test the homogeneity of the multinomial distributions, i.e. we test the null hypothesis against the alternative: $H_0: π_{1,1} = π_{2,1} = π_{3,1} = π_{4,1}$ ($π_{i,2}$ is determined by $π_{i,1}$); $H_1$: not all probabilities are as specified in $H_0$. $df = (I-1)(J-1) = 3$. $ X^2 = \sum_{i}^4 \sum_{j}^2 \frac{(O_{i,j} - E_{i,j})^2}{E_{i,j}} $, where $E_{i,j} = \frac{n_{.j}n_{i.}}{n_{..}} $. The output expected frequencies from the following test are close to the observed frequencies. The 10% point with 3 df is 6.25. The data are consistent with the null hypothesis.
# Chi-squared test of homogeneity across the four sports.
# Strip the marginal 'Total' row and column so only observed counts
# remain. FIX: the original evaluated this drop expression once, threw
# the result away (a non-final bare expression displays nothing), and
# recomputed it on the next line; name it once instead.
observed = sports.drop('Total', axis=0).drop('Total', axis=1)
chi2, p, dof, ex = stats.chi2_contingency(observed)
assert dof == 3  # (4 sports - 1) * (2 colours - 1)
print('chi-squared test statistic = %s' % chi2)
print('p = %s' % p)
print('\nexpected frequencies:')
pd.DataFrame(ex, columns=['Red', 'Blue'])
chi-squared test statistic = 0.3015017799642389 p = 0.9597457890114767 expected frequencies:
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
E. From an analysis of the points scored by winners and losers, Hill and Barton concluded that color had the greatest effect in close contests. Data on the points of each match are contained in the file red-blue.xls. Analyze this data and see whether you agree with their conclusion. Here we analyze the sports separately. We compare the distribution of absolute difference in points scored and the number of Red vs Blue winners. A smaller absolute difference in points means a closer contest.First we load the data for each sport, and plot the differences against wins.
def get_red_blue_points_differences(points_fpath):
    """Load one sport's match file and return the Winner column plus the
    points scored by each colour and |Difference| = |Red - Blue| per match.
    Rows with missing points are dropped."""
    df =pd.read_csv(points_fpath)[
        ['Winner','Points Scored by Red', 'Points Scored by Blue']
    ].dropna()
    df['|Difference|'] = np.abs(df['Points Scored by Red'] - df['Points Scored by Blue'])
    return df

def plot_point_differences(df, name):
    """Overlaid histograms of point margins, split by winning colour.

    Uses one bin per point of maximum margin; alpha=0.5 keeps both
    distributions visible where they overlap.
    """
    fig, ax = plt.subplots(1,1)
    max_points_diff = df['|Difference|'].max()
    ax.hist(
        df[df['Winner']=='Red']['|Difference|'],
        alpha=0.5,
        color='red',
        label='Red wins (n=%s)'%len(df[df['Winner']=='Red']),
        bins=int(max_points_diff)
    )
    ax.hist(
        df[df['Winner']=='Blue']['|Difference|'],
        alpha=0.5,
        color='blue',
        label='Blue wins (n=%s)'%len(df[df['Winner']=='Blue']),
        bins=int(max_points_diff)
    )
    ax.set_ylabel('Wins')
    ax.set_xlabel('Points difference')
    ax.legend()
    ax.set_title('%s: Point differences vs wins by Red and Blue' % name)
    plt.show()

# Load each sport's matches and plot margins for red vs blue winners.
tkd = get_red_blue_points_differences('red-blue TKD.txt')
boxing = get_red_blue_points_differences('red-blue boxing.txt')
gr_wrestling = get_red_blue_points_differences('red-blue GR.txt')
fw_wrestling = get_red_blue_points_differences('red-blue FW.txt')
plot_point_differences(tkd, 'Tae Kwon Doe')
plot_point_differences(boxing, 'Boxing')
plot_point_differences(gr_wrestling, 'Greco Roman Wrestling')
plot_point_differences(fw_wrestling, 'Freestyle Wrestling')
_____no_output_____
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
It does appear that for each of the sports, *excluding* Freestyle Wrestling, the distribution of Red wins are skewed towards the smaller points differences, as compared to the Blue wins. We already saw from previous tests (parts A and B) that Red has a statistically significant higher chance of winning (p > 0.5). However, maybe these extra wins are due to the close games.Here we test a slightly different kind of hypothesis: Wearing Red vs Blue has an impact on how many points are scored over the opponent.Well actually we test the opposite as the null hypothesis:H0: The mean difference in Red vs Blue points is 0.If this is the case, then, as the researchers suggested, the Red wins might be due to an advantage in the close contests rather than red having an impact on total point scoring.As we can see above, the distribution of differences in points does not follow a normal distribution, so a non parametric test should be used.We use the Wilcoxon signed-rank test on the observed differences, testing each sport separately.From Rice Section 11.3:The Wilcoxon signed-rank test statistic is W = min{W+, Wโˆ’} , where W+ is the sum of signed ranks that are positive and Wโˆ’ is the sum of ranks whose signs are negative.Since n > 20 for each sport, we use the normalized test statistic:$ Z = \frac{W_+ - E(W_+)}{\sqrt{Var(W+)}} $$ E(W_+) = \frac{n(n+1)}{4} $$ Var(W_+) = \frac{n(n + 1)(2n + 1)}{24} $When computing W+ we will consider Red wins as the positives (though it doesn't make a difference), so that the sign of the differences is the sign of (Red points - Blue points).
def get_signed_ranks(df):
    """Assign Wilcoxon signed ranks to the |Difference| column.

    Ties receive the average of the tied positions; ranks are signed
    positive for Red wins and negative for Blue wins.
    Returns (W+, ranked dataframe) where W+ is the sum of positive ranks.
    """
    ordered = df.sort_values('|Difference|').reset_index(drop=True)
    diffs = ordered['|Difference|']

    # Walk the sorted differences, buffering runs of ties and emitting the
    # averaged rank for each run once a strictly larger value appears.
    ranks = []
    tied_positions = []
    for pos, diff in diffs.items():
        if pos > 0 and diff > diffs[pos - 1]:
            ranks.extend([np.mean(tied_positions)] * len(tied_positions))
            tied_positions = []
        tied_positions.append(pos + 1)
    ranks.extend([np.mean(tied_positions)] * len(tied_positions))

    ordered['Rank'] = ranks
    signs = ordered['Winner'].map(lambda winner: -1 if winner == 'Blue' else 1)
    ordered['Signed Rank'] = ordered['Rank'] * signs

    w_plus = ordered.loc[ordered['Signed Rank'] > 0, 'Signed Rank'].sum()
    return w_plus, ordered
_____no_output_____
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
Tae Kwon Doe
# Tae Kwon Doe: Wilcoxon signed-rank test via the normal approximation.
w, tkd = get_signed_ranks(tkd)
n_obs = len(tkd)
# Moments of W+ under H0: E(W+) = n(n+1)/4, Var(W+) = n(n+1)(2n+1)/24.
mean_w = (n_obs * (n_obs + 1)) / 4
variance_w = (n_obs * (n_obs + 1) * (2 * n_obs + 1)) / 24
z_stat = (w - mean_w) / np.sqrt(variance_w)

for label, value in (('n', n_obs), ('W+', w), ('E(W+)', mean_w),
                     ('Var(W+)', variance_w), ('Z', z_stat)):
    print('%s = %s' % (label, value))

tkd.head()
n = 70 W+ = 1292.0 E(W+) = 1242.5 Var(W+) = 29198.75 Z = 0.2896830397843113
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
Boxing
# Boxing: Wilcoxon signed-rank test via the normal approximation.
w, boxing = get_signed_ranks(boxing)
n_obs = len(boxing)
# Moments of W+ under H0: E(W+) = n(n+1)/4, Var(W+) = n(n+1)(2n+1)/24.
mean_w = (n_obs * (n_obs + 1)) / 4
variance_w = (n_obs * (n_obs + 1) * (2 * n_obs + 1)) / 24
z_stat = (w - mean_w) / np.sqrt(variance_w)

for label, value in (('n', n_obs), ('W+', w), ('E(W+)', mean_w),
                     ('Var(W+)', variance_w), ('Z', z_stat)):
    print('%s = %s' % (label, value))

boxing.head()
n = 233 W+ = 13854.0 E(W+) = 13630.5 Var(W+) = 1060907.25 Z = 0.2169895498296029
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
Greco Roman Wrestling
# Greco Roman Wrestling: Wilcoxon signed-rank test via the normal approximation.
w, gr_wrestling = get_signed_ranks(gr_wrestling)
n_obs = len(gr_wrestling)
# Moments of W+ under H0: E(W+) = n(n+1)/4, Var(W+) = n(n+1)(2n+1)/24.
mean_w = (n_obs * (n_obs + 1)) / 4
variance_w = (n_obs * (n_obs + 1) * (2 * n_obs + 1)) / 24
z_stat = (w - mean_w) / np.sqrt(variance_w)

for label, value in (('n', n_obs), ('W+', w), ('E(W+)', mean_w),
                     ('Var(W+)', variance_w), ('Z', z_stat)):
    print('%s = %s' % (label, value))

gr_wrestling.head()
n = 51 W+ = 580.5 E(W+) = 663.0 Var(W+) = 11381.5 Z = -0.773311016598475
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
Freestyle Wrestling
# Freestyle Wrestling: Wilcoxon signed-rank test via the normal approximation.
w, fw_wrestling = get_signed_ranks(fw_wrestling)
n_obs = len(fw_wrestling)
# Moments of W+ under H0: E(W+) = n(n+1)/4, Var(W+) = n(n+1)(2n+1)/24.
mean_w = (n_obs * (n_obs + 1)) / 4
variance_w = (n_obs * (n_obs + 1) * (2 * n_obs + 1)) / 24
z_stat = (w - mean_w) / np.sqrt(variance_w)

for label, value in (('n', n_obs), ('W+', w), ('E(W+)', mean_w),
                     ('Var(W+)', variance_w), ('Z', z_stat)):
    print('%s = %s' % (label, value))

fw_wrestling.head()
n = 54 W+ = 835.5 E(W+) = 742.5 Var(W+) = 13488.75 Z = 0.8007502737021251
MIT
pset-3/pset3.ipynb
aberke/mit-stats-15.077
Merge Merging molecular systems A list of molecular systems are merged in to a new molecular system:
# Build three capped peptides with identical force-field options, spread
# them out along the x axis, and merge them into one molecular system.

def _capped_peptide(sequence):
    """Build one capped peptide with the shared AMBER14 / OBC1 options."""
    return msm.build.build_peptide([sequence, {'forcefield': 'AMBER14', 'implicit_solvent': 'OBC1'}])

molsys_A = _capped_peptide('AceProNme')
molsys_B = _capped_peptide('AceValNme')
molsys_C = _capped_peptide('AceLysNme')

# Shift B and C in opposite directions so the merged peptides do not overlap.
molsys_B = msm.structure.translate(molsys_B, translation='[-1.0, 0.0, 0.0] nanometers')
molsys_C = msm.structure.translate(molsys_C, translation='[1.0, 0.0, 0.0] nanometers')

molsys_D = msm.merge([molsys_A, molsys_B, molsys_C])

msm.info(molsys_D)
msm.view(molsys_D, standardize=True)
_____no_output_____
MIT
docs/contents/basic/merge.ipynb
dprada/molsysmt
Converting raster to vectorA cluster of functions that convert raster (.tiff) files generated as part of future scenario pipeline code, to vector (point shapefile) files.**Original code:** [Konstantinos Pegios](https://github.com/kopegios) **Conceptualization & Methodological review :** [Alexandros Korkovelos](https://github.com/akorkovelos) & [Konstantinos Pegios](https://github.com/kopegios)**Updates, Modifications:** [Alexandros Korkovelos](https://github.com/akorkovelos) **Funding:** The World Bank (contract number: 7190531), [KTH](https://www.kth.se/en/itm/inst/energiteknik/forskning/desa/welcome-to-the-unit-of-energy-systems-analysis-kth-desa-1.197296)
# Importing necessary modules.
# FIX: the original imported os, gdal, osr and ogr twice; duplicates are
# removed and the imports are grouped (stdlib first, then third-party).
import itertools
import os
import re
import time

import fiona
import gdal
import geopandas as gpd
import numpy as np
import ogr
import osr
import pandas as pd
import rasterio as rio
import rasterio.mask
from haversine import haversine
from pyproj import Proj
from rasterio.enums import Resampling
from rasterio.warp import calculate_default_transform, reproject
from rasterstats import point_query
_____no_output_____
MIT
agrodem_preprocessing/Future_Scenarios/Converting raster to vector.ipynb
babakkhavari/agrodem
Raster (Re)projection to target CRSThis step is not necessary if the raster file is already in the target CRS
# Define project function def reproj(input_raster, output_raster, new_crs, factor): dst_crs = new_crs with rio.open(input_raster) as src: transform, width, height = calculate_default_transform( src.crs, dst_crs, src.width*factor, src.height*factor, *src.bounds) kwargs = src.meta.copy() kwargs.update({ 'crs': dst_crs, 'transform': transform, 'width': width, 'height': height }) with rio.open(output_raster, 'w', **kwargs) as dst: for i in range(1, src.count + 1): reproject( source=rio.band(src, i), destination=rio.band(dst, i), src_transform=src.transform, src_crs=src.crs, dst_transform=transform, dst_crs=dst_crs, resampling=Resampling.nearest) # Set inout directories inpath = r"N:\Agrodem\Future_Scenarios\maize_cassava_scenarios\maize_cassava_scenarios" outpath= r"N:\Agrodem\Future_Scenarios\maize_cassava_scenarios\maize_cassava_scenarios\re_projected" # Provide the input raster and give a name to the output (reprojected) raster input_raster = inpath + "\\" + "cassava_SG.tif" output_raster = outpath + "\\" + "cassava_SG_reproj.tif" # Set target CRS new_crs = "epsg:4326" # Provide a factor if you want zoomed in/out results; suggest keeping it to one unless fully understanding the implications factor = 1 # Run function reproj(input_raster, output_raster, new_crs, factor)
_____no_output_____
MIT
agrodem_preprocessing/Future_Scenarios/Converting raster to vector.ipynb
babakkhavari/agrodem
Converting raster to shapefile
# Convert a raster to a point shapefile: one point per pixel with value > 0.

def pixelOffset2coord(raster, xOffset, yOffset):
    """Convert a (col, row) pixel offset to map coordinates via the raster's geotransform."""
    geotransform = raster.GetGeoTransform()
    originX = geotransform[0]
    originY = geotransform[3]
    pixelWidth = geotransform[1]
    pixelHeight = geotransform[5]
    coordX = originX + pixelWidth * xOffset
    coordY = originY + pixelHeight * yOffset
    return coordX, coordY


def raster2array(rasterfn):
    """Read band 1 of the raster at `rasterfn` into a 2-D array."""
    raster = gdal.Open(rasterfn)
    band = raster.GetRasterBand(1)
    return band.ReadAsArray()


def array2shp(array, outSHPfn, rasterfn):
    """Write a point shapefile with one feature per positive pixel of `array`.

    The raster at `rasterfn` supplies the geotransform (for coordinates) and
    the spatial reference of the output layer. Any existing shapefile at
    `outSHPfn` is deleted first.
    """
    raster = gdal.Open(rasterfn)
    srs = osr.SpatialReference()
    srs.ImportFromWkt(raster.GetProjection())

    shpDriver = ogr.GetDriverByName("ESRI Shapefile")
    if os.path.exists(outSHPfn):
        shpDriver.DeleteDataSource(outSHPfn)
    outDataSource = shpDriver.CreateDataSource(outSHPfn)
    outLayer = outDataSource.CreateLayer(outSHPfn, geom_type=ogr.wkbPoint, srs=srs)
    featureDefn = outLayer.GetLayerDefn()
    outLayer.CreateField(ogr.FieldDefn("VALUE", ogr.OFTInteger))

    point = ogr.Geometry(ogr.wkbPoint)
    row_count = array.shape[0]
    for ridx, row in enumerate(array):
        if ridx % 100 == 0:
            print ("{0} of {1} rows processed".format(ridx, row_count))
        for cidx, value in enumerate(row):
            # Only keep pixels carrying a positive value
            if value > 0:
                Xcoord, Ycoord = pixelOffset2coord(raster, cidx, ridx)
                point.AddPoint(Xcoord, Ycoord)
                outFeature = ogr.Feature(featureDefn)
                outFeature.SetGeometry(point)
                # FIX: store the pixel value in the VALUE attribute; the
                # original stored the row index (ridx) instead.
                outFeature.SetField("VALUE", int(value))
                outLayer.CreateFeature(outFeature)
                outFeature.Destroy()
    # FIX: release the datasource so all features are flushed to disk.
    outDataSource.Destroy()


def main(rasterfn, outSHPfn):
    """Convert the raster at `rasterfn` into the point shapefile `outSHPfn`."""
    array = raster2array(rasterfn)
    array2shp(array, outSHPfn, rasterfn)


# Set inout directories
inpath = r"N:\Agrodem\Future_Scenarios\maize_cassava_scenarios\maize_cassava_scenarios\re_projected"
outpath= r"N:\Agrodem\Future_Scenarios\maize_cassava_scenarios\maize_cassava_scenarios\vectorfiles"

# Provide the input raster and give a name to the output shapefile
rasterfn = inpath + "\\" + "cassava_SG_reproj.tif"
outSHPfn = outpath + "\\" + "cassava_SG.shp"

# Run the function
main(rasterfn, outSHPfn)
0 of 3580 rows processed 100 of 3580 rows processed 200 of 3580 rows processed 300 of 3580 rows processed 400 of 3580 rows processed 500 of 3580 rows processed 600 of 3580 rows processed 700 of 3580 rows processed 800 of 3580 rows processed 900 of 3580 rows processed 1000 of 3580 rows processed 1100 of 3580 rows processed 1200 of 3580 rows processed 1300 of 3580 rows processed 1400 of 3580 rows processed 1500 of 3580 rows processed 1600 of 3580 rows processed 1700 of 3580 rows processed 1800 of 3580 rows processed 1900 of 3580 rows processed 2000 of 3580 rows processed 2100 of 3580 rows processed 2200 of 3580 rows processed 2300 of 3580 rows processed 2400 of 3580 rows processed 2500 of 3580 rows processed 2600 of 3580 rows processed 2700 of 3580 rows processed 2800 of 3580 rows processed 2900 of 3580 rows processed 3000 of 3580 rows processed 3100 of 3580 rows processed 3200 of 3580 rows processed 3300 of 3580 rows processed 3400 of 3580 rows processed 3500 of 3580 rows processed
MIT
agrodem_preprocessing/Future_Scenarios/Converting raster to vector.ipynb
babakkhavari/agrodem
Assigning lat/long columns to the shapefile
# Load the vectorised crop points into a GeoDataFrame.
path_shp = r"N:\Agrodem\Future_Scenarios\maize_cassava_scenarios\maize_cassava_scenarios\vectorfiles"
name_shp = "cassava_SG.shp"
future_crop_gdf = gpd.read_file(path_shp + "\\" + name_shp)

# Derive explicit longitude/latitude columns from the point geometries.
future_crop_gdf["lon"] = future_crop_gdf["geometry"].x
future_crop_gdf["lat"] = future_crop_gdf["geometry"].y

future_crop_gdf.head(3)
_____no_output_____
MIT
agrodem_preprocessing/Future_Scenarios/Converting raster to vector.ipynb
babakkhavari/agrodem
Exporting file back to shp or gpkg
# Export the GeoDataFrame back to disk.
path = r"N:\Agrodem\Future_Scenarios\maize_cassava_scenarios\maize_cassava_scenarios\vectorfiles"
name_shp = "cassava_SG.shp"

# Shapefile output.
future_crop_gdf.to_file(os.path.join(path, name_shp), index=False)

# GeoPackage alternative (disabled):
# future_crop_gdf.to_file("maize_BAU.gpkg", layer='Maize_Inputfile_Future', driver="GPKG")
_____no_output_____
MIT
agrodem_preprocessing/Future_Scenarios/Converting raster to vector.ipynb
babakkhavari/agrodem
ๅผบๅŒ–ๅญฆไน ็ฌฌไบŒ็ซ ็š„ไพ‹ๅญ็š„ไปฃ็ ๏ผŒ10่‡‚่ตŒๅš้—ฎ้ข˜๏ผŒ้ฆ–ๅ…ˆๅปบ็ซ‹ไธ€ไธชk่‡‚่ตŒๅš่€…็š„็ฑปใ€‚
class Bandit:
    """A k-armed bandit testbed (Sutton & Barto, chapter 2).

    Parameters:
        kArm: int, number of arms of the bandit.
        epsilon: float, exploration probability of the epsilon-greedy rule.
        initial: float, initial value estimate for every action
            (optimistic initial values when > 0).
        stepSize: float, constant step size for value updates.
        sampleAverages: if True, update estimates with sample averages
            instead of the constant step size.
        UCB: if not None, use Upper-Confidence-Bound action selection with
            this value as the exploration coefficient.
        gradient: if True, use the gradient bandit algorithm (action
            preferences + softmax selection).
        gradientBaseline: if True, use the running average reward as the
            baseline of the gradient algorithm.
        trueReward: float, offset added to each arm's true reward.
    """
    def __init__(self, kArm=10, epsilon=0., initial=0., stepSize=0.1, sampleAverages=False,
                 UCB=None, gradient=False, gradientBaseline=False, trueReward=0.):
        self.k = kArm
        self.epsilon = epsilon
        self.stepSize = stepSize
        self.sampleAverages = sampleAverages
        self.indices = np.arange(self.k)  # one index per arm
        self.time = 0                     # total number of plays so far
        self.UCB = UCB
        self.gradient = gradient
        self.gradientBaseline = gradientBaseline
        self.averageReward = 0
        self.trueReward = trueReward

        # true reward of each action
        self.qTrue = []
        # estimated value of each action
        self.qEst = np.zeros(self.k)
        # number of times each action has been chosen
        self.actionCount = []

        # True rewards ~ N(trueReward, 1); estimates start at `initial`.
        for i in range(0, self.k):
            self.qTrue.append(np.random.randn() + trueReward)
            self.qEst[i] = initial
            self.actionCount.append(0)

        # index of the arm with the highest true reward
        self.bestAction = np.argmax(self.qTrue)

    def getAction(self):
        """Choose an arm to play (explore or exploit)."""
        # explore: with probability epsilon pick a random arm
        if self.epsilon > 0:
            if np.random.binomial(1, self.epsilon) == 1:
                np.random.shuffle(self.indices)
                return self.indices[0]

        # exploit: Upper-Confidence-Bound action selection
        if self.UCB is not None:
            # FIX: the "+ 1" belongs in the denominator; the original divided
            # by the counts first and then added 1 under the square root.
            UCBEst = self.qEst + self.UCB * np.sqrt(
                np.log(self.time + 1) / (np.asarray(self.actionCount) + 1))
            return np.argmax(UCBEst)

        # gradient bandit: softmax over the current preferences
        if self.gradient:
            expEst = np.exp(self.qEst)
            self.actionProb = expEst / np.sum(expEst)
            return np.random.choice(self.indices, p=self.actionProb)

        # plain greedy choice
        return np.argmax(self.qEst)

    def takeAction(self, action):
        """Play `action`, observe a reward and update the estimates."""
        # reward ~ N(qTrue[action], 1)
        reward = np.random.randn() + self.qTrue[action]
        self.time += 1
        # incremental running average of all rewards seen so far
        self.averageReward = (self.time - 1.0) / self.time * self.averageReward + reward / self.time
        self.actionCount[action] += 1

        if self.sampleAverages:
            # sample-average update of the estimate
            self.qEst[action] += 1.0 / self.actionCount[action] * (reward - self.qEst[action])
        elif self.gradient:
            oneHot = np.zeros(self.k)
            oneHot[action] = 1
            # FIX: the baseline is the running average reward; the original
            # assigned the undefined name `gradientBaseline` (NameError).
            if self.gradientBaseline:
                baseline = self.averageReward
            else:
                baseline = 0
            # Preference update: raise the chosen action, lower the others.
            self.qEst = self.qEst + self.stepSize * (reward - baseline) * (oneHot - self.actionProb)
        else:
            # constant step-size update
            self.qEst += self.stepSize * (reward - self.qEst[action])
        return reward


figureIndex = 0


# Reproduce figure 2.1
def figure2_1():
    """Figure 2.1: reward distributions of a sample 10-armed testbed."""
    global figureIndex
    # FIX: actually open the figure being counted, so later plots do not
    # draw on top of the violin plot.
    plt.figure(figureIndex)
    figureIndex += 1
    sns.violinplot(data=np.random.randn(200, 10) + np.random.randn(10))
    plt.xlabel('Action')
    plt.ylabel('Reward distribution')


def banditSimulation(nBandits, time, bandits):
    """Run each bandit configuration and average results over nBandits runs."""
    bestActionCounts = [np.zeros(time, dtype='float') for _ in range(0, len(bandits))]
    averageRewards = [np.zeros(time, dtype='float') for _ in range(0, len(bandits))]
    for banditInd, bandit in enumerate(bandits):
        for i in range(0, nBandits):
            for t in range(0, time):
                action = bandit[i].getAction()
                reward = bandit[i].takeAction(action)
                averageRewards[banditInd][t] += reward
                if action == bandit[i].bestAction:
                    bestActionCounts[banditInd][t] += 1
        bestActionCounts[banditInd] /= nBandits
        averageRewards[banditInd] /= nBandits
    return bestActionCounts, averageRewards


# for figure 2.2
def epsilonGreedy(nBandits, time):
    """Figure 2.2: epsilon-greedy performance for several epsilon values."""
    epsilons = [0, 0.1, 0.01]
    bandits = []
    for epsInd, eps in enumerate(epsilons):
        bandits.append([Bandit(epsilon=eps, sampleAverages=True) for _ in range(0, nBandits)])
    bestActionCounts, averageRewards = banditSimulation(nBandits, time, bandits)

    global figureIndex
    plt.figure(figureIndex)
    figureIndex += 1
    # FIX: the original iterated over the misspelled name `beatActionCounts`,
    # which raised a NameError.
    for eps, counts in zip(epsilons, bestActionCounts):
        plt.plot(counts, label='epsilon=' + str(eps))
    plt.xlabel('Steps')
    plt.ylabel('% optimal action')
    plt.legend()

    plt.figure(figureIndex)
    figureIndex += 1
    for eps, reward in zip(epsilons, averageRewards):
        # FIX: label typo 'epsolon' -> 'epsilon'
        plt.plot(reward, label='epsilon=' + str(eps))
    plt.xlabel('Steps')
    plt.ylabel('average reward')
    plt.legend()


figure2_1()
epsilonGreedy(2000, 1000)
plt.show()
_____no_output_____
BSD-2-Clause
EvaluativeFeedback/TenArmedTestbed.ipynb
xiaorancs/xr-reinforcement-learning
Real Estate Modelling Project -Srini Objective -Build a model to predict house prices based on features provided in the dataset. -One of those parameters include understanding which factors are responsible for higher property value - $650K and above.-The data set consists of information on some 22,000 properties. -The dataset consisted of historic data of houses sold between May 2014 to May 2015.-Tools to be used are Pandas (Jupyter notebook) and Tableau. Importing libraries
import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline import pandas as pd import numpy as np from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import QuantileTransformer from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split from sklearn.preprocessing import PowerTransformer from sklearn.metrics import r2_score import statsmodels.api as sm from sklearn.metrics import mean_squared_error as mse from sklearn.metrics import mean_absolute_error as mae from scipy.stats import boxcox pd.options.display.max_rows = 50 pd.options.display.max_columns = 999 import warnings warnings.filterwarnings('ignore') from haversine import haversine
_____no_output_____
MIT
source_code/MidTerm_Project_srini.V3_trials.ipynb
Denny-Meyer/IronHack_mid_term_real_state_regression
Fetching the data
df=pd.read_excel("Data_MidTerm_Project_Real_State_Regression.xls" ) # reading the excel file
_____no_output_____
MIT
source_code/MidTerm_Project_srini.V3_trials.ipynb
Denny-Meyer/IronHack_mid_term_real_state_regression
Checking the data type of the features for any corrections
df.info()
<class 'pandas.core.frame.DataFrame'> RangeIndex: 21597 entries, 0 to 21596 Data columns (total 21 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 id 21597 non-null int64 1 date 21597 non-null datetime64[ns] 2 bedrooms 21597 non-null int64 3 bathrooms 21597 non-null float64 4 sqft_living 21597 non-null int64 5 sqft_lot 21597 non-null int64 6 floors 21597 non-null float64 7 waterfront 21597 non-null int64 8 view 21597 non-null int64 9 condition 21597 non-null int64 10 grade 21597 non-null int64 11 sqft_above 21597 non-null int64 12 sqft_basement 21597 non-null int64 13 yr_built 21597 non-null int64 14 yr_renovated 21597 non-null int64 15 zipcode 21597 non-null int64 16 lat 21597 non-null float64 17 long 21597 non-null float64 18 sqft_living15 21597 non-null int64 19 sqft_lot15 21597 non-null int64 20 price 21597 non-null int64 dtypes: datetime64[ns](1), float64(4), int64(16) memory usage: 3.5 MB
MIT
source_code/MidTerm_Project_srini.V3_trials.ipynb
Denny-Meyer/IronHack_mid_term_real_state_regression
Checking the column headers for case consistency and spacing for any corrections
col_list = df.columns col_list #filtered view of the repetitive house ids repetitive_sales = df.groupby('id').filter(lambda x: len(x) > 1) repetitive_sales
_____no_output_____
MIT
source_code/MidTerm_Project_srini.V3_trials.ipynb
Denny-Meyer/IronHack_mid_term_real_state_regression
Exploring the data
df['zipcode']= df['zipcode'].astype(str) df.zipcode[df.zipcode.isin(["98102", "98103", "98105", "98106", "98107", "98108", "98109", "98112", "98115", "98116", "98117", "98118", "98119", "98122", "98125", "98126", "98133", "98136", "98144", "98177", "98178", "98199"])] = "seattle_zipcode" df.zipcode[df.zipcode.isin(["98002", "98003", "98023", "98092"])]= 'Auburn_zipcode' df.zipcode[df.zipcode.isin(["98004", "98005", "98006", "98007", "98008"])]= 'Bellevue_zipcode' df.zipcode[df.zipcode.isin(["98146", "98148", "98166", "98168"])]= 'Burien_zipcode' df.zipcode[df.zipcode.isin(["98001", "98010", "98011", "98014", "98019", "98022", "98024", "98027", "98028", "98029", "98030", "98031", "98032", "98033", "98034", "98038", "98039", "98040", "98042", "98045", "98052", "98053", "98055", "98056", "98058", "98059", "98065", "98070", "98072", "98074", "98075", "98077", "98155", "98188", "98198"])]= 'other_zipcode' # List of variables to be dropped from the model drop_list = ["price", "id", "sqft_above", "sqft_basement", "sqft_lot15", "sqft_living15", 'lat', 'long', 'dist_from_seattle', 'dist_from_bellevue', 'Auburn_zipcode', 'floors', 'other_zipcode'] # 'lat_long' is not a numeric element hence not passed on outlier_list = [] for item in df.columns: if item not in drop_list: if item not in ['price','date']: # target variable outlier_list.append(item) outlier_list # placeholder for dealing with outliers #Q1 = df.sqft_basement.quantile(0.25) #Q3 = df.sqft_basement.quantile(0.75) #IQR = Q3 - Q1 #IQR is interquartile range. #filter = (df["price"] >= 100000) # Removed the houses with less than price of 100k, that accounted for 45 records #df=df.loc[filter] #filter = (df['bedrooms'] !=33) # Removed the houses with less than condition value 1, that accounted for 29 records #df=df.loc[filter]
_____no_output_____
MIT
source_code/MidTerm_Project_srini.V3_trials.ipynb
Denny-Meyer/IronHack_mid_term_real_state_regression
Mapping a new variable called distance from the epicenter to see how far the properties are located to the main areas of Seatlle and Bellevue.
df['lat_long'] = tuple(zip(df.lat,df.long)) # creating one column with a tuple using latitude and longitude coordinates df df['zipcode']= df['zipcode'].astype(str) seattle = [47.6092,-122.3363] bellevue = [47.61555,-122.20392] seattle_dist = [] for i in df['lat_long']: seattle_dist.append(haversine((seattle),(i), unit = 'mi')) df['dist_from_seattle'] = pd.Series(seattle_dist) bellevue_dist = [] for i in df['lat_long']: bellevue_dist.append(haversine((bellevue),(i), unit = 'mi')) df['dist_from_bellevue'] = pd.Series(bellevue_dist) df['dist_from_citycenter'] = df[['dist_from_seattle', 'dist_from_bellevue']].min(axis=1) corr = round(df.corr(),2) mask = np.zeros_like(corr) mask[np.triu_indices_from(mask)] = True with sns.axes_style("white"): f, ax = plt.subplots(figsize=(14, 11)) ax = sns.heatmap(corr, mask=mask,cmap='coolwarm', vmin=-1,vmax=1,annot=True, square=True) df_zip_dummies = pd.get_dummies(df['zipcode']) df_zips= pd.concat([df, df_zip_dummies],axis=1) df_zips.head() df.describe()
_____no_output_____
MIT
source_code/MidTerm_Project_srini.V3_trials.ipynb
Denny-Meyer/IronHack_mid_term_real_state_regression
Applying Box-cox Powertransform
def plots (df, var, t): plt.figure(figsize= (13,5)) plt.subplot(121) sns.kdeplot(df[var]) plt.title('before' + str(t).split('(')[0]) plt.subplot(122) p1 = t.fit_transform(df[[var]]).flatten() sns.kdeplot(p1) plt.title('after' + str(t).split('(')[0]) box_col = [] for item in df.columns: if item not in drop_list: if item not in ['date', 'lat_long','waterfront','view','zipcode','yr_renovated']: # target variable box_col.append(item) box_col for col in box_col: plots(df, col, PowerTransformer (method='box-cox')) plots(df, 'price', PowerTransformer (method='box-cox')) df.describe() nulls_df = pd.DataFrame(round(df.isna().sum()/len(df),4)*100) nulls_df = nulls_df.reset_index() nulls_df.columns = ['header_name', 'percent_nulls'] nulls_df
_____no_output_____
MIT
source_code/MidTerm_Project_srini.V3_trials.ipynb
Denny-Meyer/IronHack_mid_term_real_state_regression
Preparing the data
# x = df.drop("price", axis=1) x = df_zips._get_numeric_data() y = x['price'] x for col in drop_list: x.drop([col],axis=1,inplace=True)
_____no_output_____
MIT
source_code/MidTerm_Project_srini.V3_trials.ipynb
Denny-Meyer/IronHack_mid_term_real_state_regression
Modelling the data
y x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=.2, random_state =1) std_scaler=StandardScaler().fit(x_train) x_train_scaled=std_scaler.transform(x_train) x_test_scaled=std_scaler.transform(x_test) x_train_scaled[0]
_____no_output_____
MIT
source_code/MidTerm_Project_srini.V3_trials.ipynb
Denny-Meyer/IronHack_mid_term_real_state_regression
Modeling using Statsmodels without scaling
x_train_const= sm.add_constant(x_train) # adding a constant model = sm.OLS(y_train, x_train_const).fit() predictions_train = model.predict(x_train_const) x_test_const = sm.add_constant(x_test) # adding a constant predictions_test = model.predict(x_test_const) print_model = model.summary() print(print_model)
OLS Regression Results ============================================================================== Dep. Variable: price R-squared: 0.725 Model: OLS Adj. R-squared: 0.724 Method: Least Squares F-statistic: 3244. Date: Tue, 16 Nov 2021 Prob (F-statistic): 0.00 Time: 19:44:26 Log-Likelihood: -2.3487e+05 No. Observations: 17277 AIC: 4.698e+05 Df Residuals: 17262 BIC: 4.699e+05 Df Model: 14 Covariance Type: nonrobust ======================================================================================== coef std err t P>|t| [0.025 0.975] ---------------------------------------------------------------------------------------- const 2.875e+06 1.53e+05 18.733 0.000 2.57e+06 3.18e+06 bedrooms -3.533e+04 2034.728 -17.366 0.000 -3.93e+04 -3.13e+04 bathrooms 2.614e+04 3409.075 7.669 0.000 1.95e+04 3.28e+04 sqft_living 191.9273 3.347 57.348 0.000 185.367 198.487 sqft_lot 0.2241 0.037 6.034 0.000 0.151 0.297 waterfront 6.341e+05 1.86e+04 34.006 0.000 5.98e+05 6.71e+05 view 4.455e+04 2229.911 19.979 0.000 4.02e+04 4.89e+04 condition 2.292e+04 2518.999 9.098 0.000 1.8e+04 2.79e+04 grade 8.743e+04 2193.290 39.863 0.000 8.31e+04 9.17e+04 yr_built -1668.9448 78.656 -21.218 0.000 -1823.118 -1514.771 yr_renovated 26.0857 3.932 6.634 0.000 18.378 33.793 dist_from_citycenter -1.617e+04 324.581 -49.832 0.000 -1.68e+04 -1.55e+04 Bellevue_zipcode 9.396e+04 6928.163 13.562 0.000 8.04e+04 1.08e+05 Burien_zipcode -9.052e+04 7926.803 -11.419 0.000 -1.06e+05 -7.5e+04 seattle_zipcode 1.323e+04 4472.788 2.957 0.003 4457.918 2.2e+04 ============================================================================== Omnibus: 16099.402 Durbin-Watson: 1.997 Prob(Omnibus): 0.000 Jarque-Bera (JB): 2300363.949 Skew: 4.041 Prob(JB): 0.00 Kurtosis: 58.948 Cond. No. 4.60e+06 ============================================================================== Notes: [1] Standard Errors assume that the covariance matrix of the errors is correctly specified. [2] The condition number is large, 4.6e+06. 
This might indicate that there are strong multicollinearity or other numerical problems.
MIT
source_code/MidTerm_Project_srini.V3_trials.ipynb
Denny-Meyer/IronHack_mid_term_real_state_regression
checking the significant variables
model.params[list(np.where(model.pvalues < 0.05)[0])].iloc[1:].index.tolist() significant_features=x[model.params[list(np.where(model.pvalues < 0.05)[0])].iloc[1:].index.tolist()] model = LinearRegression() model.fit(x_train, y_train) coefficients = list(model.coef_) coefficients
_____no_output_____
MIT
source_code/MidTerm_Project_srini.V3_trials.ipynb
Denny-Meyer/IronHack_mid_term_real_state_regression
with scaling
x_train.columns x_train_const_scaled = sm.add_constant(x_train_scaled) # adding a constant model = sm.OLS(y_train, x_train_const_scaled).fit() predictions_train = model.predict(x_train_const_scaled) x_test_const_scaled = sm.add_constant(x_test_scaled) # adding a constant predictions_test = model.predict(x_test_const_scaled) print_model = model.summary() print(print_model)
OLS Regression Results ============================================================================== Dep. Variable: price R-squared: 0.725 Model: OLS Adj. R-squared: 0.724 Method: Least Squares F-statistic: 3244. Date: Tue, 16 Nov 2021 Prob (F-statistic): 0.00 Time: 19:44:26 Log-Likelihood: -2.3487e+05 No. Observations: 17277 AIC: 4.698e+05 Df Residuals: 17262 BIC: 4.699e+05 Df Model: 14 Covariance Type: nonrobust ============================================================================== coef std err t P>|t| [0.025 0.975] ------------------------------------------------------------------------------ const 5.414e+05 1475.929 366.787 0.000 5.38e+05 5.44e+05 x1 -3.292e+04 1895.393 -17.366 0.000 -3.66e+04 -2.92e+04 x2 2.015e+04 2627.833 7.669 0.000 1.5e+04 2.53e+04 x3 1.758e+05 3065.731 57.348 0.000 1.7e+05 1.82e+05 x4 9318.8176 1544.413 6.034 0.000 6291.612 1.23e+04 x5 5.501e+04 1617.643 34.006 0.000 5.18e+04 5.82e+04 x6 3.423e+04 1713.086 19.979 0.000 3.09e+04 3.76e+04 x7 1.493e+04 1640.633 9.098 0.000 1.17e+04 1.81e+04 x8 1.029e+05 2581.308 39.863 0.000 9.78e+04 1.08e+05 x9 -4.911e+04 2314.516 -21.218 0.000 -5.36e+04 -4.46e+04 x10 1.048e+04 1580.113 6.634 0.000 7384.994 1.36e+04 x11 -1.005e+05 2015.803 -49.832 0.000 -1.04e+05 -9.65e+04 x12 2.291e+04 1689.438 13.562 0.000 1.96e+04 2.62e+04 x13 -1.787e+04 1565.039 -11.419 0.000 -2.09e+04 -1.48e+04 x14 6249.0668 2113.473 2.957 0.003 2106.446 1.04e+04 ============================================================================== Omnibus: 16099.402 Durbin-Watson: 1.997 Prob(Omnibus): 0.000 Jarque-Bera (JB): 2300363.949 Skew: 4.041 Prob(JB): 0.00 Kurtosis: 58.948 Cond. No. 4.86 ============================================================================== Notes: [1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
MIT
source_code/MidTerm_Project_srini.V3_trials.ipynb
Denny-Meyer/IronHack_mid_term_real_state_regression
Linear regression
model=LinearRegression() # model model.fit(x_train_scaled, y_train) # model train y y_pred=model.predict(x_test_scaled) # model prediction y_pred_train=model.predict(x_train_scaled) # Make an scatter plot y_pred vs y # What kind of plot you will get if all the all the predictions are ok? # A stright line fig, ax = plt.subplots(2,3,figsize=(14,12)) ax[0,0].plot(y_pred, y_test, 'o') ax[0,0].set_xlabel("y_test") ax[0,0].set_ylabel("y_pred") ax[0,0].set_title("Test Set -Predicted vs real") # Get a histogram of the residuals ie: y - y_pred. Homoscdasticity # It resembles a normal distribution? ax[0,1].hist(y_test - y_pred) ax[0,1].set_xlabel("Test y-y_pred") ax[0,1].set_title("Test Set Residual histogram") ax[0,2].plot(y_pred,y_test - y_pred,"o") ax[0,2].set_xlabel("predited") ax[0,2].set_ylabel("residuals") ax[0,2].set_title("Residuals by Predicted -- Test set") ax[0,2].plot(y_pred,np.zeros(len(y_pred)),linestyle='dashed') ax[1,0].plot(y_pred_train, y_train, 'o') ax[1,0].set_xlabel("y_train") ax[1,0].set_ylabel("y_pred_train") ax[1,0].set_title("Train set Predicted vs real") # Get a histogram of the residuals ie: y - y_pred. Homoscdasticity # It resembles a normal distribution? 
ax[1,1].hist(y_train - y_pred_train) ax[1,1].set_xlabel("Train y-y_pred") ax[1,1].set_title("Train Residual histogram") ax[1,2].plot(y_pred_train,y_train - y_pred_train,"o") ax[1,2].set_xlabel("predited") ax[1,2].set_ylabel("residuals") ax[1,2].set_title("Residuals by Predicted -- Train set") ax[1,2].plot(y_pred_train,np.zeros(len(y_pred_train)),linestyle='dashed') y_pred =model.predict(x_test_scaled).astype(int) y_change = round((y_pred/y_test)-1, 2) result = pd.DataFrame({"y_test":y_test, "y_pred":y_pred, "โˆ† %": y_change}) result fig, axs = plt.subplots(1,2,figsize=(12,6)) sns.regplot(x="y_test", y="y_pred", data=result, scatter_kws={"color": "blue"}, line_kws={"color": "black"}, ax=axs[0]) sns.histplot(y_test-y_pred, kde=True, ax=axs[1]) axs[0].set_title("Test Set - Observed VS Predicted") axs[1].set_title("Test Set - Histogram of the Residuals") axs[1].set_xlabel("y_test - y_pred")
_____no_output_____
MIT
source_code/MidTerm_Project_srini.V3_trials.ipynb
Denny-Meyer/IronHack_mid_term_real_state_regression
Model validation
train_mse=mse(y_train,y_pred_train) test_mse=mse(y_test,y_pred) print ('train MSE: {} -- test MSE: {}'.format(train_mse, test_mse))
train MSE: 37602966924.11132 -- test MSE: 34361724727.49398
MIT
source_code/MidTerm_Project_srini.V3_trials.ipynb
Denny-Meyer/IronHack_mid_term_real_state_regression
RMSE
print ('train RMSE: {} -- test RMSE: {}'.format(train_mse**.5, test_mse**.5))
train RMSE: 193914.84451715223 -- test RMSE: 185369.1579726627
MIT
source_code/MidTerm_Project_srini.V3_trials.ipynb
Denny-Meyer/IronHack_mid_term_real_state_regression
MAE
train_mae=mae(y_train,y_pred_train) test_mae=mae(y_test,y_pred) print ('train MAE: {} -- test MAE: {}'.format(train_mse, test_mse))
train MAE: 37602966924.11132 -- test MAE: 34361724727.49398
MIT
source_code/MidTerm_Project_srini.V3_trials.ipynb
Denny-Meyer/IronHack_mid_term_real_state_regression
R2
#R2= model.score(X_test_scaled, y_test) R2_train=r2_score(y_train,y_pred_train) R2_test=r2_score(y_test,y_pred) print (R2_train) print(R2_test) print ('train R2: {} -- test R2: {}'.format(model.score(x_train_scaled, y_train), model.score(x_test_scaled, y_test)))
train R2: 0.7245706790336555 -- test R2: 0.7328942112790509
MIT
source_code/MidTerm_Project_srini.V3_trials.ipynb
Denny-Meyer/IronHack_mid_term_real_state_regression
adjusted rsquare
Adj_R2_train= 1 - (1-R2_train)*(len(y_train)-1)/(len(y_train)-x_train.shape[1]-1) Adj_R2_train Adj_R2_test= 1 - (1-R2_test)*(len(y_test)-1)/(len(y_test)-x_test.shape[1]-1) Adj_R2_test features_importances = pd.DataFrame(data={ 'Attribute': x_train.columns, 'Importance': abs(model.coef_) }) features_importances = features_importances.sort_values(by='Importance', ascending=False) features_importances metrics = {"MSE":mse(y_test, y_pred), "RMSE":mse(y_test, y_pred, squared=False), "MAE":mae(y_test, y_pred), "R2":r2_score(y_test, y_pred)} metrics plt.bar(x=features_importances['Attribute'], height=features_importances['Importance'], color='Orange') plt.title('Feature importances obtained from coefficients', size=20) plt.xticks(rotation='vertical') plt.show()
_____no_output_____
MIT
source_code/MidTerm_Project_srini.V3_trials.ipynb
Denny-Meyer/IronHack_mid_term_real_state_regression
Iteration Example
import pyblp import numpy as np pyblp.__version__
_____no_output_____
MIT
docs/notebooks/api/iteration.ipynb
yusukeaoki1223/pyblp
In this example, we'll build a SQUAREM configuration with a $\ell^2$-norm and use scheme S1 from :ref:`references:Varadhan and Roland (2008)`.
# SQUAREM acceleration with an l2-norm and scheme S1 from
# Varadhan and Roland (2008).
squarem_options = {'norm': np.linalg.norm, 'scheme': 1}
iteration = pyblp.Iteration('squarem', squarem_options)
iteration
_____no_output_____
MIT
docs/notebooks/api/iteration.ipynb
yusukeaoki1223/pyblp
Next, instead of using a built-in routine, we'll create a custom method that implements a version of simple iteration, which, for the sake of having a nontrivial example, arbitrarily identifies a major iteration with three objective evaluations.
def custom_method(initial, contraction, callback, max_evaluations, tol, norm):
    """Simple fixed-point iteration with a major-iteration callback.

    Repeatedly applies ``contraction`` starting from ``initial`` until either
    the (optionally weighted) step size measured by ``norm`` drops below
    ``tol`` or ``max_evaluations`` contraction evaluations have been used.
    Every third evaluation is arbitrarily treated as one "major" iteration,
    at which point ``callback`` is invoked.

    Returns the final point and a bool indicating whether iteration stopped
    before exhausting the evaluation budget (i.e. whether it converged).
    """
    point = initial
    evaluations = 0
    while evaluations < max_evaluations:
        previous = point
        # contraction returns (next point, optional weights, extra payload).
        point, weights, _ = contraction(previous)
        evaluations += 1
        if evaluations % 3 == 0:
            callback()
        step = point - previous
        difference = norm(step) if weights is None else norm(weights * step)
        if difference < tol:
            break
    return point, evaluations < max_evaluations
_____no_output_____
MIT
docs/notebooks/api/iteration.ipynb
yusukeaoki1223/pyblp
We can then use this custom method to build a custom iteration configuration.
# Wrap the hand-written fixed-point routine in a pyblp Iteration configuration.
iteration = pyblp.Iteration(custom_method)
iteration
_____no_output_____
MIT
docs/notebooks/api/iteration.ipynb
yusukeaoki1223/pyblp
![redditscore](https://s3.us-east-2.amazonaws.com/redditscore/logo.png) ***A machine learning approach to predicting how badly you'll get roasted for your sub-par reddit comments.*** Alex Hartford & Trevor Hacker **Dataset** Reddit comments from September, 2018 (source). This is well over 100 GB of data. We will likely only use a subset of the data but will ultimately try to use the entire dataset. **Objectives** Create a linear regression model to predict the reddit score of a comment a user is considering posting. Stretch goal - narrow comment scoring down by subreddit, as comment popularity will differ between reddit communities. Allow users to use this model with a publicly available website. Open source the project to allow further contributions if anyone is interested. Formal Hypothesis and Data Analysis By analyzing comments made on the Reddit platform by prior users, we believe that people who seek to gather as much reputation as possible on Reddit would find value in being able to predict whether their comments will be well received by the community. In the process, finding some of the most common highly/negatively received comments would be very interesting information, as it can provide insight into the current trends of the web. This dataset is just one of many - there are datasets for all the information ever posted on Reddit, publicly available for use. Community members of Reddit have assembled the data by running scripts on the Reddit API and did most of the cleaning for us. Interestingly, people released these datasets in the hope that others would create something out of them - quite a while ago. From what I can tell, Redditscore is one of the first applications that uses this data, rather than just providing a few nice graphs. There is actually a problem potentially being solved here, as there are people who live for Reddit karma.
import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.linear_model import SGDRegressor from sklearn.metrics import mean_squared_error from sklearn.feature_extraction.text import TfidfTransformer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.decomposition import TruncatedSVD from sklearn.preprocessing import LabelEncoder from sklearn.linear_model import SGDClassifier from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix from sklearn.preprocessing import StandardScaler from sklearn.decomposition import PCA print('Libraries loaded!')
Libraries loaded!
MIT
python/redditscore.ipynb
AlexHartford/redditscore
Import and Clean Data
# Download the ~2M-row Reddit comments dataset straight from S3.
print('Loading memes...')
# df = pd.read_csv('https://s3.us-east-2.amazonaws.com/redditscore/2500rows.csv')
# NOTE(review): error_bad_lines was deprecated in pandas 1.3 and removed in
# 2.0 — on pandas >= 1.3 the equivalent is on_bad_lines='skip'. Confirm the
# pandas version pinned for this notebook before changing it.
df = pd.read_csv('https://s3.us-east-2.amazonaws.com/redditscore/2mrows.csv',
                 error_bad_lines=False,
                 engine='python',
                 encoding='utf-8')
print('Memes are fully operational!')

# Quick sanity check of column dtypes and overall shape.
print(df.dtypes)
print()
print(df.shape)
df.head(10)
subreddit object body object score float64 dtype: object (1961645, 3)
MIT
python/redditscore.ipynb
AlexHartford/redditscore