path | concatenated_notebook
---|---
notebooks/exp225_analysis.ipynb | ###Markdown
Exp 225 analysis. See `./informercial/Makefile` for experimental details.
###Code
import os
import numpy as np
from pprint import pprint
from IPython.display import Image
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import seaborn as sns
sns.set_style('ticks')
matplotlib.rcParams.update({'font.size': 16})
matplotlib.rc('axes', titlesize=16)
from infomercial.exp import meta_bandit
from infomercial.exp import epsilon_bandit
from infomercial.exp import beta_bandit
from infomercial.exp import softbeta_bandit
from infomercial.local_gym import bandit
from infomercial.exp.meta_bandit import load_checkpoint
import gym
def plot_ep_results(env_name, results):
"""Plots!"""
# Env info
env = gym.make(env_name)
best = env.best
# Plot
fig = plt.figure(figsize=(6, 10))
grid = plt.GridSpec(4, 1, wspace=0.0, hspace=0.4)
# Arm
plt.subplot(grid[0, 0])
for i, result in enumerate(results):
plt.scatter(result["episodes"],
result["actions"],
color="black",
s=2,
alpha=.2)
plt.plot(result["episodes"],
np.repeat(best,
np.max(result["episodes"])+1),
color="red",
alpha=0.8,
ls='--',
linewidth=2)
plt.ylim(-.1, np.max(result["actions"])+1.1)
plt.ylabel("Arm choice")
plt.xlabel("Episode")
# ep
plt.subplot(grid[1, 0])
for i, result in enumerate(results):
label = None
if i == 0:
label = "$\epsilon$"
plt.scatter(result["episodes"],
result["epsilons"],
color="grey",
alpha=0.4,
s=2,
label=label)
plt.ylabel("Exploration\nparameter")
plt.xlabel("Episode")
_ = sns.despine()
# Q
plt.subplot(grid[2, 0])
for i, result in enumerate(results):
# label = None
# if i == 0:
# label = "$Q_E$"
# plt.scatter(result["episodes"],
# result["values_E"],
# color="purple",
# alpha=0.4,
# s=2,
# label=label)
label = None
if i == 0:
label = "$Q_R$"
plt.scatter(result["episodes"],
result["values_R"],
color="grey",
alpha=0.4,
s=2,
label=label)
# plt.plot(results[0]["episodes"],
# np.repeat(results[0]["tie_threshold"],
# np.max(results[0]["episodes"])+1),
# color="violet",
# alpha=0.8,
# ls='--',
# linewidth=2)
plt.ylabel("Value")
plt.xlabel("Episode")
plt.semilogy()
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
_ = sns.despine()
# Bests
plt.subplot(grid[3, 0])
for i, result in enumerate(results):
plt.plot(result["episodes"],
result["p_bests"],
color="red",
alpha=.2)
plt.ylabel("p(best)")
plt.xlabel("Episode")
plt.ylim(0, 1)
###Output
_____no_output_____
###Markdown
Load and process data
###Code
data_path ="/Users/qualia/Code/infomercial/data/"
exp_name = "exp225"
num_trials = 100
env_name = "DeceptiveBanditOneHigh10-v0"
results = []
for n in range(num_trials):
results.append(load_checkpoint(os.path.join(data_path, f"{exp_name}_{n+1}.pkl")))
###Output
_____no_output_____
###Markdown
Series
###Code
plot_ep_results(env_name, results)
###Output
_____no_output_____
###Markdown
Distributions
###Code
results[0].keys()
plt.hist([r["total_R"] for r in results], color="black", bins=20)
plt.xlabel("Total R")
plt.ylabel("Count")
plt.hist([r["p_bests"][-1] for r in results], color="black", bins=20, range=(0,1))
plt.xlabel("Final p(best)")
plt.ylabel("Count")
plt.hist([np.sum(r["regrets"]) for r in results], color="black", bins=20)
plt.xlabel("Total regret")
plt.ylabel("Count")
###Output
_____no_output_____ |
iam_principle_incident_response_notebook.ipynb | ###Markdown
TDR254: Building incident response playbooks for AWS. This notebook is to be used in case of malicious activity inside your AWS account. We will query CloudTrail logs via Athena from within this notebook in order to discover and mitigate malicious account activity. We'll be following best practices from the Well-Architected Framework - Security Pillar as we go through our investigation and remediation efforts. Setup - Load Libraries. In order to query Athena and interact with AWS we need to load several libraries and configure our environment.
###Code
import boto3 # the Python SDK for AWS
import pandas as pd # Pandas is a data analysis tool for Python
import sys
pd.set_option("max_colwidth", 150) # Set maximum column width for outputs to make easier to read
!{sys.executable} -m pip install PyAthena # This libary lets us query Athena from our notebook
region='us-east-1' #Set region variable to us-east-1 for commands
###Output
Requirement already satisfied: PyAthena in /home/ec2-user/anaconda3/envs/python3/lib/python3.6/site-packages (2.3.0)
Requirement already satisfied: botocore>=1.5.52 in /home/ec2-user/anaconda3/envs/python3/lib/python3.6/site-packages (from PyAthena) (1.21.45)
Requirement already satisfied: tenacity>=4.1.0 in /home/ec2-user/anaconda3/envs/python3/lib/python3.6/site-packages (from PyAthena) (8.0.1)
Requirement already satisfied: boto3>=1.4.4 in /home/ec2-user/anaconda3/envs/python3/lib/python3.6/site-packages (from PyAthena) (1.18.45)
Requirement already satisfied: s3transfer<0.6.0,>=0.5.0 in /home/ec2-user/anaconda3/envs/python3/lib/python3.6/site-packages (from boto3>=1.4.4->PyAthena) (0.5.0)
Requirement already satisfied: jmespath<1.0.0,>=0.7.1 in /home/ec2-user/anaconda3/envs/python3/lib/python3.6/site-packages (from boto3>=1.4.4->PyAthena) (0.10.0)
Requirement already satisfied: urllib3<1.27,>=1.25.4 in /home/ec2-user/anaconda3/envs/python3/lib/python3.6/site-packages (from botocore>=1.5.52->PyAthena) (1.26.6)
Requirement already satisfied: python-dateutil<3.0.0,>=2.1 in /home/ec2-user/anaconda3/envs/python3/lib/python3.6/site-packages (from botocore>=1.5.52->PyAthena) (2.8.1)
Requirement already satisfied: six>=1.5 in /home/ec2-user/anaconda3/envs/python3/lib/python3.6/site-packages (from python-dateutil<3.0.0,>=2.1->botocore>=1.5.52->PyAthena) (1.15.0)
WARNING: You are using pip version 21.2.4; however, version 21.3.1 is available.
You should consider upgrading via the '/home/ec2-user/anaconda3/envs/python3/bin/python -m pip install --upgrade pip' command.
###Markdown
Connect to AthenaNext we'll setup a connection to our Athena endpoint.
###Code
from pyathena import connect
conn = connect(s3_staging_dir='s3://reinforce2021-tdr254-clo-athenaqueryresultsbucket-1inggpjk2pxo7',
region_name='us-east-1')
###Output
_____no_output_____
###Markdown
Create Athena TablesIn order to query our CloudTrail logs we need to create the tables in Athena.
###Code
create_table = """
CREATE EXTERNAL TABLE IF NOT EXISTS cloudtrail_logs (
eventversion STRING,
useridentity STRUCT<
type:STRING,
principalid:STRING,
arn:STRING,
accountid:STRING,
invokedby:STRING,
accesskeyid:STRING,
userName:STRING,
sessioncontext:STRUCT<
attributes:STRUCT<
mfaauthenticated:STRING,
creationdate:STRING>,
sessionissuer:STRUCT<
type:STRING,
principalId:STRING,
arn:STRING,
accountId:STRING,
userName:STRING>>>,
eventtime STRING,
eventsource STRING,
eventname STRING,
awsregion STRING,
sourceipaddress STRING,
useragent STRING,
errorcode STRING,
errormessage STRING,
requestparameters STRING,
responseelements STRING,
additionaleventdata STRING,
requestid STRING,
eventid STRING,
resources ARRAY<STRUCT<
ARN:STRING,
accountId:STRING,
type:STRING>>,
eventtype STRING,
apiversion STRING,
readonly STRING,
recipientaccountid STRING,
serviceeventdetails STRING,
sharedeventid STRING,
vpcendpointid STRING
)
ROW FORMAT SERDE 'com.amazon.emr.hive.serde.CloudTrailSerde'
STORED AS INPUTFORMAT 'com.amazon.emr.cloudtrail.CloudTrailInputFormat'
OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
LOCATION 's3://reinforce2021-cloudtrail-jerchen-20211014/AWSLogs/910003606845/CloudTrail';"""
pd.read_sql(create_table, conn)
###Output
_____no_output_____
###Markdown
Investigate - Investigation 1. In the first investigation you have been made aware of a potential security incident in your AWS account: some suspicious behaviour has been identified by a development team due to numerous failed access attempts to different resources. At this time they can't provide you any more details, so you'll need to do your own investigation! Step 1 - Discovery. As you know there are some failed access attempts, a good starting point is to see what failed access attempts we have in our Amazon CloudTrail logs; the sample SQL query is included here and is in your Jupyter notebook as well. Check what those failed user access activities are.
###Code
query = """
select *
from cloudtrail_logs
where sourceIPAddress in
('54.240.193.129','10.0.0.1')
and errorMessage like '%Failed authentication%'
order by eventTime desc;
"""
results = pd.read_sql(query, conn)
results
query = """
select *
from cloudtrail_logs
where errorCode in
('Client.UnauthorizedOperation','Client.InvalidPermission.NotFound','Client.OperationNotPermitted','AccessDenied')
and useridentity.arn like '%iam%'
order by eventTime desc;
"""
results = pd.read_sql(query, conn)
results
query = """
select *
from cloudtrail_logs
where errorMessage like '%Failed authentication%'
and userIdentity.type = 'IAMUser'
order by eventTime desc;
"""
results = pd.read_sql(query, conn)
results
query = """
select *
from cloudtrail_logs
where errorCode in ('Client.UnauthorizedOperation','Client.InvalidPermission.NotFound','Client.OperationNotPermitted','AccessDenied')
and useridentity.arn like '%iam%'
and eventTime >= '2021-10-14T11:45:00Z'
and eventTime < '2021-10-14T11:45:30Z'
order by eventTime desc;
"""
results = pd.read_sql(query, conn)
results
query = """
select eventTime, eventSource, eventName, errorCode, errorMessage, responseElements, awsRegion, userIdentity.arn, sourceIPAddress, userAgent
from cloudtrail_logs
where errorCode in ('Client.UnauthorizedOperation','Client.InvalidPermission.NotFound','Client.OperationNotPermitted','AccessDenied')
and useridentity.arn like '%iam%'
and eventTime >= '2021-10-14T11:45:00Z'
and eventTime < '2021-10-14T11:45:30Z'
order by eventTime desc;
"""
results = pd.read_sql(query, conn)
results
query = """
select userIdentity.arn, count(*) as total
from cloudtrail_logs
where errorCode in ('Client.UnauthorizedOperation','Client.InvalidPermission.NotFound','Client.OperationNotPermitted','AccessDenied')
and useridentity.arn like '%iam%'
and eventTime >= '2021-10-13T11:45:00Z'
and eventTime < '2021-10-15T11:45:30Z'
group by userIdentity.arn
order by total desc;
"""
results = pd.read_sql(query, conn)
results
###Output
_____no_output_____
###Markdown
Step 2 - Initial investigation. Now that you have identified the potential IAM entity which has been compromised, you should do further investigation to identify what the entity has been attempting to do.
###Code
query = """
select *
from cloudtrail_logs
where userIdentity.arn='arn:aws:iam::910003606845:user/compromized_account'
and eventTime >= '2021-10-13T11:45:00Z'
and eventTime < '2021-10-15T11:45:30Z'
order by eventTime desc;
"""
results = pd.read_sql(query, conn)
results
###Output
_____no_output_____
###Markdown
Step 3 - Bring it all together. Bring together the previous queries to create a single query showing the event name, AWS service, and AWS region to which requests were being made by the compromised IAM entity.
###Code
query = """
select eventName, count(*) as total, eventSource, awsRegion
from cloudtrail_logs
where userIdentity.arn='arn:aws:iam::910003606845:user/compromized_account'
and eventTime >= '2021-10-13T11:45:00Z'
and eventTime < '2021-10-15T11:45:30Z'
group by eventName, eventSource, awsRegion
order by total desc;
"""
results = pd.read_sql(query, conn)
results
###Output
_____no_output_____
###Markdown
Step 4 - Containment. Now that you have identified the potential IAM entity which has been compromised, you need to perform containment activities. The first of these will be to find out which Access Key ID is being used by the account.
###Code
query = """
select eventName, count(*) as total, eventSource, awsRegion
from cloudtrail_logs
where userIdentity.arn='arn:aws:iam::910003606845:user/compromized_account'
group by eventName, eventSource, awsRegion
order by total desc;
"""
results = pd.read_sql(query, conn)
results
query = """
select useridentity.accesskeyid, count(*) as total
from cloudtrail_logs
where userIdentity.arn='arn:aws:iam::910003606845:user/compromized_account'
group by useridentity.accesskeyid
order by total desc;
"""
results = pd.read_sql(query, conn)
results
access_key_to_deactivate='AKIA5HYDRCU6VSTSXB5G'
username='jerchen'
iam = boto3.resource('iam', region_name=region)
access_key = iam.AccessKey(username,access_key_to_deactivate)
response_status = access_key.deactivate()
status_code = response_status['ResponseMetadata']['HTTPStatusCode']
if status_code == 200:
print('Key Disabled Successfully')
else:
print('Key deactivation failed')
username='USERNAME'
iam = boto3.client('iam', region_name=region)
response = iam.put_user_policy(UserName=username,PolicyName='Block',PolicyDocument='{"Version":"2012-10-17","Statement":{"Effect":"Deny","Action":"*","Resource":"*"}}')
status_code = response['ResponseMetadata']['HTTPStatusCode']
if status_code == 200:
print('Policy attached successfully')
else:
print('Policy attachment failed')
###Output
_____no_output_____
###Markdown
Investigation 2. In the second investigation you have been made aware of a potential security incident in your AWS account: some suspicious behaviour has been identified by an engineering team after they identified an unknown EC2 instance during a scheduled patching event. At this time they can't provide you any more details, so you'll need to do your own investigation! Step 1 - Discovery. As you know there are some suspicious EC2 instance(s), the first step is to try to identify whether any instance(s) are performing suspicious activities. Often these are discovery actions, such as listing S3 buckets to try to enumerate resources in your environment; this isn't a normal activity that you'd expect EC2 instances to be performing.
###Code
query = """
select useridentity.principalid,eventsource,eventname,count(*) as total
from cloudtrail_logs
where useridentity.principalid like '%:i-%'
and eventsource = 's3.amazonaws.com'
and eventname = 'ListBuckets'
group by useridentity.principalid,eventsource,eventname
order by total desc;
"""
results = pd.read_sql(query, conn)
results
###Output
_____no_output_____
###Markdown
Step 2 - Initial investigation. Now that you have identified the potential IAM entity which has been compromised, you should do further investigation to identify what the entity has been attempting to do.
###Code
query = """
"""
results = pd.read_sql(query, conn)
results
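# The query above is intentionally left blank as a workshop exercise. A hedged example
# (not the official answer): list every API call made with the suspicious instance
# credentials found in Step 1; the '%:i-%' pattern matches instance-profile principals
# and can be narrowed to the exact instance id you identified.
example_query = """
select eventTime, eventSource, eventName, errorCode, awsRegion, sourceIPAddress, userAgent
from cloudtrail_logs
where useridentity.principalid like '%:i-%'
order by eventTime desc;
"""
example_results = pd.read_sql(example_query, conn)
example_results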
###Output
_____no_output_____
###Markdown
Step 3 - Bring it all together
###Code
query = """
"""
results = pd.read_sql(query, conn)
results
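# Also left blank as an exercise. A hedged sketch mirroring Step 3 of Investigation 1:
# summarise which API calls, services and regions the suspicious instance credentials
# were used against, grouped with counts (narrow the principal id pattern as needed).
example_query = """
select eventName, count(*) as total, eventSource, awsRegion
from cloudtrail_logs
where useridentity.principalid like '%:i-%'
group by eventName, eventSource, awsRegion
order by total desc;
"""
example_results = pd.read_sql(example_query, conn)
example_results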
###Output
_____no_output_____
###Markdown
Step 4 - Containment. To secure the EC2 instance for further investigation, you should place it into a new, restricted security group (rather than changing the existing security group, as this won't terminate active sessions), and snapshot the EBS volume for future investigation.
###Code
import time
instance_id='<INSTANCE_ID>'
ec2=boto3.resource('ec2')
instance=ec2.Instance(instance_id)
vpc_id=instance.vpc_id
security_group=instance.security_groups[0]['GroupId']
ec2_client=boto3.client('ec2')
response=ec2_client.create_security_group(
GroupName='Restricted_Group',
Description='Restricts_access',
VpcId=vpc_id
)
restricted_sg=ec2.SecurityGroup(response['GroupId'])
response=restricted_sg.revoke_egress(
IpPermissions=restricted_sg.ip_permissions_egress
)
response=instance.modify_attribute(Groups=[restricted_sg.id])
ebs_vol=instance.block_device_mappings[0]['Ebs']['VolumeId']
response=ec2.create_snapshot(VolumeId=ebs_vol)
time.sleep(30)
instance.terminate()
###Output
_____no_output_____ |
004-Python 基础/022-Python 错误和异常.ipynb | ###Markdown
Preface: While learning we often run into one kind of error or another, and learning to read and understand errors is very important. The most common kinds of errors are: - syntax errors (SyntaxError) - name errors (NameError) - type errors (TypeError) - file-not-found errors (FileNotFoundError). When programming, errors are unavoidable, especially when you run someone else's code. In this lesson we will talk about how to handle Python errors and exceptions. In this lesson you will learn: how to read error messages, the common exception types, and how to handle exceptions with try/except.
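A quick taste of the last error type in that list (a small added example, not from the original lesson; it assumes no file named not_exist.txt is present):
open('not_exist.txt')   # FileNotFoundError: [Errno 2] No such file or directory: 'not_exist.txt'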
###Code
print("hello world
###Output
_____no_output_____
###Markdown
Interpretation: Look at the code and output above. The last line is the most obvious: Python reports a SyntaxError, that is, a "syntax error". Also look at the second-to-last line, where a small arrow points to the position where the error occurred, right after "world". The last line gives the specific description of the error: "EOL while scanning string literal". EOL here is short for end of line. The first line states the file name and line number, i.e. which line of which file the error occurred in. Syntax errors (SyntaxError) are usually fairly easy to spot. Code exceptions (Exception): more often, the syntax is fine but an error occurs at run time. Let's look at a few exception errors; these all come from the official documentation.
###Code
# Case 1: using 0 as the divisor
10*(1/0)
# Case 2: the name spam has not been defined
4+spam+3
# Case 3: mismatched types
'2'+2
###Output
_____no_output_____
###Markdown
Interpretation: The three snippets above raise three kinds of exceptions: 1. ZeroDivisionError (division by zero), 2. NameError (undefined name), 3. TypeError (type mismatch). These are all exception types; they actually involve classes from object-oriented programming, which is covered in the [Classes]() lesson of Advanced Python. Let's go back and look at the specific description of each exception and what each of the three says. Likewise, besides the location of the error, we also see a hint called "Traceback", which means tracing back (the most recent call), and it should be read from top to bottom. Our examples here are quite simple so it is hard to see, so let's make things a bit more complex.
###Code
# define a function
def devide(a,b):
return a / b
print(devide(1,0))
###Output
_____no_output_____
###Markdown
Interpretation: This way we can clearly see that the error first appears on line 5 inside `print()`, and, digging deeper, it is the `devide` function we defined that fails. What error? A division-by-zero error. You can also see that `in <module>` and `in devide(a,b)` both clearly indicate where the error occurred; "module" here means the error occurred at the outermost level of the code. Of course our problem here is fairly simple; when things get more complex we need to analyse carefully based on these hints. Function definition with def is covered in the [Defining Functions]() lesson of Advanced Python. Other exceptions: besides the common ones, there are many other exception types, and you will run into all kinds of exceptions in your own code in the future, but usually they can all be handled in the ways above. A good way to solve exceptions: when you hit an exception, the simplest and most direct approach is to paste it into a search engine and find the best solution; the best answers are usually here: [stackoverflow](https://stackoverflow.com/). Of course you can also study the [official Python documentation](https://docs.python.org/3/). Anticipating and guarding against exceptions: let me go back to the little game we mentioned before, which is the code below:
###Code
import random
num = random.randint(1,100) # get a random number between 1 and 100
is_done = False # flag for whether the player has guessed correctly
count = 0 # how many times the player has played
while not is_done:
guess = int(input('请输入一个[1,100]的整数:'))
if guess == num:
is_done = True
elif guess > num:
print('大了,再猜猜')
elif guess < num:
print('小了,再猜猜')
count += 1
print('恭喜你,猜了{}次,终于猜对了!答案是{}!'.format(count,num))
###Output
请输入一个[1,100]的整数:abc
###Markdown
Interpretation: On line 8 we use the input function to accept data and return a string, and the int function to convert the string to an [integer](). What if a mischievous user doesn't enter an integer but some other string instead? (**Try it yourself!**) After trying it, we can see the program throws a `ValueError`, with the explanation "invalid literal for int() with base 10: 'abc'". Because we haven't handled the exception at all, when the user accidentally enters characters that don't follow the rules, an exception is thrown and the program exits. We can check the user's input, determine whether the user entered valid characters, and only convert the input to an integer when it is valid.
###Code
guess_str = input('输入一个整数:')
if str.isdecimal(guess_str):
guess = int(guess_str)
else:
print('您输入的不是整数:' + guess_str)
###Output
输入一个整数:asdf
您输入的不是整数:asdf
###Markdown
In everyday coding there are many similar exceptional situations we need to consider. In particular, when facing user input, blindly trusting the input can not only crash the program or produce wrong results, but, when our code runs on a network, also cause many security issues and expose us to attacks by hackers. Guarding against all kinds of exceptional situations while coding is called defensive programming. There is plenty of material introducing defensive programming and offering advice; it is recommended you search and study it to build an awareness of secure programming. In the code above, we used an IF statement to perform an extra check on the user input. In fact, Python's exception handling features can solve this problem more simply and elegantly. Let's look at an example first:
###Code
try:
guess = int(input('输入一个整数:'))
except ValueError:
print('输入错误,请您再试一次')
continue
###Output
_____no_output_____
###Markdown
Interpretation: This code uses Python's try and except keywords. The statement can be understood as: try to run the statements after the try keyword; if the code runs without problems, the statement finishes; if the code throws a ValueError exception, the statements after except are executed instead. In the loop of our game code, we can wrap the user-input statement with similar code and add a continue statement inside the except block. That way, when the user's input is wrong and an exception is thrown, we end the current iteration and move on to the next one, letting the user enter input again. Try rewriting it yourself, or copy the code from the Kele (可乐) code repository, and see whether the game experience is much better.
###Code
import random
num = random.randint(1,100) # get a random number between 1 and 100
is_done = False # flag for whether the player has guessed correctly
count = 0 # how many times the player has played
while not is_done:
try:
guess = int(input('请输入一个[1,100]的整数:'))
except ValueError:
print('输入错误,请您再试一次')
continue
if guess > 100 or guess < 1:
print('输入超出范围,请您再试一次')
else:
if guess == num:
is_done = True
elif guess > num:
print('大了,再猜猜')
elif guess < num:
print('小了,再猜猜')
count += 1
print('恭喜你,猜了{}次,终于猜对了!答案是{}!'.format(count,num))
###Output
请输入一个[1,100]的整数:50
大了,再猜猜
请输入一个[1,100]的整数:25
大了,再猜猜
请输入一个[1,100]的整数:20
大了,再猜猜
请输入一个[1,100]的整数:10
小了,再猜猜
请输入一个[1,100]的整数:15
小了,再猜猜
请输入一个[1,100]的整数:16
恭喜你,猜了6次,终于猜对了!答案是16!
###Markdown
Interpretation: The code above is called exception catching and handling. With exception catching, after catching an exception we usually notify the user, write a log entry, or change the flow of the code, just like the continue statement above. Similar to the ELIF clause of an IF statement, the except clause can be repeated multiple times; in addition, similar to the ELSE clause of an IF statement, the finally keyword can be used at the end of a try statement, and Python guarantees that the code in the finally block always runs; the try statement also supports its own else clause, whose code always runs when no exception occurs. Let's look at the example below:
###Code
try:
guess = int(input('输入一个整数:')) # still prompt the user to enter an integer
except ValueError:
print('输入错误,请您再试一次') # tell the user the input was invalid
except KeyboardInterrupt: # next, we catch another exception, KeyboardInterrupt
print('输入中断') # this exception means the input was interrupted; we report it to the user
else: # we add an else clause
print('输入正常') # the else clause always runs when no exception occurs; we report it to the user
finally: # finally, we add a finally clause
print('这句话总是会执行')
###Output
输入中断
这句话总是会执行
输入一个整数:abc
|
notebooks/exploratory/4. Grid_Search.ipynb | ###Markdown
Grid Search & Cross Validation___ Grid Search and CV* 1.1 Train test strategy and hyper parameter tuning* 1.2 Grid Search with Early Stopping* 1.3 Parameter grid* 1.4 Predicting offer completion after viewing* 1.5 Predicting offer completion binary Results* 2.0 Grid Search Results* 2.1 Results - completion after viewing multiclass* 2.2 Results - completion binary* 2.3 Results - comparison* 2.4 Results - analysis
###Code
# mount google drive if running in colab
import os
import sys
if os.path.exists('/usr/lib/python3.6/'):
from google.colab import drive
drive.mount('/content/drive/')
sys.path.append('/content/drive/My Drive/Colab Notebooks/Starbucks_Udacity')
%cd /content/drive/My Drive/Colab Notebooks/Starbucks_Udacity/notebooks/exploratory
else:
sys.path.append('../../')
import numpy as np
import os
import pandas as pd
import matplotlib.pyplot as plt
import progressbar
import catboost
import joblib
from catboost import CatBoostClassifier
from catboost import Pool
from catboost import MetricVisualizer
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report, recall_score
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
import timeit
from sklearn.model_selection import train_test_split, GridSearchCV, GroupKFold
from sklearn.model_selection import ParameterGrid
pd.set_option('display.max_columns', 500)
pd.set_option('display.max_rows', 500)
pd.options.display.max_seq_items = 1000
pd.set_option('max_colwidth', 200)
from sklearn.model_selection import GridSearchCV, TimeSeriesSplit, GroupShuffleSplit
import seaborn as sns
%load_ext autoreload
%autoreload 2
%aimport src.models.train_model
%aimport src.data.make_dataset
from src.data import make_dataset
from src.data.make_dataset import save_file
from src.models.train_model import gridsearch_early_stopping, generate_folds
from src.models.train_model import label_creater
from src.utilities import cf_matrix
from src.models.train_model import exploratory_training
###Output
The autoreload extension is already loaded. To reload it, use:
%reload_ext autoreload
###Markdown
1.1 Train test strategy and hyper parameter tuning. Each offer is time dependent and we have embedded past data for each customer within the features of each offer observation. This is therefore a time series problem and, in order to build a predictive model, our test data needs to be in the future relative to our training data. We will therefore use Time Series Split by 'time_days' for our cross validation strategy. A randomised train test split in this case would cause data leakage. * We will run a grid search with 5 fold cross validation on the training set to determine optimal learning rate, depth and l2_leaf_reg parameters. * Once optimal parameters are determined, we will rerun training using the whole train set, and measure results against the test set. * We can then analyse feature importance and determine if any features can be dropped to improve model performance. 1.2 Grid Search with Early Stopping. In order to utilise early stopping during grid search we are unable to use the sklearn GridSearchCV and will instead need to use our own custom function. Early stopping means we do not need to specify the number of iterations for CatBoost to run; instead CatBoost will check validation error against the test data in each fold and stop training if logloss (our loss function) begins to increase for a given number of iterations.
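As a small illustration (an added sketch, not part of the original analysis), `TimeSeriesSplit` always yields training indices that come before the test indices, so each validation fold is "in the future" relative to its training data:
from sklearn.model_selection import TimeSeriesSplit
import numpy as np
for train_idx, test_idx in TimeSeriesSplit(n_splits=3).split(np.arange(12)):
    print(train_idx, '->', test_idx)
# [0 1 2] -> [3 4 5]
# [0 1 2 3 4 5] -> [6 7 8]
# [0 1 2 3 4 5 6 7 8] -> [9 10 11]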
###Code
def generate_folds(cv, X_train, y_train):
'''
Iterate through cv folds and split into list of folds
Checks that each fold has the same % of positive class
Parameters
-----------
cv: cross validation generator
Returns
-------
X_train, X_test, y_train, y_test: DataFrames
'''
train_X, train_y, test_X, test_y = [], [], [], []
for i in cv:
train_X.append(X_train.iloc[i[0]])
train_y.append(y_train.iloc[i[0]])
test_X.append(X_train.iloc[i[1]])
test_y.append(y_train.iloc[i[1]])
print('positive classification % per fold and length')
for i in range(len(train_X)):
print('train[' + str(i) + ']' , round(train_y[i].sum() / train_y[i].count(), 4),
train_y[i].shape)
print('test[' + str(i) + '] ' , round(test_y[i].sum() / test_y[i].count(), 4),
test_y[i].shape)
return train_X, train_y, test_X, test_y
def gridsearch_early_stopping(cv, X, y, folds, grid, cat_features=None, save=None):
'''
Perform grid search with early stopping across folds specified by index
Parameters
-----------
cv: cross validation
X: DataFrame or Numpy array
y: DataFrame or Numpy array
folds: list of fold indexes
grid: parameter grid
save: string, excluding file extension (default=None)
saves results_df for each fold to folder '../../models'
'''
if np.unique(y).size <= 2:
loss_function = 'Logloss'
else:
loss_function = 'MultiClass'
# generate data folds
train_X, train_y, test_X, test_y = generate_folds(cv, X, y)
# iterate through specified folds
for fold in folds:
# assign train and test pools
test_pool = Pool(data=test_X[fold], label=test_y[fold], cat_features=cat_features)
train_pool = Pool(data=train_X[fold], label=train_y[fold], cat_features=cat_features)
# creating results_df dataframe
results_df = pd.DataFrame(columns=['params' + str(fold), loss_function + str(fold),
'Accuracy'+ str(fold), 'iteration'+ str(fold)])
best_score = 99999
# iterate through parameter grid
for params in ParameterGrid(grid):
# create catboost classifer with parameter params
model = CatBoostClassifier(cat_features=cat_features,
early_stopping_rounds=50,
task_type='GPU',
custom_loss=['Accuracy'],
iterations=3000,
#class_weights=weights,
**params)
# fit model
model.fit(train_pool, eval_set=test_pool, verbose=400)
# append results to results_df
print(model.get_best_score()['validation'])
results_df = results_df.append(pd.DataFrame(
[[params, model.get_best_score()['validation'][loss_function],
model.get_best_score()['validation']['Accuracy'],
model.get_best_iteration()]],
columns=['params' + str(fold), loss_function + str(fold),
'Accuracy' + str(fold), 'iteration' + str(fold)]))
# save best score and parameters
if model.get_best_score()['validation'][loss_function] < best_score:
best_score = model.get_best_score()['validation'][loss_function]
best_grid = params
print("Best logloss: ", best_score)
print("Grid:", best_grid)
save_file(results_df, save + str(fold) + '.joblib', dirName='../../models')
display(results_df)
###Output
_____no_output_____
###Markdown
1.3. Parameter grid. We will optimise across the following parameters: * Depth - The depth that each decision tree can grow to. Greater depth increases the algorithm's ability to fit the data, but higher depth can also lead to overfitting to the training set. * Learning rate - This is the step size rate of learning for each iteration. Higher learning rates will lead the algorithm to learn more quickly, however there may be a tendency to overstep the optimal minimum of the loss function and therefore not capture enough detail. Learning rate balances a trade-off between speed and accuracy. * l2_leaf_reg - This is a regularisation parameter utilised in CatBoost. Values can range from 0 to infinity. For this dataset, CatBoost default parameters are: * depth: 6 * learning_rate: 0.03 * l2_leaf_reg: 3. I have therefore chosen a parameter grid spread around these default values:
###Code
params = {'depth': [6,7,8,9],
'learning_rate': [0.07, 0.03, 0.01],
'l2_leaf_reg':[1,3,5,10]}
cat_features = [0,4,5,92,93,94,95,96,97]
###Output
_____no_output_____
###Markdown
1.4. Predicting Multiclass - offer completion after viewing
###Code
complete_from_view = {'completed_not_viewed': 2,
'completed_before_viewed': 2,
'complete_anyway': 1,
'completed_responsive': 1,
'incomplete_responsive': 0,
'no_complete_no_view': 0,
'unresponsive': 0}
df = joblib.load('../../data/interim/transcript_final_optimised.joblib')
df = src.models.train_model.label_creater(df, label_grid=complete_from_view)
df.sort_values('time_days', inplace=True)
X = df.drop('label', axis=1)
y = df.label
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, shuffle=False,
random_state=42)
grid = params
cv = TimeSeriesSplit(n_splits=5).split(X_train, y_train)
folds = list(range(0,5))
gridsearch_early_stopping(cv, X_train, y_train, folds, grid, cat_features=cat_features,
save='multiclass_gridsearch_inc10')
###Output
_____no_output_____
###Markdown
1.5 Predicting Binary - offer completion
###Code
complete = {'completed_not_viewed': 1,
'completed_before_viewed': 1,
'complete_anyway': 1,
'completed_responsive': 1,
'incomplete_responsive': 0,
'no_complete_no_view': 0,
'unresponsive': 0}
df = joblib.load('../../data/interim/transcript_final_optimised.joblib')
df = src.models.train_model.label_creater(df, label_grid=complete)
df.sort_values('time_days', inplace=True)
X = df.drop('label', axis=1)
y = df.label
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, shuffle=False,
random_state=42)
grid = params
cv = TimeSeriesSplit(n_splits=5).split(X_train, y_train)
folds = list(range(0,5))
gridsearch_early_stopping(cv, X_train, y_train, folds, grid, cat_features=cat_features,
save='complete_gridsearch')
###Output
_____no_output_____
###Markdown
2. Grid Search Results. The following function brings together the logloss and accuracy results for each fold. It calculates the accuracy mean and logloss mean for each parameter set across each fold, highlighting the best scores.
###Code
def grid_search_results(raw_file, num_folds):
'''
Loads raw cross validation fold results.
Displays results highlighting best scores
Parameters
-----------
raw_file: string, the name of the file excluding fold number and the '.joblib' extension
num_folds: number of cv folds
Returns
-------
results DataFrame
'''
# list of folds
results_files = [0 for i in range(0, num_folds)]
# read results files for each fold
for i in range(0, num_folds):
results_files[i] = joblib.load(f'../../models/{raw_file}{i}.joblib')
# join results files in one dataframe
results_df = pd.concat([results_files[i] for i in range(0, num_folds)], axis=1)
metrics = int(results_df.shape[1] / num_folds - 1)
# drop extra params columns
results_df.rename(columns={"params0": "Params"}, inplace=True)
results_df.drop([i for i in results_df.columns if 'params' in i], axis=1, inplace=True)
# convert data columns to numeric
def to_numeric_ignore(x, errors='ignore'):
return pd.to_numeric(x, errors=errors)
results_df = results_df.apply(to_numeric_ignore)
# loops through metrics and create mean column for each metric
metric_names=[]
for i in results_df.columns[0:metrics+1]:
i = i[:-1]
metric_names.append(i)
results_df[i + '_mean'] = results_df[[x for x in results_df.columns
if i in x]].mean(axis=1)
results_df.reset_index(drop=True, inplace=True)
# instantiating best_scores dataframe
best_scores = pd.DataFrame(columns=['Params', 'Metric', 'Score'])
negative_better = ['MultiClass', 'iteration', 'logloss']
positive_better = ['Accuracy']
# get index of best parameters
best_param_idx = []
for i in metric_names:
if i in negative_better:
best_param_idx = results_df[i+ '_mean'].idxmin(axis=0)
if i in positive_better:
best_param_idx = results_df[i+ '_mean'].idxmax(axis=0)
row = pd.DataFrame({'Metric': [i + '_mean'],
'Params': [results_df.loc[best_param_idx, 'Params']],
'Score': [results_df.loc[best_param_idx, i + '_mean']]})
best_scores = best_scores.append(row, ignore_index=True)
results_df.insert(0, 'Parameters', results_df.Params)
results_df.drop(['Params', 'Param_mean'], axis=1, inplace=True)
best_scores = best_scores[best_scores.Metric != 'Param_mean']
display(best_scores)
negative_columns = []
positive_columns = []
# highlight columns where negative metrics are better
for i in negative_better:
negative_columns.extend([x for x in results_df.columns if i in x])
# highlight columns where positive metrics are better
for i in positive_better:
positive_columns.extend([x for x in results_df.columns if i in x])
display(results_df.style
.highlight_max(subset = positive_columns, color='lightgreen')
.highlight_min(subset= negative_columns, color='lightgreen'))
return results_df, best_scores
###Output
_____no_output_____
###Markdown
2.1. Results - completion after viewing multiclass
###Code
results_complete_after, best_scores_complete_after = grid_search_results('multiclass_gridsearch_inc10', 5)
###Output
_____no_output_____
###Markdown
Here we can see that the parameters that generated the best mean accuracy score were:
###Code
best_params = best_scores_complete_after.Params[0]
best_params
###Output
_____no_output_____
###Markdown
Across the whole breadth of parameters the standard deviation of the mean accuracy per parameter combination was only 0.001454. This indicates very marginal difference in model accuracy when selecting between parameters.
###Code
results_complete_after.Accuracy_mean.describe()
###Output
_____no_output_____
###Markdown
2.2 Results - Completion binary
###Code
results_complete, best_scores = grid_search_results('complete_gridsearch', 5)
###Output
_____no_output_____
###Markdown
The best parameters were:
###Code
best_params = best_scores.Params[2]
best_params
results_complete.Accuracy_mean.describe()
###Output
_____no_output_____
###Markdown
Again we see the standard deviation of the accuracy mean is very low, below 0.000987. Again there is only a marginal difference in accuracy between the chosen parameters. 2.3 Results Comparison. We will now retrain the two models, this time using the full train set and scoring against the test set. We can then compare the accuracy for: * Default parameters with no feature engineering * Default parameters with feature engineering * Optimised parameters with no feature engineering * Optimised parameters with feature engineering
###Code
parameters = {'complete_from_view': {'depth': 7, 'l2_leaf_reg': 1, 'learning_rate': 0.01},
'complete': {'depth': 9, 'l2_leaf_reg': 1, 'learning_rate': 0.03},
'default': {'depth': 6, 'l2_leaf_reg': 3, 'learning_rate': 0.03}}
labelling = {'complete_from_view': {'failed': 0, 'complete after':1, 'complete before':2},
'complete': {'failed': 0, 'complete':1}}
experiments = ['complete_from_view', 'complete']
compact_label = {'complete_from_view': {'failed': 0, 'complete after':1, 'complete before':2},
'complete': {'failed': 0, 'complete':1}}
def compare_accuracies(experiments, compact_label, labelling, parameters):
'''
Trains CatBoost Classifier across specified hyper parameter, label, feature and experiment
sets. Compares and returns results in DataFrame
Saves results as '../../models/results_summary.joblib'
Parameters
----------
experiments: list of experiment name strings
compact_label: dictionary of dictionary compact labels
labelling: dictionary of dictionary label mappings per experiment
parameters: dictionary of dictionary optimal and default parameters
Returns
-------
DataFrame
'''
results_summary=[]
# train classifer across parameter, label, feature, experiment combinations
for engineering in [True, False]:
for experiment in experiments:
for param_selection in ['default', experiment]:
compact = compact_label[experiment]
results_summary.append([engineering, experiment,
parameters[param_selection],
exploratory_training(
labels=labelling[experiment],
labels_compact=compact,
feature_engineering=engineering, verbose=False,
return_model=False, **parameters[param_selection])])
pd.set_option('max_colwidth', 200)
#convert to DataFrame
results_accuracy = pd.DataFrame(results_summary,
columns=['Feature Engineering', 'Experiment', 'Parameters',
'Accuracy'])
# reorder columns
results_accuracy = results_accuracy[['Parameters', 'Experiment',
'Feature Engineering', 'Accuracy']]
results_accuracy.sort_values(['Experiment', 'Feature Engineering', 'Accuracy'], inplace=True)
# calculate differences between accuracies
results_accuracy['Delta'] = results_accuracy.Accuracy.diff(periods=1)
results_accuracy.fillna(0, inplace=True)
joblib.dump(results_summary, '../../models/results_summary.joblib', compress=True)
return results_accuracy.style.format({'Delta': "{:.2%}"})
# uncomment to run, otherwise load results from results_summary.joblib
#results_accuracy = compare_accuracies(experiments, compact_label, labelling, parameters)
results_accuracy = joblib.load('../../models/results_summary.joblib')
results_accuracy.sort_values(['Experiment', 'Feature Engineering', 'Accuracy'], inplace=True)
results_accuracy['Delta'] = results_accuracy.Accuracy.diff(periods=1).fillna(0)
results_accuracy.style.format({'Delta': "{:.2%}"})
###Output
_____no_output_____ |
python/basis/jupyter/String.ipynb | ###Markdown
Strings
###Code
str = "my name is baird."
str2 = "my name is {name}, I'm from {country}."
###Output
_____no_output_____
###Markdown
capitalize()
###Code
str.capitalize() # convert the first letter to upper case
###Output
_____no_output_____
###Markdown
count()
###Code
str.count("a") #统计字符a出现的次数
###Output
_____no_output_____
###Markdown
center()
###Code
str.center(50,"*") #字符串剧中显示,长度50,不足50时用*填充
###Output
_____no_output_____
###Markdown
endswith()
###Code
str.endswith("rd") #是否以 td 结尾
###Output
_____no_output_____
###Markdown
expandtabs()
###Code
str.expandtabs(tabsize=30) # expand each \t into 30 spaces
###Output
_____no_output_____
###Markdown
find()
###Code
str.find("m") #返回m第一次出现的位置,失败返回-1
###Output
_____no_output_____
###Markdown
rfind()
###Code
str.rfind("m") #返回最后一个m的位置,失败返回-1
###Output
_____no_output_____
###Markdown
format()
###Code
str2.format(name="Baird",country="China") #格式化字符串
str2.format_map({
"name":"Baird",
"country":"China"
}) #字典方式格式化字符串
###Output
_____no_output_____
###Markdown
isalnum()
###Code
str.isalnum() # is it only Latin letters and Arabic numerals
###Output
_____no_output_____
###Markdown
isalpha()
###Code
str.isalpha() # is it only Latin letters
###Output
_____no_output_____
###Markdown
isdigit()
###Code
str.isdigit() # is it a pure integer (digits only)
###Output
_____no_output_____
###Markdown
isdecimal()
###Code
str.isdecimal() # is it composed only of decimal digits
###Output
_____no_output_____
###Markdown
islower()
###Code
str.islower() # is it all lower case
###Output
_____no_output_____
###Markdown
isupper()
###Code
str.isupper() # is it all upper case
###Output
_____no_output_____
###Markdown
join()
###Code
"+".join(['1','2','3']) #将 + 穿插在字符123之中
###Output
_____no_output_____
###Markdown
ljust()
###Code
str.ljust(50,"*") #字符串居左,长度不足50时用*填充
###Output
_____no_output_____
###Markdown
rjust()
###Code
str.rjust(50,"*") #字符串居右,长度不足50时用*填充
###Output
_____no_output_____
###Markdown
lower()
###Code
str.lower() # convert to lower case
###Output
_____no_output_____
###Markdown
upper()
###Code
str.upper() # convert to upper case
###Output
_____no_output_____
###Markdown
strip()
###Code
"\n123\n".strip() #去除换行
###Output
_____no_output_____
###Markdown
lstrip()
###Code
"\n123\n".lstrip() #去除字符串左边的换行
###Output
_____no_output_____
###Markdown
rstrip()
###Code
"\n123\n".rstrip() #去除字符串右边的换行
###Output
_____no_output_____
###Markdown
maketrans() 和 translate()
###Code
encryption = str.maketrans("ni","52") # build the translation dict with the rule n->5, i->2
encryption
str.translate(encryption) # encode using the translation dict
decryption = str.maketrans("52","ni") # the reverse translation
decryption
str.translate(encryption).translate(decryption) # encode first, then decode
###Output
_____no_output_____
###Markdown
replace()
###Code
str.replace("my","His",1) #将字符串中第一个 my 用 His 替换
###Output
_____no_output_____
###Markdown
split()
###Code
str.split() # split the string on whitespace, returning a list
str.split("m") # split the string on a custom character, returning a list
###Output
_____no_output_____
###Markdown
swapcase()
###Code
"abcDEF".swapcase() #大小写互换
###Output
_____no_output_____
###Markdown
title()
###Code
str.title() # convert to title case: capitalise the first letter of every word
###Output
_____no_output_____ |
scripts/Antarctica_kmeans_test1.ipynb | ###Markdown
First full pipeline test on Antarctica
###Code
from icepyx import icesat2data as ipd
import numpy as np
import os
import shutil
import h5py
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import sys
import pyproj
import proplot as plot
%matplotlib widget
short_name = 'ATL06'
spatial_extent = [31.5, -70.56, 33.73, -69.29]
date_range = ['2020-03-30','2020-04-1']
region_a = ipd.Icesat2Data(short_name, spatial_extent, date_range)
username = "JordiBN"
email = "[email protected]"
region_a.earthdata_login(username,email)
#region_a.order_vars.avail()
region_a.order_vars.append(var_list=['count'])
region_a.download_granules('/home/jovyan/surface_classification/data')
FILE_NAME = '/home/jovyan/data/processed_ATL06_20200330121520_00600712_003_01.h5'
f = h5py.File(FILE_NAME, mode='r')
count = f['gt1l/residual_histogram/count'][:] # has units of n_histograms, n_bins
lat_mean = f['gt1l/residual_histogram/lat_mean'][:]
lon_mean = f['gt1l/residual_histogram/lon_mean'][:]
h_li = f['gt1l/land_ice_segments/h_li'][:]
h_lat = f['gt1l/land_ice_segments/latitude'][:]
h_lon = f['gt1l/land_ice_segments/longitude'][:]
#latitude = f['/gt2r/heights/lat_ph']
#longitude = f['/gt2r/heights/lon_ph']
#height = f['gt2r/heights/h_ph']
###Output
_____no_output_____
###Markdown
Cropping the data far from surface in each histogram.
###Code
data = count[:, 200:550]
fig=plt.figure(figsize=(10,8))
plt.title("Training data")
ax = fig.add_subplot(111)
h = ax.imshow(np.transpose(data),vmin=0,vmax=30,cmap='inferno')
plt.colorbar(h)
plt.show()
###Output
/srv/conda/envs/notebook/lib/python3.7/site-packages/ipykernel_launcher.py:1: RuntimeWarning: More than 20 figures have been opened. Figures created through the pyplot interface (`matplotlib.pyplot.figure`) are retained until explicitly closed and may consume too much memory. (To control this warning, see the rcParam `figure.max_open_warning`).
"""Entry point for launching an IPython kernel.
###Markdown
Plot the research area of the above file (note: the track still needs to be added to this image)
###Code
data_root='/srv/tutorial-data/land_ice_applications/'
! cd ..; [ -d pointCollection ] || git clone https://www.github.com/smithB/pointCollection.git
sys.path.append(os.path.join(os.getcwd(), '..'))
import pointCollection as pc
###Output
_____no_output_____
###Markdown
Plotting track on map
###Code
spatial_extent_ps = [spatial_extent[0], spatial_extent[2], spatial_extent[1], spatial_extent[3]]
## we will want to set colorbar parameters based on the chosen variable
vmin=0
vmax=6
ticks=np.arange(vmin,vmax+1,1)
plt.figure(figsize=(8,8), dpi= 90)
ax = plt.axes(projection=ccrs.SouthPolarStereo(central_longitude=0)) # choose polar sterographic for projection
ax.coastlines(resolution='50m', color='black', linewidth=1)
ax.set_extent(spatial_extent_ps, ccrs.PlateCarree())
plt.plot(lon_mean,lat_mean,transform=ccrs.PlateCarree())
plt.show()
###Output
/srv/conda/envs/notebook/lib/python3.7/site-packages/ipykernel_launcher.py:8: RuntimeWarning: More than 20 figures have been opened. Figures created through the pyplot interface (`matplotlib.pyplot.figure`) are retained until explicitly closed and may consume too much memory. (To control this warning, see the rcParam `figure.max_open_warning`).
###Markdown
Plot comparing lon_mean and lat_mean from the histograms with the beam lat and lon
###Code
plt.figure()
plt.plot(h_lon,h_lat,'ob' )
plt.plot(lon_mean, lat_mean,'.r')
###Output
/srv/conda/envs/notebook/lib/python3.7/site-packages/ipykernel_launcher.py:1: RuntimeWarning: More than 20 figures have been opened. Figures created through the pyplot interface (`matplotlib.pyplot.figure`) are retained until explicitly closed and may consume too much memory. (To control this warning, see the rcParam `figure.max_open_warning`).
"""Entry point for launching an IPython kernel.
###Markdown
Unsupervised learning of ATL06 residual histograms
###Code
from sklearn.cluster import KMeans
print("Training data shape: " + str(data.shape))
# Use int random_state in order to make centroid initialization deterministic
kmeans = KMeans(n_clusters=4, random_state=1).fit(data)
# Display classified labels
print("\nClassified labels: " + str(kmeans.labels_))
print("\nK-means labels shape: " + str(kmeans.labels_.shape))
###Output
Training data shape: (523, 350)
Classified labels: [0 0 0 0 0 3 3 3 3 3 3 3 0 0 3 3 3 3 3 3 3 3 3 3 3 0 0 0 0 0 0 0 0 0 0 0 0
0 0 3 0 3 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 0 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
3 3 3 0 0 0 0 0 0 0 0 0 3 0 0 0 0 0 0 3 3 3 0 0 0 3 3 3 3 3 3 3 3 3 0 0 3
2 3 3 3 3 2 2 2 2 3 3 3 3 3 3 2 2 2 2 2 2 3 3 3 3 2 2 2 1 1 2 2 2 1 1 1 1
2 3 3 2 2 1 1 1 1 1 1 1 2 3 1 1 1 3 3 3 3 3 1 1 2 2 3 3 2 2 2 2 1 1 1 1 2
2 2 1 2 2 1 1 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 1 1 1 1 1
1 2 2 1 1 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 2 2 2 3 3 3 3 3 3 3 3
0 3 3 3 3 3 3 3 3 3 3 0 3 0 3 3 3 3 3 3 3 3 3 3 3 3 2 3 3 2 3 3 3 3 3 3 3
3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 2 2 3 3 3 3 3 3 2 3 2 2 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1]
K-means labels shape: (523,)
###Markdown
We plot the classified labels
###Code
fig1, ax1 = plot.subplots(ncols=1, nrows=2, share=0, width=5, height=6)
fig1.suptitle("Classified labels along transect")
ax1[0].set_ylabel('Histogram frequency')
ax1.format(
abc=True, abcloc='ul',
ygridminor=True,
ytickloc='both', yticklabelloc='left'
)
# Residual histograms
ax1[0].imshow(np.transpose(data),vmin=0,vmax=30,cmap='inferno')
ax1[0].colorbar(h)
# Classified labels
ax1[1].scatter(range(0,data.shape[0]), kmeans.labels_, c=kmeans.labels_, cmap='viridis')
ax1[1].set_ylabel('Classification label')
ax1[1].set_xlabel('Segments along track')
plt.show()
###Output
/srv/conda/envs/notebook/lib/python3.7/site-packages/proplot/ui.py:492: RuntimeWarning: More than 20 figures have been opened. Figures created through the pyplot interface (`matplotlib.pyplot.figure`) are retained until explicitly closed and may consume too much memory. (To control this warning, see the rcParam `figure.max_open_warning`).
**kwargs
###Markdown
We display the labels on top of the raster map
###Code
spatial_extent = np.array(spatial_extent)
lat=spatial_extent[[1, 3, 3, 1, 1]]
lon=spatial_extent[[2, 2, 0, 0, 2]]
print(lat)
print(lon)
# project the coordinates to Antarctic polar stereographic
xy=np.array(pyproj.Proj(3031)(lon, lat))
# get the bounds of the projected coordinates
XR=[np.nanmin(xy[0,:]), np.nanmax(xy[0,:])]
YR=[np.nanmin(xy[1,:]), np.nanmax(xy[1,:])]
MOA=pc.grid.data().from_geotif(os.path.join(data_root, 'MOA','moa_2009_1km.tif'), bounds=[XR, YR])
# show the mosaic:
plt.figure()
MOA.show(cmap='gray', clim=[14000, 17000])
ax.stock_img()
plt.plot(xy[0,:], xy[1,:])
# This still needs to be fixed in order to properly display the transect on the map
x_polar, y_polar=np.array(pyproj.Proj(3031)(lon_mean, lat_mean))
plt.scatter(x_polar, y_polar, c=kmeans.labels_)
plt.title('Labeled transect')
###Output
[-70.56 -69.29 -69.29 -70.56 -70.56]
[33.73 33.73 31.5 31.5 33.73]
|
Netflix_subscription_fee.ipynb | ###Markdown
![unnamed.webp](data:image/webp;base64,UklGRsgMAABXRUJQVlA4ILwMAAAQTACdASqcAckAPj0ejkQiIaGSqSTwIAPEpu56quGlbUuZ+b/kV4V2Vuc/ih/SP93/tOs22/7s/t/1Mhiekr73/Rv7R/b/7n////189P8t/Wv53/Sfk79yXuAfrX/nf5l/Yfjr6EvMB/Rf6F/pP8r+//yuf3f/Qf8L3JfsB7AH6W/f/8Q/sg/up7DX8q/u//i9bf/pf4b4Uf2t/9P+T9nb/vayv5u/0Hbz5bvev9f68uz2V7wJ0sv6/6gHFhT/9sOY+eOgIhj5Wxv4BmPnje8Sfo9wxq83vADjhjV5veOPvGAccMavN7wA44Y1xB3wFBNNYDcD7+U9of4oWTAFpeE04TWYKAxHSESkpLgWC7zTuskLJEcFI0BiWHmoVdW584+31Vm+0ttQvk1xd+mMSR4lpptemY2p7ewJwCtxDL5Tqeue0fuQj1PcLvfL4v0ToT+4AVKE1778VUws/MF5hPEfHB+HdDBuHhMMQib4MH4G5yAwpJRgT5GtABlc9jw0qSnhu/QxNgmJ/U3nl2qSTqNYEB8XIODF+TVp5YHWn3NP0WupiY9+X+pRsN6NN1HOTaUCoVYCc4O7jb/ZMbjh0zrUovCPQ34gGw1KyZ/tUJE64DgbhHVGoJR4F3lfL+vTJ96pdqM58JQJn66xrFcBsgMOltJDHl0SxtysuoTFj4Mb9l49qgFJ1xViUlPOz19N9YVQ5QS+ScrnoIRvWNq5ITCEmenPXQ89Kt+JOn5RyRL4gFfB+Nh72yiuvIdX0nm1eb3gBxwxq83vHH3jAOOGNXm94AccMa3boegIp88dARDHytjfwDMfPHQBEAD+/5shM8lTP/rJeH/LOV1O/YNq0NVmOBlAAGlX8udgAAAAC3iKxZ9o4tZ5rImU/j3utsyPpqGck/8jvSvw4MgcVeYUtyEWE2Z/dXc5vAoz8xj4Mr57/RZbEYpvO2mV0gyDhZHh+StT4t05va33Cudnj6vhLNY46Q4q9FIU0YOuolZXazmOX5k6xkoWeBfmtL7Tmh+256zL2UtDpXlXh5KdSUcZzWKcQrWfWMdSZkbRHCDzRddfNLaIheAZS3wu6KrwV9eW/bHJ1874t6eJGSDLA/25sXg15c5yUO/Str6YysRrRAo5R1EMw7DbYgl+L19ZAq9dWKpKL8Lli4MF7CwARq4C9/d8DaIi1hTxU34mcmH5lwpcRWOoJkjnf0g1wVtQGZyvvMhayyPY8Mp2GFnj0Kkuy5g53bdiBFKJmK8kgJ0kXIm73k8AklFPsY99rvHL9JpJGRACXtZxf1ePIDHyjf0Sv71rgkRuDn9coNgRWd7C6fQeJYRbVh8OABLGBnqfMTOohdx9Z3qMAg6n+bZpJ7I9NOtD9atpFjeggljXMaDHAKYbWsKkpQdEe/NPBXBrJnGnN8cvIL0nPL4KToMIcD6ckAlwVcXqysT3vJhBSOz0raSZdFiOR1BcnWzz+kYK7eoQybW+SqogDJ7rPYrBDWLzcCSSIYL5fnJQsXid8JE+nfmAyZTeMwUXvoz4wRfFUo91lqHXaWgHhnkGG20KfTjFWsUjYbSuT/F5H33tzLG/PbF3gifGHVW+Ln5Aq8tcgDcnfOKQ61IU9GYb1bIdB4H3TaP4KlUIt2FezoVfqBmp0tAlXwRqDeTYephft9Fv7HKbdN3Q3AXyj/bJej2m6ZqOyqXzWujhlfEkTBv8Gf//DNt4t6t2KmzMRSi31yoZs8UMiDRp8MOz4x6ptSur6WQgWD6QrY1KHX1slnf1YkF5kb0ugyviSLZ367Crp9xq5AMAKFtBmOcBbwxMUVwUIaFGXekCYY/n6S/w4zHml/boe+grAfOa2WZPTCWCzT7tVk+sncSv6Yz5p2hnyNvbVRWJBjxJ29d+EiA7K33J546Wp3Tp6orS2VpLdxu3lD4hBIfmZzKemzc0WQJFPtfjdZIoG7DpuzZd1Xkv7ePFTnL/r+VYapLNlgvMr247hVuv4Z0I0+YW2n0BHJU96sm47sZEp90j0I4VtkHBWff1jZ2n6DTkUmZeiJERvlTPvLitFefynV+O5ne8tZijaEJKBOve0rC4L83cMwmsgir9YnW6CYAlyr+nZJhuYk9NHSphdEw3995myyJcyjQwW3QYQw0w7sYqe1UcwAJuOKTn2wENTm7wOD3kNQUXX/RYJ+ZFTRVoE8m22YDVRX03VbIg3Ep6X6HqeqHBzqFm8CTpmlfhSqOzOyDdCB8pWLfNa7euvPhW9B0jDnkE0eVUiowThcA6BJUeybEZ7OdolDBuhmto8sTQrZBgDO0dgWyYmYk1rPJq2sgCSKkfmi6KK6uXrGRfMOm78RG4UAAkWmlw3H2x/rkfxCPuUENVyoCBuJtWbEC2BYeJPyFFvkVOobutBrHM56KaN8ZH9WqxzN4RKUqsmGy4QpU1g+vUQJBe9NYHoqLdxY1/0b2VEpUlcJgKGqXhPwqc2V58JBO0HYAJXNz1StwEIQUvvf0YYZ97EGDO+tx1rENn8Jn7Le+AAVl/DpJGk6sekftsXhNYifxKU4S6R/xlotqoIMYGQriaDQbIeUAnz8IP9jeDZi7w/zXyx9YUcN+HLMleii1ZJ2oiK8ieg6OcbuB8UKUHWQN3h4MdNMrn3jYPuQWGmf9zSSPS9z/TyOPD5p9qDdITSqfv5XpNnfW4oTNtsO2Qvgif4sBJpDvmm8xl0xuFQ+Olt4DCI3FoFWJ0HdlKT2b1+x6u1V5Z1IsglRH8V7HGWaWuVFOIlwJbLmgWmmkPaGgiUXJRb8CaPO4TM8YblQs6ryQ0kTrUiLfJkb5l9tgN0dwMvp3LA6qNYUuXClXlcy1ShntOVaILIDVm/remXELiHOTgvEUCga7JjH+Nr0K5a1CDs4WB1I2Nh4NNhUP0VWd1glrJGI9LKc/1q1iI2nnrVF9iAkX55XYN8xGKVjtHDhr6YjtfS5cRE/6v/FxB7eBXYd3zeUzL1ta+zyZxf0Hjn7J7JX7IJ4NsAcPFqCcpCjoOu0Tzpz/5SBkWLLhOG8jx//W9dgXBOiKSScJAOJO0o+ZTaeUimEEcQbzpQK1H03yfexhLaIl2GtL7cf1o0+jtdV2F94bht1BDXXrAq1G3+4MDtnqV2KdD8O10DjOV7SOuTX2Cf6jkP1mk/hrc0HgwL+2VzJxAlRhAmpfZWq2ygKXWEyTdjGqQ66ayzX3u3OVLBWESUSo7chIOCWqrlNb4pjfZ/UkjnQBR/7i0oHtbGHFTqTIVUkaJ2OG5vUaijlntONr5wctXUFBSNqr5OMZYgTGpvRGoTOUQK12z4W6nQBIZJ8Cntq/6s6geyFcEZuMY+sobhTW8fc5ldibe4SQEQzWIobtb8PZDNePRzO4RF6YfboRj8xfnztemyh5TS2ozBM4wNeKZ8FM4HgPBkCZFFPLITjEjy4J0zqYxJDvJpxJPsQ1NoXOmgnwX3TrF5eBaKZcU1W6/GqUXUPuvM+os1i5ZgjbtC8GElZRas64IddUD
vxbnRiixkvpplcnndeaSu/23kby1WofMJFlXnvM0qqnka//t8TtkKfYTo68cBmrZ9HWxOF3JSQ/i5FKaXbjiUD2fwk2ymGR7hEVJVEjc3xK3uWj4+nCacStn/O4AqB7d+r2bVvjQR8XlgcQq1PRUlMX7ovPprE9SbCvMXSVp0z+YdxIeNeMbV9snPaTQH1Rif4OhN8/508RKgqPIasalGklF6Xgr6YZilKbh9+A0PPqVqv+DnIJpgW3D0ujSihmUkHAl8OGecCi+nHXnuouH0dv1R6v8zz8d3lkjWC8RxAv0oXM5Fm3nk+PDaJq2Gpq3sMc0g5JxAHWifVcyPnCnp5r9VFoLqFLVoFGfY3NxJeNC2i8iAZEXg8rg/qRayTJDxw5o5GIBCkYff+u7qGFdlcrLEvmdTaqm5inel7ocyKCYMnCvykR1lGrp0/A10do8YZVHvH/Y0Z7fnxKPZAVinUKp88stiyJBWkGcHQpmKP9C/oHnKbGZ+qB56/XUwr7epwaIIM9/+Yna2CzYIt0ovz+SSmlAela3SXElo+M8PoGhjlShMr9lPITRo5W+p/lle2dYF8jijP818s1l2otlxAbvI5sTDyJ7BvfXz0kcbFpTZ82VyI1vpPqGSK6I9baRMlBcMDSocdHL+pDYf4vCNhqqkQvT6s44MkZ6vAFcp0h9XCP+3UX4G5SVakcvvEmh2nkkpp/kBYfoSgkeXFUONytNRDGtg1uS3Bhyq+/uZayddfOHhzlNowgb+UfDbyJp9sA6kJXfhdpxHk7MEAqDtC8i/meAAAADz+AeB2gJoUCAKvwsN6zwSaD1A952KQAAAAAhAoHcACO2uSej3uZQsuEAAA==) **Importing the Necessary Modules**
###Code
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
**Conversion to a DataFrame**
###Code
a='https://raw.githubusercontent.com/Manikandan-professional/Netflix-Subscriptions/main/Netflix%20subscription%20fee%20Dec-2021.csv'
df=pd.DataFrame(pd.read_csv(a))
df.head()
###Output
_____no_output_____
###Markdown
**Top 10 Country wise Breakdown**
###Code
a1=df.sort_values(['Cost Per Month - Basic ($)'],ascending=False)
a1
for i in range(0,10,1):
print(a1.iloc[i:i+1,:4])
print('*********************************************************************')
###Output
Country_code Country Total Library Size No. of TV Shows
33 li Liechtenstein 3048 1712
*********************************************************************
Country_code Country Total Library Size No. of TV Shows
56 ch Switzerland 5506 3654
*********************************************************************
Country_code Country Total Library Size No. of TV Shows
13 dk Denmark 4558 2978
*********************************************************************
Country_code Country Total Library Size No. of TV Shows
55 se Sweden 4361 2973
*********************************************************************
Country_code Country Total Library Size No. of TV Shows
29 il Israel 5713 3650
*********************************************************************
Country_code Country Total Library Size No. of TV Shows
3 be Belgium 4990 3374
*********************************************************************
Country_code Country Total Library Size No. of TV Shows
17 fr France 5445 3604
*********************************************************************
Country_code Country Total Library Size No. of TV Shows
41 no Norway 4528 2955
*********************************************************************
Country_code Country Total Library Size No. of TV Shows
57 tw Taiwan 5105 3134
*********************************************************************
Country_code Country Total Library Size No. of TV Shows
50 sg Singapore 6303 4109
*********************************************************************
###Markdown
**Least 10 Country wise Breakdown**
###Code
a2=df.sort_values(['Cost Per Month - Basic ($)'],ascending=True)
a2
a2=a2.reset_index()
country=[]
CPM_Basic=[]
CPM_Standard=[]
CPM_Premium=[]
for i in range(0,10,1):
print(a2.iloc[i:i+1,:4])
print('Cost Per Month in the Basic Rental in INR is: ',a2.iloc[i:i+1,6:7]['Cost Per Month - Basic ($)'][i]*72)
CPM_Basic.append(a2.iloc[i:i+1,6:7]['Cost Per Month - Basic ($)'][i]*72)
country.append(a2.iloc[i:i+1,2:3]['Country'][i])
print('Cost Per Month in the Standard Rental in INR is: ',a2.iloc[i:i+1,7:8]['Cost Per Month - Standard ($)'][i]*72)
CPM_Standard.append(a2.iloc[i:i+1,7:8]['Cost Per Month - Standard ($)'][i]*72)
print('Cost Per Month in the Premium Rental in INR is: ',a2.iloc[i:i+1,8:9]['Cost Per Month - Premium ($)'][i]*72)
CPM_Premium.append(a2.iloc[i:i+1,8:9]['Cost Per Month - Premium ($)'][i]*72)
print('*********************************************************************')
country
plt.figure(figsize=(16,8),dpi=80)
ax=plt.axes()
ax.set_facecolor('black')
plt.bar(country,CPM_Basic,color='white')
plt.title('Country Name vs Basic Plan Rates with Bar Graph',color='blue',fontsize=25)
plt.xlabel('Country Name',color='blue',fontsize=20)
plt.ylabel('Rates for Basic Plan',color='blue',fontsize=20)
plt.xticks(rotation=50,color='blue',fontsize=20)
plt.yticks(color='blue',fontsize=20)
plt.show()
a2=df.sort_values(['Cost Per Month - Standard ($)'],ascending=True)
a2
a2=a2.reset_index()
country=[]
CPM_Basic=[]
CPM_Standard=[]
CPM_Premium=[]
for i in range(0,10,1):
print(a2.iloc[i:i+1,:4])
print('Cost Per Month in the Basic Rental in INR is: ',a2.iloc[i:i+1,6:7]['Cost Per Month - Basic ($)'][i]*72)
CPM_Basic.append(a2.iloc[i:i+1,6:7]['Cost Per Month - Basic ($)'][i]*72)
country.append(a2.iloc[i:i+1,2:3]['Country'][i])
print('Cost Per Month in the Standard Rental in INR is: ',a2.iloc[i:i+1,7:8]['Cost Per Month - Standard ($)'][i]*72)
CPM_Standard.append(a2.iloc[i:i+1,7:8]['Cost Per Month - Standard ($)'][i]*72)
print('Cost Per Month in the Premium Rental in INR is: ',a2.iloc[i:i+1,8:9]['Cost Per Month - Premium ($)'][i]*72)
CPM_Premium.append(a2.iloc[i:i+1,8:9]['Cost Per Month - Premium ($)'][i]*72)
print('*********************************************************************')
plt.figure(figsize=(16,8),dpi=80)
ax=plt.axes()
ax.set_facecolor('black')
plt.bar(country,CPM_Standard,color='white')
plt.title('Country Name vs Standard Plan Rates with Bar Graph',color='blue',fontsize=25)
plt.xlabel('Country Name',color='blue',fontsize=20)
plt.ylabel('Rates for Standard Plan',color='blue',fontsize=20)
plt.xticks(rotation=50,color='blue',fontsize=20)
plt.yticks(color='blue',fontsize=20)
plt.show()
a2=df.sort_values(['Cost Per Month - Premium ($)'],ascending=True)
a2
a2=a2.reset_index()
country=[]
CPM_Basic=[]
CPM_Standard=[]
CPM_Premium=[]
for i in range(0,10,1):
print(a2.iloc[i:i+1,:4])
print('Cost Per Month in the Basic Rental in INR is: ',a2.iloc[i:i+1,6:7]['Cost Per Month - Basic ($)'][i]*72)
CPM_Basic.append(a2.iloc[i:i+1,6:7]['Cost Per Month - Basic ($)'][i]*72)
country.append(a2.iloc[i:i+1,2:3]['Country'][i])
print('Cost Per Month in the Standard Rental in INR is: ',a2.iloc[i:i+1,7:8]['Cost Per Month - Standard ($)'][i]*72)
CPM_Standard.append(a2.iloc[i:i+1,7:8]['Cost Per Month - Standard ($)'][i]*72)
print('Cost Per Month in the Premium Rental in INR is: ',a2.iloc[i:i+1,8:9]['Cost Per Month - Premium ($)'][i]*72)
CPM_Premium.append(a2.iloc[i:i+1,8:9]['Cost Per Month - Premium ($)'][i]*72)
print('*********************************************************************')
plt.figure(figsize=(16,8),dpi=80)
ax=plt.axes()
ax.set_facecolor('black')
plt.bar(country,CPM_Premium,color='white')
plt.title('Country Name vs Premium Plan Rates with Bar Graph',color='blue',fontsize=25)
plt.xlabel('Country Name',color='blue',fontsize=20)
plt.ylabel('Rates for Premium Plan',color='blue',fontsize=20)
plt.xticks(rotation=50,color='blue',fontsize=20)
plt.yticks(color='blue',fontsize=20)
plt.show()
###Output
_____no_output_____
###Markdown
**Calculating the Average Price of the Subscription Plans**
###Code
df['Average Plan']=(df['Cost Per Month - Basic ($)']+df['Cost Per Month - Premium ($)']+df['Cost Per Month - Standard ($)'])/3
###Output
_____no_output_____
###Markdown
**Top 10 Countries by Netflix Average Subscription Price, from Low to High**
###Code
a3=df.sort_values(['Average Plan'])
a3.head(10)
###Output
_____no_output_____
###Markdown
**Pictorial Representation of the Top 10 Average Prices, from Low to High**
###Code
a3=a3.reset_index()
countrys=[]
INR=[]
for i in range(0,10,1):
countrys.append(a3.iloc[i:i+1,2:3]['Country'][i])
INR.append(a3.iloc[i:i+1,9:10]['Average Plan'][i])
plt.figure(figsize=(16,8),dpi=80)
ax=plt.axes()
ax.set_facecolor('black')
plt.bar(countrys,INR,color='white',width=0.5)
plt.title('Country Name vs Average Plan Rates with Bar Graph',color='blue',fontsize=25)
plt.xlabel('Country Name',color='blue',fontsize=20)
plt.ylabel('Rates for Average Plan',color='blue',fontsize=20)
plt.xticks(rotation=50,color='blue',fontsize=20)
plt.yticks(color='blue',fontsize=20)
plt.show()
###Output
_____no_output_____ |
Jupyter_Notes/.ipynb_checkpoints/Lecture08_Sec3-1_DefDet-checkpoint.ipynb | ###Markdown
Section 3.1 $\quad$ Definition of Determinants Definition of Permutation >Let $S=\{1,2,\cdots,n\}$ be the set of integers from $1$ to $n$, arranged in ascending order. We can consider a permutation of $S$ to be a one-to-one mapping of $S$ to itself. For example, let $S=\{1,2,3,4\}$ **Question:** How many permutations does $S=\{1,2,\cdots, n\}$ have? Example 1 Let $S=\{1,2,3\}$. Find all permutations of $S$. A permutation $j_1j_2\cdots j_n$ is said to have an **inversion** if A permutation is called **even** if A permutation is called **odd** if Example 2 - $S_1$ - $S_2$ - The permutation $4312$ is in $S_4$ - $S_3$ Definition of Determinant >Let $A=[a_{ij}]$ be an $n\times n$ matrix. The **determinant** function, **Remark** Example 3 Calculate $\det(A)$ if\begin{equation*}A = \left[ \begin{array}{cc} a_{11} & a_{12} \\ a_{21} & a_{22} \\ \end{array}\right]\end{equation*}
###Code
from sympy import *
a11, a12, a21, a22 = symbols('a11 a12 a21 a22');
A = Matrix([[a11, a12], [a21, a22]]);
A.det()
###Output
_____no_output_____
###Markdown
Example 4 Calculate $\det(A)$ if\begin{equation*}A = \left[ \begin{array}{ccc} a_{11} & a_{12} & a_{13}\\ a_{21} & a_{22} & a_{23}\\ a_{31} & a_{32} & a_{33}\\ \end{array}\right]\end{equation*}
###Code
from sympy import *
a11, a12, a13, a21, a22, a23, a31, a32, a33 = symbols('a11, a12, a13, a21, a22, a23, a31, a32, a33');
A = Matrix([[a11, a12, a13], [a21, a22, a23], [a31, a32, a33]]);
A.det()
###Output
_____no_output_____
###Markdown
Another way to calculate $det(A)$, $\underline{\text{if $A$ is a $3\times 3$ matrix}}$: Example 5 Compute the determinant\begin{equation*}\det\left(\left[ \begin{array}{ccc} 1 & 2 & 3\\ 2 & 1 & 3\\ 3 & 1 & 2 \end{array}\right]\right)= \qquad \qquad \qquad \qquad \qquad \qquad \qquad\end{equation*}
###Code
from sympy import *
A = Matrix([[1, 2, 3], [2, 1, 3], [3, 1, 2]]);
A.det()
###Output
_____no_output_____ |
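###Markdown
 Added sketch (not part of the original lecture): using only Python's `itertools`, the cell below computes the determinant of the matrix in Example 5 directly from the permutation definition, summing $\pm a_{1j_1}a_{2j_2}a_{3j_3}$ over all permutations $j_1j_2j_3$, with the sign given by the parity of the number of inversions. It should agree with the `sympy` result above.
###Code
from itertools import permutations
def sign(p):
    # (-1) raised to the number of inversions in the permutation p
    inversions = sum(1 for a in range(len(p)) for b in range(a + 1, len(p)) if p[a] > p[b])
    return -1 if inversions % 2 else 1
M = [[1, 2, 3], [2, 1, 3], [3, 1, 2]]
det = 0
for p in permutations(range(3)):
    term = sign(p)
    for i in range(3):
        term *= M[i][p[i]]
    det += term
det
###Output
_____no_output_____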
StudyNotesOfML/4. Backforward /.ipynb_checkpoints/Optimize the training of CNN -checkpoint.ipynb | ###Markdown
Try to use GPU 1 Import packages
###Code
import numpy
import minpy.numpy
import cupy
import pandas
import matplotlib.pyplot as plt
import random
from scipy.io import loadmat
from scipy.optimize import minimize
from sklearn.preprocessing import OneHotEncoder
###Output
W1024 19:23:37 2009 minpy.dispatch.registry:register:47] Type MXNet for name reshape has already existed
###Markdown
2 Choose whether to use gpu
###Code
np = numpy # Only use cpu
###Output
_____no_output_____
###Markdown
3 Determine the network structure
###Code
num_units = 5 # the CNN ' size
inrow, incol = 20, 20 # input size is (20, 20)
krow, kcol = 5, 5 # the filtter size is (5, 5)
crow, ccol = inrow - krow + 1, incol - kcol + 1 # the convolution result's size is (16, 16)
pfrow, pfcol = 2, 2 # the pooling fillters' size is (2, 2)
prow, pcol = crow // pfrow, ccol // pfcol # the pooling results' size is (8, 8)
output_size = 10
weights_size = (krow * kcol + 1 +# w and b of convolution layer
prow * pcol * output_size) * num_units + output_size # w of output layer
params = (np.random.random(size=weights_size) - 0.5) * 0.25 # all weights
params.shape
###Output
_____no_output_____
###Markdown
4 Initialize the data set
###Code
data = loadmat("ex4data1.mat")
X = data["X"]
m = X.shape[0]
X = X.reshape((m, inrow, incol))
y = data["y"]
training_set_scale = 0.7
tr_m = int(m * training_set_scale)
tr_X = np.array(X[:tr_m])
ts_m = m - tr_m
ts_X = np.array(X[tr_m:])
onehot_encoder = OneHotEncoder(sparse=False, categories="auto")
y_onehot = onehot_encoder.fit_transform(y)
tr_y = np.array(y_onehot[:tr_m]).reshape((tr_m, output_size, 1))
ts_y = np.array(y[tr_m:])
tr_X.shape, tr_y.shape, ts_X.shape, ts_y.shape
###Output
_____no_output_____
###Markdown
5 Initialize the weights
###Code
weights = (np.random.random(size=weights_size) - 0.5) * 0.25
weights.shape
###Output
_____no_output_____
###Markdown
6 Encode and decode weights
###Code
def encode(theta1, b1, theta2, b2):
return np.concatenate((theta1.ravel(), b1.ravel(), theta2.ravel(), b2.ravel()))
def decode(weights, num_units, krow, kcol, prow, pcol, output_size):
theta1 = weights[:num_units*krow*kcol].reshape((num_units, krow, kcol))
b1 = weights[num_units*krow*kcol:num_units*krow*kcol+num_units].reshape((num_units, 1))
theta2 = weights[num_units*krow*kcol+num_units:
-output_size].reshape((num_units, prow, pcol, output_size))
b2 = weights[-output_size:].reshape((output_size, 1))
return theta1, b1, theta2, b2
theta1, b1, theta2, b2 = decode(weights, num_units, krow, kcol, prow, pcol, output_size)
theta1.shape, b1.shape, theta2.shape, b2.shape
encode(theta1, b1, theta2, b2).shape
theta1.size + b1.size + theta2.size + b2.size
###Output
_____no_output_____
###Markdown
7 Convolution
###Code
def convolution(X, w, krow, kcol, crow, ccol):
    res = np.zeros((crow, ccol))
    for i in range(crow):
        for j in range(ccol):
            # sum of the element-wise product of the filter and the current window
            res[i, j] = np.sum(w * X[i:i+krow, j:j+kcol])
    return res # (16, 16)
a = convolution(tr_X[0], theta1[0], krow, kcol, crow, ccol)
a.shape
###Output
_____no_output_____
###Markdown
8 Pooling
###Code
def maxPooling(conv, crow, ccol, pfrow, pfcol, prow, pcol):
    res = np.zeros((prow, pcol))
    grad = np.zeros((crow, ccol))  # mask of where each window maximum came from
    for i in range(0, crow, pfrow):
        for j in range(0, ccol, pfcol):
            window = conv[i:i+pfrow, j:j+pfcol]  # restrict to the pooling window
            res[i//pfrow, j//pfcol] = np.max(window)
            idx = np.argmax(window)
            grad[i + idx//pfcol, j + idx%pfcol] = 1
    return res, grad
###Output
_____no_output_____
###Markdown
9 Sigmod
###Code
from scipy.special import expit
sigmod = expit
###Output
_____no_output_____
###Markdown
10 Forward propagate
###Code
def forwardPropagate(X, theta1, b1, theta2, b2, num_units, krow, kcol,
crow, ccol, prow, pcol, output_size):
a1 = X # (20, 20)
z2 = np.zeros((num_units, crow, ccol)) # (5, 16, 16)
a2 = z2.copy() # (5, 16, 16)
pooling_grad = z2.copy() # (5, 16, 16)
a3 = np.zeros((num_units, prow, pcol)) # (5, 8, 8)
z4 = np.zeros((output_size, 1)) # (10, 1)
a4 = z4.copy() # (10, 1)
for i in range(num_units):
z2[i] = convolution(X, theta1[i], krow, kcol,
crow, ccol) + b1[i] # (16, 16)
a2 = sigmod(z2) # (5, 16, 16)
for i in range(num_units):
a3[i], pooling_grad[i] = maxPooling(a2[i], crow, ccol, pfrow, pfcol, prow, pcol)
for i in range(output_size):
z4[i] += np.sum(theta2[:,:,:,i] * a3)
z4 += b2
a4 = sigmod(z4)
return a1, z2, a2, pooling_grad, a3, z4, a4
%%time
for i in range(1000):
a = forwardPropagate(X[0], theta1, b1, theta2, b2, num_units, krow, kcol,
crow, ccol, prow, pcol, output_size)
###Output
CPU times: user 3.92 s, sys: 0 ns, total: 3.92 s
Wall time: 3.91 s
###Markdown
11 Predict
###Code
def predict(X, theta1, b1, theta2, b2, num_units, krow, kcol,
crow, ccol, prow, pcol, output_size):
*t, h = forwardPropagate(X, theta1, b1, theta2, b2, num_units, krow, kcol,
crow, ccol, prow, pcol, output_size)
return np.argmax(h) + 1
###Output
_____no_output_____
###Markdown
12 Compute accuracy
###Code
def computeAccuracy(X, y, theta1, b1, theta2, b2, num_units, krow, kcol,
crow, ccol, prow, pcol, output_size):
m = X.shape[0]
correct = 0
for i in range(m):
ans = predict(X[i], theta1, b1, theta2, b2, num_units, krow, kcol,
crow, ccol, prow, pcol, output_size)
correct += ans == y[i]
return f"m:{m} correct:{correct} accuracy:{100 * correct / m}%"
#computeAccuracy(X, y, theta1, b1, theta2, b2, num_units, krow, kcol,
# crow, ccol, prow, pcol, output_size)
###Output
_____no_output_____
###Markdown
The accuracy on the full data set
###Code
%%time
computeAccuracy(X, y, theta1, b1, theta2, b2, num_units, krow, kcol,
crow, ccol, prow, pcol, output_size)
###Output
CPU times: user 19.5 s, sys: 7 ms, total: 19.5 s
Wall time: 19.5 s
###Markdown
13 Sigmod gradient
###Code
def sigmodGradient(z):
t = expit(z)
return t * (1 - t)
###Output
_____no_output_____
###Markdown
14 Backpropagation
###Code
def backPropagate(weights, X, num_units, krow, kcol,
crow, ccol, pfrow, pfcol, prow, pcol, output_size, lam=0.):
m = X.shape[0]
theta1, b1, theta2, b2 = decode(weights, num_units,
krow, kcol, prow, pcol, output_size)
J = 0.
theta1_grad = np.zeros(theta1.shape) # (5, 5, 5)
b1_grad = np.zeros(b1.shape) # (5, 1)
theta2_grad = np.zeros(theta2.shape) # (5, 8, 10, 10)
b2_grad = np.zeros(b2.shape) # (10, 1)
for i in range(m):
a1, z2, a2, pooling_grad, a3, z4, a4 = forwardPropagate(X[i],
theta1, b1, theta2, b2, num_units, krow, kcol,
crow, ccol, prow, pcol, output_size)
        # Use the one-hot encoded targets (tr_y), not the raw integer labels in y,
        # so the cross-entropy term broadcasts correctly against a4 (10, 1)
        J += -np.sum(tr_y[i] * np.log(a4) +
                     (1 - tr_y[i]) * np.log(1 - a4)) # cost
        dt2 = a4 - tr_y[i] # (10, 1)
b2_grad += dt2 # (10, 1)
temp = dt2.reshape((1, 1, 1, output_size))
theta2_grad += a3.reshape((*a3.shape, 1)) * temp # (5, 8, 8, 10)
temp2 = theta2 * temp # (5, 8, 8, 10)
temp3 = np.zeros((num_units, crow, ccol)) # (5, 16, 16)
for j in range(num_units): #
for p in range(0, crow, pfrow):
for q in range(0, ccol, pfcol):
val = np.sum(temp2[j,p//pfcol,q//pfcol])
for p1 in range(pfrow):
for q1 in range(pfcol):
temp3[j,p+p1,q+q1] = val
dt1 = temp3 * pooling_grad * a2 * (1 - a2) # (5, 16, 16)
for j in range(num_units):
b1_grad[j] = np.sum(dt1[j])
for p in range(krow):
for q in range(kcol):
theta1_grad[j,p,q] += np.sum(dt1[j] * a1[p:p+crow,q:q+ccol])
J /= m
theta1_grad /= m
b1_grad /= m
theta2_grad /=m
b2_grad /= m
#Regulation
J += (float(lam) / (2 * m)) * (np.sum(theta1 ** 2) + np.sum(theta2 ** 2))
theta1_grad += theta1 * lam / m
theta2_grad += theta2 * lam / m
    # return the gradients (not the weights themselves) alongside the cost
    return J, encode(theta1_grad, b1_grad, theta2_grad, b2_grad)
%%time
J, grad = backPropagate(weights,tr_X[:5], num_units, krow, kcol,
crow, ccol, pfrow, pfcol, prow, pcol, output_size)
###Output
CPU times: user 46.9 ms, sys: 0 ns, total: 46.9 ms
Wall time: 44.5 ms
###Markdown
15 Gradient checking
###Code
def checkGradient(weights, X, num_units, krow, kcol,
                  crow, ccol, pfrow, pfcol, prow, pcol, output_size, lam=0.):
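    # Added sketch (not from the original notebook): numerical gradient checking.
    # Assumes backPropagate returns (cost, flattened gradient); compares the analytic
    # gradient against a centered finite difference for a few randomly chosen weights.
    eps = 1e-4
    _, grad = backPropagate(weights, X, num_units, krow, kcol,
                            crow, ccol, pfrow, pfcol, prow, pcol, output_size, lam)
    for idx in np.random.choice(weights.size, 5, replace=False):
        w_plus, w_minus = weights.copy(), weights.copy()
        w_plus[idx] += eps
        w_minus[idx] -= eps
        J_plus, _ = backPropagate(w_plus, X, num_units, krow, kcol,
                                  crow, ccol, pfrow, pfcol, prow, pcol, output_size, lam)
        J_minus, _ = backPropagate(w_minus, X, num_units, krow, kcol,
                                   crow, ccol, pfrow, pfcol, prow, pcol, output_size, lam)
        num_grad = (J_plus - J_minus) / (2 * eps)
        print(f"weight {idx}: analytic {grad[idx]:.6e}  numerical {num_grad:.6e}")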
###Output
_____no_output_____ |
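###Markdown
 16 Training (added sketch) The cell below is not part of the original notebook: it is a minimal batch gradient-descent loop built on `backPropagate`. The learning rate, subset size and iteration count are arbitrary illustrative choices, and the pure-Python loops make each iteration slow.
###Code
# Plain gradient descent on a small subset of the training data (illustrative only)
alpha = 0.1            # learning rate (arbitrary choice)
subset = tr_X[:50]     # keep each iteration cheap
for it in range(10):
    J, grad = backPropagate(weights, subset, num_units, krow, kcol,
                            crow, ccol, pfrow, pfcol, prow, pcol, output_size)
    weights = weights - alpha * grad
    print(f"iteration {it}: cost {J:.4f}")
###Output
_____no_output_____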
starter_code/WeatherPy-SP.ipynb | ###Markdown
WeatherPy - Conclusions The data suggests the following: 1. There is a stronger correlation between latitudes and temperatures in the northern hemisphere compared to the southern hemisphere. Caution is warranted in drawing this conclusion as it may be biased based on the number of southern hemisphere cities that our program randomly drew. 2. It appears as though latitude has little to no impact on wind speed across all cities sampled. 3. It appears that cloudiness is not significantly impacted by either latitude or temperature. --- Code
###Code
pip install citipy
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import time
from scipy.stats import linregress
import json
from pprint import pprint
import scipy.stats as stats
# Import API key
from config import weather_api_key
# Incorporated citipy to determine city based on latitude and longitude
from citipy import citipy
# Output File (CSV)
output_data_file = "output_data/cities.csv"
# Range of latitudes and longitudes
lat_range = (-90, 90)
lng_range = (-180, 180)
###Output
_____no_output_____
###Markdown
Generate Cities List
###Code
# List for holding lat_lngs and cities
lat_lngs = []
cities = []
# Create a set of random lat and lng combinations
lats = np.random.uniform(lat_range[0], lat_range[1], size=1500)
lngs = np.random.uniform(lng_range[0], lng_range[1], size=1500)
lat_lngs = zip(lats, lngs)
# Identify nearest city for each lat, lng combination
for lat_lng in lat_lngs:
city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name
# If the city is unique, then add it to a our cities list
if city not in cities:
cities.append(city)
# Print the city count to confirm sufficient count
len(cities)
cities_df = pd.DataFrame(data=cities)
cities_df
###Output
_____no_output_____
###Markdown
Perform API Calls* Perform a weather check on each city using a series of successive API calls.* Include a print log of each city as it's being processed (with the city number and city name).
###Code
# list of cities = cities
# creating empty lists to be appended into
names =[]
countries = []
latitudes = []
longitudes = []
temperatures = []
humidities = []
wind_speeds = []
cloudiness = []
# creating record counter variable for print functions
record = 0
base_url = "http://api.openweathermap.org/data/2.5/weather?"
print(f"Beginning Data Retrieval\n-------------------------")
for city in cities:
params= {"q":city, "appid":weather_api_key, "units":"imperial"}
weather_response = requests.get(url=base_url, params=params)
weather_json = weather_response.json()
try:
names.append(weather_json["name"])
countries.append(weather_json["sys"]["country"])
latitudes.append(weather_json["coord"]["lat"])
longitudes.append(weather_json["coord"]["lon"])
temperatures.append(weather_json["main"]["temp"])
humidities.append(weather_json["main"]["humidity"])
cloudiness.append(weather_json["clouds"]["all"]) # gives the % of cloudiness reading
wind_speeds.append(weather_json["wind"]["speed"])
record = record + 1
print(f"Processing Record {record} of Set | {city}")
except:
print(f"City {city} not found. Skipping...")
print(f"-----------------------------\nData Retrieval Complete\n-----------------------------")
###Output
Beginning Data Retrieval
-------------------------
City taolanaro not found. Skipping...
Processing Record 1 of Set | mar del plata
Processing Record 2 of Set | tuktoyaktuk
Processing Record 3 of Set | verkh-usugli
Processing Record 4 of Set | hilo
Processing Record 5 of Set | klaksvik
Processing Record 6 of Set | christchurch
Processing Record 7 of Set | batagay-alyta
Processing Record 8 of Set | deputatskiy
Processing Record 9 of Set | puerto ayora
Processing Record 10 of Set | bredasdorp
Processing Record 11 of Set | vao
Processing Record 12 of Set | cap malheureux
Processing Record 13 of Set | eenhana
Processing Record 14 of Set | ribeira grande
Processing Record 15 of Set | lander
Processing Record 16 of Set | rikitea
Processing Record 17 of Set | kirakira
City belushya guba not found. Skipping...
Processing Record 18 of Set | barrow
City sug-aksy not found. Skipping...
City attawapiskat not found. Skipping...
Processing Record 19 of Set | georgetown
Processing Record 20 of Set | punta arenas
Processing Record 21 of Set | ponta do sol
Processing Record 22 of Set | jamestown
Processing Record 23 of Set | key largo
Processing Record 24 of Set | ostersund
Processing Record 25 of Set | mayumba
Processing Record 26 of Set | karanpura
Processing Record 27 of Set | kendari
Processing Record 28 of Set | albany
Processing Record 29 of Set | marawi
Processing Record 30 of Set | hithadhoo
Processing Record 31 of Set | aklavik
Processing Record 32 of Set | haines junction
Processing Record 33 of Set | kaeo
Processing Record 34 of Set | upernavik
Processing Record 35 of Set | gwanda
Processing Record 36 of Set | butaritari
Processing Record 37 of Set | bethel
Processing Record 38 of Set | la asuncion
Processing Record 39 of Set | saint-philippe
Processing Record 40 of Set | busselton
Processing Record 41 of Set | broome
Processing Record 42 of Set | alofi
Processing Record 43 of Set | poum
City kuche not found. Skipping...
Processing Record 44 of Set | fredericksburg
Processing Record 45 of Set | airai
Processing Record 46 of Set | castro
Processing Record 47 of Set | lorengau
Processing Record 48 of Set | nizwa
Processing Record 49 of Set | hailun
Processing Record 50 of Set | pangnirtung
Processing Record 51 of Set | faya
Processing Record 52 of Set | coihaique
Processing Record 53 of Set | nikolskoye
Processing Record 54 of Set | saskylakh
Processing Record 55 of Set | torbay
Processing Record 56 of Set | avarua
Processing Record 57 of Set | cape town
Processing Record 58 of Set | ushuaia
Processing Record 59 of Set | san policarpo
Processing Record 60 of Set | hermanus
Processing Record 61 of Set | chalons-en-champagne
Processing Record 62 of Set | pevek
Processing Record 63 of Set | san cristobal
Processing Record 64 of Set | baturaja
Processing Record 65 of Set | new norfolk
Processing Record 66 of Set | leh
Processing Record 67 of Set | beringovskiy
Processing Record 68 of Set | tanggu
Processing Record 69 of Set | faanui
Processing Record 70 of Set | kidal
Processing Record 71 of Set | tasiilaq
Processing Record 72 of Set | at-bashi
Processing Record 73 of Set | kamenskiy
Processing Record 74 of Set | hambantota
Processing Record 75 of Set | dauphin
Processing Record 76 of Set | coquimbo
Processing Record 77 of Set | los llanos de aridane
City grand centre not found. Skipping...
City vaitupu not found. Skipping...
Processing Record 78 of Set | kapaa
City tumannyy not found. Skipping...
Processing Record 79 of Set | santa fe
Processing Record 80 of Set | hay river
Processing Record 81 of Set | constitucion
Processing Record 82 of Set | sultanpur
Processing Record 83 of Set | coahuayana
Processing Record 84 of Set | sao filipe
Processing Record 85 of Set | ucluelet
Processing Record 86 of Set | khatanga
Processing Record 87 of Set | vaini
Processing Record 88 of Set | longyearbyen
Processing Record 89 of Set | souillac
Processing Record 90 of Set | kiunga
Processing Record 91 of Set | lyndon
Processing Record 92 of Set | atar
Processing Record 93 of Set | mahebourg
Processing Record 94 of Set | saint-joseph
City palabuhanratu not found. Skipping...
Processing Record 95 of Set | cherskiy
Processing Record 96 of Set | kenai
Processing Record 97 of Set | salalah
Processing Record 98 of Set | east london
Processing Record 99 of Set | san patricio
City jiddah not found. Skipping...
Processing Record 100 of Set | soe
Processing Record 101 of Set | honningsvag
Processing Record 102 of Set | atuona
Processing Record 103 of Set | tabou
Processing Record 104 of Set | hasaki
Processing Record 105 of Set | chokurdakh
Processing Record 106 of Set | hualmay
Processing Record 107 of Set | carnarvon
Processing Record 108 of Set | touros
Processing Record 109 of Set | harrison
Processing Record 110 of Set | saurimo
Processing Record 111 of Set | mujiayingzi
Processing Record 112 of Set | boyolangu
Processing Record 113 of Set | tautira
Processing Record 114 of Set | tottori
City mys shmidta not found. Skipping...
Processing Record 115 of Set | fairbanks
Processing Record 116 of Set | maceio
Processing Record 117 of Set | mount gambier
Processing Record 118 of Set | meulaboh
Processing Record 119 of Set | hobart
Processing Record 120 of Set | lamar
Processing Record 121 of Set | bengkulu
Processing Record 122 of Set | laguna
City halalo not found. Skipping...
Processing Record 123 of Set | novikovo
Processing Record 124 of Set | pahrump
Processing Record 125 of Set | puno
Processing Record 126 of Set | qurayyat
Processing Record 127 of Set | walvis bay
Processing Record 128 of Set | altagracia de orituco
Processing Record 129 of Set | waddan
Processing Record 130 of Set | hami
Processing Record 131 of Set | eureka
Processing Record 132 of Set | pundaguitan
Processing Record 133 of Set | guatire
Processing Record 134 of Set | sinnamary
Processing Record 135 of Set | iqaluit
Processing Record 136 of Set | shingu
City barentsburg not found. Skipping...
Processing Record 137 of Set | saint-augustin
Processing Record 138 of Set | mwingi
Processing Record 139 of Set | yulara
Processing Record 140 of Set | sault sainte marie
Processing Record 141 of Set | la ciotat
Processing Record 142 of Set | camacha
Processing Record 143 of Set | olinda
City maghama not found. Skipping...
Processing Record 144 of Set | clyde river
Processing Record 145 of Set | bluff
Processing Record 146 of Set | santa marta
Processing Record 147 of Set | gravdal
Processing Record 148 of Set | chuy
Processing Record 149 of Set | sola
Processing Record 150 of Set | alakurtti
Processing Record 151 of Set | thompson
Processing Record 152 of Set | sidi ali
Processing Record 153 of Set | luwuk
Processing Record 154 of Set | bambous virieux
Processing Record 155 of Set | moroni
Processing Record 156 of Set | codrington
Processing Record 157 of Set | neuquen
Processing Record 158 of Set | tiznit
Processing Record 159 of Set | geraldton
Processing Record 160 of Set | sabha
Processing Record 161 of Set | pisco
Processing Record 162 of Set | saint-pierre
Processing Record 163 of Set | sambava
Processing Record 164 of Set | taitung
Processing Record 165 of Set | anadyr
Processing Record 166 of Set | chapais
Processing Record 167 of Set | vestmanna
Processing Record 168 of Set | cidreira
Processing Record 169 of Set | la ronge
Processing Record 170 of Set | bulungu
Processing Record 171 of Set | san quintin
Processing Record 172 of Set | qaanaaq
Processing Record 173 of Set | coos bay
Processing Record 174 of Set | klaeng
Processing Record 175 of Set | caravelas
Processing Record 176 of Set | puerto narino
Processing Record 177 of Set | bilma
Processing Record 178 of Set | sukhumi
Processing Record 179 of Set | teahupoo
Processing Record 180 of Set | port hardy
City illoqqortoormiut not found. Skipping...
Processing Record 181 of Set | shiyan
Processing Record 182 of Set | margate
Processing Record 183 of Set | nelson bay
Processing Record 184 of Set | siirt
Processing Record 185 of Set | palo alto
Processing Record 186 of Set | leshan
Processing Record 187 of Set | mataundh
Processing Record 188 of Set | vila velha
Processing Record 189 of Set | dno
Processing Record 190 of Set | dombarovskiy
Processing Record 191 of Set | turan
Processing Record 192 of Set | roebourne
Processing Record 193 of Set | port alfred
Processing Record 194 of Set | salamiyah
Processing Record 195 of Set | flin flon
Processing Record 196 of Set | sobolevo
Processing Record 197 of Set | bako
Processing Record 198 of Set | nome
Processing Record 199 of Set | port elizabeth
Processing Record 200 of Set | sioux lookout
Processing Record 201 of Set | grafton
Processing Record 202 of Set | loa janan
Processing Record 203 of Set | mehamn
Processing Record 204 of Set | yellowknife
Processing Record 205 of Set | araouane
Processing Record 206 of Set | noyabrsk
Processing Record 207 of Set | senador jose porfirio
Processing Record 208 of Set | mataura
Processing Record 209 of Set | sherlovaya gora
Processing Record 210 of Set | vestmannaeyjar
Processing Record 211 of Set | havre-saint-pierre
Processing Record 212 of Set | iskateley
City nizhneyansk not found. Skipping...
Processing Record 213 of Set | jadu
Processing Record 214 of Set | dudinka
Processing Record 215 of Set | thinadhoo
Processing Record 216 of Set | pitimbu
Processing Record 217 of Set | betera
Processing Record 218 of Set | griffith
Processing Record 219 of Set | galle
Processing Record 220 of Set | lusambo
Processing Record 221 of Set | lleida
Processing Record 222 of Set | sangar
Processing Record 223 of Set | vanimo
Processing Record 224 of Set | la union
Processing Record 225 of Set | sitka
City chagda not found. Skipping...
Processing Record 226 of Set | lebu
Processing Record 227 of Set | esperance
Processing Record 228 of Set | maravilha
Processing Record 229 of Set | kahului
Processing Record 230 of Set | guozhen
Processing Record 231 of Set | morozovsk
Processing Record 232 of Set | barguzin
Processing Record 233 of Set | neiafu
Processing Record 234 of Set | rawson
Processing Record 235 of Set | mumias
Processing Record 236 of Set | hokitika
Processing Record 237 of Set | san miguel
City lolua not found. Skipping...
Processing Record 238 of Set | edgewater
Processing Record 239 of Set | nemuro
Processing Record 240 of Set | komsomolskiy
City marcona not found. Skipping...
Processing Record 241 of Set | pandan
Processing Record 242 of Set | royan
City louisbourg not found. Skipping...
Processing Record 243 of Set | okha
Processing Record 244 of Set | karabash
Processing Record 245 of Set | sisimiut
Processing Record 246 of Set | angoche
Processing Record 247 of Set | ancud
Processing Record 248 of Set | sao joao da barra
Processing Record 249 of Set | dikson
Processing Record 250 of Set | hobyo
Processing Record 251 of Set | quimper
Processing Record 252 of Set | sur
Processing Record 253 of Set | bosconia
Processing Record 254 of Set | nantucket
Processing Record 255 of Set | sinnar
Processing Record 256 of Set | guerrero negro
Processing Record 257 of Set | paola
Processing Record 258 of Set | bystryy istok
Processing Record 259 of Set | kamenka
City grand river south east not found. Skipping...
Processing Record 260 of Set | green river
Processing Record 261 of Set | kavieng
Processing Record 262 of Set | rondonopolis
Processing Record 263 of Set | bull savanna
Processing Record 264 of Set | blagoyevo
Processing Record 265 of Set | zunyi
Processing Record 266 of Set | raga
Processing Record 267 of Set | portland
Processing Record 268 of Set | dali
Processing Record 269 of Set | jintur
Processing Record 270 of Set | loiza
City amderma not found. Skipping...
Processing Record 271 of Set | bathsheba
Processing Record 272 of Set | jalu
Processing Record 273 of Set | luderitz
Processing Record 274 of Set | raudeberg
Processing Record 275 of Set | lasa
Processing Record 276 of Set | kutum
Processing Record 277 of Set | vuktyl
Processing Record 278 of Set | dhidhdhoo
Processing Record 279 of Set | tharad
City dolbeau not found. Skipping...
Processing Record 280 of Set | yenagoa
Processing Record 281 of Set | norman wells
Processing Record 282 of Set | sakakah
Processing Record 283 of Set | karasjok
Processing Record 284 of Set | saldanha
City samusu not found. Skipping...
Processing Record 285 of Set | tonota
Processing Record 286 of Set | yambio
Processing Record 287 of Set | launceston
Processing Record 288 of Set | honiara
Processing Record 289 of Set | the pas
Processing Record 290 of Set | bartica
Processing Record 291 of Set | kaitangata
Processing Record 292 of Set | saint george
City solovetskiy not found. Skipping...
Processing Record 293 of Set | voyvozh
Processing Record 294 of Set | dondo
Processing Record 295 of Set | paamiut
Processing Record 296 of Set | touba
Processing Record 297 of Set | huntsville
Processing Record 298 of Set | vila franca do campo
City solsvik not found. Skipping...
Processing Record 299 of Set | niamey
Processing Record 300 of Set | hastings
Processing Record 301 of Set | svetlogorsk
Processing Record 302 of Set | provideniya
Processing Record 303 of Set | comodoro rivadavia
Processing Record 304 of Set | ondjiva
Processing Record 305 of Set | victoria
Processing Record 306 of Set | rockhampton
Processing Record 307 of Set | cabo san lucas
Processing Record 308 of Set | changji
Processing Record 309 of Set | ginda
Processing Record 310 of Set | marsabit
Processing Record 311 of Set | quatre cocos
Processing Record 312 of Set | olafsvik
Processing Record 313 of Set | isangel
Processing Record 314 of Set | talnakh
City falealupo not found. Skipping...
Processing Record 315 of Set | marquette
Processing Record 316 of Set | half moon bay
Processing Record 317 of Set | krasnyy bor
Processing Record 318 of Set | claresholm
Processing Record 319 of Set | sheffield lake
City kamenskoye not found. Skipping...
Processing Record 320 of Set | krasnoselkup
Processing Record 321 of Set | healdsburg
Processing Record 322 of Set | huarmey
Processing Record 323 of Set | karratha
Processing Record 324 of Set | kattivakkam
Processing Record 325 of Set | belaya gora
Processing Record 326 of Set | sayyan
Processing Record 327 of Set | beroroha
Processing Record 328 of Set | samarai
Processing Record 329 of Set | arraial do cabo
Processing Record 330 of Set | srikakulam
Processing Record 331 of Set | saint paul
Processing Record 332 of Set | alekseyevsk
Processing Record 333 of Set | harnosand
City masandra not found. Skipping...
Processing Record 334 of Set | dobroye
Processing Record 335 of Set | vila
Processing Record 336 of Set | kutina
Processing Record 337 of Set | marienburg
City phan rang not found. Skipping...
Processing Record 338 of Set | brae
Processing Record 339 of Set | xucheng
Processing Record 340 of Set | abengourou
Processing Record 341 of Set | yerbogachen
Processing Record 342 of Set | okhotsk
Processing Record 343 of Set | goito
Processing Record 344 of Set | pelym
Processing Record 345 of Set | agadez
Processing Record 346 of Set | yarada
Processing Record 347 of Set | tuatapere
Processing Record 348 of Set | severo-kurilsk
Processing Record 349 of Set | hailey
Processing Record 350 of Set | bhatkal
Processing Record 351 of Set | mackay
Processing Record 352 of Set | te anau
Processing Record 353 of Set | paracuru
Processing Record 354 of Set | srednekolymsk
Processing Record 355 of Set | ossora
Processing Record 356 of Set | baghdad
Processing Record 357 of Set | slonim
Processing Record 358 of Set | bubaque
Processing Record 359 of Set | nakhon nayok
City aflu not found. Skipping...
Processing Record 360 of Set | dunedin
Processing Record 361 of Set | mitsamiouli
Processing Record 362 of Set | ambanja
Processing Record 363 of Set | barra do garcas
Processing Record 364 of Set | tigil
Processing Record 365 of Set | kozhevnikovo
Processing Record 366 of Set | alyangula
Processing Record 367 of Set | mtwango
Processing Record 368 of Set | bulgan
City meyungs not found. Skipping...
Processing Record 369 of Set | almeirim
Processing Record 370 of Set | khawhai
Processing Record 371 of Set | minab
Processing Record 372 of Set | havoysund
Processing Record 373 of Set | tinaquillo
Processing Record 374 of Set | lampa
Processing Record 375 of Set | sharjah
Processing Record 376 of Set | abha
Processing Record 377 of Set | havelock
Processing Record 378 of Set | qorveh
Processing Record 379 of Set | batagay
Processing Record 380 of Set | kulhudhuffushi
City skagastrond not found. Skipping...
Processing Record 381 of Set | homer
Processing Record 382 of Set | acajutla
Processing Record 383 of Set | mweka
City afmadu not found. Skipping...
Processing Record 384 of Set | sassandra
Processing Record 385 of Set | lokosovo
Processing Record 386 of Set | naze
Processing Record 387 of Set | ayan
Processing Record 388 of Set | ipora
Processing Record 389 of Set | tomatlan
Processing Record 390 of Set | sibolga
City jazzin not found. Skipping...
Processing Record 391 of Set | kavaratti
Processing Record 392 of Set | baruun-urt
Processing Record 393 of Set | onalaska
Processing Record 394 of Set | collipulli
Processing Record 395 of Set | palmer
Processing Record 396 of Set | richards bay
Processing Record 397 of Set | ismailia
Processing Record 398 of Set | dalvik
Processing Record 399 of Set | katsuura
Processing Record 400 of Set | sigli
Processing Record 401 of Set | auburn
Processing Record 402 of Set | ahipara
City khonuu not found. Skipping...
Processing Record 403 of Set | buritis
City kargapolye not found. Skipping...
Processing Record 404 of Set | alice springs
Processing Record 405 of Set | tiksi
Processing Record 406 of Set | nishihara
Processing Record 407 of Set | whitehorse
Processing Record 408 of Set | nauta
Processing Record 409 of Set | matara
Processing Record 410 of Set | lerwick
Processing Record 411 of Set | hamilton
Processing Record 412 of Set | porosozero
Processing Record 413 of Set | balabac
Processing Record 414 of Set | belmonte
Processing Record 415 of Set | vila do maio
Processing Record 416 of Set | price
Processing Record 417 of Set | kingsport
Processing Record 418 of Set | vicuna
City lushunkou not found. Skipping...
Processing Record 419 of Set | sabang
Processing Record 420 of Set | fortuna
Processing Record 421 of Set | gorontalo
Processing Record 422 of Set | kokkinokhoma
Processing Record 423 of Set | ketchikan
Processing Record 424 of Set | umarkot
Processing Record 425 of Set | gbadolite
Processing Record 426 of Set | big rapids
Processing Record 427 of Set | chifeng
Processing Record 428 of Set | kampot
Processing Record 429 of Set | avera
Processing Record 430 of Set | okahandja
Processing Record 431 of Set | khandyga
Processing Record 432 of Set | altay
Processing Record 433 of Set | yaan
Processing Record 434 of Set | ixtapa
Processing Record 435 of Set | kokkola
Processing Record 436 of Set | wuwei
Processing Record 437 of Set | leningradskiy
Processing Record 438 of Set | hervey bay
Processing Record 439 of Set | micheweni
Processing Record 440 of Set | ingham
Processing Record 441 of Set | perth
Processing Record 442 of Set | lima
Processing Record 443 of Set | xai-xai
Processing Record 444 of Set | churapcha
Processing Record 445 of Set | kargil
Processing Record 446 of Set | beloha
City jahrom not found. Skipping...
Processing Record 447 of Set | port hedland
City katha not found. Skipping...
Processing Record 448 of Set | dayong
Processing Record 449 of Set | baykit
Processing Record 450 of Set | yanam
City sentyabrskiy not found. Skipping...
Processing Record 451 of Set | banting
Processing Record 452 of Set | corpus christi
Processing Record 453 of Set | san pedro de los naranjos
Processing Record 454 of Set | santa barbara
City tabiauea not found. Skipping...
Processing Record 455 of Set | padre bernardo
Processing Record 456 of Set | chulman
Processing Record 457 of Set | yeysk
Processing Record 458 of Set | taviano
Processing Record 459 of Set | kefalos
Processing Record 460 of Set | shimoda
Processing Record 461 of Set | koscierzyna
Processing Record 462 of Set | lujiang
Processing Record 463 of Set | sebeta
Processing Record 464 of Set | rocha
Processing Record 465 of Set | bouna
Processing Record 466 of Set | port lincoln
Processing Record 467 of Set | vinh
Processing Record 468 of Set | puerto quijarro
Processing Record 469 of Set | chilon
Processing Record 470 of Set | bonthe
Processing Record 471 of Set | boa vista
Processing Record 472 of Set | kruisfontein
Processing Record 473 of Set | florianopolis
Processing Record 474 of Set | shieli
Processing Record 475 of Set | kysyl-syr
Processing Record 476 of Set | adrar
Processing Record 477 of Set | la crosse
Processing Record 478 of Set | lavrentiya
Processing Record 479 of Set | puerto ayacucho
Processing Record 480 of Set | bereda
City longlac not found. Skipping...
Processing Record 481 of Set | kourou
Processing Record 482 of Set | mo i rana
Processing Record 483 of Set | esna
Processing Record 484 of Set | mandalgovi
Processing Record 485 of Set | north bend
Processing Record 486 of Set | bogorodskoye
Processing Record 487 of Set | port macquarie
Processing Record 488 of Set | dawson creek
Processing Record 489 of Set | berlevag
City fierze not found. Skipping...
Processing Record 490 of Set | benghazi
Processing Record 491 of Set | tura
Processing Record 492 of Set | kopavogur
Processing Record 493 of Set | zabaykalsk
Processing Record 494 of Set | chitral
Processing Record 495 of Set | malanje
City vedaranniyam not found. Skipping...
Processing Record 496 of Set | pindwara
Processing Record 497 of Set | were ilu
Processing Record 498 of Set | vitim
Processing Record 499 of Set | calamar
Processing Record 500 of Set | gambat
Processing Record 501 of Set | puerto madero
Processing Record 502 of Set | tangjiazhuang
Processing Record 503 of Set | monclova
Processing Record 504 of Set | caraballeda
Processing Record 505 of Set | palana
Processing Record 506 of Set | warri
Processing Record 507 of Set | dire
Processing Record 508 of Set | busembatia
Processing Record 509 of Set | narsaq
Processing Record 510 of Set | padang
Processing Record 511 of Set | longview
Processing Record 512 of Set | tanda
Processing Record 513 of Set | grand gaube
Processing Record 514 of Set | challans
Processing Record 515 of Set | sindor
Processing Record 516 of Set | byron bay
Processing Record 517 of Set | kristinehamn
Processing Record 518 of Set | taoudenni
Processing Record 519 of Set | gamboula
Processing Record 520 of Set | selishche
Processing Record 521 of Set | agua branca
City yanan not found. Skipping...
Processing Record 522 of Set | dicabisagan
Processing Record 523 of Set | santa cruz
Processing Record 524 of Set | san-pedro
Processing Record 525 of Set | devonport
Processing Record 526 of Set | farmington
Processing Record 527 of Set | kargasok
Processing Record 528 of Set | beyneu
Processing Record 529 of Set | trinidad
Processing Record 530 of Set | khor
Processing Record 531 of Set | road town
Processing Record 532 of Set | qaqortoq
Processing Record 533 of Set | khorramshahr
Processing Record 534 of Set | navirai
Processing Record 535 of Set | saint-francois
Processing Record 536 of Set | kuusamo
Processing Record 537 of Set | safaga
City satitoa not found. Skipping...
Processing Record 538 of Set | mocuba
Processing Record 539 of Set | cagli
Processing Record 540 of Set | biak
Processing Record 541 of Set | lagoa
Processing Record 542 of Set | buchanan
Processing Record 543 of Set | dingzhou
Processing Record 544 of Set | sakaiminato
Processing Record 545 of Set | puerto escondido
Processing Record 546 of Set | san jose
Processing Record 547 of Set | moose factory
Processing Record 548 of Set | broken hill
Processing Record 549 of Set | lompoc
Processing Record 550 of Set | oranjemund
Processing Record 551 of Set | springfield
Processing Record 552 of Set | aksarka
Processing Record 553 of Set | acapulco
Processing Record 554 of Set | kuryk
Processing Record 555 of Set | arman
Processing Record 556 of Set | orel-izumrud
Processing Record 557 of Set | wakkanai
Processing Record 558 of Set | harper
Processing Record 559 of Set | daliao
Processing Record 560 of Set | soyo
Processing Record 561 of Set | nipawin
Processing Record 562 of Set | zhigansk
Processing Record 563 of Set | kempsey
Processing Record 564 of Set | shuiji
Processing Record 565 of Set | nouadhibou
City rungata not found. Skipping...
Processing Record 566 of Set | mumbwa
Processing Record 567 of Set | odweyne
Processing Record 568 of Set | uusikaupunki
Processing Record 569 of Set | placido de castro
Processing Record 570 of Set | johnstown
Processing Record 571 of Set | roald
Processing Record 572 of Set | hit
Processing Record 573 of Set | san luis
Processing Record 574 of Set | koppies
Processing Record 575 of Set | ayios nikolaos
Processing Record 576 of Set | muzaffarabad
Processing Record 577 of Set | kisangani
Processing Record 578 of Set | champerico
Processing Record 579 of Set | kings point
Processing Record 580 of Set | newport
Processing Record 581 of Set | north platte
Processing Record 582 of Set | japura
Processing Record 583 of Set | bitkine
Processing Record 584 of Set | zinder
Processing Record 585 of Set | veraval
Processing Record 586 of Set | ulaangom
City haapu not found. Skipping...
Processing Record 587 of Set | brufut
Processing Record 588 of Set | ukiah
Processing Record 589 of Set | shizunai
City tsihombe not found. Skipping...
Processing Record 590 of Set | flinders
Processing Record 591 of Set | hammerfest
Processing Record 592 of Set | viedma
-----------------------------
Data Retrieval Complete
-----------------------------
###Markdown
Convert Raw Data to DataFrame* Export the city data into a .csv.* Display the DataFrame
###Code
# creating dictionary of lists to input into dataframe
data = {
"City":names,
"Country":countries,
"Latitude":latitudes,
"Longitude":longitudes,
"Temperature":temperatures,
"Humidity":humidities,
"Wind Speed":wind_speeds,
"Cloudiness %":cloudiness
}
# creating dataframe and showing head and tail
city_temps_df = pd.DataFrame(data=data)
city_temps_df
# count of values per column
city_temps_df.count()
###Output
_____no_output_____
###Markdown
Inspect the data and remove the cities where the humidity > 100%.----Skip this step if there are no cities that have humidity > 100%.
###Code
# check for max humidity reading using .max
city_temps_df.max()
# check for max humidity reading by sorting values
city_temps_df.sort_values(by="Humidity", ascending=False)
# check for max humidity using summary statistics
city_temps_df.describe()
###Output
_____no_output_____
###Markdown
Since no cities have a humidity reading over 100%, this step will be skipped.
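For reference, the cell below is an added sketch of the filtering that would have been applied had any city exceeded 100% humidity (the name `clean_city_df` is illustrative and not from the original notebook).
###Code
# Keep only rows with plausible humidity readings (<= 100%)
clean_city_df = city_temps_df[city_temps_df["Humidity"] <= 100]
clean_city_df.count()
###Output
_____no_output_____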
###Code
# Export the City_Data into a csv
city_temps_df.to_csv("../output_data/city_temps.csv")
###Output
_____no_output_____
###Markdown
Plotting the Data* Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.* Save the plotted figures as .pngs.
###Code
city_temps_df.columns
# creating variables to use as x/y data in scatter plots
lat_scatter = city_temps_df["Latitude"]
temp_scatter = city_temps_df["Temperature"]
humi_scatter = city_temps_df["Humidity"]
cloud_scatter = city_temps_df["Cloudiness %"]
wind_scatter = city_temps_df["Wind Speed"]
###Output
_____no_output_____
###Markdown
Latitude vs. Temperature Plot
###Code
plt.scatter(lat_scatter,temp_scatter, alpha=0.5)
plt.xlabel("Latitude")
plt.ylabel("Temperature (F)")
plt.title("City Latitude vs Temperature (01/31/21)")
plt.grid()
plt.savefig("../output_data/latitude_vs_temperature.png")
plt.show()
###Output
_____no_output_____
###Markdown
Latitude vs. Humidity Plot
###Code
plt.scatter(lat_scatter,humi_scatter, alpha=0.5)
plt.xlabel("Latitude")
plt.ylabel("Humidity (%)")
plt.title("City Latitude vs Humidity (01/31/21)")
plt.grid()
plt.savefig("../output_data/latitude_vs_humidity.png")
plt.show()
###Output
_____no_output_____
###Markdown
Latitude vs. Cloudiness Plot
###Code
plt.scatter(lat_scatter,cloud_scatter, alpha=0.5)
plt.xlabel("Latitude")
plt.ylabel("Cloudiness (%)")
plt.title("City Latitude vs Cloudiness (01/31/21)")
plt.grid()
plt.savefig("../output_data/latitude_vs_cloudiness.png")
plt.show()
###Output
_____no_output_____
###Markdown
Temperature vs. Cloudiness Plot
###Code
plt.scatter(temp_scatter,cloud_scatter, alpha=0.5)
plt.xlabel("Temperature (F)")
plt.ylabel("Cloudiness (%)")
plt.title("Temperature vs Cloudiness (01/31/21)")
plt.grid()
plt.savefig("../output_data/temperature_vs_cloudiness.png")
plt.show()
###Output
_____no_output_____
###Markdown
Temperature vs. Humidity Plot
###Code
plt.scatter(humi_scatter,temp_scatter, alpha=0.5)
plt.ylabel("Temperature (F)")
plt.xlabel("Humidity (%)")
plt.title("Humidity vs Temperature (01/31/21)")
plt.grid()
plt.savefig("../output_data/humid_vs_temp.png")
plt.show()
###Output
_____no_output_____
###Markdown
Latitude vs. Wind Speed Plot
###Code
plt.scatter(lat_scatter,wind_scatter, alpha=0.5)
plt.xlabel("Latitude")
plt.ylabel("Wind Speed (mph)")
plt.title("City Latitude vs Wind Speed (01/31/21)")
plt.grid()
plt.savefig("../output_data/latitude_vs_windspeed.png")
plt.show()
###Output
_____no_output_____
###Markdown
Linear Regression
###Code
# creating function with defined arguments to make the linear regression for all plots faster
def line_regress(x, y):
(slope, intercept, rvalue, pvalue, stderr) = stats.linregress(x, y)
regress_values = x * slope + intercept
line_equation = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x, y)
plt.plot(x, regress_values,color= "red")
plt.annotate(line_equation, (0,50), fontsize=15,color="red", annotation_clip=False)
print(line_equation)
print(f"The r squared is {rvalue}")
# boolean variables to create northern and southern dataframes
is_northern = city_temps_df["Latitude"] >= 0
is_southern = city_temps_df["Latitude"] < 0
# northern hemisphere dataframe
northern_hem_df = city_temps_df.loc[is_northern]
assert northern_hem_df["Latitude"].min() >= 0 # check that all lats are greater than or equal to 0
# southern hemisphere
southern_hem_df = city_temps_df.loc[is_southern]
assert southern_hem_df["Latitude"].max() < 0 # check that all lats are less than 0
# northern variables
north_lats = northern_hem_df["Latitude"]
north_temp = northern_hem_df["Temperature"]
north_humi = northern_hem_df["Humidity"]
north_wind = northern_hem_df["Wind Speed"]
north_cloud = northern_hem_df["Cloudiness %"]
# southern variables
south_lats = southern_hem_df["Latitude"]
south_temp = southern_hem_df["Temperature"]
south_humi = southern_hem_df["Humidity"]
south_wind = southern_hem_df["Wind Speed"]
south_cloud = southern_hem_df["Cloudiness %"]
###Output
_____no_output_____
###Markdown
--- Northern vs Southern - Latitude vs Temperature Based on the linear regressions and scatter plots showcased below, we can draw the following conclusions: 1. In the northern hemisphere there is a strong negative correlation between latitude and temperature, meaning temperatures rise as you travel south toward the equator (in the southern hemisphere the relationship reverses, with temperatures falling as you move away from the equator). 2. There appears to be a stronger correlation between latitudes and temperatures in the northern hemisphere compared to the southern hemisphere as showcased by their respective r squared values. Northern Hemisphere - Max Temp vs. Latitude Linear Regression
###Code
line_regress(north_lats, north_temp)
plt.title("Northern Hemisphere: Latitude vs Temp (1/31/21)")
plt.xlabel("Latitude")
plt.ylabel("Temperature (F)")
plt.savefig("../output_data/north_lat_vs_temp.png")
plt.show()
###Output
y = -1.36x + 89.75
The r squared is -0.8534579983708654
###Markdown
Southern Hemisphere - Max Temp vs. Latitude Linear Regression
###Code
line_regress(south_lats, south_temp)
plt.title("Southern Hemisphere: Latitude vs Temp (1/31/21)")
plt.xlabel("Latitude")
plt.ylabel("Temperature (F)")
plt.savefig("../output_data/south_lat_vs_temp.png")
plt.show()
###Output
y = 0.3x + 80.29
The r squared is 0.4750136511464984
###Markdown
--- Northern vs Southern - Latitude vs Humidity Based on the linear regressions and scatter plots showcased below, we can draw the following conclusion: 1. The data suggests that there is a weak positive correlation between latitude and humidity both in the northern and southern hemispheres Northern Hemisphere - Humidity (%) vs. Latitude Linear Regression
###Code
line_regress(north_lats, north_humi)
plt.title("Northern Hemisphere: Latitude vs Humidity (1/31/21)")
plt.xlabel("Latitude")
plt.ylabel("Humidity (%)")
plt.savefig("../output_data/north_lat_vs_humid.png")
plt.show()
###Output
y = 0.36x + 60.04
The r squared is 0.35710503549611083
###Markdown
Southern Hemisphere - Humidity (%) vs. Latitude Linear Regression
###Code
line_regress(south_lats, south_humi)
plt.title("Southern Hemisphere: Latitude vs Humidity (1/31/21)")
plt.xlabel("Latitude")
plt.ylabel("Humidity (%)")
plt.savefig("../output_data/south_lat_vs_humid.png")
plt.show()
###Output
y = 0.46x + 85.02
The r squared is 0.3755993688637467
###Markdown
--- Northern vs Southern - Latitude vs Cloudiness Based on the linear regressions and scatter plots showcased below, we can draw the following conclusions: 1. There is a weak correlation between latitude and cloudiness in both the northern and southern hemispheres Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
###Code
line_regress(north_lats, north_cloud)
plt.title("Northern Hemisphere: Latitude vs Cloudiness (1/31/21)")
plt.xlabel("Latitude")
plt.ylabel("Cloudiness (%)")
plt.savefig("../output_data/north_lat_vs_cloudiness.png")
plt.show()
###Output
y = 0.78x + 24.48
The r squared is 0.3870336185379589
###Markdown
Southern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
###Code
line_regress(south_lats, south_cloud)
plt.title("Southern Hemisphere: Latitude vs Cloudiness (1/31/21)")
plt.xlabel("Latitude")
plt.ylabel("Cloudiness (%)")
plt.savefig("../output_data/south_lat_vs_cloudiness.png")
plt.show()
###Output
y = 0.76x + 78.2
The r squared is 0.26600243149061736
###Markdown
Northern vs Southern - Latitude vs Wind Speed Based on the linear regressions and scatter plots showcased below, we can draw the following conclusions: 1. There is a weak negative correlation between latitude and wind speeds in the southern hemisphere and close to no correlation in the northern hemisphere Northern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
###Code
line_regress(north_lats, north_wind)
plt.title("Northern Hemisphere: Latitude vs Wind Speed (mph)")
plt.xlabel("Latitude")
plt.ylabel("Wind Speed (mph)")
plt.savefig("../output_data/north_lat_vs_wind.png")
plt.show()
###Output
y = 0.04x + 6.51
The r squared is 0.12202530866318315
###Markdown
Southern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
###Code
line_regress(south_lats, south_wind)
plt.title("Southern Hemisphere: Latitude vs Wind Speed (mph)")
plt.xlabel("Latitude")
plt.ylabel("Wind Speed (mph)")
plt.savefig("../output_data/south_lat_vs_wind.png")
plt.show()
###Output
y = -0.11x + 6.9
The r squared is -0.20954303811363334
|
notebooks/query_ESO.ipynb | ###Markdown
Accessing ESO database Direct query: `http://archive.eso.org/ssap?REQUEST=queryData&POS=123.269560,-34.57804&SIZE=0.2` and [parse XML](https://stackoverflow.com/questions/45755513/parse-xml-to-table-in-python) Example 1: [eso_ssa.py](http://archive.eso.org/programmatic/eso_ssa.py)
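###Markdown
 Added sketch (not part of the original notebook): before turning to Example 1, the direct-query approach above can be reproduced with `requests` plus `astropy.io.votable`, since the SSAP endpoint returns a VOTable; the position and size are simply the values from the URL above.
###Code
import io
import requests
from astropy.io.votable import parse_single_table
# Query the ESO SSAP endpoint directly and parse the returned VOTable into an astropy Table
resp = requests.get("http://archive.eso.org/ssap",
                    params={"REQUEST": "queryData",
                            "POS": "123.269560,-34.57804",
                            "SIZE": 0.2})
votable = parse_single_table(io.BytesIO(resp.content))
votable.to_table()[:5]
###Output
_____no_output_____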
###Code
import sys
import pyvo as vo
from astropy.coordinates import SkyCoord
from astropy import units as u
import urllib
# Define the end point and the SSA service to query
ssap_endpoint = "http://archive.eso.org/ssap"
ssap_service = vo.dal.SSAService(ssap_endpoint)
ra = '05:37:09.9'
dec= '-80:28:09'
diameter = 0.5
pos = SkyCoord(ra=ra, dec=dec, unit=(u.hourangle, u.deg))
size = diameter*u.deg #u.Quantity(diameter, unit="deg")
ssap_resultset = ssap_service.search(pos=pos.fk5, diameter=size)
fields = ["COLLECTION", "TARGETNAME", "s_ra", "s_dec", "APERTURE",
"em_min", "em_max", "SPECRP", "SNR", "t_min", "t_max",
"CREATORDID", "access_url"]
###Output
_____no_output_____
###Markdown
fields="ARCFILE, HDR, Object, RA, DEC, Wavelength, coverage, R (λ/δλ), Flux Calibration, Product category, SNR (spectra), ABMAGLIM (cubes), Aperture, Spatial Resolution, Spectral bin, Dispersive element, Extended object, Normalised, Total flux, Instrument, Date Obs, Exptime, Collection, Product version, Release Description, Run/Program ID, ORIGFILE, REFERENCE".split(',')
###Code
# - Wavelengths are expressed in meters, for display they are converted to nanometers
# Also, count how many spectra have a SNR > min_SNR
harps_files=0
min_SNR = 0
separator=' '
for row in ssap_resultset:
instrument = row["COLLECTION"].decode()
if (instrument=='HARPS') and (row["SNR"] > min_SNR):
harps_files += 1
harps_files
df=ssap_resultset.table.to_pandas()
df.head()
df.columns
df['TARGETNAME'].apply(lambda x: str(x.decode()).replace('-','')).unique()
df["COLLECTION"] = df["COLLECTION"].apply(lambda x: x.decode())
df["dp_id"] = df["dp_id"].apply(lambda x: x.decode())
df["CREATORDID"] = df["CREATORDID"].apply(lambda x: x.decode())
df["access_url"][0][23:]
df["access_url"] = df["access_url"].apply(lambda x: x.decode()) #, row["CREATORDID"].decode()[23:]]
#url
df["access_url"][0]
#filename
df['CREATORDID'][0][23:].replace(':','')
from tqdm import tqdm
from os.path import join
id={}
for i in tqdm(range(len(df))[:3]):
instrument = df.loc[i,"COLLECTION"]
if (instrument=='HARPS') and (df.loc[i,"SNR"] > min_SNR):
dp_id = df.loc[i,"dp_id"]
origfile = df.loc[i,"CREATORDID"][23:]
id[origfile] = dp_id
# The downloaded file is saved with the name provided by the creator of the file: origfile.
# Though, care should be taken, because reduced products
# generated by external users might have colliding CREATORDID!
# This "demo" script does not take into consideration this risk.
print("Fetching file with SNR=%f: %s.fits renamed to %s" %(df.loc[i,"SNR"], dp_id, origfile))
url = df.loc[i,"access_url"]
filename = join('../data',df.loc[i,"CREATORDID"][23:].replace(':',''))
#urllib.request.urlretrieve(url, filename)
###Output
100%|██████████| 3/3 [00:00<00:00, 636.30it/s]
###Markdown
reading downloaded fits files[see example](http://archive.eso.org/cms/eso-data/help/1dspectra.htmlPython)
###Code
%matplotlib inline
import sys
import matplotlib.pyplot as pl
from astropy.io import fits as pf
import numpy as np
hdulist = pf.open( "../data/HARPS.2006-11-12T065347.333_s1d_A_DRS_HARPS_3.5_ESOSDP.fits" )
hdulist.info()
###Output
Filename: ../data/HARPS.2006-11-12T065347.333_s1d_A_DRS_HARPS_3.5_ESOSDP.fits
No. Name Ver Type Cards Dimensions Format
0 PRIMARY 1 PrimaryHDU 3051 ()
1 SPECTRUM 1 BinTableHDU 46 1R x 3C [313133D, 313133E, 313133E]
###Markdown
hdulist[0].header
###Code
# print column information
hdulist[1].columns
# get to the data part (in extension 1)
scidata = hdulist[1].data
wave = scidata[0][0]
flux = scidata[0][1]
err = scidata[0][2]
pl.errorbar(wave, flux, yerr=err)
pl.ylabel('FLUX [ADU]')
pl.xlabel('Wavelength [$\AA$]')
###Output
_____no_output_____
###Markdown
querying all tois
###Code
sys.path.append('../archive_digger')
import archive_digger as ad
tois = ad.get_tois()
ras = tois['RA'].values#[:15]
decs= tois['Dec'].values#[:15]
tics= tois['TIC ID'].values#[:15]
min_snr = 1
harps = {}
uves = {}
feros = {}
for ra,dec,tic in tqdm(zip(ras,decs,tics)):
pos = SkyCoord(ra=ra, dec=dec, unit=(u.hourangle, u.deg))
size = diameter*u.deg
ssap_resultset = ssap_service.search(pos=pos.fk5, diameter=size)
df=ssap_resultset.to_table().to_pandas()
collection=df['COLLECTION'].apply(lambda x: x.decode())
snr = df['SNR']
df1=df[(collection=='HARPS') & (snr>min_snr)]
df2=df[(collection=='UVES') & (snr>min_snr)]
df3=df[(collection=='FEROS') & (snr>min_snr)]
harps[tic] = len(df1)
uves[tic] = len(df2)
feros[tic] = len(df3)
import pandas as pd
d=pd.DataFrame([harps,uves,feros]).T
d.columns=['harps','uves','feros']
final=pd.merge(d,tois,right_on='TIC ID', left_index=True)
# final=final[['harps','feros','TIC ID','TOI']].sort_values('TOI')
final.sort_values('TOI').to_csv('toi_with_eso_data.csv',index=False)
###Output
_____no_output_____
###Markdown
Example 2[raw_eso.py](http://archive.eso.org/programmatic/eso_raw.py)
###Code
import sys
import math
from pyvo.dal import tap
from astropy.coordinates import SkyCoord
from astropy import units as u
ESO_TAP_OBS = "http://archive.eso.org/tap_obs"
tapobs = tap.TAPService(ESO_TAP_OBS)
target = "Pi Men"
radius = 0.5 # degrees
print()
print("Looking for public SCIENCE HARPS frames around target %s in a cone of radius %f deg." %(target, radius))
print("Querying the ESO TAP service at %s" %(ESO_TAP_OBS))
# --------------------------------------------------
# The actual position of the selected target
# is queried by the from_name() function,
# which queries the CDS SESAME service
# (http://cdsweb.u-strasbg.fr/cgi-bin/Sesame).
# --------------------------------------------------
print("The provided target is being resolved by SESAME...")
pos = SkyCoord(ra=ra, dec=dec, unit=(u.hourangle, u.deg))
print("SESAME coordinates for %s: %s" % (target, pos.to_string()))
cosd = math.cos( pos.dec.to_value() * math.pi/180. )
if ( math.fabs( pos.dec.to_value() - 90 ) < radius ):
cosd = 1;
ra_min = pos.ra.to_value() - radius * cosd
ra_max = pos.ra.to_value() + radius * cosd
dec_min = pos.dec.to_value() - radius
dec_max = pos.dec.to_value() + radius
top = "TOP %d" % (3)
query="""SELECT %s object, ra, dec, tpl_start, prog_id, filter_path, dp_id
from dbo.raw
where ra between %f and %f
and dec between %f and %f
and instrument='HARPS'
and release_date < getdate()
and dp_cat='SCIENCE' """ % ( top, ra_min, ra_max, dec_min, dec_max );
print(query)
res = tapobs.search(query=query)
print(res.to_table())
###Output
Looking for public SCIENCE HARPS frames around target Pi Men in a cone of radius 0.500000 deg.
Querying the ESO TAP service at http://archive.eso.org/tap_obs
The provided target is being resolved by SESAME...
SESAME coordinates for Pi Men: 84.2912 -80.4692
SELECT TOP 3 object, ra, dec, tpl_start, prog_id, filter_path, dp_id
from dbo.raw
where ra between 84.208461 and 84.374039
and dec between -80.969167 and -79.969167
and instrument='HARPS'
and release_date < getdate()
and dp_cat='SCIENCE'
|
notebooks/data-cleaning/2.0-clean-unneccessary-columns.ipynb | ###Markdown
 Clean Unnecessary Columns from DataFrame and Cast
###Code
filename = '../../data/interim/1.3-output.csv'
df = pd.read_csv(filename)
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1109656 entries, 0 to 1109655
Data columns (total 20 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 year 1109656 non-null int64
1 zipcode 1109656 non-null int64
2 MSA 688481 non-null object
3 CBSA 688481 non-null float64
4 state 944005 non-null object
5 EQI_zip 824710 non-null float64
6 SFR_zip 824710 non-null float64
7 growth_zip 824710 non-null float64
8 RECPI_zip 824710 non-null float64
9 REAI_zip 703797 non-null float64
10 EQI_MSA 688481 non-null float64
11 SFR_MSA 688481 non-null float64
12 growth_MSA 688481 non-null float64
13 RECPI_MSA 688481 non-null float64
14 REAI_MSA 595961 non-null float64
15 EQI_state 944005 non-null float64
16 SFR_state 944005 non-null float64
17 growth_state 944005 non-null float64
18 RECPI_state 944005 non-null float64
19 REAI_state 812501 non-null float64
dtypes: float64(16), int64(2), object(2)
memory usage: 169.3+ MB
###Markdown
Drop growth
###Code
df = df.drop(['growth_zip', 'growth_MSA', 'growth_state'], axis=1)
###Output
_____no_output_____
###Markdown
Drop REAI
###Code
df = df.drop(['REAI_zip','REAI_MSA','REAI_state'], axis=1)
###Output
_____no_output_____
###Markdown
Drop Descriptors
###Code
df = df.drop(['MSA','CBSA','state'], axis=1)
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1109656 entries, 0 to 1109655
Data columns (total 11 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 year 1109656 non-null int64
1 zipcode 1109656 non-null int64
2 EQI_zip 824710 non-null float64
3 SFR_zip 824710 non-null float64
4 RECPI_zip 824710 non-null float64
5 EQI_MSA 688481 non-null float64
6 SFR_MSA 688481 non-null float64
7 RECPI_MSA 688481 non-null float64
8 EQI_state 944005 non-null float64
9 SFR_state 944005 non-null float64
10 RECPI_state 944005 non-null float64
dtypes: float64(9), int64(2)
memory usage: 93.1 MB
###Markdown
Cast Columns
###Code
df.SFR_state = df.SFR_state.astype('float64')
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1109656 entries, 0 to 1109655
Data columns (total 11 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 year 1109656 non-null int64
1 zipcode 1109656 non-null int64
2 EQI_zip 824710 non-null float64
3 SFR_zip 824710 non-null float64
4 RECPI_zip 824710 non-null float64
5 EQI_MSA 688481 non-null float64
6 SFR_MSA 688481 non-null float64
7 RECPI_MSA 688481 non-null float64
8 EQI_state 944005 non-null float64
9 SFR_state 944005 non-null float64
10 RECPI_state 944005 non-null float64
dtypes: float64(9), int64(2)
memory usage: 93.1 MB
###Markdown
Output DataFrame to file
###Code
filename = '../../data/interim/2.0-output.csv'
df.to_csv(filename, index=False)
###Output
_____no_output_____ |
CSK VS SRH/Player Analysis CSK VS SRH.ipynb | ###Markdown
Player Analysis
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
df1 = pd.read_csv("matches.csv")
df2 = pd.read_csv("deliveries.csv")
df1.rename(columns={"id" : 'match_id'}, inplace=True)
overall= pd.merge(df1, df2, on='match_id', how='outer')
overall.columns
overall = overall[['match_id', 'season','team1', 'team2', 'toss_winner','toss_decision','winner',
'inning', 'batting_team', 'bowling_team', 'over', 'ball',
'batsman', 'bowler','wide_runs','noball_runs',
'batsman_runs', 'extra_runs', 'total_runs', 'player_dismissed',
'dismissal_kind']]
overall.head()
###Output
_____no_output_____
###Markdown
 Taking into consideration only CSK vs SRH matches
###Code
DM=overall[np.logical_or(np.logical_and(overall['team1']=='Chennai Super Kings',overall['team2']=='Sunrisers Hyderabad'),
np.logical_and(overall['team2']=='Chennai Super Kings',overall['team1']=='Sunrisers Hyderabad'))]
DM.head()
DM.season.unique()
DM.batsman.unique()
###Output
_____no_output_____
###Markdown
David Warner Performance Analysis
###Code
warner = DM[DM["batsman"]=='DA Warner']
batsmen_score = pd.DataFrame(warner.groupby(['season',"match_id","batsman"]).agg({'batsman_runs' : 'sum', 'ball' :'count'}))
batsmen_score
for value in DM.groupby("batsman"):
if value[0] == "DA Warner":
print("Total Runs scored by David Warner against CSK :",value[1]['batsman_runs'].sum())
warner = DM[DM["batsman"]=='DA Warner']
warner = warner[warner["player_dismissed"]=="DA Warner"]
warner= warner.groupby("bowler")["player_dismissed"].size()
dismissal = pd.DataFrame(warner)
dismissal
batsman_name = 'DA Warner'
def batsmen_out_type(batsman_name):
for value in DM.groupby(['player_dismissed']):
if value[0] == batsman_name :
return value[1]['dismissal_kind'].value_counts()
stats = batsmen_out_type(batsman_name)
stats = stats.to_frame().reset_index().rename(columns = {'index' : 'Dismissal Kind' , 'dismissal_kind' : 'Count'})
print("Dismissal Type of David Warner :")
stats
###Output
Dismissal Type of David Warner :
###Markdown
Confidence Interval for Warner 's innings
###Code
batsmen_score.describe().iloc[[0,1,2,7],:]
mean = batsmen_score["batsman_runs"].mean()
sd = batsmen_score["batsman_runs"].std()
n = len(batsmen_score)
n
tstar = 2.064
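# (Added note) 2.064 is the two-tailed 97.5th-percentile t value for roughly 24
# degrees of freedom; it could also be computed directly from the data, e.g.:
#   from scipy import stats
#   tstar = stats.t.ppf(0.975, df=n - 1)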
se = sd/np.sqrt(n)
se
lcb = mean - tstar * se
ucb = mean + tstar * se
lcb = round(lcb)
ucb = round(ucb)
print("Confidence Interval for DA Warner's innings today :{}".format((lcb, ucb)))
###Output
Confidence Interval for DA Warner's innings today :(28.0, 77.0)
###Markdown
 From all of the above estimations, we can see that David Warner's form against CSK bowlers has been quite impressive in recent years. So we can predict from here that if David Warner is able to score 30+ runs today, he is likely to go on to a long innings. CSK Wicket fall down Analysis
###Code
wicket = DM[DM["bowling_team"] == "Sunrisers Hyderabad"]
CSK_wickets = pd.DataFrame(wicket.groupby(['season',"match_id","bowling_team"]).agg({'player_dismissed' : 'count'}))
print("CSK wickets fall down against SRH in all matches :")
CSK_wickets.columns = ["Total Wicket Fall of CSK"]
CSK_wickets
count = DM.match_id.unique()
count = len(count)
wicket = DM[DM["bowling_team"] == "Sunrisers Hyderabad"]
wkt = wicket["player_dismissed"].count()
print("Total wickets fall of CSK against SRH : {} in {} matches".format(wkt ,count))
per_match = wkt/count
per_match = round(per_match)
print("On an average , Per match wicket fall down for CSK :",per_match)
###Output
On an average , Per match wicket fall down for CSK : 4.0
###Markdown
Confidence Interval
###Code
CSK_wickets.describe()
mean = CSK_wickets["Total Wicket Fall of CSK"].mean()
sd = CSK_wickets["Total Wicket Fall of CSK"].std()
n = len(CSK_wickets)
n
tstar = 2.064
se = sd/np.sqrt(n)
se
lcb = mean - tstar * se
ucb = mean + tstar * se
lcb = round(lcb)
ucb = round(ucb)
print("Confidence Interval for Total Wicket fall of CSK today :{}".format((lcb, ucb)))
###Output
Confidence Interval for Total Wicket fall of CSK today :(3.0, 5.0)
###Markdown
 From the above analysis we can see that CSK has a good record while chasing against SRH. So, taking into consideration past trends only, we predict CSK will lose 3-5 wickets if they chase, based on our analysis. Total Runs Scored in the Match Analysis
###Code
third = DM[["batting_team","batsman_runs"]]
Each_team_overall_score = pd.DataFrame(third.groupby("batting_team").batsman_runs.sum())
Each_team_overall_score.columns=["Total Runs"]
Each_team_overall_score
count = DM.match_id.unique()
count = len(count)
Total_runs_scored = third.batsman_runs.sum()
Avg_score = Total_runs_scored/(count*2)
print("On an average runs scored in each innnings in CSK VS SRH :",round(Avg_score))
Total_avg = Total_runs_scored/count
print("On an average total runs scored in a match of CSK VS SRH :" , round(Total_avg))
###Output
On an average total runs scored in a match of CSK VS SRH : 330.0
###Markdown
 From the above analysis, we can predict that the total score today will be around 330 if the trend continues. Wide Balls Analysis
###Code
wides = DM.where(DM["wide_runs"] >0)
plt.figure(figsize = (18,9))
sns.countplot(wides['wide_runs'],hue=DM['bowling_team'])
plt.title("Wides balled by CSK vs SRH (overall)",fontsize=15)
plt.yticks(fontsize=15)
plt.xticks(fontsize=15)
plt.xlabel('wide runs',fontsize=15)
plt.ylabel('count',fontsize=15)
plt.legend(loc=1,fontsize=15)
cond2 = DM["season"] == 2018
cond3 = DM["season"] == 2019
final = DM[cond2 | cond3]
wides2 = final.where(final["wide_runs"] >0)
plt.figure(figsize = (18,9))
sns.countplot(wides2['wide_runs'],hue=DM['bowling_team'])
plt.title("Wides balled by CSK vs SRH in recent years",fontsize=15)
plt.yticks(fontsize=15)
plt.xticks(fontsize=15)
plt.xlabel('wide runs',fontsize=15)
plt.ylabel('count',fontsize=15)
plt.legend(loc=1,fontsize=15)
###Output
_____no_output_____ |
.ipynb_checkpoints/Voting-Age Population USA 2012 Election-checkpoint.ipynb | ###Markdown
Choropleth [Documentation](https://plot.ly/python/reference/choropleth) Imports Plotly
###Code
import chart_studio.plotly as py
import plotly.graph_objs as go
from plotly.offline import init_notebook_mode,iplot
init_notebook_mode(connected=True)
import pandas as pd
###Output
_____no_output_____
###Markdown
**Import CSV file**
###Code
election = pd.read_csv('2012_Election_Data')
###Output
_____no_output_____
###Markdown
**Check Header**
###Code
election.head()
###Output
_____no_output_____
###Markdown
**Check Types**
###Code
election.dtypes
##(VAP) is float, ok
fig = go.Figure(data=go.Choropleth(
locationmode='USA-states',
locations=election['State Abv'],
z=election['Voting-Age Population (VAP)'],
colorbar_title='VAP',
))
fig.update_layout(
title_text='Voting-Age Population on 2012 USA Election',
geo = dict(scope='usa',showlakes=True)
)
###Output
_____no_output_____ |
output/v3/notebook.ipynb | ###Markdown
MARATONA BEHIND THE CODE 2020 DESAFIO 7 - TNT Installing Libs
###Code
!pip install scikit-learn --upgrade
!pip install xgboost --upgrade
!pip install imblearn --upgrade
!pip install tensorflow keras
###Output
_____no_output_____
###Markdown
 Downloading the datasets in .csv format
###Code
import pandas as pd
# Insert the pandas DataFrame here.
df_data_1 = pd.read_csv('data/train_dataset_algartech.csv')
# df_data_1 = pd.read_csv('training_dataset.csv')
df_data_1.head()
df_training_dataset = df_data_1
df_training_dataset.tail()
###Output
_____no_output_____
###Markdown
 About the "training_dataset.csv" file, we have some general information about TNT's points of sale:**Tempo****Estação****LAT****LONG****Movimentação****Original_473****Original_269****Zero****Maçã-Verde****Tangerina****Citrus****Açaí-Guaraná****Pêssego****TARGET**
###Code
df_training_dataset.info()
df_training_dataset.nunique()
###Output
_____no_output_____
###Markdown
 Challenge details: binary classificationThis is a challenge whose business objective is the segmentation of the users of a bank's app. For that, we can use two approaches: supervised machine learning (classification) or unsupervised (clustering). In this challenge classification will be applied, since a dataset with "labels" is available, in other words, one that already has data examples together with the target variable.The scikit-learn library offers several classification algorithms. Participants are free to use whichever framework they prefer to complete this challenge.This notebook shows an example of using the "Decision Tree" algorithm to classify part of the students into six different profiles. Attention!The target column in this challenge is the ``TARGET`` column Pre-processing the dataset before training Handling NaN values with sklearn's SimpleImputerFor the NaN values, we will use replacement with the constant 0 as an **example**.You can choose whichever strategy you think works best to handle the null values :)Docs: https://scikit-learn.org/stable/modules/generated/sklearn.impute.SimpleImputer.html?highlight=simpleimputersklearn.impute.SimpleImputer
###Code
from sklearn.impute import SimpleImputer
import numpy as np
impute_zeros = SimpleImputer(
missing_values=np.nan,
strategy='constant',
fill_value=0,
verbose=0,
copy=True
)
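# (Added illustrative comment) Other fill strategies are possible here, for example
#   SimpleImputer(missing_values=np.nan, strategy='median')
# would replace NaNs with each numeric column's median instead of the constant 0.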
# Exibindo os dados ausentes do conjunto de dados antes da primeira transformação (df)
print("Valores nulos no df_training_dataset antes da transformação SimpleImputer: \n\n{}\n".format(df_training_dataset.isnull().sum(axis = 0)))
# Aplicando a transformação ``SimpleImputer`` no conjunto de dados base
impute_zeros.fit(X=df_training_dataset)
# Reconstruindo um Pandas DataFrame com os resultados
df_training_dataset_imputed = pd.DataFrame.from_records(
data=impute_zeros.transform(
X=df_training_dataset
),
columns=df_training_dataset.columns
)
# Exibindo os dados ausentes do conjunto de dados após a primeira transformação (df)
print("Valores nulos no df_training_dataset após a transformação SimpleImputer: \n\n{}\n".format(df_training_dataset_imputed.isnull().sum(axis = 0)))
###Output
Valores nulos no df_training_dataset antes da transformação SimpleImputer:
Idade 0
Local de trabalho 0
Pontuação teste 0
Departmento 0
Distancia casa-trabalho 0
Educacao 0
Area 0
Possui carro 0
Subordinado 0
Satisfação com o ambiente no emprego atual 0
Genero 0
Horas voluntariado 0
Envolvimento com trabalho 0
Posicao 0
Cargo 0
Satisfação com emprego 0
Estado civil 0
Renda 0
Bonus de performance 0
Quantidade de empresas que trabalho 0
Maior de idade 0
Necessita de hora extra 0
Aumento de salario% 0
Performance na entrevista 0
Satisfação com a relação 0
Horas de trabalho padrão 0
Beneficios 0
Anos de experiencia 0
Horas de treinamento ultimo ano 0
Estilo de vida 0
Anos na última empresa 0
Anos na posição atual 0
Anos desde última promoção 0
Anos com a mesma gerência 0
Contratar 0
dtype: int64
Valores nulos no df_training_dataset após a transformação SimpleImputer:
Idade 0
Local de trabalho 0
Pontuação teste 0
Departmento 0
Distancia casa-trabalho 0
Educacao 0
Area 0
Possui carro 0
Subordinado 0
Satisfação com o ambiente no emprego atual 0
Genero 0
Horas voluntariado 0
Envolvimento com trabalho 0
Posicao 0
Cargo 0
Satisfação com emprego 0
Estado civil 0
Renda 0
Bonus de performance 0
Quantidade de empresas que trabalho 0
Maior de idade 0
Necessita de hora extra 0
Aumento de salario% 0
Performance na entrevista 0
Satisfação com a relação 0
Horas de trabalho padrão 0
Beneficios 0
Anos de experiencia 0
Horas de treinamento ultimo ano 0
Estilo de vida 0
Anos na última empresa 0
Anos na posição atual 0
Anos desde última promoção 0
Anos com a mesma gerência 0
Contratar 0
dtype: int64
###Markdown
 Dropping unwanted columnsBelow we will **demonstrate** how to use the **DataFrame.drop()** method.Docs: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.drop.html
###Code
df_training_dataset_imputed.tail()
df_training_dataset_rmcolumns = df_training_dataset_imputed.drop(columns=['Idade', 'Educacao', 'Area', 'Possui carro', 'Horas de trabalho padrão', 'Genero', 'Estado civil'], inplace=False)
df_training_dataset_rmcolumns.tail()
###Output
_____no_output_____
###Markdown
 Attention!The columns removed above are only an example; you can use whichever columns you want and even create new columns with data you find important! Handling categorical variablesAs mentioned before, computers are not good with "categorical" variables (or strings).Given a column with a categorical variable, what we can do is encode that column into multiple columns containing binary variables. This process is called "one-hot encoding" or "dummy encoding". If you are not familiar with these terms, you can read more about them on the internet :)
###Code
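# Tiny illustrative example (added, not from the original notebook) of what dummy
# encoding does: pd.get_dummies(pd.DataFrame({"Cargo": ["Analista", "Gerente"]}), columns=["Cargo"])
# produces two binary columns, Cargo_Analista and Cargo_Gerente.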
# Handling categorical variables with the Pandas ``get_dummies()'' method
df_training = pd.get_dummies(df_training_dataset_rmcolumns, columns=['Cargo', 'Local de trabalho', 'Departmento'])
# df_training = df_training_dataset_rmcolumns
df_training['Contratar'] = df_training['Contratar'].replace({'Sim': 1, 'Não': 0})
df_training['Necessita de hora extra'] = df_training['Necessita de hora extra'].replace({'Sim': 1, 'Não': 0})
df_training.tail()
###Output
_____no_output_____
###Markdown
 Attention!The **TARGET** column must be kept as a string. You do not need to process/encode the target variable. Training a classifier based on a decision tree Selecting FEATURES and defining the TARGET variable
###Code
df_training.columns
features = df_training[
[
'Pontuação teste', 'Distancia casa-trabalho', 'Subordinado',
'Satisfação com o ambiente no emprego atual', 'Horas voluntariado',
'Envolvimento com trabalho', 'Posicao', 'Satisfação com emprego',
'Renda', 'Bonus de performance', 'Quantidade de empresas que trabalho',
'Maior de idade', 'Necessita de hora extra', 'Aumento de salario%',
'Performance na entrevista', 'Satisfação com a relação', 'Beneficios',
'Anos de experiencia', 'Horas de treinamento ultimo ano',
'Estilo de vida', 'Anos na última empresa', 'Anos na posição atual',
'Anos desde última promoção', 'Anos com a mesma gerência',
'Cargo_Analista', 'Cargo_Assistente', 'Cargo_Diretor',
'Cargo_Engenheiro', 'Cargo_Gerente', 'Cargo_Supervisor',
'Cargo_Tecnico', 'Cargo_Vendedo senior', 'Cargo_Vendedor junior',
'Local de trabalho_Cliente', 'Local de trabalho_Escritório',
'Local de trabalho_Misto', 'Departmento_Engenharia', 'Departmento_RH',
'Departmento_Vendas'
]
]
target = df_training['Contratar'] ## DO NOT CHANGE THE NAME OF THE TARGET VARIABLE.
###Output
_____no_output_____
###Markdown
 Splitting our dataset into training and test sets
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(features, target, test_size=0.2, random_state=133)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
###Output
_____no_output_____
###Markdown
 Training a decision tree
###Code
# Method to create a decision tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from sklearn.neural_network import MLPClassifier
from sklearn import ensemble
# dtc = DecisionTreeClassifier(max_depth=15).fit(X_train, y_train)
# dtc = RandomForestClassifier(max_depth=21).fit(X_train, y_train)
# dtc = XGBClassifier(max_depth=15).fit(X_train, y_train)
# dtc.fit(X_train,y_train)
# dtc = MLPClassifier(hidden_layer_sizes=(8,8,8), activation='relu', solver='adam', max_iter=1500)
# dtc.fit(X_train,y_train)
# params = {'n_estimators': 500,
# 'max_depth': 4,
# 'min_samples_split': 5,
# 'learning_rate': 0.01,
# 'loss': 'ls'}
# dtc = ensemble.GradientBoostingRegressor(**params)
# dtc.fit(X_train, y_train)
from sklearn.linear_model import LogisticRegression
dtc = LogisticRegression()
dtc.fit(X_train, y_train)
# from sklearn import svm
# dtc = svm.SVC(kernel='linear', C = 1.0)
# dtc.fit(X_train, y_train)
# from sklearn.neighbors import KNeighborsClassifier
# dtc = KNeighborsClassifier(n_neighbors = 9)
# dtc.fit(X_train, y_train)
# n_estimators=96, random_state=133,
###Output
_____no_output_____
###Markdown
 Making predictions on the test sample
###Code
y_pred = dtc.predict(X_test)
print(y_pred)
from sklearn.metrics import f1_score
f1_score(y_test, y_pred, average=None)
###Output
_____no_output_____
###Markdown
Keras
###Code
from sklearn import preprocessing
# X_train / X_test are NumPy arrays after the StandardScaler step above, so rebuild
# them as DataFrames (with the original feature names) before normalizing and inspecting
train_norm = pd.DataFrame(X_train, columns=features.columns)
test_norm = pd.DataFrame(X_test, columns=features.columns)
# Normalize Training Data
std_scale = preprocessing.StandardScaler().fit(train_norm)
x_train_norm = std_scale.transform(train_norm)
# Converting numpy array to dataframe
X_train = pd.DataFrame(x_train_norm, index=train_norm.index, columns=train_norm.columns)
print (X_train.head())
# Normalize Testing Data by using mean and SD of training set
x_test_norm = std_scale.transform(test_norm)
X_test = pd.DataFrame(x_test_norm, index=test_norm.index, columns=test_norm.columns)
print (X_test.head())
print (X_train.head())
from numpy import loadtxt
import tensorflow as tf
from keras.models import Sequential, save_model, load_model
from keras.layers import Dense, Flatten
# define the keras model
model = Sequential()
model.add(Dense(12, input_dim=39, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# model = Sequential([
# Flatten(input_shape=(50,)),
# Dense(16, activation=tf.nn.relu),
# Dense(16, activation=tf.nn.relu),
# Dense(16, activation=tf.nn.relu),
# Dense(16, activation=tf.nn.relu),
# Dense(16, activation=tf.nn.relu),
# Dense(16, activation=tf.nn.relu),
# Dense(16, activation=tf.nn.relu),
# Dense(16, activation=tf.nn.relu),
# Dense(16, activation=tf.nn.relu),
# Dense(16, activation=tf.nn.relu),
# Dense(16, activation=tf.nn.relu),
# Dense(16, activation=tf.nn.relu),
# Dense(16, activation=tf.nn.relu),
# Dense(16, activation=tf.nn.relu),
# Dense(16, activation=tf.nn.relu),
# Dense(16, activation=tf.nn.relu),
# Dense(16, activation=tf.nn.relu),
# Dense(16, activation=tf.nn.relu),
# Dense(16, activation=tf.nn.relu),
# Dense(16, activation=tf.nn.relu),
# Dense(1, activation=tf.nn.sigmoid),
# ])
# compile the keras model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# fit the keras model on the dataset
# model.fit(X_train, y_train, epochs=50, batch_size=32)
model.fit(X_train, y_train, validation_data=(X_test,y_test), epochs=1500)
# evaluate the keras model
_, accuracy = model.evaluate(X_train, y_train)
print('Accuracy: %.2f' % (accuracy*100))
np.set_printoptions(precision=4, suppress=True)
eval_results = model.evaluate(X_test, y_test, verbose=0)
print("\nLoss, accuracy on test data: ")
print("%0.4f %0.2f%%" % (eval_results[0], \
eval_results[1]*100))
# # Save the model
filepath = './saved_model'
save_model(model, filepath)
# Load the model
model = load_model(filepath, compile = True)
# y_pred = dtc.predict(X_test)
# print(y_pred)
# np.set_printoptions(threshold=np.inf)
# print(X_test)
# y_pred = np.argmax(model.predict(X_test), axis = 1)
# print(y_pred.shape)
# make a prediction
y_pred = model.predict_classes(X_test)
# show the inputs and predicted outputs
print(y_pred)
from sklearn.metrics import f1_score
f1_score(y_test, y_pred, average=None)
###Output
_____no_output_____
###Markdown
 Analyzing model quality with the confusion matrix
###Code
import matplotlib.pyplot as plt
import numpy as np
import itertools
from sklearn.neighbors import KNeighborsClassifier
# dtc = KNeighborsClassifier(n_neighbors = 9)
# dtc.fit(X_train, y_train)
def plot_confusion_matrix(cm, target_names, title='Confusion matrix', cmap=None, normalize=True):
accuracy = np.trace(cm) / float(np.sum(cm))
misclass = 1 - accuracy
if cmap is None:
cmap = plt.get_cmap('Blues')
plt.figure(figsize=(8, 6))
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
if target_names is not None:
tick_marks = np.arange(len(target_names))
plt.xticks(tick_marks, target_names, rotation=45)
plt.yticks(tick_marks, target_names)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
thresh = cm.max() / 1.5 if normalize else cm.max() / 2
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
if normalize:
plt.text(j, i, "{:0.2f}".format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
else:
plt.text(j, i, "{:,}".format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass))
plt.show()
from sklearn.metrics import confusion_matrix
# y_test = y_test.replace({1: 'REABASTECER', 0: 'NORMAL'})
# y_pred = y_pred.replace({1: 'REABASTECER', 0: 'NORMAL'})
plot_confusion_matrix(confusion_matrix(y_test, y_pred), ['Não', 'Sim'])  # class 0 = 'Não', class 1 = 'Sim'
###Output
_____no_output_____
###Markdown
 Scoring the data needed to deliver the solution As the deliverable for your solution, we expect the classified results for the following dataset, called "to_be_scored.csv": Downloading the "answer sheet"
###Code
!wget --no-check-certificate --content-disposition https://gitlab.com/JoaoPedroPP/datasets/-/raw/master/ntn/to_be_scored.csv
df_to_be_scored = pd.read_csv(r'to_be_scored.csv')
df_to_be_scored.tail()
df_to_be_scored = pd.read_csv('data/to_be_scored_algartech.csv')
df_to_be_scored.tail()
###Output
_____no_output_____
###Markdown
 Attention!The ``to_be_scored`` dataframe is your "answer sheet". Note that the "TARGET" column does not exist in this sample, so it cannot be used to train supervised learning models.
###Code
df_to_be_scored.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 100 entries, 0 to 99
Data columns (total 34 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Idade 100 non-null int64
1 Local de trabalho 99 non-null object
2 Pontuação teste 100 non-null int64
3 Departmento 100 non-null object
4 Distancia casa-trabalho 100 non-null int64
5 Educacao 100 non-null object
6 Area 100 non-null object
7 Possui carro 100 non-null int64
8 Subordinado 100 non-null int64
9 Satisfação com o ambiente no emprego atual 100 non-null int64
10 Genero 100 non-null object
11 Horas voluntariado 100 non-null int64
12 Envolvimento com trabalho 100 non-null int64
13 Posicao 100 non-null int64
14 Cargo 100 non-null object
15 Satisfação com emprego 100 non-null int64
16 Estado civil 100 non-null object
17 Renda 100 non-null int64
18 Bonus de performance 100 non-null int64
19 Quantidade de empresas que trabalho 100 non-null int64
20 Maior de idade 100 non-null int64
21 Necessita de hora extra 100 non-null object
22 Aumento de salario% 100 non-null int64
23 Performance na entrevista 100 non-null int64
24 Satisfação com a relação 100 non-null int64
25 Horas de trabalho padrão 100 non-null int64
26 Beneficios 100 non-null int64
27 Anos de experiencia 100 non-null int64
28 Horas de treinamento ultimo ano 100 non-null int64
29 Estilo de vida 100 non-null int64
30 Anos na última empresa 100 non-null int64
31 Anos na posição atual 100 non-null int64
32 Anos desde última promoção 100 non-null int64
33 Anos com a mesma gerência 100 non-null int64
dtypes: int64(26), object(8)
memory usage: 26.7+ KB
###Markdown
 Attention! To apply your model and classify the answer sheet, you first need to apply the same column transformations that you applied to the training dataset. Do not remove or add rows in the answer sheet. Do not change the order of the rows in the answer sheet. At the end, the 1000 entries must be classified, with the predicted values in a column called "target" In the cell below, we quickly repeat the same pre-processing steps used in the decision tree example
###Code
# 1 - Removendo linhas com valores NaN
# df_to_be_scored_1 = df_to_be_scored.dropna(axis='index', how='any', subset=['Local de trabalho'', 'Estação', 'LAT', 'LONG', 'Movimentação', 'Original_473', 'Original_269', 'Zero', 'Maçã-Verde', 'Tangerina', 'Citrus', 'Açaí-Guaraná', 'Pêssego'])
df_to_be_scored_1 = df_to_be_scored
# 2 - Inputando zeros nos valores faltantes
impute_zeros.fit(X=df_to_be_scored_1)
df_to_be_scored_2 = pd.DataFrame.from_records(
data=impute_zeros.transform(
X=df_to_be_scored_1
),
columns=df_to_be_scored_1.columns
)
# 3 - Remoção de colunas
df_to_be_scored_3 = df_to_be_scored_2.drop(columns=['Idade', 'Educacao', 'Area', 'Possui carro', 'Horas de trabalho padrão', 'Genero', 'Estado civil'], inplace=False)
# 4 - Encoding com "dummy variables" (se necessário)
df_to_be_scored_4 = pd.get_dummies(df_to_be_scored_3, columns=['Cargo', 'Local de trabalho', 'Departmento'])
df_to_be_scored_4 = df_to_be_scored_4.drop(columns=['Local de trabalho_0'], inplace=False)
df_to_be_scored_4['Necessita de hora extra'] = df_to_be_scored_4['Necessita de hora extra'].replace({'Sim': 1, 'Não': 0})
# df_to_be_scored_4 = df_to_be_scored_3
df_to_be_scored_4.tail()
###Output
_____no_output_____
###Markdown
 It can be verified below that the answer sheet columns are now identical to the ones used to train the model:
###Code
df_training[
[
'Pontuação teste', 'Distancia casa-trabalho', 'Subordinado',
'Satisfação com o ambiente no emprego atual', 'Horas voluntariado',
'Envolvimento com trabalho', 'Posicao', 'Satisfação com emprego',
'Renda', 'Bonus de performance', 'Quantidade de empresas que trabalho',
'Maior de idade', 'Necessita de hora extra', 'Aumento de salario%',
'Performance na entrevista', 'Satisfação com a relação', 'Beneficios',
'Anos de experiencia', 'Horas de treinamento ultimo ano',
'Estilo de vida', 'Anos na última empresa', 'Anos na posição atual',
'Anos desde última promoção', 'Anos com a mesma gerência',
'Cargo_Analista', 'Cargo_Assistente', 'Cargo_Diretor',
'Cargo_Engenheiro', 'Cargo_Gerente', 'Cargo_Supervisor',
'Cargo_Tecnico', 'Cargo_Vendedo senior', 'Cargo_Vendedor junior',
'Local de trabalho_Cliente', 'Local de trabalho_Escritório',
'Local de trabalho_Misto', 'Departmento_Engenharia', 'Departmento_RH',
'Departmento_Vendas'
]
].columns
df_to_be_scored_4.columns
###Output
_____no_output_____
###Markdown
 AttentionFor any columns that do not exist in "df_to_be_scored", you can use the technique below to add them:
###Code
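# Added sketch (not part of the original notebook): the note above mentions a technique
# for adding missing columns. Assuming the training-time feature frame `features` is still
# in scope, any dummy column absent from the scoring frame can be created with zeros and
# the column order aligned with the training columns:
for col in features.columns:
    if col not in df_to_be_scored_4.columns:
        df_to_be_scored_4[col] = 0
df_to_be_scored_4 = df_to_be_scored_4[list(features.columns)]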
y_pred = dtc.predict(df_to_be_scored_4)
# y_pred = model.predict_classes(df_to_be_scored_4)
df_to_be_scored_4['Contratar'] = y_pred
df_to_be_scored_4['Contratar'] = df_to_be_scored_4['Contratar'].replace({1: 'Sim', 0: 'Não'})
df_to_be_scored_4.head()
###Output
_____no_output_____
###Markdown
 Saving the answer sheet as a .csv file to be submitted
###Code
# project.save_data(file_name="results.csv", data=df_to_be_scored_4.to_csv(index=False))
df_to_be_scored_4.to_csv('results.csv', index=False)
###Output
_____no_output_____ |
examples/sampling-hamiltonian-mcmc.ipynb | ###Markdown
Inference: Hamiltonian MCMCThis example shows you how to perform Bayesian inference on a Gaussian distribution and a time-series problem, using [Hamiltonian Monte Carlo](http://pints.readthedocs.io/en/latest/mcmc_samplers/hamiltonian_mcmc.html). First, we create a simple normal distribution
###Code
import os
os.chdir("../")
import pints
import pints.toy
import numpy as np
import matplotlib.pyplot as plt
# Create log pdf
log_pdf = pints.toy.GaussianLogPDF([2, 4], [[1, 0], [0, 3]])
# Contour plot of pdf
levels = np.linspace(-3,12,20)
num_points = 100
x = np.linspace(-1, 5, num_points)
y = np.linspace(-0, 8, num_points)
X, Y = np.meshgrid(x, y)
Z = np.zeros(X.shape)
Z = np.exp([[log_pdf([i, j]) for i in x] for j in y])
plt.contour(X, Y, Z)
plt.xlabel('x')
plt.ylabel('y')
plt.show()
###Output
_____no_output_____
###Markdown
Now we set up and run a sampling routine using Hamiltonian MCMC
###Code
# Choose starting points for 3 mcmc chains
xs = [
[2, 1],
[3, 3],
[5, 4],
]
# Set a standard deviation, to give the method a sense of scale
#sigma = [1, 1]
# Create mcmc routine
mcmc = pints.MCMCController(log_pdf, 3, xs, method=pints.HamiltonianMCMC)
# Add stopping criterion
mcmc.set_max_iterations(1000)
# Set up modest logging
mcmc.set_log_to_screen(True)
mcmc.set_log_interval(100)
# # Update step sizes used by individual samplers
for sampler in mcmc.samplers():
sampler.set_leapfrog_step_size(0.5)
# Run!
print('Running...')
full_chains = mcmc.run()
print('Done!')
# Show traces and histograms
import pints.plot
pints.plot.trace(full_chains)
plt.show()
# Discard warm up
chains = full_chains[:, 200:]
# Check convergence using rhat criterion
print('R-hat:')
print(pints.rhat_all_params(chains))
# Check Kullback-Leibler divergence of chains
print(log_pdf.kl_divergence(chains[0]))
print(log_pdf.kl_divergence(chains[1]))
print(log_pdf.kl_divergence(chains[2]))
# Look at distribution in chain 0
pints.plot.pairwise(chains[0], kde=True)
plt.show()
###Output
R-hat:
[1.001282663344726, 1.0017550416251924]
0.00403543726715
0.0141062325781
0.00973924695993
###Markdown
Hamiltonian MCMC on a time-series problemWe now try the same method on a time-series problemFirst, we try it in 1d, using a wrapper around the LogisticModel to make it one-dimensional.
###Code
import pints.toy as toy
# Create a wrapper around the logistic model, turning it into a 1d model
class Model(pints.ForwardModel):
def __init__(self):
self.model = toy.LogisticModel()
def simulate(self, x, times):
return self.model.simulate([x[0], 500], times)
def simulateS1(self, x, times):
values, gradient = self.model.simulateS1([x[0], 500], times)
gradient = gradient[:, 0]
return values, gradient
def n_parameters(self):
return 1
# Load a forward model
model = Model()
# Create some toy data
real_parameters = np.array([0.015])
times = np.linspace(0, 1000, 50)
org_values = model.simulate(real_parameters, times)
# Add noise
np.random.seed(1)
noise = 10
values = org_values + np.random.normal(0, noise, org_values.shape)
plt.figure()
plt.plot(times, values)
plt.plot(times, org_values)
plt.show()
###Output
_____no_output_____
###Markdown
We can use optimisation to find the parameter value that maximises the loglikelihood, and note that it's become slightly biased due to noise.
###Code
# Create an object with links to the model and time series
problem = pints.SingleOutputProblem(model, times, values)
# Create a log-likelihood function
log_likelihood = pints.GaussianKnownSigmaLogLikelihood(problem, noise)
# Find the best parameters with XNES
best_parameters, fx = pints.optimise(log_likelihood, real_parameters, method=pints.XNES)
print(best_parameters[0])
# Show the likelihood near the true parameters
plt.figure()
x = np.linspace(0.01497, 0.01505, 500)
y = [log_likelihood([i]) for i in x]
plt.axvline(real_parameters[0], color='tab:orange', label='real')
plt.axvline(best_parameters[0], color='tab:green', label='found')
plt.legend()
plt.plot(x, y)
plt.show()
###Output
_____no_output_____
###Markdown
 Because the LogisticModel (and our wrapper) support the `simulateS1()` method, we can also evaluate the gradient of the loglikelihood (via the likelihood's `evaluateS1()` method) at different points:
###Code
# Show derivatives at two points
y1, dy1 = log_likelihood.evaluateS1(real_parameters)
y2, dy2 = log_likelihood.evaluateS1(best_parameters)
# Show the likelihood near the true parameters
x = np.linspace(0.01498, 0.01502, 500)
y = [log_likelihood([i]) for i in x]
z1 = y1 + (x - real_parameters[0]) * dy1
z2 = y2 + (x - best_parameters[0]) * dy2
plt.figure()
plt.plot(x, y)
plt.plot(x, z1, label='real parameters')
plt.plot(x, z2, label='found parameters')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Satisfied that this works, we now run a HamiltonianMCMC routine (which uses the derivative information)
###Code
# Choose starting points for mcmc chains
xs = [
real_parameters * 1.01,
real_parameters * 0.9,
real_parameters * 1.15,
]
# Choose a covariance matrix for the proposal step
#sigma0 = (best_parameters - real_parameters) * 0.1
sigma0 = np.abs(real_parameters)
# Create mcmc routine
mcmc = pints.MCMCController(log_likelihood, len(xs), xs, method=pints.HamiltonianMCMC)
# Add stopping criterion
mcmc.set_max_iterations(1000)
# Set up modest logging
mcmc.set_log_to_screen(True)
mcmc.set_log_interval(100)
# Set small step size
# for sampler in mcmc.samplers():
# sampler.set_leapfrog_step_size(3e-5) # This is very sensitive!
# Run!
print('Running...')
chains = mcmc.run()
print('Done!')
# Show trace and histogram
pints.plot.trace(chains)
plt.show()
# Show predicted time series for the first chain
pints.plot.series(chains[0, 200:], problem, real_parameters)
plt.show()
###Output
_____no_output_____
###Markdown
2d Time seriesFinally, we try Hamiltonian MCMC on a 2d logistic model problem:
###Code
from __future__ import print_function
import pints
import pints.toy as toy
import pints.plot
import numpy as np
import matplotlib.pyplot as plt
# Load a forward model
model = toy.LogisticModel()
# Create some toy data
real_parameters = np.array([0.015, 500])
org_values = model.simulate(real_parameters, times)
# Add noise
np.random.seed(1)
noise = 10
values = org_values + np.random.normal(0, noise, org_values.shape)
# Create an object with links to the model and time series
problem = pints.SingleOutputProblem(model, times, values)
# Create a log-likelihood function
log_likelihood = pints.GaussianKnownSigmaLogLikelihood(problem, noise)
# Create a uniform prior over the parameters
log_prior = pints.UniformLogPrior(
[0.01, 400],
[0.02, 600]
)
# Create a posterior log-likelihood (log(likelihood * prior))
log_posterior = pints.LogPosterior(log_likelihood, log_prior)
# Choose starting points for 3 mcmc chains
xs = [
real_parameters * 1.01,
real_parameters * 0.9,
real_parameters * 1.1,
]
# Create mcmc routine
mcmc = pints.MCMCController(log_posterior, len(xs), xs, method=pints.HamiltonianMCMC)
# Add stopping criterion
mcmc.set_max_iterations(1000)
# Set up modest logging
mcmc.set_log_to_screen(True)
mcmc.set_log_interval(100)
# Run!
print('Running...')
chains = mcmc.run()
print('Done!')
# Show traces and histograms
pints.plot.trace(chains)
plt.show()
###Output
_____no_output_____
###Markdown
Chains have converged!
###Code
# Discard warm up
chains = chains[:, 200:]
# Check convergence using rhat criterion
print('R-hat:')
print(pints.rhat_all_params(chains))
###Output
R-hat:
[0.99947031132463748, 0.99979989669817704]
###Markdown
Extract any divergent iterations
###Code
div = len(sampler.divergent_iterations())
print("There were " + str(div) + " divergent iterations.")
###Output
There were 0 divergent iterations.
|
day1/ModelV1.ipynb | ###Markdown
Straight from the Original Dataset
###Code
import pandas as pd
training_data = pd.read_csv("training_data_modified_fe.csv", index_col = 0)
test_data = pd.read_csv("test_data_modified_fe.csv", index_col = 0)
###Output
_____no_output_____
###Markdown
 from sklearn.preprocessing import MinMaxScaler, StandardScaler
scaler = StandardScaler()
need_to_standarize = ['Ability_to_pay_AUG', '']
training_data[need_to_standarize] = scaler.fit_transform(training_data[need_to_standarize])
test_data[need_to_standarize] = scaler.fit_transform(test_data[need_to_standarize])
###Code
training_x = training_data.drop('NEXT_MONTH_DEFAULT', 1)
training_y = training_data['NEXT_MONTH_DEFAULT']
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(training_x, training_y, test_size = 0.1)
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
ada_clf = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=9), n_estimators=300,
algorithm="SAMME.R", learning_rate=0.5
)
ada_clf.fit(x_train, y_train)
y_pred_rf = ada_clf.predict(x_test)
accuracy_score(y_test,y_pred_rf)
confusion_matrix(y_test,y_pred_rf)
accuracy_score(y_test,y_pred_rf)
print(classification_report(y_test,y_pred_rf))
###Output
precision recall f1-score support
0 0.83 0.94 0.88 1873
1 0.59 0.32 0.42 527
accuracy 0.80 2400
macro avg 0.71 0.63 0.65 2400
weighted avg 0.78 0.80 0.78 2400
###Markdown
Two-way Method
###Code
#getting column names
col_names = list(training_data.columns)
print(col_names)
life_col = ['Balance_Limit_V1', 'Gender', 'EDUCATION_STATUS', 'MARITAL_STATUS', 'AGE','NEXT_MONTH_DEFAULT']
amount_col = list(set(col_names)-set(life_col)) + ['NEXT_MONTH_DEFAULT']
training_x_life = training_data.drop(amount_col,1)
training_y_life = training_data['NEXT_MONTH_DEFAULT']
training_x_life.head()
x_train, x_test, y_train, y_test = train_test_split(training_x_life, training_y_life, test_size = 0.2)
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
ada_clf_life = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=10), n_estimators=100,
algorithm="SAMME.R", learning_rate=0.5
)
ada_clf_life.fit(x_train, y_train)
y_pred_life = ada_clf_life.predict(x_test)
accuracy_score(y_test,y_pred_life)
print(classification_report(y_test,y_pred_life))
training_x_amount = training_data.drop(life_col,1)
training_y_amount = training_data['NEXT_MONTH_DEFAULT']
training_x_amount.head()
x_train, x_test, y_train, y_test = train_test_split(training_x_amount, training_y_amount, test_size = 0.2)
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
ada_clf_amount = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=10), n_estimators=100,
algorithm="SAMME.R", learning_rate=0.5
)
ada_clf_amount.fit(x_train, y_train)
y_pred_amount = ada_clf_amount.predict(x_test)
accuracy_score(y_test,y_pred_amount)
print(classification_report(y_test,y_pred_amount))
life_pred = ada_clf_life.predict(training_x_life)
amount_pred = ada_clf_amount.predict(training_x_amount)
all_data = pd.DataFrame({"Life" : life_pred,"Amount":amount_pred})
x_train, x_test, y_train, y_test = train_test_split(all_data, training_y, test_size = 0.2)
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
ada_clf_all = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=10), n_estimators=100,
algorithm="SAMME.R", learning_rate=0.5
)
ada_clf_all.fit(x_train, y_train)
y_pred_all = ada_clf_all.predict(x_test)
accuracy_score(y_test,y_pred_all)
print(classification_report(y_test,y_pred_all))
life_col_test = ['Balance_Limit_V1', 'Gender', 'EDUCATION_STATUS', 'MARITAL_STATUS', 'AGE']
amount_col_test = list(set(list(test_data.columns))-set(life_col_test))
test_x_life = test_data.drop(amount_col_test,1)
test_x_amount = test_data.drop(life_col_test,1)
test_life_pred = ada_clf_life.predict(test_x_life)
test_amount_pred = ada_clf_amount.predict(test_x_amount)
all_data_test = pd.DataFrame({"Life" : test_life_pred,"Amount":test_amount_pred})
test_pred = ada_clf_all.predict(all_data_test)
list(test_pred).count(1)
test_pred = pd.DataFrame({"NEXT_MONTH_DEFAULT":test_pred})
test_pred.to_csv("Predictions2.csv")
###Output
_____no_output_____ |
sample-code/defense_adversairal_training.ipynb | ###Markdown
Demo - Adversarial Training (MNIST)
###Code
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.utils
from torchvision import models
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import torchattacks
from torchattacks import PGD, FGSM
from models import CNN
print("PyTorch", torch.__version__)
print("Torchvision", torchvision.__version__)
print("Torchattacks", torchattacks.__version__)
print("Numpy", np.__version__)
###Output
PyTorch 1.9.0
Torchvision 0.10.0
Torchattacks 3.0.0
Numpy 1.20.1
###Markdown
1. Load Data
###Code
mnist_train = dsets.MNIST(root='./data/',
train=True,
transform=transforms.ToTensor(),
download=True)
mnist_test = dsets.MNIST(root='./data/',
train=False,
transform=transforms.ToTensor(),
download=True)
batch_size = 128
train_loader = torch.utils.data.DataLoader(dataset=mnist_train,
batch_size=batch_size,
shuffle=False)
test_loader = torch.utils.data.DataLoader(dataset=mnist_test,
batch_size=batch_size,
shuffle=False)
###Output
_____no_output_____
###Markdown
2. Define Model
###Code
model = CNN().cuda()
loss = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
atk = PGD(model, eps=0.3, alpha=0.1, steps=7)
###Output
_____no_output_____
###Markdown
3. Train Model
###Code
num_epochs = 5
for epoch in range(num_epochs):
total_batch = len(mnist_train) // batch_size
for i, (batch_images, batch_labels) in enumerate(train_loader):
X = atk(batch_images, batch_labels).cuda()
Y = batch_labels.cuda()
pre = model(X)
cost = loss(pre, Y)
optimizer.zero_grad()
cost.backward()
optimizer.step()
if (i+1) % 100 == 0:
print('Epoch [%d/%d], lter [%d/%d], Loss: %.4f'
%(epoch+1, num_epochs, i+1, total_batch, cost.item()))
###Output
_____no_output_____
###Markdown
4. Test Model 4.1 Standard Accuracy
###Code
model.eval()
correct = 0
total = 0
for images, labels in test_loader:
images = images.cuda()
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels.cuda()).sum()
print('Standard accuracy: %.2f %%' % (100 * float(correct) / total))
###Output
Standard accuracy: 96.00 %
###Markdown
4.2 Robust Accuracy
###Code
model.eval()
correct = 0
total = 0
atk = FGSM(model, eps=0.3)
for images, labels in test_loader:
images = atk(images, labels).cuda()
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels.cuda()).sum()
print('Robust accuracy: %.2f %%' % (100 * float(correct) / total))
###Output
Robust accuracy: 88.81 %
|
SageMakerGT-ComprehendNER-A2I-Notebook.ipynb | ###Markdown
Easily setup human review of your NLP based Entity Recognition workflows with Amazon SageMaker Ground Truth, Amazon Comprehend AutoML and Amazon Augmented AI (A2I) 1. [Introduction](Introduction)2. [Solution Overview](Solution-Overview)3. [Pre-processing input documents](Step1---Pre-processing-of-input-documents)4. [Named Entity Recognition Labeling using Amazon SageMaker Ground Truth](Create-an-Amazon-SageMaker-Ground-Truth-Named-Entity-Recognition-Labeling-Job)5. [Train an Amazon Comprehend Custom Entity Recognizer using the labeled dataset](Train-an-Amazon-Comprehend-AutoML-model)6. [Setup a Human Review workflow using Amazon Augmented AI](Setup-a-Human-Review-loop-for-low-confidence-detections-using-Amazon-Augmented-AI)7. [Conclusion](Conclusion) IntroductionAmazon A2I provides built-in human review workflows for common machine learning use cases, such as NLP based entity recognition from documents, which allows predictions from Amazon Comprehend AutoML to be reviewed easily. You can also create your own workflows for ML models built on Amazon SageMaker or any other tools. Using Amazon A2I, you can allow human reviewers to step in when a model is unable to make a high confidence prediction or to audit its predictions on an on-going basis. Learn more hereIn this tutorial, we will first setup a NLP based workflow for custom entity recognition by Amazon Comprehend from an input document using a labeled dataset created by Amazon SageMaker Ground Truth Named Entity Recognition. We will then show how you can set up an Amazon A2I human loop with a flow definition to trigger a review task for low confidence predictions.For more in depth instructions, visit https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-getting-started.html Prerequisites Before you proceed, verify that your Sagemaker Execution Role has the right policies* Comprehend Full Access* Sagemaker Full Access* Your Sagemaker Execution Role should have access to S3 already. If not add the policy to access any S3 bucket. Please verify that your Sagemaker Execution Role has the following statementAdd iam:passRole as an inline policy{ "Version": "2012-10-17", "Statement": [ { "Action": [ "iam:PassRole" ], "Effect": "Allow", "Resource": "*" } ]} Finally you will need the following trust policies setup in your execution role.{ "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Principal": { "Service": [ "sagemaker.amazonaws.com", "s3.amazonaws.com", "comprehend.amazonaws.com" ] }, "Action": "sts:AssumeRole" } ]} Step 1 - Pre-processing of input documentsDeclare some essential variables to be used throughout the notebook
###Code
# Lets declare commonly used variables and do some initial checks
import boto3
import os
import json
import random
import time
import sagemaker
import uuid
s3 = boto3.client('s3')
s3res = boto3.resource('s3')
BUCKET = "a2i-experiments"
role = sagemaker.get_execution_role()
region = boto3.session.Session().region_name
prefix = "a2i-comprehend-gtner" + str(uuid.uuid1())
print(prefix)
bucket_region = s3.head_bucket(Bucket=BUCKET)['ResponseMetadata']['HTTPHeaders']['x-amz-bucket-region']
assert bucket_region == region, "Your S3 bucket {} and this notebook need to be in the same region.".format(BUCKET)
def upload_to_s3(s3path, file):
data = open(file, "rb")
key = s3path
s3res.Bucket(BUCKET).put_object(Key=key, Body=data)
###Output
_____no_output_____
###Markdown
For our example, let's say we are looking at Slack messages or a bunch of tickets in JIRA. We would like to know if they're related to an AWS offering. We will use Amazon SageMaker Ground Truth’s Named Entity Recognition labeling feature to label a SERVICE or/and VERSION entity from the input. We will then train an Amazon Comprehend Custom Entity Recognizer to recognize the entities from text like tweets or ticket comments. The sample dataset is provided in **data/rawinput/aws-service-offerings.txt**. As an optional step, read the file, strip the HTML tags if any. Now convert it into a text document made up of multiple sentences and upload the processed document to a S3 bucket. Note: If you would like to directly go to the Amazon Comprehend Custom Entity Recognition training and Amazon Augmented AI human review steps, please execute Steps 1a to 1c and then skip to Step 3a, to the cell that executes code to create the annotations file and continue from there. The output.manifest created by Amazon SageMaker Ground Truth in this notebook is already available in **data/output.manifest**. Step 1a - Split the text into sentences for more clarityWe will use the regular expressions package to split the chunk of text we got from above to a set of sentences. This is particularly important when we use Amazon SageMaker Ground Truth for the Named Entity labeling task
###Code
import re
alphabets= "([A-Za-z])"
prefixes = "(Mr|St|Mrs|Ms|Dr)[.]"
suffixes = "(Inc|Ltd|Jr|Sr|Co)"
starters = "(Mr|Mrs|Ms|Dr|He\s|She\s|It\s|They\s|Their\s|Our\s|We\s|But\s|However\s|That\s|This\s|Wherever)"
acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
websites = "[.](com|net|org|io|gov)"
def split_into_sentences(text):
text = " " + text + " "
text = text.replace("\n"," ")
text = re.sub(prefixes,"\\1<prd>",text)
text = re.sub(websites,"<prd>\\1",text)
if "Ph.D" in text: text = text.replace("Ph.D.","Ph<prd>D<prd>")
text = re.sub("\s" + alphabets + "[.] "," \\1<prd> ",text)
text = re.sub(acronyms+" "+starters,"\\1<stop> \\2",text)
text = re.sub(alphabets + "[.]" + alphabets + "[.]" + alphabets + "[.]","\\1<prd>\\2<prd>\\3<prd>",text)
text = re.sub(alphabets + "[.]" + alphabets + "[.]","\\1<prd>\\2<prd>",text)
text = re.sub(" "+suffixes+"[.] "+starters," \\1<stop> \\2",text)
text = re.sub(" "+suffixes+"[.]"," \\1<prd>",text)
text = re.sub(" " + alphabets + "[.]"," \\1<prd>",text)
text = re.sub(r'(?<=\d)[\.](?=\d)','',text)
if "”" in text: text = text.replace(".”","”.")
if "\"" in text: text = text.replace(".\"","\".")
if "!" in text: text = text.replace("!\"","\"!")
if "?" in text: text = text.replace("?\"","\"?")
text = text.replace(".",".<stop>")
text = text.replace("?","?<stop>")
text = text.replace("!","!<stop>")
text = text.replace("<prd>",".")
text = text.replace('"','')
sentences = text.split("<stop>")
sentences = sentences[:-1]
sentences = [s.strip() for s in sentences]
return sentences
###Output
_____no_output_____
###Markdown
We will use the above function to create a list of sentences which will be needed to generate input file. You can use the below code to generate input file for multiple documents.
###Code
# We will now read our input file aws-service-offerings.txt from data/rawinput, pass to strip_tags, and write the resulting file back
folderpath = r"data/rawinput" # make sure to put the 'r' in front and provide the folder where your files are
filepaths = [os.path.join(folderpath, name) for name in os.listdir(folderpath) if not name.startswith('.')] # do not select hidden directories
print(filepaths)
all_files = []
for path in filepaths:
with open(path, 'r') as f:
# Execute this step below only if your input text was HTML
#stripped_text = strip_tags(f.read())
# If your input text was not HTML, replace the "stripped_text" below with f.read()
structured_text = split_into_sentences(f.read())
all_files.append(structured_text)
f.close()
###Output
_____no_output_____
###Markdown
 Create an input file with 200 entries to be used to generate the Ground Truth Named Entity Recognition labeling manifest
###Code
import csv
fnfull = "inputs.csv"
!rm inputs.csv #in case there is already a file with the same name
with open(fnfull, "w", encoding='utf-8') as ff:
csv_writer = csv.writer(ff, delimiter='\n')
for infile in all_files:
for num, sentence in enumerate(infile):
csv_writer.writerow([sentence])
if num == 201:
break
print(num)
ff.close()
s3_manifest_key = prefix + "/input/" + fnfull
upload_to_s3(s3_manifest_key, fnfull)
###Output
_____no_output_____
###Markdown
Step 1b - Creating training and test dataset for Amazon Comprehend Custom Entity
###Code
# Create the training file - Comprehend requires a minimum of 1000 samples in the training file
fntrain = "train.csv"
!rm train.csv #in case there is already a file with the same name
with open(fntrain, "w", encoding='utf-8') as fn:
csv_writer = csv.writer(fn, delimiter='\n')
for infile in all_files:
for num, sentence in enumerate(infile):
csv_writer.writerow([sentence])
fn.close()
s3_train_key = prefix + "/training/input/train/" + fntrain
upload_to_s3(s3_train_key, fntrain)
# Create the testing file - We will select 100+ rows after the training sample ends
fntest = "test.csv"
!rm test.csv #in case there is already a file with the same name
with open(fntest, "w", encoding='utf-8') as ft:
csv_writer = csv.writer(ft, delimiter='\n')
for infile in all_files:
        for num, sentence in enumerate(infile):
if num > 1000:
csv_writer.writerow([sentence])
if num > 1200:
break
ft.close()
s3_test_key = prefix + "/training/input/test/" + fntest
upload_to_s3(s3_test_key, fntest)
###Output
_____no_output_____
###Markdown
Step 2 - Create an Amazon SageMaker Ground Truth Named Entity Recognition Labeling JobNow that we have processed our input file and converted it into a text file with multiple sentences, we can use this file to create a named entity recognition labeling job using Amazon SageMaker Ground Truth. The purpose is to annotate/label sentences within the input document as belonging to a custom entity that we define. There are some prerequisites to create the labeling job - a) we create a manifest file that Amazon SageMaker Ground Truth needs, b) we setup a labeling workforce, and c) we select a UI template that the workforce will use Step 2a - Create a manifest file
###Code
# Create and upload the input manifest by appending a source tag to each of the lines in the input text file.
# Ground Truth uses the manifest file to determine labeling tasks
manifest_name = prefix + '-text-input.manifest'
# remove existing file with the same name to avoid duplicate entries
!rm *.manifest
s3bucket = s3res.Bucket(BUCKET)
with open(manifest_name, 'w') as f:
for fn in s3bucket.objects.filter(Prefix=prefix +'/input/'):
fn_obj = s3res.Object(BUCKET, fn.key)
for line in fn_obj.get()['Body'].read().splitlines():
f.write('{"source":"' + line.decode('utf-8') +'"}\n')
f.close()
s3.upload_file(manifest_name, BUCKET, prefix + "/manifest/" + manifest_name)
###Output
_____no_output_____
###Markdown
Note: Steps 2b and 2c below will be performed using the AWS Console
We will use the AWS Console to create the Private Labeling Workforce and start a Labeling job. You can use your email address to send the labeling task request and complete the labeling yourself as a private workforce labeler.

Step 2b - Create a Private Labeling Workforce
With Amazon SageMaker Ground Truth, you can build high-quality training datasets for your machine learning models. With Ground Truth, you can use workers from either Amazon Mechanical Turk, a vendor company that you choose, or an internal, private workforce along with machine learning to enable you to create a labeled dataset. You can use the labeled dataset output from Ground Truth to train your own models, as a training dataset for an Amazon SageMaker model or, in our case, to train an Amazon Comprehend Custom Entity Recognizer. This step requires you to use the AWS Console. However, we highly recommend that you follow it, especially when creating your own task with a custom dataset, label set, and template.
We will create a private workteam and add only one user (you) to it. Then, we will create a Ground Truth labeling job request to send the task to that workforce. You will then be able to see your annotation job and can even annotate the whole dataset yourself!
To create a private team:
1. Go to AWS Console > Amazon SageMaker > Labeling workforces
2. Click "Private" and then "Create private team".
3. Enter the desired name for your private workteam.
4. Enter your own email address in the "Email addresses" section.
5. Enter the name of your organization and a contact email to administer the private workteam.
6. Click "Create Private Team".
7. The AWS Console should now return to AWS Console > Amazon SageMaker > Labeling workforces. Your newly created team should be visible under "Private teams". Next to it you will see an ARN, which is a long string that looks like arn:aws:sagemaker:region-name-123456:workteam/private-crowd/team-name.
8. You should get an email from [email protected] that contains your workforce username and password.
9. In AWS Console > Amazon SageMaker > Labeling workforces, click on the URL in Labeling portal sign-in URL. Use the email/password combination from Step 8 to log in (you will be asked to create a new, non-default password).
10. This is your private worker's interface. When we create a verification task in Verify your task using a private team below, your task should appear in this window. You can invite your colleagues to participate in the labeling job by clicking the "Invite new workers" button.
The Amazon SageMaker Ground Truth documentation has more details on the management of private workteams.

Step 2c - Create and Start the Amazon SageMaker Ground Truth Labeling job
Setup a Labeling Job
1. Go to AWS Console > Amazon SageMaker > Labeling Jobs (under the Ground Truth heading on the left pane)
2. Click on Create Labeling Job
3. Execute the cell below and use the labeling job name displayed for the Job Name
4. For the Input Dataset Location, provide the S3 location of the manifest file you created in Step 2a
5. Provide a S3 bucket with an "output" prefix (for example, s3://bucket-name/output) in the Output Dataset Location
6. In the IAM role field, choose - Create a new Role, select "Any S3 Bucket" and click Create
7. In the Task Type field, select - Text and select - Named Entity Recognition
8. Click on Next at the bottom of the page
9. In the next page, select worker type as "Private"
10. In the Private Teams listbox, select the team you created in Step 2b
11. In the Named Entity Recognition Labeling Tool section, do the following:
 a. In the text box that says "Enter a brief description of the task", type "Highlight the word or group of words and select the corresponding most appropriate label from the right"
 b. In the box on the left, clear out the instructions and type "Your labeling will be used to train a Machine Learning model for predictions. Please think carefully on the most appropriate label for the word selection. Remember to highlight at least 10 entries for each Label" and select Bold Italics
 c. In the Labels section, type the Label names you want to display to your workforce. As a best practice, provide 10+ Labels that your workforce will use.
 d. Click Create
Start Labeling
1. You/your workforce should have received an email as mentioned in point 8 in Step 2b above
2. Login to the URL provided with the username and password
3. This will take you to the Labeling Task UI. Complete the Labeling tasks by selecting labels for groups of words and clicking on Submit
4. When all entries have been labeled, the UI will automatically exit
5. Go back to AWS Console > Amazon SageMaker > Labeling Jobs and check the status of the Labeling Job
6. Please wait until the status reflects "Complete"
Verify annotation outputs
Go to the S3 bucket location mentioned in point 5 in Setup a Labeling Job above and review the outputs. Ground Truth creates several directories in your Amazon S3 output path. These directories contain the results of your labeling job and other artifacts of the job. The top-level directory for a labeling job is given the same name as your labeling job; the output directories are placed beneath it. Since we did not use Active Learning in our example, we will have 2 directories - Annotations and Manifests.
Annotations: The annotations directory contains all of the annotations made by the workforce. These are the responses from individual workers that have not been consolidated into a single label for the data object.
Manifests: The manifests directory contains the output manifest from your labeling job. There is one subdirectory in the manifest directory, output. The output directory contains the output manifest file for your labeling job. The file is named output.manifest.
Please go to your S3 bucket and navigate to "output/directory with your labeling job name/manifests/output/output.manifest" to review the annotated file.

Step 3 - Train an Amazon Comprehend AutoML model
We will now use the annotated dataset created by Amazon SageMaker Ground Truth in Step 2 to train an Amazon Comprehend Custom Entity Recognizer. We will have to make minor adjustments to the format of the annotated dataset to feed it as an input for training the Recognizer.

Step 3a - Process the annotated dataset
We will extract and transform the content we need from the annotated dataset. As per guidelines in the Amazon Comprehend documentation:
1. A minimum of 200 annotations are needed per entity to train a model for custom entity recognition.
2. It is important that your annotations be in a properly configured CSV file so your chance of having problems with your annotations file is minimal. The following must be true:
 a. UTF-8 encoding must be explicitly specified, even if it's used as a default in most cases.
 b. It must include the column names: File, Line, Begin Offset, End Offset, Type.
 c. We highly recommend that CSV input files are generated programmatically to avoid potential issues.
Note: If you don't have a manifest file, you can use the output.manifest created for this notebook from the data folder. For more details on Amazon Comprehend Custom Entity Recognizer inputs, refer to this link.
###Code
# Let's download the output.manifest file for format conversion
labeling_job_name = '<your labeling job name>'
s3.download_file(BUCKET, 'output/' + labeling_job_name + '/manifests/output/output.manifest', 'data/groundtruth/output.manifest')
###Output
_____no_output_____
###Markdown
Let's use the Python csv module to create an annotations file by parsing the output manifest created by Ground Truth
###Code
# Read the output manifest json and convert into a csv format as expected by Amazon Comprehend Custom Entity Recognizer
import json
import csv
# Here we specify the labeling job name from the output.manifest file - the one below is what was used in the example output.manifest included
labeling_job_name = 'a2i-comprehend-gtner7c0bf6de-b5a1-11ea-bf5f-12acee018271'
# this will be the file that will be written by the format conversion code block below
csvout = 'annotations.csv'
with open(csvout, 'w', encoding="utf-8") as nf:
csv_writer = csv.writer(nf)
csv_writer.writerow(["File", "Line", "Begin Offset", "End Offset", "Type"])
with open("data/groundtruth/output.manifest", "r") as fr:
for num, line in enumerate(fr.readlines()):
lj = json.loads(line)
#print(str(lj))
if lj and labeling_job_name in lj:
for ent in lj[labeling_job_name]['annotations']['entities']:
csv_writer.writerow([fntrain,num,ent['startOffset'],ent['endOffset'],ent['label'].upper()])
fr.close()
nf.close()
s3_annot_key = "output/" + labeling_job_name + "/comprehend/" + csvout
upload_to_s3(s3_annot_key, csvout)
###Output
_____no_output_____
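###Markdown
Before moving on, it can help to sanity-check the guideline above that each entity type has at least 200 annotations. The short check below is a sketch (not part of the original flow) that simply counts the rows we just wrote to annotations.csv per entity type.
###Code
# Sanity check (sketch): count annotations per entity type in the CSV we just wrote
import pandas as pd

annotations_df = pd.read_csv(csvout, encoding="utf-8")
counts = annotations_df["Type"].value_counts()
print(counts)
# Flag any entity type below the documented minimum of 200 annotations
print(counts[counts < 200])
###Output
_____no_output_____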
###Markdown
Step 3b - Setup an Amazon Comprehend Custom Entity Recognizer
Amazon Comprehend's custom entity recognition enables you to analyze your documents to find entities specific to your needs, rather than limiting you to the preset entity types already available. You can identify almost any kind of entity, simply by providing a sufficient number of details to train your model effectively.
The training process usually requires extensive knowledge of machine learning (ML) and a complex process for model optimization. Amazon Comprehend automates this for you using a technique called transfer learning to build your model on a sophisticated general-purpose entity recognition model framework. With this in place, all you need to supply is the data. However, it's important that you supply it with high-quality data as input. Without good data the model won't learn how to correctly identify entities.
You can choose one of two ways to provide data to Amazon Comprehend in order to train a custom entity recognition model:
**Annotations** — This uses an annotation list that provides the location of your entities in a large number of documents, so Amazon Comprehend can train on both the entity and its context.
**Entity Lists** — This provides only limited context, and uses only a list of the specific entities, so Amazon Comprehend can train to identify the custom entity.
For our experiment, we created an annotation manifest using an Amazon SageMaker Ground Truth Named Entity Recognizer labeling job in **Step 2c** and formatted it to the CSV structure that the Amazon Comprehend Custom Entity Recognizer expects in **Step 3a**.
###Code
comprehend = boto3.client('comprehend')
s3_train_channel = 's3://{}/{}'.format(BUCKET, s3_train_key)
s3_annot_channel = 's3://{}/{}'.format(BUCKET, s3_annot_key)
custom_entity_request = {
"Documents": {
"S3Uri": s3_train_channel
},
"Annotations": {
"S3Uri": s3_annot_channel
},
"EntityTypes": [
{
"Type": "SERVICE"
},
{
"Type": "VERSION"
}
]
}
###Output
_____no_output_____
###Markdown
Create the Entity Recognizer
###Code
import datetime
id = str(datetime.datetime.now().strftime("%s"))
create_custom_entity_response = comprehend.create_entity_recognizer(
RecognizerName = prefix + "-CER",
DataAccessRoleArn = role,
InputDataConfig = custom_entity_request,
LanguageCode = "en"
)
###Output
_____no_output_____
###Markdown
Let's review the status of the training job in 1-minute increments. For a sample of 1,000 entries, training should typically complete within 15 minutes.
###Code
jobArn = create_custom_entity_response['EntityRecognizerArn']
max_time = time.time() + 3*60*60 # 3 hours
while time.time() < max_time:
describe_custom_recognizer = comprehend.describe_entity_recognizer(
EntityRecognizerArn = jobArn
)
status = describe_custom_recognizer["EntityRecognizerProperties"]["Status"]
print("Custom entity recognizer: {}".format(status))
if status == "TRAINED" or status == "IN_ERROR":
break
time.sleep(60)
###Output
_____no_output_____
###Markdown
Step 3c - Test the Amazon Comprehend Custom Entity Recognizer
We now use the StartEntitiesDetectionJob operation to detect custom entities in our documents. Using this operation, you provide the same information as you would when detecting preset entities. However, in addition to the input and output locations (S3 buckets), you also provide the EntityRecognizerArn, which is the Amazon Resource Name (ARN) of the trained model. This ARN is supplied by the response to the CreateEntityRecognizer operation.
You can examine one document or many, and each model can be trained on up to 12 custom entities at a time. You can search for up to 12 entities per StartEntitiesDetectionJob operation.
Let's first look at the Recognizer metrics.
###Code
print(json.dumps(describe_custom_recognizer["EntityRecognizerProperties"]["RecognizerMetadata"]["EntityTypes"], indent=2, default=str))
###Output
_____no_output_____
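###Markdown
The JSON above can be a little hard to scan. As a sketch (assuming pandas is available in this kernel), the per-entity evaluation metrics can be flattened into a small table for easier comparison.
###Code
# Flatten the per-entity evaluation metrics into a table (sketch)
import pandas as pd

entity_metrics = describe_custom_recognizer["EntityRecognizerProperties"]["RecognizerMetadata"]["EntityTypes"]
pd.json_normalize(entity_metrics)
###Output
_____no_output_____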
###Markdown
With more training samples and annotations per entity type, we can improve the Evaluation Metrics for our Recognizer. Please refer to this link for best practices during training. Next, execute the entity detection job to get some predictions on our test dataset.
###Code
s3_test_channel = 's3://{}/{}'.format(BUCKET, s3_test_key)
s3_output_test_data = 's3://{}/{}'.format(BUCKET, "output/testresults/")
test_response = comprehend.start_entities_detection_job(
InputDataConfig={
'S3Uri': s3_test_channel,
'InputFormat': 'ONE_DOC_PER_LINE'
},
OutputDataConfig={
'S3Uri': s3_output_test_data
},
DataAccessRoleArn=role,
JobName='a2i-comprehend-gt-blog',
EntityRecognizerArn=jobArn,
LanguageCode='en'
)
###Output
_____no_output_____
###Markdown
Let's monitor the status of our job at 1-minute intervals
###Code
jobId = test_response['JobId']
max_time = time.time() + 3*60*60 # 3 hours
while time.time() < max_time:
describe_job = comprehend.describe_entities_detection_job(
JobId = jobId
)
status = describe_job["EntitiesDetectionJobProperties"]["JobStatus"]
print("Job Status: {}".format(status))
if status == "COMPLETED" or status == "FAILED":
break
time.sleep(60)
###Output
_____no_output_____
###Markdown
Let's review the test results
###Code
#Download the test output to this notebook
job_output = describe_job["EntitiesDetectionJobProperties"]["OutputDataConfig"]["S3Uri"]
path_prefix = 's3://{}/'.format(BUCKET)
job_key = os.path.relpath(job_output, path_prefix)
s3res.Bucket(BUCKET).download_file(job_key, 'output.tar.gz')
!tar xvzf output.tar.gz
#Display the results from the detection
import json
data = []
for line in open('output', 'r'):
entities = json.loads(line)['Entities']
file = json.loads(line)['File']
ln = json.loads(line)['Line']
rd = open(file, 'r', encoding='utf-8')
nr = rd.readlines()
orig_text = nr[ln]
#print(line)
if entities != None and len(entities) > 0:
data.append({'ORIGINAL_TEXT': orig_text, 'CONFIDENCE_SCORE': round(entities[0]['Score']*100,0),'END_OFFSET': entities[0]['EndOffset'], 'BEGIN_OFFSET': entities[0]['BeginOffset'], 'SELECTED_TEXT': entities[0]['Text'], 'ENTITY': entities[0]['Type']})
rd.close()
for line in data:
print(line)
###Output
_____no_output_____
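###Markdown
Note that the loop above only keeps the first entity found on each line. As a sketch using the same `output` file, the variation below collects every detected entity instead, which can be handy when a line mentions both a SERVICE and a VERSION.
###Code
# Sketch: collect every entity per line instead of only the first one
all_detections = []
for line in open('output', 'r'):
    record = json.loads(line)
    for ent in (record.get('Entities') or []):
        all_detections.append({
            'File': record['File'],
            'Line': record['Line'],
            'Text': ent['Text'],
            'Type': ent['Type'],
            'Score': round(ent['Score'] * 100, 1)
        })
print('Total detections:', len(all_detections))
###Output
_____no_output_____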
###Markdown
Step 4 - Setup a Human Review loop for low confidence detections using Amazon Augmented AI
Amazon Augmented AI (Amazon A2I) makes it easy to build the workflows required for human review of ML predictions. Amazon A2I brings human review to all developers, removing the undifferentiated heavy lifting associated with building human review systems or managing large numbers of human reviewers.
To incorporate Amazon A2I into your human review workflows, you need three resources:
**A worker task template** to create a worker UI. The worker UI displays your input data, such as documents or images, and instructions to workers. It also provides interactive tools that the worker uses to complete your tasks. For more information, see the A2I instructions overview.
**A human review workflow**, also referred to as a flow definition. You use the flow definition to configure your human workforce and provide information about how to accomplish the human review task. For built-in task types, you also use the flow definition to identify the conditions under which a human review loop is triggered. For example, Amazon Rekognition can perform image content moderation using machine learning. You can use the flow definition to specify that an image will be sent to a human for content moderation review if Amazon Rekognition's confidence score output is low for your use case. You can create a flow definition in the Amazon Augmented AI console or with the Amazon A2I APIs. To learn more about both of these options, see create flow definition.
**A human loop** to start your human review workflow. When you use one of the built-in task types, the corresponding AWS service creates and starts a human loop on your behalf when the conditions specified in your flow definition are met, or for each object if no conditions were specified. When a human loop is triggered, human review tasks are sent to the workers as specified in the flow definition.
When using a custom task type, you start a human loop using the Amazon Augmented AI Runtime API. When you call StartHumanLoop in your custom application, a task is sent to human reviewers.

Step 4a - Workteam or Workforce setup
A workforce is the group of workers that you have selected to label your dataset. You can choose either the Amazon Mechanical Turk workforce, a vendor-managed workforce, or you can create your own private workforce for human reviews. Whichever workforce type you choose, Amazon Augmented AI takes care of sending tasks to workers.
When you use a private workforce, you also create work teams, a group of workers from your workforce that are assigned to Amazon Augmented AI human review tasks. You can have multiple work teams and can assign one or more work teams to each job.
To create your Workteam, visit the instructions here. After you have created your workteam, replace YOUR_WORKTEAM_ARN below.
###Code
import botocore
REGION = 'us-east-1'
WORKTEAM_ARN= "<Enter your workteam ARN>"
###Output
_____no_output_____
###Markdown
Let's set up the Amazon SageMaker and Amazon A2I Runtime clients.
###Code
import boto3
import io
import json
import uuid
import botocore
import time
import botocore
# Amazon SageMaker client
sagemaker = boto3.client('sagemaker', REGION)
# A2I Runtime client
a2i_runtime_client = boto3.client('sagemaker-a2i-runtime', REGION)
import pprint
# Pretty print setup
pp = pprint.PrettyPrinter(indent=2)
# Function to pretty-print AWS SDK responses
def print_response(response):
if 'ResponseMetadata' in response:
del response['ResponseMetadata']
pp.pprint(response)
###Output
_____no_output_____
###Markdown
Step 4b - Create Human Task UI
Create a human task UI resource, giving a UI template in Liquid HTML. This template will be rendered to the human workers whenever a human loop is required.
Below we've provided a simple demo template that is compatible with Amazon Comprehend entity detection.
For over 70 pre-built UIs, check: https://github.com/aws-samples/amazon-a2i-sample-task-uis
###Code
template = """
<script src="https://assets.crowd.aws/crowd-html-elements.js"></script>
<style>
.highlight {
background-color: yellow;
}
</style>
<crowd-entity-annotation
name="crowd-entity-annotation"
header="Highlight parts of the text below"
labels="[{'label': 'service', 'fullDisplayName': 'Service'}, {'label': 'version', 'fullDisplayName': 'Version'}]"
text="{{ task.input.originalText }}"
>
<full-instructions header="Named entity recognition instructions">
<ol>
<li><strong>Read</strong> the text carefully.</li>
<li><strong>Highlight</strong> words, phrases, or sections of the text.</li>
<li><strong>Choose</strong> the label that best matches what you have highlighted.</li>
<li>To <strong>change</strong> a label, choose highlighted text and select a new label.</li>
<li>To <strong>remove</strong> a label from highlighted text, choose the X next to the abbreviated label name on the highlighted text.</li>
<li>You can select all of a previously highlighted text, but not a portion of it.</li>
</ol>
</full-instructions>
<short-instructions>
Select the word or words in the displayed text corresponding to the entity, label it and click submit
</short-instructions>
<div id="recognizedEntities" style="margin-top: 20px">
<h3>Label the Entity below in the text above</h3>
<p>{{ task.input.entities }}</p>
</div>
</crowd-entity-annotation>
<script>
function highlight(text) {
var inputText = document.getElementById("inputText");
var innerHTML = inputText.innerHTML;
var index = innerHTML.indexOf(text);
if (index >= 0) {
innerHTML = innerHTML.substring(0,index) + "<span class='highlight'>" + innerHTML.substring(index,index+text.length) + "</span>" + innerHTML.substring(index + text.length);
inputText.innerHTML = innerHTML;
}
}
document.addEventListener('all-crowd-elements-ready', () => {
document
.querySelector('crowd-entity-annotation')
.shadowRoot
.querySelector('crowd-form')
.form
.appendChild(recognizedEntities);
});
</script>
"""
###Output
_____no_output_____
###Markdown
Step 4c - Create a Worker Task Template Creator Function
This function is a higher-level abstraction over the SageMaker client's method to create the Worker Task Template, which we will use in the next step to create a human review workflow.
###Code
def create_task_ui():
'''
Creates a Human Task UI resource.
Returns:
struct: HumanTaskUiArn
'''
response = sagemaker.create_human_task_ui(
HumanTaskUiName=taskUIName,
UiTemplate={'Content': template})
return response
# Task UI name - this value is unique per account and region. You can also provide your own value here.
taskUIName = prefix + '-ui'
# Create task UI
humanTaskUiResponse = create_task_ui()
humanTaskUiArn = humanTaskUiResponse['HumanTaskUiArn']
print(humanTaskUiArn)
###Output
_____no_output_____
###Markdown
Step 4d - Creating the Flow Definition
In this section, we're going to create a flow definition. Flow definitions allow us to specify:
- The workforce that your tasks will be sent to.
- The instructions that your workforce will receive. This is called a worker task template.
- Where your output data will be stored.
This demo is going to use the API, but you can optionally create this workflow definition in the console as well.
For more details and instructions, see: https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-create-flow-definition.html.
###Code
# Flow definition name - this value is unique per account and region. You can also provide your own value here.
flowDefinitionName = prefix + '-fd-a2i'
create_workflow_definition_response = sagemaker.create_flow_definition(
FlowDefinitionName= flowDefinitionName,
RoleArn= role,
HumanLoopConfig= {
"WorkteamArn": WORKTEAM_ARN,
"HumanTaskUiArn": humanTaskUiArn,
"TaskCount": 1,
"TaskDescription": "Label the context of words in the providex text as PERSON or THING",
"TaskTitle": "Detect Context of words in Text"
},
OutputConfig={
"S3OutputPath" : "s3://"+BUCKET+"/output"
}
)
flowDefinitionArn = create_workflow_definition_response['FlowDefinitionArn'] # let's save this ARN for future use
# Describe flow definition - status should be active
for x in range(60):
describeFlowDefinitionResponse = sagemaker.describe_flow_definition(FlowDefinitionName=flowDefinitionName)
print(describeFlowDefinitionResponse['FlowDefinitionStatus'])
if (describeFlowDefinitionResponse['FlowDefinitionStatus'] == 'Active'):
print("Flow Definition is active")
break
time.sleep(2)
print(flowDefinitionArn)
###Output
_____no_output_____
###Markdown
Let's set up the condition for triggering the human loop review
###Code
#Display the results from the detection
human_loops_started = []
import json
CONFIDENCE_SCORE_THRESHOLD = 90
for line in data:
print("Line is: " + str(line))
begin_offset=line['BEGIN_OFFSET']
end_offset=line['END_OFFSET']
if(line['CONFIDENCE_SCORE'] < CONFIDENCE_SCORE_THRESHOLD):
humanLoopName = str(uuid.uuid4())
human_loop_input = {}
human_loop_input['labels'] = line['ENTITY']
human_loop_input['entities']= line['ENTITY']
human_loop_input['originalText'] = line['ORIGINAL_TEXT']
start_loop_response = a2i_runtime_client.start_human_loop(
HumanLoopName=humanLoopName,
FlowDefinitionArn=flowDefinitionArn,
HumanLoopInput={
"InputContent": json.dumps(human_loop_input)
}
)
print(human_loop_input)
human_loops_started.append(humanLoopName)
print(f'Score is less than the threshold of {CONFIDENCE_SCORE_THRESHOLD}')
print(f'Starting human loop with name: {humanLoopName} \n')
else:
print('No human loop created. \n')
###Output
_____no_output_____
###Markdown
Step 4e - Check human loop status and wait for reviewers to complete the task
Let's check the status of the human loops we started above.
###Code
completed_human_loops = []
for human_loop_name in human_loops_started:
resp = a2i_runtime_client.describe_human_loop(HumanLoopName=human_loop_name)
print(f'HumanLoop Name: {human_loop_name}')
print(f'HumanLoop Status: {resp["HumanLoopStatus"]}')
print(f'HumanLoop Output Destination: {resp["HumanLoopOutput"]}')
print('\n')
if resp["HumanLoopStatus"] == "Completed":
completed_human_loops.append(resp)
###Output
_____no_output_____
###Markdown
Wait For workers to complete the tasks
###Code
workteamName = WORKTEAM_ARN[WORKTEAM_ARN.rfind('/') + 1:]
print("Navigate to the private worker portal and do the tasks. Make sure you've invited yourself to your workteam!")
print('https://' + sagemaker.describe_workteam(WorkteamName=workteamName)['Workteam']['SubDomain'])
###Output
_____no_output_____
###Markdown
Check status of Human Loop again
###Code
completed_human_loops = []
for human_loop_name in human_loops_started:
resp = a2i_runtime_client.describe_human_loop(HumanLoopName=human_loop_name)
print(f'HumanLoop Name: {human_loop_name}')
print(f'HumanLoop Status: {resp["HumanLoopStatus"]}')
print(f'HumanLoop Output Destination: {resp["HumanLoopOutput"]}')
print('\n')
if resp["HumanLoopStatus"] == "Completed":
completed_human_loops.append(resp)
###Output
_____no_output_____
###Markdown
Let's review the annotation output from our A2I labeling task
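Before pulling a specific record, we can list the completed human-loop outputs under the flow definition's output prefix and pick one to inspect. This is a sketch that assumes the same BUCKET and flowDefinitionName used earlier; the exact prefix also includes the date the loop completed.
###Code
# Sketch: list output.json objects written by completed human loops
s3 = boto3.client('s3')
paginator = s3.get_paginator('list_objects_v2')
output_prefix = 'output/' + flowDefinitionName + '/'
for page in paginator.paginate(Bucket=BUCKET, Prefix=output_prefix):
    for obj in page.get('Contents', []):
        if obj['Key'].endswith('output.json'):
            print(obj['Key'])
###Output
_____no_output_____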
###Code
# I selected the last output.json entry from the list of human loops that were completed above
s3 = boto3.client('s3')
s3obj = s3.get_object(Bucket=BUCKET, Key='output/a2i-comprehend-gtner9dcaba0e-1928-11eb-94ff-f91d18329acb-fd-a2i/2020/10/28/15/49/36/d1b9b32c-4b97-47fd-9f48-ca349f8a1278/output.json')
s3data = s3obj['Body'].read().decode('utf-8')
abc = json.loads(s3data)
print(str(abc['humanAnswers']))
###Output
_____no_output_____ |
cnn_pedestrians/cnn_detect_pedestrians.ipynb | ###Markdown
Pedestrian Detection Using a CNN
Imports
###Code
import numpy as np
import cv2
import matplotlib.pyplot as plt
from keras.models import load_model
###Output
Using TensorFlow backend.
###Markdown
Load the trained model file
###Code
model = load_model('dpcnn.h5')
###Output
_____no_output_____
###Markdown
Search with a 128 x 64 window at a 16-pixel step
###Code
hroi = 128
wroi = 64
step = 16
image = cv2.imread('pedestrian_test.jpg')
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
guide_img = np.zeros((image.shape[0],image.shape[1],3), np.uint8)
detect_img = np.zeros((image.shape[0],image.shape[1],3), np.uint8)
heatmap = np.zeros_like(image[:,:,0]).astype(np.float)
wroi_count = (image.shape[1] - wroi) // step
hroi_count = (image.shape[0] - hroi) // step
for i in range(wroi_count):
startx = i * step
endx = wroi + i * step
for j in range(hroi_count):
# Scan the image with the 128 x 64 window, moving 16 pixels per step
starty = j * step
endy = hroi + j * step
# Select the region of interest and run the prediction
roi = image[starty:endy, startx:endx]
Y_hats = model.predict(np.array([roi,]))
y_hats = Y_hats.argmax(axis=-1)
test_prediction = y_hats[0]
# Draw the search grid
cv2.rectangle(guide_img, (startx, starty), (endx, endy), (0,255,0), 1)
# When a pedestrian is predicted, draw the detection box and add to the heatmap
if test_prediction == 1:
cv2.rectangle(detect_img, (startx, starty), (endx, endy), (255,0, 0), 2)
heatmap[starty:endy, startx:endx] += 1
###Output
_____no_output_____
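###Markdown
Calling model.predict once per window is slow. As an alternative sketch (same model, image, and window settings as above), all windows can be collected first and predicted in a single batched call; run it instead of the cell above, not in addition to it, so the heatmap is not counted twice.
###Code
# Batched sliding-window scan (sketch): gather every ROI, then predict once
windows, coords = [], []
for i in range(wroi_count):
    startx, endx = i * step, wroi + i * step
    for j in range(hroi_count):
        starty, endy = j * step, hroi + j * step
        windows.append(image[starty:endy, startx:endx])
        coords.append((startx, starty, endx, endy))

preds = model.predict(np.array(windows)).argmax(axis=-1)
for (startx, starty, endx, endy), p in zip(coords, preds):
    cv2.rectangle(guide_img, (startx, starty), (endx, endy), (0, 255, 0), 1)
    if p == 1:
        cv2.rectangle(detect_img, (startx, starty), (endx, endy), (255, 0, 0), 2)
        heatmap[starty:endy, startx:endx] += 1
###Output
_____no_output_____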
###Markdown
Display the guide image
###Code
added_img = cv2.addWeighted(image, 1, guide_img, 1, 0)
plt.figure(figsize = (5,5))
plt.imshow(added_img)
###Output
_____no_output_____
###Markdown
Draw the grid over image regions predicted as pedestrians
###Code
added_img = cv2.addWeighted(added_img, 1, detect_img, 1, 0)
plt.figure(figsize = (5,5))
plt.imshow(added_img)
###Output
_____no_output_____
###Markdown
Show the heatmap of regions predicted as pedestrians
###Code
plt.figure(figsize = (5,5))
plt.imshow(heatmap, cmap='gray')
###Output
_____no_output_____
###Markdown
Set heatmap values of 1 or less to 0
###Code
heatmap[heatmap <= 1] = 0
plt.figure(figsize = (5,5))
plt.imshow(heatmap, cmap='gray')
heatmap = np.clip(heatmap, 0, 1)
plt.figure(figsize = (5,5))
plt.imshow(heatmap, cmap='gray')
###Output
_____no_output_____
###Markdown
Labeling: separate the connected regions and count them
###Code
from scipy.ndimage.measurements import label
labels = label(heatmap)
plt.figure(figsize = (5,5))
plt.imshow(labels[0], cmap='gray')
###Output
_____no_output_____
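###Markdown
scipy's label() returns both the labeled array and the number of connected regions, so the detection count can be printed directly (a small addition for clarity).
###Code
# labels[1] holds the number of connected regions, i.e. the number of detections
print('Detected regions:', labels[1])
###Output
_____no_output_____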
###Markdown
Draw the labeled regions on the image as rectangles
###Code
def draw_labeled_bboxes(img, labels):
box_list = []
# Iterate through all detected pedestrians
for index in range(1, labels[1]+1):
# Find pixels with each index label value
nonzero = (labels[0] == index).nonzero()
# Identify x and y values of those pixels
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Define a bounding box based on min/max x and y
bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))
# Draw the box on the image
# could be [0,255] or [0,1] depending how the file was read in
cv2.rectangle(img, bbox[0], bbox[1], (0,0,255), 3)
box_list.append(bbox)
return img, box_list
###Output
_____no_output_____
###Markdown
Labeled result image
###Code
result_img, box_list = draw_labeled_bboxes(image, labels)
plt.figure(figsize = (5,5))
plt.imshow(result_img)
###Output
_____no_output_____ |
2.1. Getting my NN Training Data with Doc2Vec.ipynb | ###Markdown
Getting my NN Training Data With Doc2Vec
###Code
import pandas as pd
import numpy as np
# Word Embeddings
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
import gensim
print(f'gensim: {gensim.__version__}')
# Text
from nltk.tokenize import word_tokenize
from nltk.tokenize import TweetTokenizer
from gensim.test.utils import common_texts
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from gensim.test.utils import get_tmpfile
# Storing as objects via serialization
from tempfile import mkdtemp
import pickle
import joblib
# Visualization
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="ticks", color_codes=True)
# Directory
import os
import yaml
import collections
import scattertext as st
import math
# Cool progress bars
from tqdm import tqdm_notebook as tqdm
tqdm().pandas() # Enable tracking of execution progress
## LOADING OBJECTS
processed_inbound = pd.read_pickle('objects/processed_inbound_extra.pkl')
processed = pd.read_pickle('objects/processed.pkl')
# Reading back in intents
with open(r'objects/intents.yml') as file:
intents = yaml.load(file, Loader=yaml.FullLoader)
# Previewing
print(f'\nintents:\n{intents}')
print(f'\nprocessed:\n{processed.head()}')
###Output
gensim: 3.8.0
###Markdown
Tweet Collection with Doc2Vec
I can use my Doc2Vec representation to find the top 1000 Tweets most similar to a generalized intent version of a Tweet based on its cosine similarity. Heuristic search refers to a search strategy that attempts to optimize a problem by iteratively improving the solution based on a given heuristic function or a cost measure. My cost measure is trying to get the closest cosine distances.
This is really cool. So I basically trained my doc2vec model with my training data, which is the processed_inbound. I can then compute a vector for any new piece of text using that trained model.

Training my Doc2Vec
This is a method that was developed for word2vec to generalize to paragraphs. Doc2Vec takes the average across the word vectors, and each tweet is represented as a single embedding so you have consistent dimensionality.
Word2Vec uses Continuous Bag of Words, which creates a sliding window around each word to predict it from context (surrounding words), and the Skip Gram model. Doc2Vec is based off that.
* https://medium.com/wisio/a-gentle-introduction-to-doc2vec-db3e8c0cce5e
* https://radimrehurek.com/gensim/models/doc2vec.html
* https://rare-technologies.com/doc2vec-tutorial/
Doc2Vec takes the standard word2vec model and adds in an extra vector to represent the paragraph - called a paragraph vector. It takes in sequences of words, uses them to predict the next word, and checks whether that prediction is correct, repeating this for many different combinations of words. It's the same idea as word2vec, but at a document level as opposed to a word level. My implementation below had its base from [here](https://medium.com/wisio/a-gentle-introduction-to-doc2vec-db3e8c0cce5e) and from scrolling through Gensim's documentation for a more granular understanding of each step.

My Intents:
Updates:
* Decided to remove lost_replace because it's quite indistinguishable from repair: most of the customers who lost something will technically also need to fix a problem as well.

Data Synthesis
Basically, there are two main ways I get my intent training data (1000 for each), plus a mix of the two:
* **Doc2Vec:** Some intent examples I will synthetically generate from an idealized example using doc2vec
* **Manual:** Some intent examples I will synthetically generate by duplicating manual examples (like greeting, because the current data does not represent this)
* **Hybrid:** Some intents I will do a hybrid approach, where 50 percent might be my generated data, and 50 percent might be manually inserted examples
###Code
# Making my idealized dataset - generating N Tweets similar to this artificial Tweet
# This will then be concatenated to current inbound data so it can be included in the doc2vec training
# Version 1
ideal = {'Greeting': 'hi hello yo hey whats up howdy morning',
'Update': 'have problem with update'}
# Version 2 - I realized that keywords might get the job done, and it's less risky to
# add more words for the association power because it's doc2vec
ideal = {'battery': 'battery power',
'forgot_password': 'password account login',
'payment': 'credit card payment pay',
'update': 'update upgrade',
'info': 'info information'
# ,'lost_replace': 'replace lost gone missing trade'
,'location': 'nearest apple location store'
}
def add_extra(current_tokenized_data, extra_tweets):
''' Adding extra tweets to current tokenized data'''
# Storing these extra Tweets in a list to concatenate to the inbound data
extra_tweets = pd.Series(extra_tweets)
# Making string form
print('Converting to string...')
string_processed_data = current_tokenized_data.progress_apply(" ".join)
# Adding it to the data, updating processed_inbound
string_processed_data = pd.concat([string_processed_data, extra_tweets], axis = 0)
# We want a tokenized version
tknzr = TweetTokenizer(strip_handles = True, reduce_len = True)
# print('Tokenizing...')
# string_processed_data.progress_apply(tknzr.tokenize)
return string_processed_data
# Getting the lengthened data
processed_inbound_extra = add_extra(processed['Processed Inbound'], list(ideal.values()))
# Saving updated processed inbound into a serialized saved file
processed_inbound_extra.to_pickle('objects/processed_inbound_extra.pkl')
processed_inbound_extra
processed_inbound_extra[-7:]
intents_repr
processed_inbound_extra.shape
ideal
processed.shape
###Output
_____no_output_____
###Markdown
I first tag each document, then I start training my model! It's like training a neural network. As for the parameters, I set each vector to be of 20 dimensions.
###Code
def train_doc2vec(string_data, max_epochs, vec_size, alpha):
# Tagging each document with an ID; I use the most memory-efficient option of just using its index as the ID
tagged_data = [TaggedDocument(words=word_tokenize(_d.lower()), tags=[str(i)])
for i, _d in enumerate(string_data)]
# Instantiating my model
model = Doc2Vec(size=vec_size, alpha=alpha, min_alpha=0.00025, min_count=1, dm =1)
model.build_vocab(tagged_data)
for epoch in range(max_epochs):
print('iteration {0}'.format(epoch))
model.train(tagged_data, total_examples = model.corpus_count, epochs=model.iter)
# Decrease the learning rate
model.alpha -= 0.0002
# Fix the learning rate, no decay
model.min_alpha = model.alpha
# Saving model
model.save("models/d2v.model")
print("Model Saved")
# Training
train_doc2vec(processed_inbound_extra, max_epochs = 100, vec_size = 20, alpha = 0.025)
# Loading in my model
model = Doc2Vec.load("models/d2v.model")
# Storing my data into a list - this is the data I will cluster
inbound_d2v = np.array([model.docvecs[i] for i in range(processed_inbound_extra.shape[0])])
# Saving
with open('objects/inbound_d2v.pkl', 'wb') as f:
pickle.dump(inbound_d2v, f)
inbound_d2v
inbound_d2v.shape
###Output
_____no_output_____
###Markdown
Before, we did not have a concept of distance in our vectorizers; the vectors don't really have a specific meaning. This is a much better way because it captures the contextual representations between words! My clustering should be a lot better than tfidf or bag of words.

What was doc2vec trained on?
One thing to keep in mind is what this embedding is trained on. We don't want it to be trained on something like academic data, because Tweets are in a completely different domain than academic papers. Looking at the gensim [documentation](https://radimrehurek.com/gensim/models/doc2vec.html) for doc2vec, it seems to be trained the same way as word2vec, except now they use a paragraph context vector in addition. This means it was likely trained on Google News.

Methodology
Initially, to get the top 1000 similar Tweets, I tried using the existing data. But I don't think that would yield the most accurate results, because you're not capturing the best representative Tweet for an intent. For that reason, I made all these base representative Tweets myself (you can see this in the `ideal` dict above). The goal is to find an idealized, holistic representation of an intent. Then from there I use my doc2vec representations to find the top 1000 tweets most similar based on cosine similarity.

Package Exploration
###Code
# Finding and making idealized versions of each intent so that I can find top 1000 to it:
intents_ideal = {'app': ['app', 'prob']}
inferred_vectors = []
for keywords in intents_ideal.values():
inferred_vectors.append(model.infer_vector(keywords))
inferred_vectors
# model.similarity(inferred_vectors[0], inbound_d2v[0])
'hi hello yo hey whats up'.split()
###Output
_____no_output_____
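###Markdown
The commented-out model.similarity call above expects vocabulary keys rather than raw vectors, so to compare an inferred intent vector against a stored document vector we can just compute cosine similarity directly with numpy (a small sketch).
###Code
# Sketch: cosine similarity between an inferred intent vector and a stored doc vector
import numpy as np

def cosine_sim(u, v):
    return np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))

print(cosine_sim(inferred_vectors[0], inbound_d2v[0]))
###Output
_____no_output_____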
###Markdown
Finding Intent Tags
I want to get the tags of my representative Tweets because that's what doc2vec's `docvecs.most_similar` method takes as a parameter to generate the top N Tweets similar to them.
###Code
ideal
# Getting the indexes
###Output
_____no_output_____
###Markdown
The following code block isn't the most efficient code, and it's quite lengthy to figure out, but it works! It basically searches all my processed inbound tweets and looks for the tag of my representative tweets, as shown in my output and my `intents_tags` dictionary.
###Code
# Storing my representative tweets and intents in this dictionary
# Just need to add to this dictionary and the rest of the code block does the work for you
# To find a suitable representative tweet for this: I used the keyword EDA functions in notebook 1.1
# Version 1
intents_repr = {'Battery': ['io', 'drain', 'battery', 'iphone', 'twice', 'fast', 'io', 'help'],
'Update': ['new', 'update', 'i️', 'make', 'sure', 'download', 'yesterday'],
'iphone': ['instal', 'io', 'make', 'iphone', 'slow', 'work', 'properly', 'help'],
'app': ['app', 'still', 'longer', 'able', 'control', 'lockscreen'],
'mac': ['help','mac','app','store','open','can','not','update','macbook','pro','currently','run','o','x',
'yosemite'], 'greeting': ['hi', 'hello', 'yo', 'hey', 'whats', 'up']
}
# You could see that in version 1, I try to use existing tweets, but that isn't really the best strategy and
# it doesn't yield the best results
# Version 2
tknzr = TweetTokenizer(strip_handles = True, reduce_len = True)
## Just tokenizing all of ideal's values so they can be fed into the matching function
# intents_repr = dict(zip(ideal.keys(), [tknzr.tokenize(v) for v in ideal.values()]))
# Pythonic way
intents_repr = {k:tknzr.tokenize(v) for k, v in ideal.items()}
print(intents_repr)
# Saving intents_repr into YAML
with open('objects/intents_repr.yml', 'w') as outfile:
yaml.dump(intents_repr, outfile, default_flow_style=False)
# Storing tags in order of the dictionary above
tags = []
tokenized_processed_inbound = processed_inbound.apply(tknzr.tokenize)
# Find the index locations of specific Tweets
def report_index_loc(tweet, intent_name):
''' Takes in the Tweet to find the index for and returns a report of that Tweet index along with what the
representative Tweet looks like'''
try:
tweets = []
for i,j in enumerate(tokenized_processed_inbound):
if j == tweet:
tweets.append((i, True))
else:
tweets.append((i, False))
index = []
get_index = [index.append(i[0]) if i[1] == True else False for i in tweets] # Comprehension saves space
preview = processed_inbound.iloc[index]
# Appending to indexes for dictionary
tags.append(str(index[0]))
except IndexError as e:
print('Index not in list, move on')
return
return intent_name, str(index[0]), preview
# Reporting and storing indexes with the function
print('TAGGED INDEXES TO LOOK FOR')
for j,i in intents_repr.items():
try:
print('\n{} \nIndex: {}\nPreview: {}'.format(*report_index_loc(i,j)))
except Exception as e:
print('Index ended')
# Pythonic way of making new dictionary from 2 lists
intents_tags = dict(zip(intents_repr.keys(), tags))
intents_tags
# Great! Now I can get the training data for my battery intent (as an example)
similar_doc = model.docvecs.most_similar('76066',topn = 1000)
# Preview
similar_doc[:5]
similar_doc = model.docvecs.most_similar('76070',topn = 1000)
similar_doc
###Output
_____no_output_____
###Markdown
Training Data Synthesis
1. Adding intents to training data based on similarity
As can be seen above, the right tuple element is the cosine similarity. We are just taking the top 1000 similar to a base, idealized version of each intent (which we based mostly off keywords).
2. Adding the intents manually
These ones are generated with a different method that is more manual. I will generate as many examples of this as I can, then I brute force it by duplicating them until they reach 1000 training examples, preserving class balance. Once again, here are all the intents I want to add:
3. Adding in the hybrid intents
I use my Keyword Exploration shown in the previous notebook and find that there is a lot of overlap between update and repair. So for both I am going to generate a proportion of them using doc2vec, and the rest I will manually insert examples - the idea is to balance overfitting or noise against putting in the correct signal.
_One special case might be out of scope; I might find an alternative way to deal with that because I cannot generate all the examples of that intent._
Step 4 is converting the data into the long format that the NN can be fed, and the last step is to save it.
I was made aware of the catastrophic forgetting problem from the spaCy docs, where you aren't supposed to iterate over the same values, because doing that effectively changes the loss function and you will make a model that cannot generalize well. This is an empirical process in the end, because I have to experiment with what works best.
###Code
# Checking for stopwords because I don't want to include them in the manually representative intents
# This is something that I manually tune to the dataframe (for step 2 of this process)
import nltk
from nltk.corpus import stopwords
stopwords.words('english').index('to')
intents_tags
model.docvecs.most_similar('10')
intents_tags
###Output
_____no_output_____
###Markdown
prompt the user for update or broken.
###Code
# Testing how to tokenize numpy array
vals = [word_tokenize(tweet) for tweet in list(processed_inbound.iloc[[10,1]].values)]
vals
## Getting top n tweets similar to the 0th Tweet
# This will return a list of tuples (i,j) where i is the index and j is
# the cosine similarity to the tagged document index
# Storing all intents in this dataframe
train = pd.DataFrame()
# intent_indexes = {}
# 1. Adding intent content based on similarity
def generate_intent(target, itag):
similar_doc = model.docvecs.most_similar(itag,topn = target)
# Getting just the indexes
indexes = [int(i[0]) for i in similar_doc]
# intent_indexes[intent_name] = indexes
# Actually seeing the top 1000 Tweets similar to the 0th Tweet which seems to be about updates
# Adding just the values, not the index
# Tokenizing the output
return [word_tokenize(tweet) for tweet in list(processed_inbound.iloc[indexes].values)]
# Updating train data
for intent_name, itag in intents_tags.items():
train[intent_name] = generate_intent(1000, itag)
# 2. Manually added intents
# These are the remainder intents
manually_added_intents = {
'speak_representative': [['talk','human','please'],
['let','me','talk','to','apple','support'],
['can','i','speak','agent','person']],
'greeting': [['hi'],['hello'], ['whats','up'], ['good','morning'],
['good','evening'], ['good','night']],
'goodbye': [['goodbye'],['bye'],['thank'],['thanks'], ['done']],
'challenge_robot': [['robot','human'], ['are','you','robot'],
['who','are','you']]
}
# Inserting manually added intents to data
def insert_manually(target, prototype):
''' Taking a prototype tokenized document to repeat until
you get length target'''
factor = math.ceil(target / len(prototype))
content = prototype * factor
return [content[i] for i in range(target)]
# Updating training data
for intent_name in manually_added_intents.keys():
train[intent_name] = insert_manually(1000, [*manually_added_intents[intent_name]])
# 3. Adding in the hybrid intents
hybrid_intents = {'update':(300,700,[['want','update'], ['update','not','working'],
['phone','need','update']],
intents_tags['update']),
'info': (800,200, [['need','information'],
['want','to','know','about'], ['what','are','macbook','stats'],
['any','info','next','release','?']],
intents_tags['info']),
'payment': (300,700, [['payment','not','through'],
['iphone', 'apple', 'pay', 'but', 'not', 'arrive'],
['how','pay','for', 'this'],
['can','i','pay','for','this','first']],
intents_tags['payment']),
'forgot_password': (600,400, [['forgot','my','pass'], ['forgot','my','login'
,'details'], ['cannot','log','in','password'],['lost','account','recover','password']],
intents_tags['forgot_password'])
}
def insert_hybrid(manual_target, generated_target, prototype, itag):
return insert_manually(manual_target, prototype) + list(generate_intent(generated_target, itag))
# Updating training data
for intent_name, args in hybrid_intents.items():
train[intent_name] = insert_hybrid(*args)
# 4. Converting to long dataframe from wide that my NN model can read in for the next notebook - and wrangling
neat_train = pd.DataFrame(train.T.unstack()).reset_index().iloc[:,1:].rename(columns={'level_1':'Intent', 0: 'Utterance'})
# Reordering
neat_train = neat_train[['Utterance','Intent']]
# 5. Saving this raw training data into a serialized file
neat_train.to_pickle('objects/train.pkl')
# Styling display
show = lambda x: x.head(10).style.set_properties(**{'background-color': 'black',
'color': 'lawngreen',
'border-color': 'white'})\
.applymap(lambda x: f"color: {'lawngreen' if isinstance(x,str) else 'red'}")\
.background_gradient(cmap='Blues')
print(train.shape)
show(train)
###Output
(1000, 10)
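###Markdown
Since class balance was a goal (1,000 examples per intent), a quick check on the long-format frame confirms each intent got the intended number of utterances.
###Code
# Verify that every intent ended up with 1,000 training utterances
print(neat_train['Intent'].value_counts())
###Output
_____no_output_____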
###Markdown
I am quite pleased with these. They seem quite promising if you inspect them! I am not too worried about the emojis my preprocessor missed - they are few in frequency and just add noise. The same thing applies for other things like languages, because I saw one Indonesian Tweet as well. It might be a good thing, because we do not want our model to overfit; it might even aid the generalizability of my model.
The worst results probably come from the `lost_replace` intent because, as could be seen in the keyword EDA, there isn't much of this anyway. I might remove it.
###Code
print(neat_train.shape)
show(neat_train)
neat_train.tail(44)
###Output
_____no_output_____
###Markdown
These all actually look really promising, as they all seem to have some relation to their respective buckets. An emoji escaped from my preprocessing function, but such cases are few enough that I don't feel the need to remove them for now; they are just noise.
Also notice that if you compare the tail and head of this data, `update` was generated as a hybrid between a template and my Tweets. Lost and replaced - issues with product. My iphone is getting really hot, can you replace it. Lost.
###Code
# Seeing the real data for an intent
# Note: intent_indexes is only populated when the commented-out intent_indexes lines
# inside generate_intent() above are enabled; otherwise this cell raises a NameError.
intent_name = 'lost_replace'
view = processed.iloc[intent_indexes[intent_name]]['Real Inbound']
[*view]
###Output
_____no_output_____
###Markdown
Intent Bucket Evaluation
###Code
# Storing word rank table dataframes in this dict
wordranks = {}
# For visualizing top 10
def top10_bagofwords(data, output_name, title):
''' Taking as input the data and plots the top 10 words based on counts in this text data'''
bagofwords = CountVectorizer()
# Output will be a sparse matrix
inbound = bagofwords.fit_transform(data)
# Inspecting how often contractions and colloquial language are used
word_counts = np.array(np.sum(inbound, axis=0)).reshape((-1,))
words = np.array(bagofwords.get_feature_names())
words_df = pd.DataFrame({"word":words,
"count":word_counts})
words_rank = words_df.sort_values(by="count", ascending=False)
wordranks[output_name] = words_rank
# words_rank.to_csv('words_rank.csv') # Storing it in a csv so I can inspect and go through it myself
# Visualizing top 10 words
plt.figure(figsize=(12,6))
sns.barplot(words_rank['word'][:10], words_rank['count'][:10].astype(str), palette = 'inferno')
plt.title(title)
# Saving
plt.savefig(f'visualizations/next_ver/{output_name}.png')
plt.show()
# Doing my bucket evaluations here - seeing what each distinct bucket intent means
for i in train.columns:
top10_bagofwords(train[i].apply(" ".join), f'bucket_eval/{i}', f'Top 10 Words in {i} Intent')
###Output
_____no_output_____
###Markdown
Initial thoughts:
To be honest, I feel like the way I get my training data for greeting is not the best. There are a lot of words that are similar between buckets. As an example, for mac, it's a little concerning that iphone is the most common word!
After changing method (version 2):
The words and results make a lot more sense.
###Code
# Investigating bag of word frequencies at a more granular level
wordranks['bucket_eval/mac'].head(50)
[*train.columns]
###Output
_____no_output_____
###Markdown
Generating a text file for Rasa
Rasa's API requires this format of data to be fed into their bot. I work with my own training data in `train`; however, this was made for experimenting with their tool.
###Code
# Getting NLU.md training data in correct form for Rasa Bot
with open('data/train_rasa/train_v3.txt', 'w') as t:
for intent in train.columns:
t.write(f'## intent: {intent}\n')
for tweet in train[intent]:
t.write('- ' + " ".join(tweet) + '\n')
t.write('\n')
###Output
_____no_output_____
###Markdown
This is just a cell to log my progress of how my method was doing at first.
Tweets with similarities (without emojis) to `['new', 'update', 'i️', 'make', 'sure', 'download', 'yesterday']`. Format is: `(index tag, cosine similarity)`
[('72326', 0.8154675364494324), ('32166', 0.8151031732559204), ('29461', 0.8027088642120361), ('5942', 0.7968393564224243), ('54836', 0.7879305481910706), ('30359', 0.7861931324005127), ('66201', 0.7817540168762207), ('50109', 0.7796376943588257), ('59490', 0.7793254852294922), ('46644', 0.7775745391845703), ('58410', 0.7734568119049072), ('26164', 0.7674931287765503), ('14867', 0.7673683166503906), ('25813', 0.766610860824585), ('47880', 0.7642890214920044), ('30945', 0.76273113489151), ('74155', 0.7582229971885681), ('33346', 0.7577282190322876), ('9502', 0.7569847702980042), ('64871', 0.7567278146743774)]

Using scattertext from the spaCy universe for EDA
This [kernel](https://www.kaggle.com/psbots/customer-support-meets-spacy-universe) showed me what spaCy's scattertext tool is capable of doing! So I wanted to do it myself as well to hopefully get useful insights. As said in the docs, scattertext is "a tool for finding distinguishing terms in small-to-medium-sized corpora, and presenting them in a sexy, interactive scatter plot with non-overlapping term labels."
However, the `CorpusFromParsedDocuments` implemented in that kernel seemed to be deprecated or had a dependency issue, so I looked at the docs and used `CorpusFromPandas` instead, which I think is very suitable especially for the data I have.
###Code
def term_freqs(intent_name):
bagofwords = CountVectorizer()
# Output will be a sparse matrix
inbound = bagofwords.fit_transform(visualize_train[visualize_train['Intent'] == intent_name]['Utterance'])
# Inspecting how often contractions and colloquial language are used
word_counts = np.array(np.sum(inbound, axis=0)).reshape((-1,))
words = np.array(bagofwords.get_feature_names())
words_df = pd.DataFrame({"word":words,
"count":word_counts})
words_rank = words_df.sort_values(by="count", ascending=False)
return words_rank
update_df = term_freqs('update')
repair_df = term_freqs('repair')
combined = pd.concat([update_df, repair_df], axis = 0)
import spacy
import scattertext as st
# Data munging
visualize_train = neat_train.copy()
visualize_train['Utterance'] = visualize_train['Utterance'].progress_apply(" ".join)
# Subsetting to the two intents I want to compare
visualize_train = visualize_train[(visualize_train['Intent'] == 'repair') |
(visualize_train['Intent'] == 'update')]
# Load spacy model
nlp = spacy.load('en',disable_pipes=["tagger","ner"])
visualize_train['parsed'] = visualize_train['Utterance'].progress_apply(nlp)
visualize_train.head()
corpus = st.CorpusFromParsedDocuments(visualize_train,
category_col='Intent',
parsed_col='parsed').build()
html = st.produce_scattertext_explorer(corpus,
category='Intent',
category_name='repair',
not_category_name='update',
width_in_pixels=600,
minimum_term_frequency=10,
term_significance = st.LogOddsRatioUninformativeDirichletPrior(),
)
###Output
_____no_output_____ |
change-cnn.ipynb | ###Markdown
Imports
###Code
!pip install ../input/kerasapplications/keras-team-keras-applications-3b180cb -f ./ --no-index
!pip install ../input/efficientnet/efficientnet-1.1.0/ -f ./ --no-index
import os
import cv2
import pydicom
import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import random
from tqdm.notebook import tqdm
from sklearn.model_selection import train_test_split, KFold,GroupKFold
from sklearn.metrics import mean_absolute_error
from tensorflow_addons.optimizers import RectifiedAdam
from tensorflow.keras import Model
import tensorflow.keras.backend as K
import tensorflow.keras.layers as L
import tensorflow.keras.models as M
from tensorflow.keras.optimizers import Nadam
import seaborn as sns
from PIL import Image
from tensorflow.keras.layers import (
Dense, Dropout, Activation, Flatten, Input, BatchNormalization, GlobalAveragePooling2D, Add, Conv2D, AveragePooling2D,
LeakyReLU, Concatenate
)
import efficientnet.tfkeras as efn
def seed_everything(seed=2020):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
seed_everything(42)
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.compat.v1.Session(config=config)
train = pd.read_csv('../input/osic-pulmonary-fibrosis-progression/train.csv')
###Output
_____no_output_____
###Markdown
Linear Decay (based on EfficientNets)
###Code
DROPOUT = 0.385
FVC_WEIGHT = 0.45
CONFIDENCE_WEIGHT = 0.3
def get_tab(df):
vector = [(df.Age.values[0] - 30) / 30]
if df.Sex.values[0] == 'male':
vector.append(0)
else:
vector.append(1)
if df.SmokingStatus.values[0] == 'Never smoked':
vector.extend([0,0])
elif df.SmokingStatus.values[0] == 'Ex-smoker':
vector.extend([1,1])
elif df.SmokingStatus.values[0] == 'Currently smokes':
vector.extend([0,1])
else:
vector.extend([1,0])
return np.array(vector)
A = {}
TAB = {}
P = []
for i, p in tqdm(enumerate(train.Patient.unique())):
sub = train.loc[train.Patient == p, :]
fvc = sub.FVC.values
weeks = sub.Weeks.values
c = np.vstack([weeks, np.ones(len(weeks))]).T
a, b = np.linalg.lstsq(c, fvc)[0]
A[p] = a
TAB[p] = get_tab(sub)
P.append(p)
###Output
_____no_output_____
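###Markdown
To make the linear-decay idea concrete, the sketch below refits the least-squares line for a single patient and plots it against the measured FVC values; the slope is exactly the per-patient coefficient stored in A above.
###Code
# Sketch: visualize the least-squares FVC trend for one patient
p = P[0]
sub_p = train.loc[train.Patient == p, :]
c = np.vstack([sub_p.Weeks.values, np.ones(len(sub_p))]).T
a, b = np.linalg.lstsq(c, sub_p.FVC.values, rcond=None)[0]
plt.scatter(sub_p.Weeks, sub_p.FVC, label='measured FVC')
plt.plot(sub_p.Weeks, a * sub_p.Weeks + b, 'r--', label=f'linear fit, slope={a:.1f}')
plt.xlabel('Weeks'); plt.ylabel('FVC'); plt.legend(); plt.show()
###Output
_____no_output_____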
###Markdown
CNN for coeff prediction
###Code
def get_img(path):
d = pydicom.dcmread(path)
return cv2.resize(d.pixel_array / 2**11, (512, 512))
from tensorflow.keras.utils import Sequence
class IGenerator(Sequence):
BAD_ID = ['ID00011637202177653955184', 'ID00052637202186188008618']
def __init__(self, keys, a, tab, batch_size=32):
self.keys = [k for k in keys if k not in self.BAD_ID]
self.a = a
self.tab = tab
self.batch_size = batch_size
self.train_data = {}
for p in train.Patient.values:
self.train_data[p] = os.listdir(f'../input/osic-pulmonary-fibrosis-progression/train/{p}/')
def __len__(self):
return 1000
def __getitem__(self, idx):
x = []
a, tab = [], []
keys = np.random.choice(self.keys, size = self.batch_size)
for k in keys:
try:
i = np.random.choice(self.train_data[k], size=1)[0]
img = get_img(f'../input/osic-pulmonary-fibrosis-progression/train/{k}/{i}')
x.append(img)
a.append(self.a[k])
tab.append(self.tab[k])
except:
print(k, i)
x,a,tab = np.array(x), np.array(a), np.array(tab)
x = np.expand_dims(x, axis=-1)
return [x, tab] , a
%%time
def get_efficientnet(model, shape):
models_dict = {
'b0': efn.EfficientNetB0(input_shape=shape,weights=None,include_top=False),
'b1': efn.EfficientNetB1(input_shape=shape,weights=None,include_top=False),
'b2': efn.EfficientNetB2(input_shape=shape,weights=None,include_top=False),
'b3': efn.EfficientNetB3(input_shape=shape,weights=None,include_top=False),
'b4': efn.EfficientNetB4(input_shape=shape,weights=None,include_top=False),
'b5': efn.EfficientNetB5(input_shape=shape,weights=None,include_top=False),
'b6': efn.EfficientNetB6(input_shape=shape,weights=None,include_top=False),
'b7': efn.EfficientNetB7(input_shape=shape,weights=None,include_top=False)
}
return models_dict[model]
def build_model(shape=(512, 512, 1), model_class=None):
inp = Input(shape=shape)
base = get_efficientnet(model_class, shape)
x = base(inp)
x = GlobalAveragePooling2D()(x)
inp2 = Input(shape=(4,))
x2 = tf.keras.layers.GaussianNoise(0.2)(inp2)
x = Concatenate()([x, x2])
x = Dropout(DROPOUT)(x)
x = Dense(1)(x)
model = Model([inp, inp2] , x)
weights = [w for w in os.listdir('../input/osic-model-weights') if model_class in w][0]
model.load_weights('../input/osic-model-weights/' + weights)
return model
model_classes = ['b5'] #['b0','b1','b2','b3','b4','b5','b6','b7']
models = [build_model(shape=(512, 512, 1), model_class=m) for m in model_classes]
print('Number of models: ' + str(len(models)))
from sklearn.model_selection import train_test_split
tr_p, vl_p = train_test_split(P,
shuffle=True,
train_size= 0.7)
sns.distplot(list(A.values()));
def score(fvc_true, fvc_pred, sigma):
sigma_clip = np.maximum(sigma, 70) # changed from 70, tried 66.7 too
delta = np.abs(fvc_true - fvc_pred)
delta = np.minimum(delta, 1000)
sq2 = np.sqrt(2)
metric = (delta / sigma_clip)*sq2 + np.log(sigma_clip* sq2)
return np.mean(metric)
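# Quick sanity check of the metric with made-up numbers (sketch): for the same
# 100 mL error, a wider confidence (sigma = 200) scores lower (better) than an
# overconfident one whose sigma gets clipped up to 70.
# print(score(np.array([2500.]), np.array([2400.]), np.array([200.])))   # ~6.35
# print(score(np.array([2500.]), np.array([2400.]), np.array([50.])))    # ~6.61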
%%time
subs = []
for model in models:
metric = []
for q in tqdm(range(1, 10)):
m = []
for p in vl_p:
x = []
tab = []
ldir = os.listdir(f'../input/osic-pulmonary-fibrosis-progression/train/{p}/')
for i in ldir:
if int(i[:-4]) / len(ldir) < 0.8 and int(i[:-4]) / len(ldir) > 0.15:
x.append(get_img(f'../input/osic-pulmonary-fibrosis-progression/train/{p}/{i}'))
tab.append(get_tab(train.loc[train.Patient == p, :]))
if len(x) < 1:
continue
tab = np.array(tab)
x = np.expand_dims(x, axis=-1)
_a = model.predict([x, tab])
a = np.quantile(_a, q / 10)
percent_true = train.Percent.values[train.Patient == p]
fvc_true = train.FVC.values[train.Patient == p]
weeks_true = train.Weeks.values[train.Patient == p]
fvc = a * (weeks_true - weeks_true[0]) + fvc_true[0]
percent = percent_true[0] - a * abs(weeks_true - weeks_true[0])
m.append(score(fvc_true, fvc, percent))
print(np.mean(m))
metric.append(np.mean(m))
q = (np.argmin(metric) + 1)/ 10
sub = pd.read_csv('../input/osic-pulmonary-fibrosis-progression/sample_submission.csv')
test = pd.read_csv('../input/osic-pulmonary-fibrosis-progression/test.csv')
A_test, B_test, P_test,W, FVC= {}, {}, {},{},{}
STD, WEEK = {}, {}
for p in test.Patient.unique():
x = []
tab = []
ldir = os.listdir(f'../input/osic-pulmonary-fibrosis-progression/test/{p}/')
for i in ldir:
if int(i[:-4]) / len(ldir) < 0.8 and int(i[:-4]) / len(ldir) > 0.15:
x.append(get_img(f'../input/osic-pulmonary-fibrosis-progression/test/{p}/{i}'))
tab.append(get_tab(test.loc[test.Patient == p, :]))
if len(x) <= 1:
continue
tab = np.array(tab)
x = np.expand_dims(x, axis=-1)
_a = model.predict([x, tab])
a = np.quantile(_a, q)
A_test[p] = a
B_test[p] = test.FVC.values[test.Patient == p] - a*test.Weeks.values[test.Patient == p]
P_test[p] = test.Percent.values[test.Patient == p]
WEEK[p] = test.Weeks.values[test.Patient == p]
for k in sub.Patient_Week.values:
p, w = k.split('_')
w = int(w)
fvc = A_test[p] * w + B_test[p]
sub.loc[sub.Patient_Week == k, 'FVC'] = fvc
sub.loc[sub.Patient_Week == k, 'Confidence'] = (
P_test[p] - A_test[p] * abs(WEEK[p] - w)
)
_sub = sub[["Patient_Week","FVC","Confidence"]].copy()
subs.append(_sub)
###Output
_____no_output_____
###Markdown
Averaging Predictions
###Code
N = len(subs)
sub = subs[0].copy() # ref
# sub["FVC"] = 0
# sub["Confidence"] = 0
# b5_confidence = 0.8
# sub["FVC"] = subs[0]["FVC"]*b5_confidence + subs[1]["FVC"]*(1-b5_confidence)
# sub["Confidence"] = subs[0]["Confidence"]*b5_confidence + subs[1]["Confidence"]*(1-b5_confidence)
sub.head()
sub[["Patient_Week","FVC","Confidence"]].to_csv("submission_img.csv", index=False)
img_sub = sub[["Patient_Week","FVC","Confidence"]].copy()
###Output
_____no_output_____
###Markdown
Osic-Multiple-Quantile-Regression
###Code
ROOT = "../input/osic-pulmonary-fibrosis-progression"
BATCH_SIZE=128
tr = pd.read_csv(f"{ROOT}/train.csv")
tr.drop_duplicates(keep=False, inplace=True, subset=['Patient','Weeks'])
chunk = pd.read_csv(f"{ROOT}/test.csv")
print("add infos")
sub = pd.read_csv(f"{ROOT}/sample_submission.csv")
sub['Patient'] = sub['Patient_Week'].apply(lambda x:x.split('_')[0])
sub['Weeks'] = sub['Patient_Week'].apply(lambda x: int(x.split('_')[-1]))
sub = sub[['Patient','Weeks','Confidence','Patient_Week']]
sub = sub.merge(chunk.drop('Weeks', axis=1), on="Patient")
tr['WHERE'] = 'train'
chunk['WHERE'] = 'val'
sub['WHERE'] = 'test'
data = tr.append([chunk, sub])
print(tr.shape, chunk.shape, sub.shape, data.shape)
print(tr.Patient.nunique(), chunk.Patient.nunique(), sub.Patient.nunique(),
data.Patient.nunique())
#
data['min_week'] = data['Weeks']
data.loc[data.WHERE=='test','min_week'] = np.nan
data['min_week'] = data.groupby('Patient')['min_week'].transform('min')
data['avg_percent'] = data.groupby(['Patient', 'WHERE'])['Percent'].transform('mean')
base = data.loc[data.Weeks == data.min_week]
base = base[['Patient','FVC']].copy()
base.columns = ['Patient','min_FVC']
base['nb'] = 1
base['nb'] = base.groupby('Patient')['nb'].transform('cumsum')
base = base[base.nb==1]
base.drop('nb', axis=1, inplace=True)
data = data.merge(base, on='Patient', how='left')
data['base_week'] = data['Weeks'] - data['min_week']
del base
COLS = ['Sex','SmokingStatus'] #,'Age'
FE = []
for col in COLS:
for mod in data[col].unique():
FE.append(mod)
data[mod] = (data[col] == mod).astype(int)
#
data['age'] = (data['Age'] - data['Age'].min() ) / ( data['Age'].max() - data['Age'].min() )
data['BASE'] = (data['min_FVC'] - data['min_FVC'].min() ) / ( data['min_FVC'].max() - data['min_FVC'].min() )
data['week'] = (data['base_week'] - data['base_week'].min() ) / ( data['base_week'].max() - data['base_week'].min() )
# data['percent'] = (data['Percent'] - data['Percent'].min() ) / ( data['Percent'].max() - data['Percent'].min() )
data['percent'] = (data['avg_percent'] - data['avg_percent'].min() ) / ( data['avg_percent'].max() - data['avg_percent'].min() )
FE += ['age','percent','week','BASE']
tr = data.loc[data.WHERE=='train']
chunk = data.loc[data.WHERE=='val']
sub = data.loc[data.WHERE=='test']
del data
categories = ['Male',
'Female',
'Ex-smoker',
'Never smoked',
'Currently smokes',
'age',
'percent']
continous = [
'week',
'BASE'
]
tr.shape, chunk.shape, sub.shape
###Output
_____no_output_____
###Markdown
The change of mloss
Here is where I tuned the `mloss` weighting from 0.8 to 0.65. You can try a grid-search to find a better value - I have only tried a few choices such as 0.65, 0.7 and 0.75 (a small grid-search sketch is included just before the cross-validation loop below).
###Code
C1, C2 = tf.constant(70, dtype='float32'), tf.constant(1000, dtype="float32")
def score(y_true, y_pred):
tf.dtypes.cast(y_true, tf.float32)
tf.dtypes.cast(y_pred, tf.float32)
sigma = y_pred[:, 2] - y_pred[:, 0]
fvc_pred = y_pred[:, 1]
#sigma_clip = sigma + C1
sigma_clip = tf.maximum(sigma, C1)
delta = tf.abs(y_true[:, 0] - fvc_pred)
delta = tf.minimum(delta, C2)
sq2 = tf.sqrt( tf.dtypes.cast(2, dtype=tf.float32) )
metric = (delta / sigma_clip)*sq2 + tf.math.log(sigma_clip* sq2)
return K.mean(metric)
def qloss(y_true, y_pred):
# Pinball loss for multiple quantiles
qs = [0.2, 0.50, 0.8]
q = tf.constant(np.array([qs]), dtype=tf.float32)
e = y_true - y_pred
v = tf.maximum(q*e, (q-1)*e)
return K.mean(v)
def mloss(_lambda):
def loss(y_true, y_pred):
return _lambda * qloss(y_true, y_pred) + (1 - _lambda)*score(y_true, y_pred)
return loss
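# Hedged sanity check (my addition, not part of the original tuning): evaluate the
# blended loss for a few candidate lambdas on a tiny synthetic batch, just to see how
# the pinball term and the competition metric trade off. Assumes the definitions above
# (score, qloss, mloss) and the Keras backend alias K from earlier cells are available.
_y_true_demo = tf.constant([[2500.], [3000.], [2800.]], dtype=tf.float32)
_y_pred_demo = tf.constant([[2300., 2500., 2700.],
                            [2800., 3050., 3300.],
                            [2600., 2750., 2900.]], dtype=tf.float32)
for _lam in (0.5, 0.65, 0.8):
    print(_lam, K.eval(mloss(_lam)(_y_true_demo, _y_pred_demo)))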
def make_model(shape1, shape2):
z1 = L.Input((shape1,), name="Category")
z2 = L.Input((shape2,), name="Continuous")
encode1 = L.Dense(30, activation="relu", name='encode1')(z1)
encode2 = L.Dense(20, activation='relu', name='encode2')(z2)
z = L.Concatenate()([encode1, encode2])
# x = L.Dense(60, activation="relu", name="d1")(z)
# x = L.Dense(60, activation="relu", name="d2")(z)
# x = L.Dropout(0.1)(x)
x = L.Dense(100, activation="relu", name="d3")(z)
x = L.Dense(100, activation="relu", name="d4")(x)
p1 = L.Dense(3, activation="linear", name="p1")(x)
p2 = L.Dense(3, activation="relu", name="p2")(x)
preds = L.Lambda(lambda x: x[0] + tf.cumsum(x[1], axis=1),
name="preds")([p1, p2])
model = M.Model([z1, z2], preds, name="CNN")
model.compile(loss=mloss(0.65), optimizer=tf.keras.optimizers.Adam(lr=0.05, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.01, amsgrad=False), metrics=[score])
return model
y = tr['FVC'].astype(float).values
z1 = tr[categories].values
z2 = tr[continous].values
ze1 = sub[categories].values
ze2 = sub[continous].values
shape1 = z1.shape[1]
shape2 = z2.shape[1]
pe = np.zeros((ze1.shape[0], 3))
pred = np.zeros((z1.shape[0], 3))
net = make_model(shape1, shape2)
print(net.summary())
print(net.count_params())
NFOLD = 5 # originally 5
kf = KFold(n_splits=NFOLD, shuffle=True, random_state=42)
gkf = GroupKFold(n_splits=NFOLD)
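# Optional sketch of the grid-search suggested in the note above (my addition, not the
# original tuning run): re-fit a shortened model for a few candidate lambdas on a single
# group fold and compare the validation metric. make_model hard-codes mloss(0.65), so the
# candidate lambda is applied by recompiling; the epoch count is cut down to keep this
# cheap, so the numbers are indicative only.
def make_model_with_lambda(shape1, shape2, _lambda):
    m = make_model(shape1, shape2)
    m.compile(loss=mloss(_lambda),
              optimizer=tf.keras.optimizers.Adam(lr=0.05, beta_1=0.9, beta_2=0.999,
                                                 epsilon=None, decay=0.01, amsgrad=False),
              metrics=[score])
    return m

RUN_LAMBDA_SEARCH = False  # flip to True to actually run the (slow) search
if RUN_LAMBDA_SEARCH:
    tr_idx_s, val_idx_s = next(gkf.split(z1, y, groups=tr.Patient))
    for _lam in (0.5, 0.65, 0.8):
        net_s = make_model_with_lambda(shape1, shape2, _lam)
        net_s.fit([z1[tr_idx_s], z2[tr_idx_s]], y[tr_idx_s],
                  batch_size=BATCH_SIZE, epochs=100, verbose=0)
        print(_lam, net_s.evaluate([z1[val_idx_s], z2[val_idx_s]], y[val_idx_s],
                                   verbose=0, batch_size=BATCH_SIZE))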
%%time
cnt = 0
EPOCHS = 950
for tr_idx, val_idx in gkf.split(z1, y, groups=tr.Patient):
cnt += 1
print(f"FOLD {cnt}")
net = make_model(shape1, shape2)
net.fit([z1[tr_idx], z2[tr_idx]], y[tr_idx], batch_size=BATCH_SIZE, epochs=EPOCHS,
validation_data=([z1[val_idx], z2[val_idx]], y[val_idx]), verbose=0) #
print("train", net.evaluate([z1[tr_idx], z2[tr_idx]], y[tr_idx], verbose=0, batch_size=BATCH_SIZE))
print("val", net.evaluate([z1[val_idx], z2[val_idx]], y[val_idx], verbose=0, batch_size=BATCH_SIZE))
print("predict val...")
pred[val_idx] = net.predict([z1[val_idx], z2[val_idx]], batch_size=BATCH_SIZE, verbose=0)
print("predict test...")
pe += net.predict([ze1, ze2], batch_size=BATCH_SIZE, verbose=0) / NFOLD
sigma_opt = mean_absolute_error(y, pred[:, 1])
unc = pred[:,2] - pred[:, 0]
sigma_mean = np.mean(unc)
print(sigma_opt, sigma_mean)
idxs = np.random.randint(0, y.shape[0], 100)
plt.plot(y[idxs], label="ground truth")
plt.plot(pred[idxs, 0], label="q25")
plt.plot(pred[idxs, 1], label="q50")
plt.plot(pred[idxs, 2], label="q75")
plt.legend(loc="best")
plt.show()
print(unc.min(), unc.mean(), unc.max(), (unc>=0).mean())
plt.hist(unc)
plt.title("uncertainty in prediction")
plt.show()
sub.head()
# PREDICTION
sub['FVC1'] = 1.*pe[:, 1]
sub['Confidence1'] = pe[:, 2] - pe[:, 0]
subm = sub[['Patient_Week','FVC','Confidence','FVC1','Confidence1']].copy()
subm.loc[~subm.FVC1.isnull()].head(10)
subm.loc[~subm.FVC1.isnull(),'FVC'] = subm.loc[~subm.FVC1.isnull(),'FVC1']
if sigma_mean<70:
subm['Confidence'] = sigma_opt
else:
subm.loc[~subm.FVC1.isnull(),'Confidence'] = subm.loc[~subm.FVC1.isnull(),'Confidence1']
subm.head()
subm.describe().T
otest = pd.read_csv('../input/osic-pulmonary-fibrosis-progression/test.csv')
for i in range(len(otest)):
subm.loc[subm['Patient_Week']==otest.Patient[i]+'_'+str(otest.Weeks[i]), 'FVC'] = otest.FVC[i]
subm.loc[subm['Patient_Week']==otest.Patient[i]+'_'+str(otest.Weeks[i]), 'Confidence'] = 0.1
subm[["Patient_Week","FVC","Confidence"]].to_csv("submission_regression.csv", index=False)
reg_sub = subm[["Patient_Week","FVC","Confidence"]].copy()
###Output
_____no_output_____
###Markdown
Ensemble (Simple Blend)
###Code
df1 = img_sub.sort_values(by=['Patient_Week'], ascending=True).reset_index(drop=True)
df2 = reg_sub.sort_values(by=['Patient_Week'], ascending=True).reset_index(drop=True)
df = df1[['Patient_Week']].copy()
df['FVC'] = FVC_WEIGHT*df1['FVC'] + (1 - FVC_WEIGHT)*df2['FVC']
df['Confidence'] = CONFIDENCE_WEIGHT*df1['Confidence'] + (1 - CONFIDENCE_WEIGHT)*df2['Confidence']
df.head()
df.to_csv('submission.csv', index=False)
os.listdir('../input')
###Output
_____no_output_____ |
Problem_003_-_Add_Linked_Lists.ipynb | ###Markdown
Add two numbers represented by linked lists
Two numbers are represented by two lists: the digits are stored in reverse order and each node contains a single digit. Write a function that returns a list representing the sum of these two numbers.
Example
Input: The first number 99785 represented as a list: 5→8→7→9→9 The second number 325 represented as a list: 5→2→3
Output: Sum list: 0→1→1→0→0→1 (representing the number 100110)
Solution
First we need to implement the linked list data structure. We first create a Node class
###Code
class Node(object):
def __init__(self, value):
self.value = value
self.next = None
###Output
_____no_output_____
###Markdown
Now we need to create a class for the singly linked list
###Code
class SinglyLinkedList:
# __init__ Constructor
def __init__(self):
self.head = None
self.tail = None
# String representation
def __str__(self):
current_node = self.head
string = str(current_node.value)
while current_node.next != None:
current_node = current_node.next
string = string + '→' + str(current_node.value)
return string
# Function to insert a new head node
def push_front(self, val):
new_node = Node(val)
new_node.next = self.head
self.head = new_node
if self.tail is None:
self.tail = self.head
# Function to insert a new tail node
def push_back(self, val):
new_node = Node(val)
if self.tail is None:
self.tail = new_node
self.head = self.tail
else:
self.tail.next = new_node
self.tail = new_node
list_1 = SinglyLinkedList()
list_2 = SinglyLinkedList()
# Create the first list (number 99785; 5 -> 8 -> 7 -> 9 -> 9)
list_1.push_front(9)
list_1.push_front(9)
list_1.push_front(7)
list_1.push_front(8)
list_1.push_front(5)
print('The first list is: ', end="")
print(list_1)
# Create the second list (number 325; 5 -> 2 -> 3)
list_2.push_back(5)
list_2.push_back(2)
list_2.push_back(3)
print('The second list is: ', end="")
print(list_2)
###Output
The first list is: 5→8→7→9→9
The second list is: 5→2→3
###Markdown
Now we need to add the two lists.
###Code
def add_two_singly_linked_lists(l1, l2):
result = SinglyLinkedList()
# Read the first elements
e1 = l1.head
e2 = l2.head
n = 0
while (e1 != None) or (e2 != None): # At least one list still has extra nodes/digits
# Get values from each node
val1 = e1.value if (e1 != None) else 0
val2 = e2.value if (e2 != None) else 0
# Compute sum, quotient and remainder
s = val1 + val2 + n
n = s // 10 # quotient (carry to the next level)
        r = s % 10 # remainder
# Inset r in the tail of result
result.push_back(r)
if (e1 != None):
e1 = e1.next
if (e2 != None):
e2 = e2.next
# If there is a carry number n > 0, create a new node for it
if n>0:
result.push_back(n)
return result
sum_list = add_two_singly_linked_lists(list_1, list_2)
print('The sum list is: ', end="")
print(sum_list)
###Output
The sum list is: 0→1→1→0→0→1
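###Markdown
As a quick check (an addition to the original solution), we can walk the result list and rebuild the integer it encodes, which should equal 99785 + 325 = 100110.
###Code
def linked_list_to_int(lst):
    """Rebuild the integer encoded by a digit-reversed singly linked list."""
    total, place = 0, 1
    node = lst.head
    while node is not None:
        total += node.value * place
        place *= 10
        node = node.next
    return total

print(linked_list_to_int(sum_list))
assert linked_list_to_int(sum_list) == 99785 + 325
###Output
_____no_output_____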
|
pages/themes/dataScienceBasics/examples/pandas/DataFrame/MergeJoin/DataFramesJoin.ipynb | ###Markdown
DataFrames Join
###Code
import pandas as pd
###Output
_____no_output_____
###Markdown
Load data - not good for join!!!
###Code
devs = pd.read_csv("datasets/developers/developers.csv", sep=";")
devs
langs = pd.read_csv("datasets/developers/languages.csv", sep=";")
langs
###Output
_____no_output_____
###Markdown
Load data - good for join!!!
###Code
bg_towns = pd.DataFrame(["sofia", "sandanski", "pleven","varna"], columns=["town"])
bg_towns
bg_weather = pd.DataFrame([25, 35, 20 ], columns=["temp"])
bg_weather
###Output
_____no_output_____
###Markdown
Inner Join with Join()
###Code
# on="did" => matches devs.did == langs.index
# how="inner" => removes NaN value
dev_langs_inner = devs.join(langs,lsuffix="_l",rsuffix="_r", on="dname", how="inner")
dev_langs_inner
bg_weather_inner = bg_weather.join(bg_towns, how="inner")
bg_weather_inner
###Output
_____no_output_____
###Markdown
Outer Join with Join()
###Code
dev_langs_outer = devs.join(langs,lsuffix="_l",rsuffix="_r", on="did", how="outer")
dev_langs_outer
bg_weather_outer = bg_weather.join(bg_towns, how="outer")
bg_weather_outer
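# Extra illustration (my addition): a left join keeps every row of the calling frame,
# here bg_weather, so the result has three rows regardless of the size of bg_towns.
bg_weather_left = bg_weather.join(bg_towns, how="left")
bg_weather_left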
###Output
_____no_output_____ |
Python_Basic_Assingment_Assingment_10.ipynb | ###Markdown
**1.** How do you distinguish between shutil.copy() and shutil.copytree()? **Answer:** shutil.copy() copies only one file, while shutil.copytree() copies an entire folder with its subfolders and files.
**2.** What function is used to rename files? **Answer:** os.rename(old_name_here, new_name_here)
**3.** What is the difference between the delete functions in the send2trash and shutil modules? **Answer:** shutil's delete function removes files and folders permanently, while send2trash's delete function moves them to the recycle bin.
**4.** ZipFile objects have a close() method just like File objects' close() method. What ZipFile method is equivalent to File objects' open() method? **Answer:** ZipFile()
**5.** Create a programme that searches a folder tree for files with a certain file extension (such as .pdf or .jpg). Copy these files from whatever location they are in into a new folder. **Answer:**
###Code
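# Hedged illustration of answers 1-4 above (my addition; paths are created inside a
# temporary directory so the snippet is safe to run anywhere):
import os, shutil, tempfile, zipfile

demo_root = tempfile.mkdtemp()
src_file = os.path.join(demo_root, 'a.txt')
with open(src_file, 'w') as f:
    f.write('demo')

shutil.copy(src_file, os.path.join(demo_root, 'b.txt'))   # 1. copies a single file
shutil.copytree(demo_root, demo_root + '_tree')           # 1. copies the whole folder tree
os.rename(os.path.join(demo_root, 'b.txt'),
          os.path.join(demo_root, 'c.txt'))               # 2. renames a file

# 3. shutil deletes permanently; send2trash (if installed) moves files to the recycle bin
try:
    from send2trash import send2trash
    send2trash(os.path.join(demo_root, 'c.txt'))
except ImportError:
    os.remove(os.path.join(demo_root, 'c.txt'))

# 4. zipfile.ZipFile() plays the role of open() for archives
zf = zipfile.ZipFile(os.path.join(demo_root, 'demo.zip'), 'w')
zf.write(src_file, arcname='a.txt')
zf.close()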
import os
import shutil
# please specify file extension to search for below
file_extension = '.pdf'
# please specify a folder path where search will conducted
path = 'C:\\Users\\Vahe\\Desktop\\ML Projects\\INeuron\\PhishingClassifier\\Documentation'
# please specify a path to create a new folder to copy the searched files
new_path = 'C:\\Users\\Vahe\\Desktop\\ML Projects\\INeuron\\PhishingClassifier'
# please specify a folder name to create
new_folder_name = 'SearchResult'
searched_files = [i for i in os.listdir(path) if file_extension in i]
os.chdir(new_path)
os.mkdir(new_folder_name)
for file in searched_files:
shutil.copy(src=os.path.join(path, file), dst=os.path.join(new_path, new_folder_name))
###Output
_____no_output_____ |
docs/contents/user/basic/convert.ipynb | ###Markdown
Convert
The meaning of a molecular system 'form', in the context of MolSysMT, has been described previously in the section XXX. MolSysMT has a method to convert one form into another: `molsysmt.convert()`. This method is the keystone of the library, the hinge that all other methods and tools in MolSysMT rotate on, and the joining piece connecting the pipes of your workflow when using different Python libraries. The method `molsysmt.convert()` requires at least two input arguments: the original pre-existing item, in whatever form accepted by MolSysMT (see XXX), and the name of the output form:
###Code
molecular_system = msm.convert('pdb_id:1TCD', 'molsysmt.MolSys')
###Output
/home/diego/projects/MolSysMT/molsysmt/form/mmtf_MMTFDecoder/to_molsysmt_Topology.py:34: UserWarning: The structure in the PDB has biological assemblies. There are geometrical transformations proposed in the structure. See the following issue in the source code repository: https://github.com/uibcdf/MolSysMT/issues/33
warnings.warn(warning_message)
###Markdown
The id code `1TCD` from the Protein Data Bank is converted into a native `molsysmt.MolSys` Python object. At this point, you probably think that this operation can also be done with the method `molsysmt.load()`. And you are right: `molsysmt.load()` is nothing but an alias of `molsysmt.convert()`. Although redundant, a loading method was included in MolSysMT just for the sake of intuitive usability, but it could be removed from the library since `molsysmt.convert()` has the same functionality. The following cells illustrate some conversions you can do with `molsysmt.convert()`:
###Code
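# Hedged illustration of the alias mentioned above (my addition): since the text states
# that molsysmt.load() is nothing but an alias of molsysmt.convert(), the first
# conversion could equivalently be written as below (assuming the alias accepts the
# same arguments).
molecular_system_via_load = msm.load('pdb_id:1TCD', 'molsysmt.MolSys')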
msm.convert('pdb_id:1SUX', '1sux.pdb') # fetching a pdb file to save it locally
msm.convert('pdb_id:1SUX', '1sux.mmtf') # fetching an mmtf to save it locally
pdb_file = msm.demo['TcTIM']['1tcd.pdb']
molecular_system = msm.convert(pdb_file, 'mdtraj.Trajectory') # loading a pdb file as an mdtraj.Trajectory object
seq_aa3 = msm.convert(molecular_system, selection='molecule_type=="protein"', to_form='string:aminoacids3') # converting an mdtraj.Trajectory into a sequence form
seq_aa3
###Output
_____no_output_____
###Markdown
How to convert just a selection The conversion can be done over the entire system or over a part of it. The input argument `selection` works with most of the MolSysMT methods, including `molsysmt.convert()`. To know more about how to perform selections there is a section in this documentation entitled "XXX". For now, let's see some simple selections to see how it operates:
###Code
pdb_file = msm.demo['TcTIM']['1tcd.pdb']
whole_molecular_system = msm.convert(pdb_file, to_form='openmm.Topology')
msm.info(whole_molecular_system)
aa = msm.convert(pdb_file, to_form='string:pdb_text')
msm.get_form(aa)
molecular_system = msm.convert(pdb_file, to_form='openmm.Topology',
selection='molecule_type=="protein"')
msm.info(molecular_system)
###Output
_____no_output_____
###Markdown
How to combine multiple forms into one Sometimes the molecular system comes from the combination of more than one form. For example, we can have two files with topology and coordinates to be converted into a single molecular form:
###Code
prmtop_file = msm.demo['pentalanine']['pentalanine.prmtop']
inpcrd_file = msm.demo['pentalanine']['pentalanine.inpcrd']
molecular_system = msm.convert([prmtop_file, inpcrd_file], to_form='molsysmt.MolSys')
msm.info(molecular_system)
###Output
_____no_output_____
###Markdown
How to convert a form into multiple ones at once In the previous section the way to convert multiple forms into one was illustrated. Let's now see how to produce more than one output form in just a single line:
###Code
h5_file = msm.demo['pentalanine']['traj.h5']
topology, structures = msm.convert(h5_file, to_form=['molsysmt.Topology','molsysmt.Structures'])
msm.info(topology)
msm.info(structures)
msm.info([topology, structures])
###Output
_____no_output_____
###Markdown
Let's now combine both forms into one to see that they were properly converted:
###Code
pdb_string = msm.convert([topology, structures], to_form='string:pdb_text', structure_indices=1000)
print(pdb_string)
###Output
REMARK 1 CREATED WITH OPENMM 7.7 BY MOLSYSMT 0+untagged.792.g6189638.dirty, 2022-04-26
CRYST1 20.000 20.000 20.000 90.00 90.00 90.00 P 1 1
HETATM 1 H1 ACE 0 1 -0.543 17.716 0.339 1.00 0.00 H
HETATM 2 CH3 ACE 0 1 0.128 18.016 -0.466 1.00 0.00 C
HETATM 3 H2 ACE 0 1 0.702 18.811 0.010 1.00 0.00 H
HETATM 4 H3 ACE 0 1 -0.534 18.283 -1.290 1.00 0.00 H
HETATM 5 C ACE 0 1 1.095 16.881 -0.794 1.00 0.00 C
HETATM 6 O ACE 0 1 1.119 16.351 -1.907 1.00 0.00 O
ATOM 7 N ALA 0 2 2.030 16.563 0.123 1.00 0.00 N
ATOM 8 H ALA 0 2 1.862 16.985 1.025 1.00 0.00 H
ATOM 9 CA ALA 0 2 3.294 16.016 -0.068 1.00 0.00 C
ATOM 10 HA ALA 0 2 3.448 15.867 -1.137 1.00 0.00 H
ATOM 11 CB ALA 0 2 4.420 16.939 0.426 1.00 0.00 C
ATOM 12 HB1 ALA 0 2 4.130 17.983 0.548 1.00 0.00 H
ATOM 13 HB2 ALA 0 2 4.787 16.677 1.418 1.00 0.00 H
ATOM 14 HB3 ALA 0 2 5.125 16.819 -0.397 1.00 0.00 H
ATOM 15 C ALA 0 2 3.387 14.616 0.507 1.00 0.00 C
ATOM 16 O ALA 0 2 2.778 14.379 1.609 1.00 0.00 O
ATOM 17 N ALA 0 3 4.246 13.737 -0.110 1.00 0.00 N
ATOM 18 H ALA 0 3 4.473 13.965 -1.067 1.00 0.00 H
ATOM 19 CA ALA 0 3 4.819 12.530 0.388 1.00 0.00 C
ATOM 20 HA ALA 0 3 4.984 12.665 1.457 1.00 0.00 H
ATOM 21 CB ALA 0 3 3.825 11.315 0.288 1.00 0.00 C
ATOM 22 HB1 ALA 0 3 3.003 11.617 -0.361 1.00 0.00 H
ATOM 23 HB2 ALA 0 3 4.317 10.378 0.027 1.00 0.00 H
ATOM 24 HB3 ALA 0 3 3.537 10.999 1.291 1.00 0.00 H
ATOM 25 C ALA 0 3 6.089 12.014 -0.377 1.00 0.00 C
ATOM 26 O ALA 0 3 6.092 12.020 -1.582 1.00 0.00 O
ATOM 27 N ALA 0 4 7.107 11.550 0.415 1.00 0.00 N
ATOM 28 H ALA 0 4 6.876 11.513 1.398 1.00 0.00 H
ATOM 29 CA ALA 0 4 8.365 11.004 -0.130 1.00 0.00 C
ATOM 30 HA ALA 0 4 8.163 10.660 -1.144 1.00 0.00 H
ATOM 31 CB ALA 0 4 9.433 12.095 -0.080 1.00 0.00 C
ATOM 32 HB1 ALA 0 4 9.168 13.049 -0.537 1.00 0.00 H
ATOM 33 HB2 ALA 0 4 9.729 12.215 0.962 1.00 0.00 H
ATOM 34 HB3 ALA 0 4 10.313 11.799 -0.650 1.00 0.00 H
ATOM 35 C ALA 0 4 8.818 9.774 0.558 1.00 0.00 C
ATOM 36 O ALA 0 4 8.981 9.626 1.786 1.00 0.00 O
ATOM 37 N ALA 0 5 9.185 8.808 -0.291 1.00 0.00 N
ATOM 38 H ALA 0 5 9.296 8.991 -1.278 1.00 0.00 H
ATOM 39 CA ALA 0 5 9.868 7.543 0.200 1.00 0.00 C
ATOM 40 HA ALA 0 5 10.253 7.718 1.204 1.00 0.00 H
ATOM 41 CB ALA 0 5 8.726 6.463 0.274 1.00 0.00 C
ATOM 42 HB1 ALA 0 5 8.019 6.908 0.973 1.00 0.00 H
ATOM 43 HB2 ALA 0 5 8.192 6.380 -0.673 1.00 0.00 H
ATOM 44 HB3 ALA 0 5 9.216 5.590 0.705 1.00 0.00 H
ATOM 45 C ALA 0 5 10.900 7.033 -0.794 1.00 0.00 C
ATOM 46 O ALA 0 5 10.584 6.766 -1.933 1.00 0.00 O
ATOM 47 N ALA 0 6 12.118 6.804 -0.309 1.00 0.00 N
ATOM 48 H ALA 0 6 12.260 7.172 0.620 1.00 0.00 H
ATOM 49 CA ALA 0 6 13.247 6.357 -1.150 1.00 0.00 C
ATOM 50 HA ALA 0 6 13.000 5.465 -1.727 1.00 0.00 H
ATOM 51 CB ALA 0 6 13.567 7.445 -2.131 1.00 0.00 C
ATOM 52 HB1 ALA 0 6 12.832 7.534 -2.932 1.00 0.00 H
ATOM 53 HB2 ALA 0 6 13.491 8.394 -1.601 1.00 0.00 H
ATOM 54 HB3 ALA 0 6 14.518 7.285 -2.640 1.00 0.00 H
ATOM 55 C ALA 0 6 14.507 6.042 -0.296 1.00 0.00 C
ATOM 56 O ALA 0 6 14.522 6.407 0.855 1.00 0.00 O
HETATM 57 N NME 0 7 15.520 5.286 -0.870 1.00 0.00 N
HETATM 58 H NME 0 7 15.492 5.034 -1.848 1.00 0.00 H
HETATM 59 C NME 0 7 16.685 4.819 -0.080 1.00 0.00 C
HETATM 60 H1 NME 0 7 16.723 3.732 -0.014 1.00 0.00 H
HETATM 61 H2 NME 0 7 17.601 5.184 -0.544 1.00 0.00 H
HETATM 62 H3 NME 0 7 16.676 5.227 0.931 1.00 0.00 H
TER 63 NME 0 7
CONECT 1 2
CONECT 2 3 4 1 5
CONECT 3 2
CONECT 4 2
CONECT 5 6 7 2
CONECT 6 5
CONECT 7 5
CONECT 55 57
CONECT 57 58 55 59
CONECT 58 57
CONECT 59 60 61 62 57
CONECT 60 59
CONECT 61 59
CONECT 62 59
END
###Markdown
Some examples with files
###Code
PDB_file = msm.demo['TcTIM']['1tcd.pdb']
system_pdbfixer = msm.convert(PDB_file, to_form='pdbfixer.PDBFixer')
system_parmed = msm.convert(PDB_file, to_form='parmed.Structure')
MOL2_file = msm.demo['caffeine']['caffeine.mol2']
system_openmm = msm.convert(MOL2_file, to_form='openmm.Modeller')
system_mdtraj = msm.convert(MOL2_file, to_form='mdtraj.Trajectory')
MMTF_file = msm.demo['TcTIM']['1tcd.mmtf']
system_aminoacids1_seq = msm.convert(MMTF_file, selection='molecule_type=="protein"', to_form='string:aminoacids1')
system_molsys = msm.convert(MMTF_file, to_form='molsysmt.MolSys')
print('Form of object system_pdbfixer: ', msm.get_form(system_pdbfixer))
print('Form of object system_parmed: ', msm.get_form(system_parmed))
print('Form of object system_openmm: ', msm.get_form(system_openmm))
print('Form of object system_mdtraj: ', msm.get_form(system_mdtraj))
print('Form of object system_aminoacids1_seq: ', msm.get_form(system_aminoacids1_seq))
print('Form of object system_molsys: ', msm.get_form(system_molsys))
###Output
Form of object system_pdbfixer: pdbfixer.PDBFixer
Form of object system_parmed: parmed.Structure
Form of object system_openmm: openmm.Modeller
Form of object system_mdtraj: mdtraj.Trajectory
Form of object system_aminoacids1_seq: string:aminoacids1
Form of object system_molsys: molsysmt.MolSys
###Markdown
Some examples with IDs
###Code
molecular_system = msm.convert('pdb_id:1TCD', to_form='mdtraj.Trajectory')
###Output
_____no_output_____
###Markdown
Conversions implemented in MolSysMT
###Code
msm.help.convert(from_form='mdtraj.Trajectory', to_form_type='string')
msm.help.convert(from_form='mdtraj.Trajectory', to_form_type='file', as_rows='to')
from_list=['pytraj.Trajectory','mdanalysis.Universe']
to_list=['mdtraj.Trajectory', 'openmm.Topology']
msm.help.convert(from_form=from_list, to_form=to_list)
###Output
_____no_output_____ |
prototypes/sentiment-analysis-eager-execution.ipynb | ###Markdown
Implementation of a sentiment analysis
###Code
# imports assumed by the cells below (they are not present in this dump of the notebook)
import os
import pandas as pd
import sklearn.model_selection
import tensorflow as tf
import tensorflow.contrib.eager as tfe
tf.enable_eager_execution()
dirname = os.getcwd()
dirname = os.path.dirname(dirname)
dataset_path = os.path.join(dirname, 'datasets/')
print(dataset_path)
gloveSet = pd.read_csv(dataset_path+'glove.42B.10d.txt', sep=' ', header=None)
print(gloveSet.shape)
print(gloveSet.head())
gloveWords = gloveSet.iloc[:,0:1]
gloveVectors = gloveSet.iloc[:,1:]
gloveCorpus = set()
gloveWords.iloc[:,0].str.lower().apply(gloveCorpus.add)
print(len(gloveCorpus))
glove_corpus_word_to_int = {}
glove_corpus_int_to_word = {}
for word in gloveCorpus:
temp = len(glove_corpus_word_to_int)
glove_corpus_word_to_int[word] = temp
glove_corpus_int_to_word[temp] = word
#print(imdb_corpus_word_to_int)
#print(imdb_corpus_int_to_word)
UNK = '<unk>'
temp = len(glove_corpus_word_to_int)
# register the unknown-word token in both lookup tables so later lookups of UNK do not fail
glove_corpus_word_to_int[UNK] = temp
glove_corpus_int_to_word[temp] = UNK
glovePath = dataset_path  # assumption: glovePath was never defined in this notebook; point it at the dataset folder
outfile = glovePath + 'glove_word_corpus.pic'
with open(outfile, 'wb') as pickle_file:
pickle.dump([gloveCorpus,glove_corpus_word_to_int, glove_corpus_int_to_word], pickle_file)
#with open(outfile, 'rb') as pickle_file:
# loaded_data1 = pickle.load(pickle_file)
sentiment_dataset = pd.read_csv(dataset_path+'imdb_labelled.txt', sep='\t', header=None)
sentiment_dataset.rename({0:'comments',1:'sentiment'}, axis='columns', inplace=True)
print(sentiment_dataset.shape)
print(sentiment_dataset.head())
def convert_imdb_corpus_into_int(sentence):
words = sentence.lower().split()
words = [word if word in gloveCorpus else UNK for word in words]
words_to_num = [glove_corpus_word_to_int[word] for word in words]
return words_to_num
sentiment_dataset_num = sentiment_dataset.copy()
sentiment_dataset_num['comments'] = sentiment_dataset['comments'].apply(convert_imdb_corpus_into_int)
sentiment_dataset_num.head()
imdb_train, imdb_test = sklearn.model_selection.train_test_split(sentiment_dataset_num, test_size=0.2)
print(imdb_train.head())
print(imdb_train.shape)
print(imdb_test.head())
print(imdb_test.shape)
unkCount = 0
totalCount = 0
def count_UNK(token_list):
global unkCount, totalCount
match = glove_corpus_word_to_int[UNK]
#print(token_list)
#print(match)
unkCount += token_list.count(match)
totalCount += len(token_list)
sentiment_dataset_num['comments'].apply(count_UNK)
print(totalCount)
print(unkCount)
embeddings = tfe.Variable(name='embeddings', validate_shape= gloveVectors.shape,
initial_value=gloveVectors.values,
dtype=tf.float32, trainable=False)
w = tfe.Variable(name='w', validate_shape=(gloveVectors.shape[1], 1),
initial_value=0.01 * tf.random_normal(shape=(gloveVectors.shape[1], 1)),
dtype=tf.float32)
b = tfe.Variable(name='b', validate_shape=(1, 1),
initial_value=0.01 * tf.random_normal(shape=(1, 1)),
dtype=tf.float32)
optimizer = tf.train.AdamOptimizer()
epoch = 1000
for i in range(epoch):
for j in range(imdb_train.shape[0]):
with tf.GradientTape() as tape:
words = tf.nn.embedding_lookup(embeddings, imdb_train.iat[j,0])
#print(words)
cbow_words_avg = tf.math.reduce_mean(words, axis=0, keepdims=True)
#print(cbow_words_avg)
z = tf.matmul(cbow_words_avg, w) + b
y_predict = tf.sigmoid(z)
#print(y_predict)
            # sigmoid_cross_entropy expects raw logits, so pass z rather than the already-squashed y_predict
            loss = tf.losses.sigmoid_cross_entropy(tf.constant(imdb_train.iat[j,1], shape=(1,1)), z)
grads = tape.gradient(loss, [w,b])
#print(grads)
optimizer.apply_gradients(zip(grads, [w,b]),
global_step=tf.train.get_or_create_global_step())
if(i % 100 == 0):
print('Epoch ', i+1)
print(loss.numpy())
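# Hedged evaluation sketch (my addition; assumes the training loop above has run):
# average the GloVe vectors for each held-out comment, apply the learned logistic
# layer, and report plain accuracy on imdb_test.
correct = 0
for j in range(imdb_test.shape[0]):
    test_words = tf.nn.embedding_lookup(embeddings, imdb_test.iat[j, 0])
    test_cbow = tf.math.reduce_mean(test_words, axis=0, keepdims=True)
    test_prob = tf.sigmoid(tf.matmul(test_cbow, w) + b).numpy()[0, 0]
    correct += int((test_prob > 0.5) == bool(imdb_test.iat[j, 1]))
print('held-out accuracy:', correct / float(imdb_test.shape[0]))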
a = tf.Variable(1.0)
assign = a.assign(2.0)
with tf.control_dependencies([assign]):
b = a.read_value()
with tf.control_dependencies([b]):
other_assign = a.assign(3.0)
with tf.control_dependencies([other_assign]):
# Will print 2.0 because the value was read before other_assign ran. If
# `a` was a tf.Variable instead, 2.0 or 3.0 could be printed.
tf.Print(b, [b])
b
###Output
_____no_output_____ |
notebooks/INOVAlife_datalake_datasus.ipynb | ###Markdown
To obtain the access keys, send an email to [email protected] with the text below:
```
I request access keys for the INOVAlife "datasus" datalake.
I agree not to share the access keys, or any other material marked as confidential provided by INOVAlife, with anyone, under the penalties of the law.
I am aware of and agree that INOVAlife may collect information such as data access logs, using the aforementioned keys, IP addresses or any other available metadata as access identifiers. Likewise, I am aware of and agree that the access information will be used exclusively for security control and will not be sold, shared or reused in any other product or service beyond this one.
```
We will review the request and get back to you.
###Code
# Substitua as strings vazias pelas chaves de acesso:
access_key = ""
secret_key = ""
data = DataLakeClient(access_key, secret_key)
data.carregar_tabela("SINASC","DN","1996")
consulta = data.sql("select contador,IDADEMAE,PESO,UF from tabela_SINASC_DN_1996")
consulta.show()
consulta.count()
df = consulta.toPandas()
###Output
_____no_output_____ |
notebooks/SU-2015-10-VrednovanjeModela.ipynb | ###Markdown
University of Zagreb
Faculty of Electrical Engineering and Computing
Machine Learning
http://www.fer.unizg.hr/predmet/su
Academic year 2015/2016
Notebook 10: Model evaluation
(c) 2015 Jan Šnajder
Version: 0.1 (2015-12-19)
###Code
# Load the basic libraries...
import scipy as sp
import sklearn
import pandas as pd
%pylab inline
###Output
Populating the interactive namespace from numpy and matplotlib
###Markdown
Contents
* Confusion matrix
* Basic measures
* F-measure
* Multiclass classification
* Error estimation
* Statistical testing
* Classifier comparison

Confusion matrix
###Code
y_test = sp.random.choice((0,1), size=10); y_test
y_pred = sp.random.choice((0,1), size=10); y_pred
###Output
_____no_output_____
###Markdown
* [Sketch: rows -> predicted, columns -> actual]
###Code
def cm(y_true, y_pred):
tp = 0
fp = 0
fn = 0
tn = 0
for (t, p) in zip(y_true, y_pred):
if t == 0 and p == 1: fp += 1
elif t == 1 and p == 0: fn += 1
elif t == 1 and p == 1: tp += 1
else: tn += 1
return sp.array([[tp, fp], [fn, tn]])
cm(y_test, y_pred)
from sklearn.metrics import confusion_matrix
###Output
_____no_output_____
###Markdown
* [Sketch: rows -> actual, columns -> predicted]
###Code
confusion_matrix(y_test, y_pred)
confusion_matrix(y_test, y_pred, labels=[1,0])
###Output
_____no_output_____
###Markdown
Basic measures
* [Sketch: TP-FP-TN-FN]
* **Accuracy**
$$\mathrm{Acc} = \frac{\mathrm{TP}+\mathrm{TN}}{N} = \frac{\mathrm{TP}+\mathrm{TN}}{\mathrm{TP}+\mathrm{TN}+\mathrm{FP}+\mathrm{FN}}$$
* **Precision**
$$\mathrm{P} = \frac{\mathrm{TP}}{\mathrm{TP}+\mathrm{FP}}$$
* **Recall**, true positive rate, sensitivity
$$\mathrm{R} = \mathrm{TPR} = \frac{\mathrm{TP}}{\mathrm{TP}+\mathrm{FN}}$$
* **Fall-out**, false positive rate
$$\mathrm{FPR} = \frac{\mathrm{FP}}{\mathrm{FP}+\mathrm{TN}}$$
Example
###Code
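# A small helper mirroring the formulas above (my addition; the sklearn versions used
# in the rest of the notebook follow below). cm() returns [[TP, FP], [FN, TN]].
def basic_measures(y_true, y_pred):
    tp, fp, fn, tn = cm(y_true, y_pred).flatten()
    acc = (tp + tn) / float(tp + tn + fp + fn)
    p = tp / float(tp + fp)
    r = tp / float(tp + fn)
    fpr = fp / float(fp + tn)
    return acc, p, r, fpr

basic_measures(y_test, y_pred)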
cm(y_test, y_pred)
from sklearn.metrics import accuracy_score, precision_score, recall_score
accuracy_score(y_test, y_pred)
precision_score(y_test, y_pred)
recall_score(y_test, y_pred)
###Output
_____no_output_____
###Markdown
Example: Titanic dataset
###Code
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Imputer
titanic_df = pd.read_csv("../data/titanic-train.csv")
titanic_df.drop(['PassengerId'], axis=1, inplace=True)
titanic_df1 = titanic_df[['Pclass', 'Sex', 'Age','Survived']]
titanic_X = titanic_df[['Pclass', 'Sex', 'Age']].as_matrix()
titanic_y = titanic_df['Survived'].as_matrix()
le = LabelEncoder()
titanic_X[:,1] = le.fit_transform(titanic_X[:,1])
imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
titanic_X = imp.fit_transform(titanic_X)
titanic_X
titanic_y
shape(titanic_X), shape(titanic_y)
from sklearn import cross_validation
X_train, X_test, y_train, y_test = cross_validation.train_test_split(titanic_X, titanic_y, train_size=2.0/3, random_state=42)
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression(C=1)
lr.fit(X_train, y_train)
lr.predict(X_train)
y_pred_lr = lr.predict(X_test); y_pred_lr
y_test
cm(y_test, y_pred_lr)
accuracy_score(y_test, y_pred_lr)
lr.score(X_test, y_test)
lr.score(X_train, y_train)
precision_score(y_test, y_pred_lr, pos_label=1)
recall_score(y_test, y_pred_lr, pos_label=1)
from sklearn.svm import SVC
svm = SVC(C=1)
svm.fit(X_train, y_train)
svm.score(X_test, y_test)
y_pred_svm = svm.predict(X_test); y_pred_svm
cm(y_test, y_pred_svm)
precision_score(y_test, y_pred_svm, pos_label=1)
recall_score(y_test, y_pred_svm, pos_label=1)
###Output
_____no_output_____
###Markdown
Varying the classification threshold
###Code
y_scores_lr = lr.predict_proba(X_test)[:,1]; y_scores_lr
print precision_score(y_test, y_pred_lr)
print recall_score(y_test, y_pred_lr)
threshold = 0.4
y_pred_lr_tweaked = map(lambda s : 1 if s > threshold else 0, y_scores_lr)
print y_pred_lr_tweaked
print precision_score(y_test, y_pred_lr_tweaked)
print recall_score(y_test, y_pred_lr_tweaked)
###Output
0.752
0.783333333333
###Markdown
Precision-recall curve
###Code
from sklearn.metrics import precision_recall_curve
pr, re, _ = precision_recall_curve(y_test, y_scores_lr, pos_label=1)
pr
re
plt.plot(re, pr)
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.show()
from sklearn.metrics import average_precision_score
average_precision_score(y_test, y_scores_lr)
y_scores_svm = svm.decision_function(X_test)[:,0]
print y_scores_svm
pr_lr, re_lr, _ = precision_recall_curve(y_test, y_scores_lr, pos_label=1)
pr_svm, re_svm, _ = precision_recall_curve(y_test, y_scores_svm, pos_label=1)
plt.plot(re_lr, pr_lr, label='LR')
plt.plot(re_svm, pr_svm, label='SVM')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.legend()
plt.show()
print average_precision_score(y_test, y_scores_lr)
print average_precision_score(y_test, y_scores_svm)
###Output
0.86434051934
0.805608502352
###Markdown
ROC and AUC
* ROC = Receiver Operating Characteristics
* TPR as a function of FPR
* AUC = Area Under the (ROC) Curve
###Code
from sklearn.metrics import roc_curve, auc
fpr_lr, tpr_lr, _ = roc_curve(y_test, y_scores_lr)
roc_auc_lr = auc(fpr_lr, tpr_lr)
fpr_svm, tpr_svm, _ = roc_curve(y_test, y_scores_svm)
roc_auc_svm = auc(fpr_svm, tpr_svm)
plt.plot(fpr_lr, tpr_lr, label='LR ROC curve (area = %0.2f)' % roc_auc_lr)
plt.plot(fpr_svm, tpr_svm, label='SVM ROC curve (area = %0.2f)' % roc_auc_svm)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.legend(loc='lower right')
plt.show()
###Output
_____no_output_____
###Markdown
F-measure
* F1-measure:
$$F = \frac{2}{\frac{1}{P}+\frac{1}{R}} = \frac{2PR}{P+R}$$
* F-beta:
$$F_\beta = \frac{(1+\beta^2)PR}{\beta^2 P +R}$$
* $F_{0.5}$ puts twice the emphasis on precision, $F_{2}$ puts twice the emphasis on recall
###Code
def f_beta(p, r, beta):
return ((1 + beta**2) * p * r) / (beta**2 * p + r)
f_beta(0.5, 0.9, 1)
f_beta(0.5, 0.9, 0.5)
f_beta(0.5, 0.9, 2)
(0.5 + 0.9) / 2
sqrt(0.5 * 0.9)
2/(1/0.5 + 1/0.9)
r = 0.5
xs = sp.linspace(0, 1)
plt.plot(xs, (xs + r)/2, label='aritm')
plt.plot(xs, sp.sqrt(xs*r), label='geom')
plt.plot(xs, 2/(1/xs + 1/r), label='harm')
plt.legend(loc='lower right')
plt.show()
###Output
_____no_output_____
###Markdown
Multiclass classification
###Code
data = sp.loadtxt("path/do/glass.data", delimiter=",", skiprows=1)
print data
shape(data)
glass_X, glass_y = data[:,1:10], data[:,10]
from sklearn import cross_validation
X_train, X_test, y_train, y_test = cross_validation.train_test_split(glass_X, glass_y, train_size=2.0/3, random_state=42)
X_train.shape, X_test.shape
from sklearn.svm import SVC
m = SVC() # SVC(C=1, gamma='auto')
m.fit(X_train, y_train)
m.classes_
y_pred = m.predict(X_test); y_pred
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, y_pred)
from sklearn.metrics import f1_score
f1_score(y_test, y_pred, average=None)
sp.mean(_)
f1_score(y_test, y_pred, average='macro')
f1_score(y_test, y_pred, average='micro')
###Output
_____no_output_____ |
InvoiceNet.ipynb | ###Markdown
InvoiceNet
I used the processed version of the tokens from the 'group' column stored in the 'type' column of the dataframe for classification. I generate the vocabulary and the corresponding word vectors for the vocabulary using the word2vec model. Using these word embeddings, each token in the input data is converted into a word vector and these word vectors are joined together to produce a matrix-like version of the input data entity, where each slice of the matrix represents the word vector for each word in the input data. A convolutional neural network is used to extract information from the word embeddings. A fully-connected layer is connected to the output of the convolution layers. The coordinate information for the data entity is concatenated to the output of the convolutional layers before the fully-connected layers. A final fully-connected layer is used to produce the class prediction. Dropout is used in the fully-connected layers to prevent overfitting.
###Code
import numpy as np
import pickle
import gzip
from gensim.models import Word2Vec
# Dependencies for the DataHandler Class
###Output
_____no_output_____
###Markdown
DataHandler
The DataHandler class takes in a pandas dataframe as input and provides functions to process and prepare the data for training
###Code
class DataHandler:
def __init__(self, data=None, max_len=10):
self.data = data
self.max_length = max_len
self.vocab_size = 0
self.word2idx = {}
self.idx2word = {}
self.embeddings = None
self.embed_size = 300
self.PAD = '<pad>'
self.UNKNOWN = '<unk>'
self.START = '<start>'
self.END = '<end>'
self.label_dict = {0: 0, 1: 1, 2: 2, 8: 3, 14: 4, 18: 5}
self.num_classes = len(self.label_dict)
self.train_data = {}
# self.type_dict = {'text': 0.1, 'number': 0.2, 'email': 0.3, 'date': 0.4, '': 0.5, 'money': 0.6, 'phone': 0.7}
def read(self, data, max_len=10):
"""Read DataFrame"""
self.data = data
self.max_length = max_len
def process_data(self, tokens, coordinates):
tokens = [self.START] + tokens[:self.max_length - 2] + [self.END]
tokens += [self.PAD] * (self.max_length - len(tokens))
inp = np.array([self.get_word_id(token) for token in tokens])
coordinates = np.array(coordinates)
return inp, coordinates
def prepare_data(self):
"""Prepares data for training"""
inputs = []
labels = []
coordinates = []
for i, row in self.data.iterrows():
text = row['type']
coords = row['coords']
label = self.label_dict[int(row['label'])]
tokens = text[0].strip().split(' ')
# dtypes = [self.type_dict[dtype] for dtype in text[1].split(',')]
height = float(text[-2])
width = float(text[-1])
min_x = float(coords[0]) / width
min_y = float(coords[1]) / height
max_x = float(coords[2]) / width
max_y = float(coords[3]) / height
tokens = [self.START] + tokens[:self.max_length - 2] + [self.END]
tokens += [self.PAD] * (self.max_length - len(tokens))
inp = [self.get_word_id(token) for token in tokens]
inputs.append(np.array(inp))
labels.append(np.array(label))
coordinates.append(np.array([min_x, min_y, max_x, max_y]))
self.train_data['inputs'] = np.array(inputs)
self.train_data['labels'] = np.array(labels)
self.train_data['coordinates'] = np.array(coordinates)
def load_embeddings(self, model_path):
"""Loads pre-trained gensim model"""
print("\nLoading pre-trained embeddings...")
model = Word2Vec.load(model_path)
words = list(model.wv.vocab)
embed_size = model.layer1_size
embed = []
word2idx = {self.PAD: 0, self.UNKNOWN: 1, self.START: 2, self.END: 3}
idx2word = {0: self.PAD, 1: self.UNKNOWN, 2: self.START, 3: self.END}
embed.append(np.zeros(embed_size, dtype=np.float32))
embed.append(np.random.uniform(-0.1, 0.1, embed_size))
embed.append(np.random.uniform(-0.1, 0.1, embed_size))
embed.append(np.random.uniform(-0.1, 0.1, embed_size))
for word in words:
vector = model.wv[word]
embed.append(vector)
word2idx[word] = len(word2idx)
idx2word[word2idx[word]] = word
self.vocab_size = len(word2idx)
self.word2idx = word2idx
self.idx2word = idx2word
self.embeddings = np.array(embed, dtype=np.float32)
print("\nSuccessfully loaded pre-trained embeddings!")
def get_word_id(self, token):
"""Returns the id of a token"""
token = token.lower()
if token in self.word2idx:
return self.word2idx[token]
return self.word2idx[self.UNKNOWN]
def save_data(self, out_path='./data/processed.pkl.gz'):
"""Saves the embeddings and vocab as a zipped pickle file"""
assert (self.embeddings is not None or self.word2idx), "Data has not been processed yet"
pkl = {'embeddings': self.embeddings,
'word2idx': self.word2idx,
'idx2word': self.idx2word
}
with gzip.open(out_path, 'wb') as out_file:
pickle.dump(pkl, out_file)
print("\nData stored as {}".format(out_path))
def load_data(self, path):
"""Loads embeddings and vocab from a zipped pickle file"""
with gzip.open(path, 'rb') as in_file:
pkl = pickle.load(in_file)
self.embeddings = pkl['embeddings']
self.embed_size = self.embeddings.shape[1]
self.word2idx = pkl['word2idx']
self.vocab_size = len(self.word2idx)
self.idx2word = pkl['idx2word']
print("\nSuccessfully loaded data from {}".format(path))
###Output
_____no_output_____
###Markdown
Gensim - Word2Vec
We use the gensim word2vec implementation to train vector embeddings for our vocabulary
###Code
%matplotlib inline
import pickle
from gensim.models import Word2Vec
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
# Dependencies for word2vec
with open('train_api.pk', 'rb') as pklfile:
df = pickle.load(pklfile)
sentences = []
# Iterate through all the rows of the dataframe and split each processed
for i, row in df.iterrows():
text = row['type'][0].strip()
sentences.append(text.split(' '))
model = Word2Vec(sentences, size=300, window=5, min_count=3, workers=4)
model.save('model.bin')
print(model)
X = model[model.wv.vocab]
pca = PCA(n_components=2)
result = pca.fit_transform(X)
plt.scatter(result[:, 0], result[:, 1])
words = list(model.wv.vocab)
for i, word in enumerate(words):
plt.annotate(word, xy=(result[i, 0], result[i, 1]))
plt.show()
###Output
Word2Vec(vocab=689, size=300, alpha=0.025)
###Markdown
InvoiceNet - Model
The InvoiceNet class builds the model architecture and provides utility functions to train the model, perform inference on a trained model and load saved weights.
###Code
import numpy as np
import os
import keras
from keras.models import Model
from keras.layers import Input, Dense, Dropout, concatenate
from keras.layers import Embedding
from keras.layers import Convolution1D, GlobalMaxPooling1D
from keras import regularizers
from sklearn.utils.class_weight import compute_class_weight
# Dependencies for InvoiceNet class
# Setting random seed
np.random.seed(1337)
class InvoiceNet:
def __init__(self, data_handler, config):
coordinates = Input(shape=(data_handler.train_data['coordinates'].shape[1],), dtype='float32', name='coordinates')
words_input = Input(shape=(data_handler.max_length,), dtype='int32', name='words_input')
words = Embedding(data_handler.embeddings.shape[0], data_handler.embeddings.shape[1],
weights=[data_handler.embeddings],
trainable=False)(words_input)
conv1 = Convolution1D(filters=config.num_filters,
kernel_size=3,
padding='same',
activation='relu',
strides=1,
kernel_regularizer=regularizers.l2(config.reg_rate))(words)
pool1 = GlobalMaxPooling1D()(conv1)
conv2 = Convolution1D(filters=config.num_filters,
kernel_size=4,
padding='same',
activation='relu',
strides=1,
kernel_regularizer=regularizers.l2(config.reg_rate))(words)
pool2 = GlobalMaxPooling1D()(conv2)
conv3 = Convolution1D(filters=config.num_filters,
kernel_size=5,
padding='same',
activation='relu',
strides=1,
kernel_regularizer=regularizers.l2(config.reg_rate))(words)
pool3 = GlobalMaxPooling1D()(conv3)
output = concatenate([pool1, pool2, pool3])
output = Dropout(0.5)(output)
output = concatenate([output, coordinates])
output = Dense(config.num_hidden, activation='relu')(output)
output = Dropout(0.5)(output)
output = Dense(data_handler.num_classes, activation='softmax')(output)
self.model = Model(inputs=[words_input, coordinates], outputs=[output])
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
# self.model.summary()
self.data_handler = data_handler
self.config = config
def train(self):
print("\nInitializing training...")
if not os.path.exists(self.config.log_dir):
os.makedirs(self.config.log_dir)
if not os.path.exists(self.config.checkpoint_dir):
os.makedirs(self.config.checkpoint_dir)
if not os.path.exists(self.config.model_path):
os.makedirs(self.config.model_path)
tensorboard = keras.callbacks.TensorBoard(log_dir=self.config.log_dir, histogram_freq=1, write_graph=True)
modelcheckpoints = keras.callbacks.ModelCheckpoint(os.path.join(self.config.checkpoint_dir, "InvoiceNet_") +
".{epoch:02d}-{val_loss:.2f}-{val_acc:.2f}.hdf5",
monitor='val_loss', verbose=0, save_best_only=True,
save_weights_only=False, mode='auto')
class_weights = compute_class_weight('balanced', np.unique(self.data_handler.train_data['labels']), self.data_handler.train_data['labels'])
d_class_weights = dict(enumerate(class_weights))
self.model.fit([self.data_handler.train_data['inputs'], self.data_handler.train_data['coordinates']],
self.data_handler.train_data['labels'],
batch_size=self.config.batch_size,
verbose=True,
epochs=self.config.num_epochs,
callbacks=[tensorboard, modelcheckpoints],
validation_split=0.125,
shuffle=self.config.shuffle,
class_weight=d_class_weights)
self.model.save_weights(os.path.join(self.config.model_path, "InvoiceNet.model"))
def load_weights(self, path):
"""Loads weights from the given model file"""
self.model.load_weights(path)
print("\nSuccessfully loaded weights from {}".format(path))
def predict(self, tokens, coordinates):
"""Performs inference on the given tokens and coordinates"""
inp, coords = self.data_handler.process_data(tokens, coordinates)
        # model.predict expects a leading batch dimension, so wrap the single example
        pred = self.model.predict([inp[None, :], coords[None, :]], verbose=True)
pred = pred.argmax(axis=-1)
return pred
def evaluate(self):
predictions = self.model.predict([self.data_handler.train_data['inputs'], self.data_handler.train_data['coordinates']], verbose=True)
predictions = predictions.argmax(axis=-1)
acc = np.sum(predictions == self.data_handler.train_data['labels']) / float(len(self.data_handler.train_data['labels']))
print("\nTest Accuracy: {}".format(acc))
return predictions
@staticmethod
def get_precision(predictions, true_labels, target_label):
target_label_count = 0
correct_target_label_count = 0
for idx in xrange(len(predictions)):
if predictions[idx] == target_label:
target_label_count += 1
if predictions[idx] == true_labels[idx]:
correct_target_label_count += 1
if correct_target_label_count == 0:
return 0
return float(correct_target_label_count) / target_label_count
def f1_score(self, predictions):
f1_sum = 0
f1_count = 0
for target_label in xrange(0, max(self.data_handler.train_data['labels'])):
precision = self.get_precision(predictions, self.data_handler.train_data['labels'], target_label)
recall = self.get_precision(self.data_handler.train_data['labels'], predictions, target_label)
f1 = 0 if (precision+recall) == 0 else 2*precision*recall/(precision+recall)
f1_sum += f1
f1_count += 1
macrof1 = f1_sum / float(f1_count)
print("\nMacro-Averaged F1: %.4f\n" % macrof1)
return macrof1
###Output
_____no_output_____
###Markdown
Config
Config class to define training/testing parameters. Point the `data` attribute at the test file to run the model on the test data.
###Code
class Config:
def __init__(self, data):
self.data = data # path to training or testing data
self.word2vec = "model.bin" # path to trained word2vec model
self.model_path = "./model" # path to directory where trained model should be stored
self.load_weights = "./model/InvoiceNet.model" # path to saved weights file
self.checkpoint_dir = "./checkpoints" # path to directory where checkpoints should be stored
self.log_dir = "./logs" # path to directory where tensorboard logs should be stored
self.num_epochs = 200 # number of epochs
self.num_hidden = 512 # size of hidden layer
self.num_filters = 100 # number of filters
self.batch_size = 64 # size of mini-batch
self.reg_rate = 0.0001 # rate of regularization
self.shuffle = True # shuffle dataset
###Output
_____no_output_____
###Markdown
Training
###Code
# Training the network on the data
# Defining a DataHandler object and preparing data for training
config = Config(data="train_api.pk")
with open(config.data, 'rb') as pklfile:
df = pickle.load(pklfile)
data = DataHandler(df, max_len=12)
data.load_embeddings(config.word2vec)
data.prepare_data()
print(data.train_data['inputs'].shape)
print(data.train_data['labels'].shape)
print(data.train_data['coordinates'].shape)
net = InvoiceNet(data_handler=data, config=config)
net.train()
###Output
Loading pre-trained embeddings...
Successfully loaded pre-trained embeddings!
###Markdown
Inference
Runs the model on the test data and calculates the test accuracy and F1 score.
###Code
label_dict = {0: "Other",
1: "Invoice Date",
2: "Invoice Number",
3: "Buyer GST",
4: "Seller GST",
5: "Total Amount"}
config = Config(data="test_api.pk")
with open(config.data, 'rb') as pklfile:
df = pickle.load(pklfile)
data = DataHandler(df, max_len=12)
data.load_embeddings(config.word2vec)
data.prepare_data()
print(data.train_data['inputs'].shape)
print(data.train_data['labels'].shape)
print(data.train_data['coordinates'].shape)
net = InvoiceNet(data_handler=data, config=config)
net.load_weights(config.load_weights)
predictions = net.evaluate()
net.f1_score(predictions)
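# Extra diagnostic sketch (my addition): per-field confusion matrix for the predictions
# above, with the human-readable field names from label_dict for reference.
from sklearn.metrics import confusion_matrix
print(confusion_matrix(data.train_data['labels'], predictions))
print([label_dict[k] for k in sorted(label_dict)])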
###Output
Loading pre-trained embeddings...
Successfully loaded pre-trained embeddings!
(1023, 12)
(1023,)
(1023, 4)
|
3_models/uncertainty.ipynb | ###Markdown
Accounting for error in simple models
In this notebook, we explore some of the ways that error can enter into a model, and methods for handling it. The major forms of error and uncertainty are:
1. Observational error.
2. Parameter uncertainty.
3. Prediction uncertainty.
4. Structural error.
Each is demonstrated by its own code snippet below for the case of a simple linear model with two parameters: gradient, $m_0$, and $y$-intercept, $c_0$. The model is denoted $$y = mx+c = f(x;\boldsymbol{\theta}),$$ where $\boldsymbol{\theta} = [m,c]$ is the parameter vector, and $\boldsymbol{\theta}_0$ its true value we are trying to determine.
Observational error
We concede that no instrument makes perfect measurements, and therefore there is some amount of random error, $\epsilon$, in an observation, i.e., $ \tilde{y}_i = y_i+\epsilon$, where $y_i$ is the true value of a quantity we are trying to measure, and $\tilde{y_i}$ is the actual measurement made. We assume that $\epsilon$ is a normally distributed random variable with variance $\sigma_i^2$. For the simple linear model, we demonstrate the "observation" process by sampling the true model, $f(x;\boldsymbol{\theta}_0)$, at the observation points, $x_i$, and adding normally distributed random error. Execute the code below to see how the observation points (open circles) deviate from the true model (blue line). You can also plot the best-fit model. ***Is the best-fit model the same as the "true" model?***
###Code
%matplotlib inline
# import modules
from scipy.stats import multivariate_normal
from scipy.optimize import curve_fit
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from ipywidgets import interact
# define a model
ts = 14.
x = np.linspace(0,1,101)
# model parameters
m0 = 2. # true gradient
c0 = 3. # true intercept
v = 0.15 # variance of random error when making measurements
# define the model, a simple linear function
def func(x,m,c):
return m*x+c
# define the error, a random draw from a normal distribution
def e(x,v):
return np.random.randn(len(x))*np.sqrt(v)
# compute the "true" model, using the "true" parameters
y = func(x,m0,c0)
# seed the random number generator so we get the same numbers each time
np.random.seed(13)
# define some values of the independent variable at which we will be taking our "observations"
xo = np.linspace(0,1,12)[1:-1]
# compute the observations - "true" model + random error (drawn from normal distribution)
yo = func(xo,m0,c0) + e(xo,v)
def plot_observations(N_obs, true_model, RMS_fit, error_dist):
# check N_obs does not exceed length of observations
i = np.min([len(xo), N_obs])
# initialize figure window and axes
fig, ax = plt.subplots(nrows=1, ncols=1)
fig.set_size_inches([8.,5.])
# plot the "true" model
if true_model:
ln1 = ax.plot(x,y,'b-', label = 'true model',zorder = 10)
else:
ln1 = []
# plot the observations
ln2 = ax.plot(xo[:i],yo[:i],'wo', mec = 'k', mew = 1.5, ms = 5, label = r'observations', zorder = 10)
# add "best-fit" model if appropriate
if RMS_fit:
# find best-fit model
p2,pc = curve_fit(func, xo[:i], yo[:i], [1,1])
# plot model
ax.plot(x,func(x,*p2),'r-')
# add normal distributions
ylim = ax.get_ylim()
ye = np.linspace(-ylim[1],ylim[1],101)*0.2
ye2 = np.linspace(-ylim[1],ylim[1],101)*0.25
# loop over plotted observations
for xoi, yoi in zip(xo[:i],yo[:i]):
# normal dist
xi = 0.05*np.exp(-(ye)**2/v)+xoi
# add to plot
if error_dist:
ax.plot(xi, ye+func(xoi,m0,c0), 'k-', lw = 0.5, zorder = 0)
ax.plot(xi*.0+xoi, ye2+func(xoi,m0,c0), '-', lw = 0.5, zorder = 0, color = [0.5, 0.5, 0.5])
# plot upkeep + legend
ax.set_xlim(ax.get_xlim())
lns = ln1+ln2
lbs = [ln.get_label() for ln in lns]
ax.legend(lns,lbs,loc = 2,prop={'size':ts})
ax.set_ylim([1,7])
ax.set_xlim([0,1])
ax.set_xlabel('x',size = ts)
ax.set_ylabel('y',size = ts)
for t in ax.get_xticklabels()+ax.get_yticklabels(): t.set_fontsize(ts)
plt.show()
interact(plot_observations, N_obs = (2,10,1), true_model = True, RMS_fit = False, error_dist = False)
###Output
_____no_output_____
###Markdown
Parameter uncertainty
Grid search parameter estimation
We use observations to learn about parameters during model calibration. However, if the observations are uncertain, then so too must be the parameters. One way to describe parameter uncertainty is via a *probability distribution*. When that distribution has been informed by the observations, we call it the *posterior* and denote it $P(\boldsymbol{\theta}|\tilde{y}_i)$. For a uniform *prior distribution* (our initial expectations about the parameter, before seeing any data), and normally distributed errors, we can compute the posterior directly.
$$ r(\boldsymbol{\theta}) = -\sum_i^n \frac{1}{\sigma_i^2}(\tilde{y}_i-f(x_i;\boldsymbol{\theta}))^2, \quad\quad\quad\quad P(\boldsymbol{\theta}|\tilde{y}_i) = A \exp(r/2),$$
where $r$ is the objective function to be maximized during calibration, and $A$ is a normalizing constant. We compute $r(\boldsymbol{\theta})$ by performing a grid search over parameter space, running the model and computing the objective function at discrete points, $\boldsymbol{\theta}_i$. Execute the code below to see a plot of the posterior over parameter space. Use the slider bars to manually adjust the parameters of an arbitrary model, and see how the fit of this model to the data (left plot) corresponds to different likelihoods of the posterior (right). ***Does the true model correspond to the maximum of the posterior? Explain.***
###Code
# generate parameter grid for grid search
m = np.linspace(m0-2,m0+2,31); dm = m[1]-m[0]
c = np.linspace(c0-1,c0+1,31); dc = c[1]-c[0]
M,C = np.meshgrid(m,c)
# compute objective function
# empty vector, correct size, for storing computed objective function
r = 0.*M.flatten()
# for each parameter combination in the grid search
for i,theta in enumerate(zip(M.flatten(), C.flatten())):
# unpack parameter vector
mi,ci = theta
# compute objective function
r[i]=-np.sum((yo-func(xo,mi,ci))**2)/v
# reshape objective function to meshgrid dimensions
R = np.array(r).reshape([len(c), len(m)])
# compute posterior
post = np.exp(R/2.)
# convert 2D mesh to vectors
mv, cv, pv = [vi.flatten() for vi in [M,C,post]]
# plotting function
def plot_posterior(m1,c1):
# initialize figure window and axes
fig = plt.figure(figsize=[16.,6.5])
ax1 = plt.axes([0.15,0.15,0.35,0.75])
ax2 = fig.add_subplot(122, projection='3d')
# show data and fitted models
# plot the "true" model
ln1 = ax1.plot(x,y,'b-', label = 'true model',zorder = 10)
# plot the observations
ln2 = ax1.plot(xo,yo,'wo', mec = 'k', mew = 1.5, ms = 5, label = r'observations', zorder = 10)
# best-fit model
p2,pc = curve_fit(func, xo, yo, [1,1])
ln3 = ax1.plot(x,func(x,*p2),'r-', label = 'best-fit model')
# show model with [m1,c1]
i,j = np.argmin(abs(m-m1)), np.argmin(abs(c-c1)) # snap to nearest grid value
ln4 = ax1.plot(x,func(x,m[i],c[j]),'g-', label = 'arbitrary model: r=%3.2f'%R[i,j])
lns = ln1+ln2+ln3+ln4
lbs = [ln.get_label() for ln in lns]
ax1.legend(lns,lbs,loc=2)
# show posterior
# plot posterior as surface over 2D parameter space
ax2.plot_surface(M, C, post, rstride=1, cstride=1,cmap=cm.Oranges, lw = 0.5, zorder = 10)
# show models on posterior
# best-fit model
im = np.argmax(pv)
ax2.plot3D([mv[im],],[cv[im],],[pv[im],],'ro',mec = 'r', ms = 7)
# true model
ax2.plot3D([m0,],[c0,],[np.exp(-np.sum((yo-func(xo,m0,c0))**2)/(2.*v)),],'bo',mec = 'b', ms = 7)
# arbitrary model
im = np.argmax(pv)
ax2.plot3D([M[j,i],],[C[j,i],],[post[j,i],],'go',mec = 'g', ms = 7)
# plot upkeep, labels
ax2.set_xlabel('m',size = ts)
ax2.set_ylabel('c',size = ts)
ax2.set_xticklabels([])
ax2.set_yticklabels([])
ax2.set_zticklabels([])
ax1.set_xlabel('x',size = ts)
ax1.set_ylabel('y',size = ts)
for ax in [ax1,ax2]:
for t in ax.get_xticklabels()+ax.get_yticklabels(): t.set_fontsize(ts)
# plot view angle
ax2.view_init(45, 15-90)
plt.show()
interact(plot_posterior, m1 = (m0-2,m0+2,4./51), c1 = (c0-1,c0+1,2./51))
###Output
_____no_output_____
###Markdown
Monte-Carlo parameter estimation Another method for estimating posterior parameter distributions is Markov-Chain Monte-Carlo. The mathematics of the method are well beyond the scope of this notebook (but there are plenty of excellent descriptions online). There is an excellent Python package called [emcee](http://dan.iel.fm/emcee/current/) that implements a number of MCMC algorithms. We will use the package to replicate the grid search above. If you are using the Anaconda Python distribution, then emcee can be installed from the command line using `pip`: `pip install emcee`
###Code
import emcee
# log likelihood for the model, given the data
def lnprob(pars, obs):
v = 0.15
return -np.sum((obs[:,1]-func(obs[:,0],*pars))**2)/v
ndim = 2 # parameter space dimensionality
nwalkers=10 # number of walkers
# create the emcee object (set threads>1 for multiprocessing)
data = np.array([xo,yo]).T
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, threads=1, args=[data,])
# set the initial location of the walkers
pars = [2.5, 2.5] # initial guess
p0 = np.array([pars + 1e-3*np.random.randn(ndim) for i in range(nwalkers)]) # add some noise
# set the emcee sampler to start at the initial guess and run 100 burn-in jumps
pos,prob,state=sampler.run_mcmc(p0,100)
sampler.reset()
# run 1000 jumps and save the results
pos,prob,state=sampler.run_mcmc(pos,1000)
f = open("chain.dat", "w")
nk,nit,ndim=sampler.chain.shape
for k in range(nk):
for i in range(nit):
f.write("{:d} {:d} ".format(k, i))
for j in range(ndim):
f.write("{:15.7f} ".format(sampler.chain[k,i,j]))
f.write("{:15.7f}\n".format(sampler.lnprobability[k,i]))
f.close()
###Output
_____no_output_____
###Markdown
We'll use the corner module to display some of the results. This can be installed by `pip` in the same way as `emcee`.
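Before plotting, it is also worth a quick check that the sampler mixed reasonably. A minimal sketch (it assumes the `sampler` object from the previous cell; emcee exposes the per-walker acceptance fraction, and a mean value of roughly 0.2-0.5 is usually taken as healthy):

```python
# mean acceptance fraction across the walkers; ~0.2-0.5 suggests reasonable mixing
print(sampler.acceptance_fraction.mean())
```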
###Code
# show corner plot of the results
import corner
chain = np.genfromtxt('chain.dat')
weights = chain[:,-1]
weights -= np.max(weights)
weights = np.exp(weights)
labels = ['m','c']
fig = corner.corner(chain[:,2:-1], labels=labels, weights=weights, smooth=1, bins=30)
###Output
_____no_output_____
###Markdown
Prediction uncertainty Establishing the posterior gives us a quantitative measure of which parameter combinations are more likely than others. To make a forecast of the future (a probabilistic description of all outcomes), we sample parameter combinations from the posterior and use the model to make predictions for these. The key is that predictions corresponding to the more likely parameter combinations will have a greater weight in the final forecast. Execute the code below. Use the slider bar to take more and more samples from the posterior. Note how the forecast - a histogram of predictions - starts to approximate a probability distribution as more and more samples are drawn. ***How well does the 90% forecast interval approximate the true model?***
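The 90% forecast interval quoted here is simply the band between the 5th and 95th percentiles of the sampled predictions. A minimal sketch, assuming `predictions` holds one model output per posterior sample:

```python
import numpy as np
lower, upper = np.percentile(predictions, [5, 95])  # 90% forecast interval
```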
###Code
# construct covariance matrix
# means
pv = pv/np.sum(pv)
m1 = np.sum(pv*mv)
c1 = np.sum(pv*cv)
# variances
smm = np.sum(pv*(mv-m1)**2)
scc = np.sum(pv*(cv-c1)**2)
scm = np.sum(pv*(mv-m1)*(cv-c1))
# matrix
cov = np.array([[smm,scm],[scm,scc]])
# plotting function
def plot_ensemble(logN,predict):
# initialize figure window and axes
fig, axs = plt.subplots(nrows=1, ncols=2)
fig.set_size_inches([16.,5.])
# vector for long range forecast
if predict:
x2 = np.linspace(0,3.2, 101)
ix = np.argmin(abs(x2 - 3.0))
else:
x2 = np.linspace(0,1,101)
# plot the "true" model
ln1 = axs[0].plot(x2,func(x2,m0,c0),'b-', label = 'true model',zorder = 10)
# plot the observations
ln2 = axs[0].plot(xo,yo,'wo', mec = 'k', mew = 1.5, ms = 5, label = r'observations', zorder = 10)
# take samples from multivariate normal
np.random.seed(13)
samples = multivariate_normal.rvs(mean = [m1, c1], cov = cov, size = 2**logN)
if logN == 0: samples = [samples,]
# plot line for each sample
prediction = []
for i,s in enumerate(samples):
# plot sample model
if i==0:
ln3 = axs[0].plot(x2,func(x2,*s),'k-', zorder = 0, lw = 0.5, label = 'sample model')
else:
axs[0].plot(x2,func(x2,*s),'k-', zorder = 0, lw = 0.5, alpha = 1./np.sqrt(2**logN))
# save prediction of sample model
if predict:
prediction.append(func(x2[ix],*s))
# x-limits: choice to show prediction
if not predict:
axs[0].set_xlim([0,1])
else:
axs[0].set_xlim([0,3.2])
ylim = axs[0].get_ylim(); axs[0].set_ylim(ylim)
axs[0].plot([3,3],ylim, 'k--')
# plot histogram of predictions
if predict:
h,e = np.histogram(prediction, bins = np.linspace(6,14,19))
axs[1].bar(e[:-1], h, e[1]-e[0], color = [0.5,0.5,0.5])
pcs = np.percentile(prediction, [5,95])
ylim = axs[1].get_ylim(); axs[1].set_ylim(ylim)
axs[1].plot([func(x2[ix],m0,c0), func(x2[ix],m0,c0)],ylim,'b-')
axs[1].plot([pcs[0],pcs[0]],ylim,'k--')
axs[1].plot([pcs[1],pcs[1]],ylim,'k--')
# legend
lns = ln1+ln2+ln3
lbs = [ln.get_label() for ln in lns]
axs[0].legend(lns,lbs,loc=2)
# plot upkeep
axs[0].set_xlabel('x',size = ts)
axs[0].set_ylabel('y',size = ts)
axs[1].set_xlabel('prediction at x=3',size = ts)
axs[1].set_ylabel('frequency',size = ts)
for ax in axs:
for t in ax.get_xticklabels()+ax.get_yticklabels(): t.set_fontsize(ts)
plt.show()
interact(plot_ensemble, logN = (0,10,1), predict = False)
###Output
_____no_output_____
###Markdown
Structural (or model) error This refers to deficiencies in the construction of the model, which could include: the physics that are represented, discretization of the equations, assumptions made about homogeneity or features that are excluded. In this example, we will consider structural error introduced by incorrect physics. When physical laws are used to formulate governing equations for a model, the result is a class of functions, $f(\cdot;\boldsymbol{\theta})$, whose parameters determine the behaviour of the model under particular circumstances. It is entirely possible to use the wrong parameters, $\boldsymbol{\theta}$, to try and predict the future: this is *parameter error*. However, it is also possible that our interpretation of the relevant physics was incorrect and, actually, a different class of functions, $g(\cdot;\boldsymbol{\theta})$, provides a more appropriate description of the phenomenon we are attempting to model. We call this *structural (or model) error*. In the examples presented here, we "know" the underlying model is linear in nature. However, let's assume that we mistakenly thought it was in fact logarithmic, i.e., $$y = a \ln(x_i-x_0)+b,$$ where $a$, $x_0$ and $b$ are the three parameters of this different model. Executing the code below will run through the same process as in the cells above: a grid search is undertaken to find the objective function and hence the posterior, and the posterior is then sampled to provide a forecast of the future. Compare how the two models' forecasts differ and how well they agree with the true outcome. ***Which model provides a better fit to the data?*** ***How do the forecasts of the two models differ?***
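One quantitative way to tackle the first question is to compare each model's best-fit sum of squared residuals. A minimal sketch (it assumes `xo`, `yo` and the linear model `func` from earlier, plus the logarithmic model `func2` defined in the cell below):

```python
from scipy.optimize import curve_fit
p_lin, _ = curve_fit(func, xo, yo, [1, 1])       # best-fit linear model
p_log, _ = curve_fit(func2, xo, yo, [1, 1, 1])   # best-fit logarithmic model
rss_lin = np.sum((yo - func(xo, *p_lin))**2)     # linear misfit
rss_log = np.sum((yo - func2(xo, *p_log))**2)    # logarithmic misfit
```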
###Code
# vectors for making predictions
x2 = np.linspace(0.01,3.2, 101)
ix = np.argmin(abs(x2 - 3.0))
N = 2**10
# linear model samples
np.random.seed(13)
LNsamples = multivariate_normal.rvs(mean = [m1, c1], cov = cov, size = N)
# log model samples
# define log model, three parameters
def func2(x,*p):
return p[0]*np.log(x+p[1])+p[2]
# find best-fit log model
p2,pc = curve_fit(func2, xo, yo, [1,1,1])
# construct parameter search grid
pi = np.linspace(p2[0]/10.,p2[0]*10.,51); dpi = pi[1]-pi[0]
pj = np.linspace(p2[1]/10.,p2[1]*10.,51); dpj = pj[1]-pj[0]
pk = np.linspace(p2[2]/10.,p2[2]*10.,51); dpk = pk[1]-pk[0]
PI, PJ, PK = np.meshgrid(pi,pj,pk)
# compute posterior for log model
# empty vector, correct size, for storing computed objective function
r2 = 0.*PI.flatten()
# for each parameter combination in the grid search
for i,theta in enumerate(zip(PI.flatten(), PJ.flatten(), PK.flatten())):
# compute objective function
r2[i]=-np.sum((yo-func2(xo,*theta))**2)/v
# reshape objective function to meshgrid dimensions
R2 = np.array(r2).reshape([len(pi), len(pj), len(pk)])
# compute posterior
post2 = np.exp(R2/2.)
# convert 2D mesh to vectors
pi,pj,pk,pv2 = [vi.flatten() for vi in [PI,PJ,PK,post2]]
# compute covariance matrix for log model
# normalize posterior
pv2 = pv2/(np.sum(pv2))
# means
pi1 = np.sum(pv2*pi)
pj1 = np.sum(pv2*pj)
pk1 = np.sum(pv2*pk)
# variances
sii = np.sum(pv2*(pi-pi1)**2)
sjj = np.sum(pv2*(pj-pj1)**2)
skk = np.sum(pv2*(pk-pk1)**2)
sij = np.sum(pv2*(pi-pi1)*(pj-pj1))
sik = np.sum(pv2*(pi-pi1)*(pk-pk1))
sjk = np.sum(pv2*(pk-pk1)*(pj-pj1))
# assemble matrix
cov2 = np.array([[sii,sij, sik],[sij, sjj, sjk],[sik, sjk, skk]])
np.random.seed(13)
LGsamples = multivariate_normal.rvs(mean = [pi1, pj1, pk1], cov = cov2, size = N)
# plotting function
def plot_structural(LNmodel, LNset, LGmodel, LGset):
# initialize figure window and axes
fig, axs = plt.subplots(nrows=1, ncols=2)
fig.set_size_inches([16.,5.])
# plot the observations
axs[0].plot(xo,yo,'wo', mec = 'k', mew = 1.5, ms = 5, zorder = 10)
# plot best-fit log model
if LGmodel:
axs[0].plot(x2,func2(x2,*p2),'g-',lw = 2)
axs[0].plot(x2,func2(x2,*p2),'k--',lw = 2)
# plot ensemble for best-fit log model
if LGset:
# plot line for each sample
prediction = []
for s in LGsamples:
axs[0].plot(x2, func2(x2,*s), 'g-', alpha = 1./np.sqrt(N))
prediction.append(func2(x2[ix],*s))
# plot histogram of predictions
h,e = np.histogram(prediction, bins = np.linspace(4,14,33))
axs[1].bar(e[:-1], h, e[1]-e[0], color = 'g', alpha = 0.5)
pcs = np.percentile(prediction, [5,95])
ylim = axs[1].get_ylim(); axs[1].set_ylim(ylim)
axs[1].plot([func(x2[ix],m0,c0), func(x2[ix],m0,c0)],ylim,'b-')
axs[1].plot([pcs[0],pcs[0]],ylim,'g--')
axs[1].plot([pcs[1],pcs[1]],ylim,'g--')
# plot best-fit linear model
if LNmodel:
p1,pc = curve_fit(func, xo, yo, [1,1])
axs[0].plot(x2,func(x2,*p1),'r-',lw = 2)
axs[0].plot(x2,func(x2,*p1),'k--',lw = 2)
# plot ensemble for linear model
if LNset:
# plot line for each sample
prediction = []
for s in LNsamples:
axs[0].plot(x2, func(x2,*s), 'r-', alpha = 1./np.sqrt(N))
prediction.append(func(x2[ix],*s))
# plot histogram of predictions
h,e = np.histogram(prediction, bins = np.linspace(4,14,33))
axs[1].bar(e[:-1], h, e[1]-e[0], color = 'r', alpha = 0.5)
pcs = np.percentile(prediction, [5,95])
ylim = axs[1].get_ylim(); axs[1].set_ylim(ylim)
axs[1].plot([func(x2[ix],m0,c0), func(x2[ix],m0,c0)],ylim,'b-')
axs[1].plot([pcs[0],pcs[0]],ylim,'r--')
axs[1].plot([pcs[1],pcs[1]],ylim,'r--')
# axis lims
if LNset or LGset:
axs[0].set_xlim([0,3.2])
ylim = axs[0].get_ylim()
else:
axs[0].set_xlim([0,1])
axs[0].set_ylim([1,6])
# plot upkeep
axs[0].set_xlabel('x',size = ts)
axs[0].set_ylabel('y',size = ts)
axs[1].set_xlabel('prediction at x=3',size = ts)
axs[1].set_ylabel('frequency',size = ts)
for ax in axs:
for t in ax.get_xticklabels()+ax.get_yticklabels(): t.set_fontsize(ts)
plt.show()
interact(plot_structural, LNmodel=False, LNset=False, LGmodel=False, LGset=False)
###Output
_____no_output_____ |
docs/HasField.ipynb | ###Markdown
*has_field(tableName, varName)* Returns True if the specified column (field) exists in the table. Returns False if either the table or the variable does not exist (also see [Dataset Columns](Columns.ipynb)). > **Parameters:** >> **tableName: string**>> The name of the table associated with the dataset. A full list of table names can be found in the [catalog](Catalog.ipynb).>> >> **varName: string or list of string**>> Variable short name. A full list of variable short names can be found in the [catalog](Catalog.ipynb).>**Returns:** >> Boolean Example
###Code
#!pip install pycmap -q #uncomment to install pycmap, if necessary
import pycmap
api = pycmap.API(token='<YOUR_API_KEY>')
api.has_field('tblAltimetry_REP', 'sla')
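# per the signature above, varName may also be a list of variable short names;
# a hedged sketch - the second column name here is purely illustrative:
api.has_field('tblAltimetry_REP', ['sla', 'adt'])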
###Output
_____no_output_____ |
notebooks/git-integration.ipynb | ###Markdown
Git Integration`rubicon` offers a way to automatically log relevant `git` information if you're running from within a `git` repo. We can use this information to pinpoint exactly what point in a repo's history an experiment was run at. To enable this feature, instantiate the `Rubicon` object with `auto_git_enabled=True`.
###Code
from rubicon import Rubicon
rubicon = Rubicon(persistence="memory", auto_git_enabled=True)
###Output
_____no_output_____
###Markdown
Any project created with this client will have the URL of the GitHub repo's origin automatically populated in the `github_url` property.
###Code
project = rubicon.create_project("Automatic Git Integration")
project.github_url
###Output
_____no_output_____
###Markdown
Experiments will have the current active branch name and last commit hash populated in the `branch_name` and `commit_hash` fields, respectively.
###Code
experiment = project.log_experiment(model_name="GitHub Model")
experiment.branch_name, experiment.commit_hash
###Output
_____no_output_____
###Markdown
These properties can help easily associate projects and experiments with the exact branches and commits they were run against so we can go back and reference the code later. On the dashboard, experiments can be sorted and filtered by their commit hash.
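For example, one simple way to jump back to the exact code an experiment ran against is to combine these fields into a commit URL. A minimal sketch (it assumes the origin is hosted on GitHub, that `github_url` is the plain repository URL, and that `project.experiments()` returns the logged experiments):

```python
for exp in project.experiments():
    # may need to strip a trailing '.git' from the origin URL
    print(f"{project.github_url}/tree/{exp.commit_hash}")
```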
###Code
from rubicon.ui import Dashboard
Dashboard("memory").run_server()
###Output
_____no_output_____ |
examples/estimator/classifier/DecisionTreeClassifier/js/basics_embedded.ipynb | ###Markdown
sklearn-porter Repository: https://github.com/nok/sklearn-porter DecisionTreeClassifier Documentation: [sklearn.tree.DecisionTreeClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html) Loading data:
###Code
from sklearn.datasets import load_iris
iris_data = load_iris()
X = iris_data.data
y = iris_data.target
print(X.shape, y.shape)
###Output
((150, 4), (150,))
###Markdown
Train classifier:
###Code
from sklearn import tree
clf = tree.DecisionTreeClassifier()
clf.fit(X, y)
###Output
_____no_output_____
###Markdown
Transpile classifier:
###Code
%%time
from sklearn_porter import Porter
porter = Porter(clf, language='js')
output = porter.export(embed_data=True)
print(output)
###Output
var DecisionTreeClassifier = function() {
var findMax = function(nums) {
var index = 0;
for (var i = 0; i < nums.length; i++) {
index = nums[i] > nums[index] ? i : index;
}
return index;
};
this.predict = function(features) {
var classes = new Array(3);
if (features[3] <= 0.800000011921) {
classes[0] = 50;
classes[1] = 0;
classes[2] = 0;
} else {
if (features[3] <= 1.75) {
if (features[2] <= 4.94999980927) {
if (features[3] <= 1.65000009537) {
classes[0] = 0;
classes[1] = 47;
classes[2] = 0;
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 1;
}
} else {
if (features[3] <= 1.54999995232) {
classes[0] = 0;
classes[1] = 0;
classes[2] = 3;
} else {
if (features[2] <= 5.44999980927) {
classes[0] = 0;
classes[1] = 2;
classes[2] = 0;
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 1;
}
}
}
} else {
if (features[2] <= 4.85000038147) {
if (features[0] <= 5.94999980927) {
classes[0] = 0;
classes[1] = 1;
classes[2] = 0;
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 2;
}
} else {
classes[0] = 0;
classes[1] = 0;
classes[2] = 43;
}
}
}
return findMax(classes);
};
};
if (typeof process !== 'undefined' && typeof process.argv !== 'undefined') {
if (process.argv.length - 2 === 4) {
// Features:
var features = process.argv.slice(2);
// Prediction:
var clf = new DecisionTreeClassifier();
var prediction = clf.predict(features);
console.log(prediction);
}
}
CPU times: user 1.96 ms, sys: 1.02 ms, total: 2.98 ms
Wall time: 2.63 ms
###Markdown
Run classification in JavaScript: Save the transpiled estimator:
###Code
with open('DecisionTreeClassifier.js', 'w') as f:
f.write(output)
###Output
_____no_output_____
###Markdown
Prediction:
###Code
%%bash
if hash node 2>/dev/null; then
node DecisionTreeClassifier.js 1 2 3 4
fi
###Output
1
|
Python/05 Scikit-learn/02.01 Regressao Linear Simples.ipynb | ###Markdown
Simple Linear Regression The simple linear regression model predicts the response from a single predictor variable and is given by the equation y = ax + b, where: y -> dependent variable, a -> slope coefficient, x -> independent variable, b -> intercept. To import the module, we use: **from sklearn.linear_model import LinearRegression**
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import numpy as np
###Output
_____no_output_____
###Markdown
Example **Generating the data y = 2x - 5**
###Code
rng = np.random.RandomState(1) # Seeding the random number generator
x = 10 * rng.rand(50) # Creating the independent values
y = 2 * x - 5 + rng.randn(50) # Creating the dependent values
plt.scatter(x, y); # Plotting
###Output
_____no_output_____
###Markdown
**Using the LinearRegression model**
###Code
from sklearn.linear_model import LinearRegression
# Instantiating the model
model = LinearRegression(fit_intercept=True)
# Training the model
model.fit(x[:, np.newaxis], y) # np.newaxis makes X two-dimensional; it must always be a matrix, never a vector
# Generating new x values
xfit = np.linspace(0, 10, 1000)
# Predicting y values with the trained model
yfit = model.predict(xfit[:, np.newaxis])
# Plotting
plt.scatter(x, y)
plt.plot(xfit, yfit);
###Output
_____no_output_____
###Markdown
**Checking the parameters**
###Code
print("Coeficiente angular:", model.coef_[0])
print("Inclinação da reta:", model.intercept_)
###Output
Slope: 2.027208810360695
Intercept: -4.998577085553202
###Markdown
Checking the cost function (mean squared error)
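The mean squared error is just the average squared residual between the observed and predicted values: $$\mathrm{MSE} = \frac{1}{n}\sum_{i=1}^{n}\left(y_i - \hat{y}_i\right)^2$$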
###Code
from sklearn.metrics import mean_squared_error
y_true = y
y_pred = model.predict(x[:, np.newaxis])
mean_squared_error(y_true, y_pred)
###Output
_____no_output_____ |
tests_jupyter/save_test.ipynb | ###Markdown
Test save
###Code
ag.optimize(ngen=10)
ag.plot(close_to=(0,8))
ag.select(close_to=(0,8))
ag.save("saved_ag.pickle")
###Output
_____no_output_____
###Markdown
Test save with interrupt
###Code
ag.optimize(ngen=10)
ag.save("interrupted_ag.pickle")
###Output
_____no_output_____ |
05_SimpleStock_ReadWriteCSV.ipynb | ###Markdown
Read CSV in R Current working directory
###Code
print(getwd())
data <- read.csv("AAPL.csv")
head(data)
is.data.frame(data)
# Check how many columns
ncol(data)
# Check how many rows
nrow(data)
# Find the max closing price
max(data$Adj.Close)
# Find the min closing price
min(data$Adj.Close)
# Add Typical Price in the data frame
# Sum of rows in certain columns and divide by 3
data$TypicalPrice <- rowSums(data[,c("High", "Low", "Adj.Close")]/3, na.rm=TRUE)
head(data)
# Save new data to CSV
write.csv(data,"NewAAPL.csv", row.names = FALSE)
newdata <- read.csv("NewAAPL.csv")
print(newdata)
###Output
Date Open High Low Close Adj.Close Volume TypicalPrice
1 2016-01-04 102.61 105.37 102.00 105.35 99.49911 67649400 102.28970
2 2016-01-05 105.75 105.85 102.41 102.71 97.00573 55791000 101.75524
3 2016-01-06 100.56 102.37 99.87 100.70 95.10736 68457400 99.11579
4 2016-01-07 98.68 100.13 96.43 96.45 91.09340 81094400 95.88447
5 2016-01-08 98.55 99.11 96.76 96.96 91.57507 70798000 95.81503
6 2016-01-11 98.97 99.06 97.34 98.53 93.05787 49739400 96.48595
7 2016-01-12 100.55 100.69 98.84 99.96 94.40845 49154200 97.97948
8 2016-01-13 100.32 101.19 97.30 97.39 91.98119 62439600 96.82373
9 2016-01-14 97.96 100.48 95.74 99.52 93.99289 63170100 96.73763
10 2016-01-15 96.20 97.71 95.36 97.13 91.73563 79010000 94.93521
11 2016-01-19 98.41 98.65 95.50 96.66 91.29172 53087700 95.14724
12 2016-01-20 95.10 98.19 93.42 96.79 91.41452 72334400 94.34151
13 2016-01-21 97.06 97.88 94.94 96.30 90.95174 52161500 94.59058
14 2016-01-22 98.63 101.46 98.37 101.42 95.78737 65800500 98.53912
15 2016-01-25 101.52 101.53 99.21 99.44 93.91734 51794500 98.21911
16 2016-01-26 99.93 100.88 98.07 99.99 94.43679 75077000 97.79560
17 2016-01-27 96.04 96.63 93.34 93.42 88.23166 133369700 92.73388
18 2016-01-28 93.79 94.52 92.39 94.09 88.86445 55678800 91.92481
19 2016-01-29 94.79 97.34 94.35 97.34 91.93396 64416500 94.54132
20 2016-02-01 96.47 96.71 95.40 96.43 91.07450 40943500 94.39483
21 2016-02-02 95.42 96.04 94.28 94.48 89.23280 37357200 93.18427
22 2016-02-03 95.00 96.84 94.08 96.35 90.99893 45964300 93.97298
23 2016-02-04 95.86 97.33 95.19 96.60 91.73011 46471700 94.75004
24 2016-02-05 96.52 96.92 93.69 94.02 89.28019 46418100 93.29673
25 2016-02-08 93.13 95.70 93.04 95.01 90.22028 54021400 92.98676
26 2016-02-09 94.29 95.94 93.93 94.99 90.20129 44331200 93.35710
27 2016-02-10 95.92 96.35 94.10 94.27 89.51759 42343600 93.32253
28 2016-02-11 93.79 94.72 92.59 93.70 88.97632 50074700 92.09544
29 2016-02-12 94.19 94.50 93.01 93.99 89.25169 40351400 92.25390
30 2016-02-16 95.02 96.85 94.61 96.64 91.76809 49057900 94.40936
31 2016-02-17 96.67 98.21 96.15 98.12 93.17351 44863200 95.84450
32 2016-02-18 98.84 98.89 96.09 96.26 91.40727 39021000 95.46242
33 2016-02-19 96.00 96.76 95.80 96.04 91.19834 35374200 94.58612
34 2016-02-22 96.31 96.90 95.92 96.88 91.99599 34280800 94.93867
35 2016-02-23 96.40 96.50 94.55 94.69 89.91642 31942600 93.65547
36 2016-02-24 93.98 96.38 93.32 96.10 91.25533 36255700 93.65178
37 2016-02-25 96.05 96.76 95.25 96.76 91.88206 27582700 94.63069
38 2016-02-26 97.20 98.02 96.58 96.91 92.02449 28991100 95.54150
39 2016-02-29 96.86 98.23 96.65 96.69 91.81560 35216300 95.56520
40 2016-03-01 97.65 100.77 97.42 100.53 95.46200 50407100 97.88400
41 2016-03-02 100.51 100.89 99.64 100.75 95.67091 33169600 98.73364
42 2016-03-03 100.58 101.71 100.45 101.50 96.38309 36955700 99.51436
43 2016-03-04 102.37 103.75 101.37 103.01 97.81699 46055100 100.97900
44 2016-03-07 102.39 102.83 100.96 101.87 96.73444 35828900 100.17481
45 2016-03-08 100.78 101.76 100.40 101.03 95.93680 31561900 99.36560
46 2016-03-09 101.31 101.58 100.27 101.12 96.02226 27201700 99.29075
47 2016-03-10 101.41 102.24 100.15 101.17 96.06973 33513600 99.48658
48 2016-03-11 102.24 102.28 101.50 102.26 97.10480 27408200 100.29493
49 2016-03-14 101.91 102.91 101.78 102.52 97.35168 25076100 100.68056
50 2016-03-15 103.96 105.18 103.85 104.58 99.30783 40067700 102.77928
51 2016-03-16 104.61 106.31 104.59 105.97 100.62776 38303500 103.84259
52 2016-03-17 105.52 106.47 104.96 105.80 100.46632 34420700 103.96544
53 2016-03-18 106.34 106.50 105.19 105.92 100.58028 44205200 104.09009
54 2016-03-21 105.93 107.65 105.14 105.91 100.57078 35502700 104.45359
55 2016-03-22 105.25 107.29 105.21 106.72 101.33994 32444400 104.61331
56 2016-03-23 106.48 107.07 105.90 106.13 100.77969 25703500 104.58323
57 2016-03-24 105.47 106.25 104.89 105.67 100.34287 26133000 103.82762
58 2016-03-28 106.00 106.19 105.06 105.19 99.88709 19411400 103.71236
59 2016-03-29 104.89 107.79 104.88 107.68 102.25156 31190100 104.97385
60 2016-03-30 108.65 110.42 108.60 109.56 104.03676 45601100 107.68558
61 2016-03-31 109.72 109.90 108.88 108.99 103.49551 25888400 107.42517
62 2016-04-01 108.78 110.00 108.20 109.99 104.44509 25874000 107.54836
63 2016-04-04 110.42 112.19 110.27 111.12 105.51813 37356200 109.32604
64 2016-04-05 109.51 110.73 109.42 109.81 104.27416 26578700 108.14139
65 2016-04-06 110.23 110.98 109.20 110.96 105.36620 26404100 108.51540
66 2016-04-07 109.95 110.42 108.12 108.54 103.06819 31801900 107.20273
67 2016-04-08 108.91 109.77 108.17 108.66 103.18214 23581700 107.04071
68 2016-04-11 108.97 110.61 108.83 109.02 103.52399 29407500 107.65466
69 2016-04-12 109.34 110.50 108.66 110.44 104.87241 27232300 108.01080
70 2016-04-13 110.80 112.34 110.80 112.04 106.39175 33257300 109.84392
71 2016-04-14 111.62 112.39 111.33 112.10 106.44872 25473900 110.05624
72 2016-04-15 112.11 112.30 109.73 109.85 104.31215 46939000 108.78072
73 2016-04-18 108.89 108.95 106.94 107.48 102.06164 60834000 105.98388
74 2016-04-19 107.88 108.00 106.23 106.91 101.52037 32384900 105.25012
75 2016-04-20 106.64 108.09 106.06 107.13 101.72928 30611000 105.29309
76 2016-04-21 106.93 106.93 105.52 105.97 100.62776 31552500 104.35925
77 2016-04-22 105.01 106.48 104.62 105.68 100.35237 33683100 103.81746
78 2016-04-25 105.00 105.65 104.51 105.08 99.78263 28031600 103.31421
79 2016-04-26 103.91 105.30 103.91 104.35 99.08941 56016200 102.76647
80 2016-04-27 96.00 98.71 95.68 97.82 92.88863 114602100 95.75954
81 2016-04-28 97.61 97.88 94.25 94.83 90.04936 82242700 94.05979
82 2016-04-29 93.99 94.72 92.51 93.74 89.01430 68531500 92.08143
83 2016-05-02 93.97 94.08 92.40 93.64 88.91933 48160100 91.79978
84 2016-05-03 94.20 95.74 93.68 95.18 90.38171 56831300 93.26724
85 2016-05-04 95.20 95.90 93.82 94.19 89.44161 41025500 93.05387
86 2016-05-05 94.00 94.07 92.68 93.24 89.07858 35890500 91.94286
87 2016-05-06 93.37 93.45 91.85 92.72 88.58179 43458200 91.29393
88 2016-05-09 93.00 93.77 92.59 92.79 88.64865 32936400 91.66955
89 2016-05-10 93.33 93.57 92.11 93.42 89.25054 33686800 91.64351
90 2016-05-11 93.48 93.57 92.46 92.51 88.38116 28719100 91.47039
91 2016-05-12 92.72 92.78 89.47 90.34 86.30801 76314700 89.51934
92 2016-05-13 90.00 91.67 90.00 90.52 86.47997 44392800 89.38332
93 2016-05-16 92.39 94.39 91.65 93.88 89.69002 61259800 91.91001
94 2016-05-17 94.55 94.70 93.01 93.49 89.31741 46916900 92.34247
95 2016-05-18 94.16 95.21 93.89 94.56 90.33968 42062400 93.14656
96 2016-05-19 94.64 94.64 93.57 94.20 89.99573 30442100 92.73524
97 2016-05-20 94.64 95.43 94.52 95.22 90.97021 32026000 93.64007
98 2016-05-23 95.87 97.19 95.67 96.43 92.12620 38018600 94.99540
99 2016-05-24 97.22 98.09 96.84 97.90 93.53061 35140200 96.15353
100 2016-05-25 98.67 99.74 98.11 99.62 95.17383 38168800 97.67461
101 2016-05-26 99.68 100.73 98.64 100.41 95.92857 56331200 98.43286
102 2016-05-27 99.44 100.47 99.25 100.35 95.87124 36229500 98.53041
103 2016-05-31 99.60 100.40 98.82 99.86 95.40312 42307200 98.20771
104 2016-06-01 99.02 99.54 98.33 98.46 94.06560 29173300 97.31187
105 2016-06-02 97.60 97.84 96.63 97.72 93.35864 40191600 95.94288
106 2016-06-03 97.79 98.27 97.45 97.92 93.54971 28062900 96.42323
107 2016-06-06 97.99 101.89 97.55 98.63 94.22800 23292500 97.88934
108 2016-06-07 99.25 99.87 98.96 99.03 94.61016 22409500 97.81339
109 2016-06-08 99.02 99.56 98.68 98.94 94.52419 20848100 97.58806
110 2016-06-09 98.50 99.99 98.46 99.65 95.20249 26601400 97.88416
111 2016-06-10 98.53 99.35 98.48 98.83 94.41910 31712900 97.41637
112 2016-06-13 98.69 99.12 97.10 97.34 92.99558 38020500 96.40519
113 2016-06-14 97.32 98.48 96.75 97.46 93.11023 31931900 96.11341
114 2016-06-15 97.82 98.41 97.03 97.14 92.80452 29445200 96.08151
115 2016-06-16 96.45 97.75 96.07 97.55 93.19623 31326800 95.67208
116 2016-06-17 96.62 96.65 95.30 95.33 91.07530 61008200 94.34177
117 2016-06-20 96.00 96.57 95.03 95.10 90.85556 34411900 94.15185
118 2016-06-21 94.94 96.35 94.68 95.91 91.62942 35546400 94.21981
119 2016-06-22 96.25 96.89 95.35 95.55 91.28548 29219100 94.50849
120 2016-06-23 95.94 96.29 95.25 96.10 91.81095 32240200 94.45032
121 2016-06-24 92.91 94.66 92.65 93.40 89.23144 75311400 92.18048
122 2016-06-27 93.00 93.05 91.50 92.04 87.93213 45489600 90.82738
123 2016-06-28 92.90 93.66 92.14 93.59 89.41295 40444900 91.73765
124 2016-06-29 93.97 94.55 93.63 94.40 90.18681 36531000 92.78894
125 2016-06-30 94.44 95.77 94.30 95.60 91.33325 35836400 93.80108
126 2016-07-01 95.49 96.47 95.33 95.89 91.61031 26026500 94.47010
127 2016-07-05 95.39 95.40 94.46 94.99 90.75047 27705200 93.53682
128 2016-07-06 94.60 95.66 94.37 95.53 91.26638 30949100 93.76546
129 2016-07-07 95.70 96.50 95.62 95.94 91.65807 25139600 94.59269
130 2016-07-08 96.49 96.89 96.05 96.68 92.36503 28912100 95.10168
131 2016-07-11 96.75 97.65 96.73 96.98 92.65166 23794900 95.67722
132 2016-07-12 97.17 97.70 97.12 97.42 93.07204 24167500 95.96401
133 2016-07-13 97.41 97.67 96.84 96.87 92.54656 25892200 95.68552
134 2016-07-14 97.39 98.99 97.32 98.79 94.38088 38919000 96.89696
135 2016-07-15 98.92 99.30 98.50 98.78 94.37131 30137000 97.39044
136 2016-07-18 98.70 100.13 98.60 99.83 95.37445 36493900 98.03482
137 2016-07-19 99.56 100.00 99.34 99.87 95.41267 23779900 98.25089
138 2016-07-20 100.00 100.46 99.74 99.96 95.49866 26276000 98.56622
139 2016-07-21 99.83 101.00 99.13 99.43 94.99231 32702000 98.37410
140 2016-07-22 99.26 99.30 98.31 98.66 94.25668 28313700 97.28889
141 2016-07-25 98.25 98.84 96.92 97.34 92.99558 40382900 96.25186
142 2016-07-26 96.82 97.97 96.42 96.67 92.35549 56239800 95.58183
143 2016-07-27 104.27 104.35 102.75 102.95 98.35522 92344800 101.81840
144 2016-07-28 102.83 104.45 102.82 104.34 99.68316 39869800 102.31772
145 2016-07-29 104.19 104.55 103.68 104.21 99.55898 27733700 102.59633
146 2016-08-01 104.41 106.15 104.41 106.05 101.31685 38167900 103.95895
147 2016-08-02 106.05 106.07 104.00 104.48 99.81692 33816600 103.29564
148 2016-08-03 104.81 105.84 104.77 105.79 101.06845 30202600 103.89281
149 2016-08-04 105.58 106.00 105.28 105.87 101.69282 27408700 104.32427
150 2016-08-05 106.27 107.65 106.18 107.48 103.23929 40553400 105.68976
151 2016-08-08 107.52 108.37 107.16 108.37 104.09418 28037200 106.54139
152 2016-08-09 108.23 108.94 108.01 108.81 104.51681 26315200 107.15561
153 2016-08-10 108.71 108.90 107.76 108.00 103.73876 24008500 106.79959
154 2016-08-11 108.52 108.93 107.85 107.93 103.67153 27484500 106.81718
155 2016-08-12 107.78 108.44 107.78 108.18 103.91166 18660400 106.71055
156 2016-08-15 108.14 109.54 108.08 109.48 105.16038 25868200 107.59346
157 2016-08-16 109.63 110.23 109.21 109.38 105.06432 33794400 108.16811
158 2016-08-17 109.10 109.37 108.34 109.22 104.91062 25356000 107.54021
159 2016-08-18 109.23 109.60 109.02 109.08 104.77616 21984700 107.79872
160 2016-08-19 108.77 109.69 108.36 109.36 105.04511 25368100 107.69837
161 2016-08-22 108.86 109.10 107.85 108.51 104.22866 25820200 107.05955
162 2016-08-23 108.59 109.32 108.53 108.85 104.55523 21257700 107.46841
163 2016-08-24 108.57 108.75 107.68 108.03 103.76759 23675100 106.73253
164 2016-08-25 107.39 107.88 106.68 107.57 103.32574 25086200 105.96191
165 2016-08-26 107.41 107.95 106.31 106.94 102.72058 27766300 105.66019
166 2016-08-29 106.62 107.44 106.29 106.82 102.60533 24970300 105.44511
167 2016-08-30 105.80 106.50 105.50 106.00 101.81767 24863900 104.60589
168 2016-08-31 105.66 106.57 105.64 106.10 101.91373 29662400 104.70791
169 2016-09-01 106.14 106.80 105.62 106.73 102.51887 26701500 104.97963
170 2016-09-02 107.70 108.00 106.82 107.73 103.47942 26802500 106.09981
171 2016-09-06 107.90 108.30 107.51 107.70 103.45059 26880400 106.42020
172 2016-09-07 107.83 108.76 107.07 108.36 104.08456 42364300 106.63819
173 2016-09-08 107.25 107.27 105.24 105.52 101.35661 53002000 104.62220
174 2016-09-09 104.64 105.72 103.13 103.13 99.06092 46557000 102.63697
175 2016-09-12 102.65 105.72 102.53 105.44 101.27977 45292800 103.17659
176 2016-09-13 107.51 108.79 107.24 107.95 103.69073 62176200 106.57358
177 2016-09-14 108.73 113.03 108.60 111.77 107.36002 110888700 109.66334
178 2016-09-15 113.86 115.73 113.49 115.57 111.01009 89983600 113.41003
179 2016-09-16 115.12 116.13 114.04 114.92 110.38573 79886900 113.51858
180 2016-09-19 115.19 116.18 113.25 113.58 109.09860 47023000 112.84287
181 2016-09-20 113.05 114.12 112.51 113.57 109.08898 34514300 111.90633
182 2016-09-21 113.85 113.99 112.44 113.55 109.06978 36003200 111.83326
183 2016-09-22 114.35 114.94 114.00 114.62 110.09757 31074000 113.01252
184 2016-09-23 114.42 114.79 111.55 112.71 108.26292 52481200 111.53431
185 2016-09-26 111.64 113.39 111.55 112.88 108.42622 29869400 111.12208
186 2016-09-27 113.00 113.18 112.34 113.09 108.62794 24607400 111.38264
187 2016-09-28 113.69 114.64 113.43 113.95 109.45400 29641100 112.50800
188 2016-09-29 113.16 113.80 111.80 112.18 107.75384 35887000 111.11795
189 2016-09-30 112.46 113.37 111.80 113.05 108.58952 36379100 111.25318
190 2016-10-03 112.71 113.05 112.28 112.52 108.08041 21701800 111.13681
191 2016-10-04 113.06 114.31 112.63 113.00 108.54148 29736800 111.82716
192 2016-10-05 113.40 113.66 112.69 113.05 108.58952 21453100 111.64651
193 2016-10-06 113.70 114.34 113.13 113.89 109.39637 28779300 112.28879
194 2016-10-07 114.31 114.56 113.51 114.06 109.55968 24358400 112.54323
195 2016-10-10 115.02 116.75 114.72 116.05 111.47115 36236000 114.31372
196 2016-10-11 117.70 118.69 116.20 116.30 111.71129 64041000 115.53376
197 2016-10-12 117.35 117.98 116.75 117.34 112.71024 37586800 115.81341
198 2016-10-13 116.79 117.44 115.72 116.98 112.36446 35192400 115.17482
199 2016-10-14 117.88 118.17 117.13 117.63 112.98880 35652200 116.09627
200 2016-10-17 117.33 117.84 116.78 117.55 112.91196 23624900 115.84399
201 2016-10-18 118.18 118.21 117.45 117.47 112.83511 24553500 116.16504
202 2016-10-19 117.25 117.76 113.80 117.12 112.49893 20034600 114.68631
203 2016-10-20 116.86 117.38 116.33 117.06 112.44128 24125800 115.38376
204 2016-10-21 116.81 116.91 116.28 116.60 111.99945 23192700 115.06315
205 2016-10-24 117.10 117.74 117.00 117.65 113.00802 23538700 115.91601
206 2016-10-25 117.95 118.36 117.31 118.25 113.58434 48129000 116.41811
207 2016-10-26 114.31 115.70 113.31 115.59 111.02929 66134200 113.34643
208 2016-10-27 115.39 115.86 114.10 114.48 109.96310 34562000 113.30770
209 2016-10-28 113.87 115.21 113.45 113.72 109.23308 37861700 112.63102
210 2016-10-31 113.65 114.23 113.20 113.54 109.06019 26419400 112.16340
211 2016-11-01 113.46 113.77 110.53 111.49 107.09106 43825800 110.46369
212 2016-11-02 111.40 112.35 111.23 111.59 107.18711 28331700 110.25570
213 2016-11-03 110.98 111.46 109.55 109.83 106.03821 26932600 109.01607
214 2016-11-04 108.53 110.25 108.11 108.84 105.08237 30837000 107.81413
215 2016-11-07 110.08 110.51 109.46 110.41 106.59817 32560000 108.85606
216 2016-11-08 110.31 111.72 109.70 111.06 107.22573 24054500 109.54858
217 2016-11-09 109.88 111.32 108.05 110.88 107.05196 59176400 108.80732
218 2016-11-10 111.09 111.09 105.83 107.79 104.06863 57134500 106.99621
219 2016-11-11 107.12 108.87 106.55 108.43 104.68653 34094100 106.70218
220 2016-11-14 107.71 107.81 104.08 105.71 102.06046 51175500 104.65015
221 2016-11-15 106.57 107.68 106.16 107.11 103.41212 32264500 105.75071
222 2016-11-16 106.70 110.23 106.60 109.99 106.19268 58840500 107.67423
223 2016-11-17 109.81 110.35 108.83 109.95 106.15406 27632000 108.44469
224 2016-11-18 109.72 110.54 109.66 110.06 106.26027 28428900 108.82009
225 2016-11-21 110.12 111.99 110.01 111.73 107.87262 29264600 109.95754
226 2016-11-22 111.95 112.42 111.40 111.80 107.94019 25965500 110.58673
227 2016-11-23 111.36 111.51 110.33 111.23 107.38987 27426400 109.74329
228 2016-11-25 111.13 111.87 110.95 111.79 107.93053 11475900 110.25018
229 2016-11-28 111.43 112.47 111.39 111.57 107.71814 27194000 110.52605
230 2016-11-29 110.78 112.03 110.07 111.46 107.61195 28528800 109.90398
231 2016-11-30 111.60 112.20 110.27 110.52 106.70438 36162300 109.72479
232 2016-12-01 110.37 110.94 109.03 109.49 105.70995 37086900 108.55998
233 2016-12-02 109.17 110.09 108.85 109.90 106.10579 26528000 108.34859
234 2016-12-05 110.00 110.03 108.25 109.11 105.34306 34324500 107.87435
235 2016-12-06 109.50 110.36 109.19 109.95 106.15406 26195500 108.56802
236 2016-12-07 109.26 111.19 109.16 111.03 107.19677 29998700 109.18226
237 2016-12-08 110.86 112.43 110.60 112.12 108.24915 27068300 110.42638
238 2016-12-09 112.31 114.70 112.31 113.95 110.01596 34402600 112.34199
239 2016-12-12 113.29 115.00 112.49 113.30 109.38840 26374400 112.29280
240 2016-12-13 113.84 115.92 113.75 115.19 111.21315 43733800 113.62772
241 2016-12-14 115.04 116.20 114.98 115.19 111.21315 34031800 114.13105
242 2016-12-15 115.38 116.73 115.23 115.82 111.82140 46524500 114.59380
243 2016-12-16 116.47 116.50 115.65 115.97 111.96623 44351100 114.70541
244 2016-12-19 115.80 117.38 115.75 116.64 112.61310 27779400 115.24770
245 2016-12-20 116.74 117.50 116.68 116.95 112.91239 21425000 115.69746
246 2016-12-21 116.80 117.40 116.78 117.06 113.01859 23783200 115.73286
247 2016-12-22 116.35 116.51 115.64 116.29 112.27518 26085900 114.80840
248 2016-12-23 115.59 116.52 115.59 116.52 112.49725 14249500 114.86908
249 2016-12-27 116.52 117.80 116.49 117.26 113.21169 18296900 115.83390
250 2016-12-28 117.52 118.02 116.20 116.76 112.72895 20905900 115.64965
251 2016-12-29 116.45 117.11 116.40 116.73 112.69999 15039500 115.40333
252 2016-12-30 116.65 117.20 115.43 115.82 111.82140 30586300 114.81713
253 2017-01-03 115.80 116.33 114.76 116.15 112.14001 28781900 114.41000
254 2017-01-04 115.85 116.51 115.75 116.02 112.01450 21118100 114.75817
255 2017-01-05 115.92 116.86 115.81 116.61 112.58413 22193600 115.08471
256 2017-01-06 116.78 118.16 116.47 117.91 113.83925 31751900 116.15642
257 2017-01-09 117.95 119.43 117.94 118.99 114.88195 33561900 117.41732
258 2017-01-10 118.77 119.38 118.30 119.11 114.99782 24462100 117.55927
259 2017-01-11 118.74 119.93 118.60 119.75 115.61572 27588600 118.04857
260 2017-01-12 118.90 119.30 118.21 119.25 115.13299 27086200 117.54766
261 2017-01-13 119.11 119.62 118.81 119.04 114.93024 26111900 117.78675
262 2017-01-17 118.34 120.24 118.22 120.00 115.85709 34439800 118.10569
263 2017-01-18 120.00 120.50 119.71 119.99 115.84744 23713000 118.68581
264 2017-01-19 119.40 120.09 119.37 119.78 115.64469 25597300 118.36823
265 2017-01-20 120.45 120.45 119.73 120.00 115.85709 32597900 118.67903
266 2017-01-23 120.00 120.81 119.77 120.08 115.93433 22050200 118.83811
267 2017-01-24 119.55 120.10 119.50 119.97 115.82812 23211000 118.47604
268 2017-01-25 120.42 122.10 120.28 121.88 117.67219 32377600 120.01739
269 2017-01-26 121.67 122.44 121.60 121.94 117.73012 26337600 120.59004
270 2017-01-27 122.14 122.35 121.60 121.95 117.73977 20562900 120.56325
271 2017-01-30 120.93 121.63 120.66 121.63 117.43082 30377500 119.90694
272 2017-01-31 121.15 121.39 120.62 121.35 117.16049 49201000 119.72350
273 2017-02-01 127.03 130.49 127.01 128.75 124.30500 111985000 127.26834
274 2017-02-02 127.98 129.39 127.78 128.53 124.09261 33710400 127.08753
275 2017-02-03 128.31 129.19 128.16 129.08 124.62362 24507300 127.32454
276 2017-02-06 129.13 130.50 128.90 130.29 125.79184 26845900 128.39728
277 2017-02-07 130.54 132.09 130.45 131.53 126.98903 38183800 129.84301
278 2017-02-08 131.35 132.22 131.22 132.04 127.48141 23004100 130.30714
279 2017-02-09 131.65 132.45 131.12 132.42 128.40260 28349900 130.65753
280 2017-02-10 132.46 132.94 132.05 132.12 128.11171 20065500 131.03391
281 2017-02-13 133.08 133.82 132.75 133.29 129.24620 23035400 131.93874
282 2017-02-14 133.47 135.09 133.25 135.02 130.92372 33226200 133.08791
283 2017-02-15 135.52 136.27 134.62 135.51 131.39883 35623100 134.09628
284 2017-02-16 135.67 135.90 134.84 135.35 131.24373 22584600 133.99457
285 2017-02-17 135.10 135.83 135.10 135.72 131.60249 22198200 134.17750
286 2017-02-21 136.23 136.75 135.98 136.70 132.55275 24507200 135.09425
287 2017-02-22 136.43 137.12 136.11 137.11 132.95032 20836900 135.39344
288 2017-02-23 137.38 137.48 136.30 136.53 132.38791 20788200 135.38930
289 2017-02-24 135.91 136.66 135.28 136.66 132.51396 21776600 134.81799
290 2017-02-27 137.14 137.44 136.28 136.93 132.77577 20257400 135.49859
291 2017-02-28 137.08 137.44 136.70 136.99 132.83397 23482900 135.65799
292 2017-03-01 137.89 140.15 137.60 139.79 135.54900 36414600 137.76633
293 2017-03-02 140.00 140.28 138.76 138.96 134.74419 26211000 137.92806
294 2017-03-03 138.78 139.83 138.59 139.78 135.53931 21108100 137.98644
295 2017-03-06 139.37 139.77 138.60 139.34 135.11266 21750000 137.82756
296 2017-03-07 139.06 139.98 138.79 139.52 135.28720 17446300 138.01906
297 2017-03-08 138.95 139.80 138.82 139.00 134.78297 18707200 137.80099
298 2017-03-09 138.74 138.79 137.05 138.68 134.47267 22155900 136.77089
299 2017-03-10 139.25 139.36 138.64 139.14 134.91873 19612800 137.63958
300 2017-03-13 138.85 139.43 138.82 139.20 134.97690 17421700 137.74230
301 2017-03-14 139.30 139.65 138.84 138.99 134.77328 15309100 137.75442
302 2017-03-15 139.41 140.75 139.03 140.46 136.19869 25691800 138.65956
303 2017-03-16 140.72 141.02 140.26 140.69 136.42169 19232000 139.23390
304 2017-03-17 141.00 141.00 139.89 139.99 135.74295 43885000 138.87765
305 2017-03-20 140.40 141.50 140.23 141.46 137.16836 21542000 139.63279
306 2017-03-21 142.11 142.80 139.73 139.84 135.59750 39529900 139.37583
307 2017-03-22 139.85 141.60 139.76 141.42 137.12956 25860200 139.49652
308 2017-03-23 141.26 141.58 140.61 140.92 136.64471 20346300 139.61157
309 2017-03-24 141.50 141.74 140.35 140.64 136.37323 22395600 139.48775
310 2017-03-27 139.39 141.22 138.62 140.88 136.60596 23575100 138.81532
311 2017-03-28 140.91 144.04 140.62 143.80 139.43735 33374800 141.36578
312 2017-03-29 143.68 144.49 143.19 144.12 139.74765 29190000 142.47589
313 2017-03-30 144.19 144.50 143.50 143.93 139.56342 21207300 142.52114
314 2017-03-31 143.72 144.27 143.01 143.66 139.30160 19661700 142.19387
315 2017-04-03 143.71 144.12 143.05 143.70 139.34039 19985700 142.17013
316 2017-04-04 143.25 144.89 143.17 144.77 140.37793 19891400 142.81264
317 2017-04-05 144.22 145.46 143.81 144.02 139.65068 27717900 142.97356
318 2017-04-06 144.29 144.52 143.45 143.66 139.30160 21149000 142.42387
319 2017-04-07 143.73 144.18 143.27 143.34 138.99132 16672200 142.14711
320 2017-04-10 143.60 143.88 142.90 143.17 138.82646 18933400 141.86882
321 2017-04-11 142.94 143.35 140.06 141.63 137.33321 30379400 140.24774
322 2017-04-12 141.60 142.15 141.01 141.80 137.49803 20350000 140.21934
323 2017-04-13 141.91 142.38 141.05 141.05 136.77078 17822900 140.06693
324 2017-04-17 141.48 141.88 140.87 141.83 137.52713 16582100 140.09238
325 2017-04-18 141.41 142.04 141.11 141.20 136.91623 14697500 140.02207
326 2017-04-19 141.88 142.00 140.45 140.68 136.41200 17328400 139.62067
327 2017-04-20 141.22 142.92 141.16 142.44 138.11862 23319600 140.73287
328 2017-04-21 142.44 142.68 141.85 142.27 137.95378 17320900 140.82793
329 2017-04-24 143.50 143.95 143.18 143.64 139.28220 17134300 142.13740
330 2017-04-25 143.91 144.90 143.87 144.53 140.14519 18871500 142.97173
331 2017-04-26 144.47 144.60 143.38 143.68 139.32098 20041200 142.43366
332 2017-04-27 143.92 144.16 143.31 143.79 139.42764 14246300 142.29922
333 2017-04-28 144.09 144.30 143.27 143.65 139.29189 20860400 142.28730
334 2017-05-01 145.10 147.20 144.96 146.58 142.13300 33602900 144.76433
335 2017-05-02 147.54 148.09 146.84 147.51 143.03481 45352200 145.98827
336 2017-05-03 145.59 147.49 144.27 147.06 142.59845 45697000 144.78615
337 2017-05-04 146.52 147.14 145.81 146.53 142.08452 23371900 145.01150
338 2017-05-05 146.76 148.98 146.76 148.96 144.44081 27327700 146.72693
339 2017-05-08 149.03 153.70 149.03 153.01 148.36792 48752400 150.36597
340 2017-05-09 153.87 154.88 153.45 153.99 149.31819 39130400 152.54940
341 2017-05-10 153.63 153.94 152.11 153.26 148.61035 25805700 151.55345
342 2017-05-11 152.45 154.07 152.31 153.95 149.89557 27255100 152.09186
343 2017-05-12 154.70 156.42 154.67 156.10 151.98897 32527000 154.35965
344 2017-05-15 156.01 156.65 155.05 155.70 151.59952 26009700 154.43317
345 2017-05-16 155.94 156.06 154.72 155.47 151.37555 20048500 154.05185
346 2017-05-17 153.60 154.57 149.71 150.25 146.29303 50767700 150.19101
347 2017-05-18 151.27 153.34 151.13 152.54 148.52271 33568200 150.99757
348 2017-05-19 153.38 153.98 152.63 153.06 149.02901 26960800 151.87967
349 2017-05-22 154.00 154.58 152.91 153.99 149.93454 22966400 152.47485
350 2017-05-23 154.90 154.90 153.31 153.80 149.74954 19918900 152.65318
351 2017-05-24 153.84 154.17 152.67 153.34 149.30164 19178000 152.04721
352 2017-05-25 153.73 154.35 153.03 153.87 149.81767 19235600 152.39923
353 2017-05-26 154.00 154.24 153.31 153.61 149.56453 21927600 152.37151
354 2017-05-30 153.42 154.43 153.33 153.67 149.62295 20126900 152.46098
355 2017-05-31 153.97 154.17 152.38 152.76 148.73691 24451200 151.76230
356 2017-06-01 153.17 153.33 152.22 153.18 149.14584 16404100 151.56528
357 2017-06-02 153.58 155.45 152.89 155.45 151.35608 27770700 153.23202
358 2017-06-05 154.34 154.45 153.46 153.93 149.87610 25331700 152.59537
359 2017-06-06 153.90 155.81 153.78 154.45 150.38242 26624900 153.32414
360 2017-06-07 155.02 155.98 154.48 155.37 151.27820 21069600 153.91273
361 2017-06-08 155.25 155.54 154.40 154.99 150.90820 21250800 153.61606
362 2017-06-09 155.19 155.19 146.02 148.98 145.05646 64882700 148.75549
363 2017-06-12 145.74 146.09 142.51 145.42 141.59024 72307300 143.39674
364 2017-06-13 147.16 147.45 145.15 146.59 142.72942 34165400 145.10980
365 2017-06-14 147.50 147.50 143.84 145.16 141.33707 31531200 144.22569
366 2017-06-15 143.32 144.48 142.21 144.29 140.48996 32165400 142.39332
367 2017-06-16 143.78 144.50 142.20 142.27 138.52321 50361100 141.74107
368 2017-06-19 143.66 146.74 143.66 146.34 142.48601 32541400 144.29534
369 2017-06-20 146.87 146.87 144.94 145.01 141.19101 24900100 144.33367
370 2017-06-21 145.52 146.07 144.61 145.87 142.02837 21265800 144.23612
371 2017-06-22 145.77 146.70 145.12 145.63 141.79471 19106300 144.53823
372 2017-06-23 145.13 147.16 145.11 146.28 142.42760 35439400 144.89920
373 2017-06-26 147.17 148.28 145.38 145.82 141.97969 25692400 145.21323
374 2017-06-27 145.01 146.16 143.62 143.73 139.94473 24761900 143.24158
375 2017-06-28 144.49 146.11 143.16 145.83 141.98944 22082400 143.75315
376 2017-06-29 144.71 145.13 142.28 143.68 139.89604 31499400 142.43535
377 2017-06-30 144.45 144.96 143.78 144.02 140.22710 23024100 142.98903
378 2017-07-03 144.88 145.30 143.10 143.50 139.72080 14277800 142.70693
379 2017-07-05 143.69 144.79 142.72 144.09 140.29524 21569600 142.60175
380 2017-07-06 143.02 143.50 142.41 142.73 138.97107 24128800 141.62702
381 2017-07-07 142.90 144.75 142.90 144.18 140.38287 19201700 142.67762
382 2017-07-10 144.11 145.95 143.37 145.06 141.23970 21090600 143.51990
383 2017-07-11 144.73 145.85 144.38 145.53 141.69733 19781800 143.97578
384 2017-07-12 145.87 146.18 144.82 145.74 141.90181 24884500 144.30060
385 2017-07-13 145.50 148.49 145.44 147.77 143.87834 25199400 145.93612
386 2017-07-14 147.97 149.33 147.33 149.04 145.11488 20132100 147.25830
387 2017-07-17 148.82 150.90 148.57 149.56 145.62117 23793500 148.36372
388 2017-07-18 149.20 150.13 148.67 150.08 146.12752 17868800 148.30917
389 2017-07-19 150.48 151.42 149.95 151.02 147.04274 20923000 149.47091
390 2017-07-20 151.50 151.74 150.19 150.34 146.38066 17243700 149.43689
391 2017-07-21 149.99 150.44 148.88 150.27 146.31253 26252600 148.54418
392 2017-07-24 150.58 152.44 149.90 152.09 148.08456 21493200 150.14152
393 2017-07-25 151.80 153.84 151.80 152.74 148.71745 18853900 151.45248
394 2017-07-26 153.35 153.93 153.06 153.46 149.41852 15781000 152.13617
395 2017-07-27 153.75 153.99 147.30 150.56 146.59486 32476300 149.29496
396 2017-07-28 149.89 150.23 149.19 149.50 145.56277 17213700 148.32759
397 2017-07-31 149.90 150.33 148.13 148.73 144.81303 19845900 147.75768
398 2017-08-01 149.10 150.22 148.41 150.05 146.09830 35368600 148.24277
399 2017-08-02 159.28 159.75 156.16 157.14 153.00157 69936800 156.30386
400 2017-08-03 157.05 157.21 155.02 155.57 151.47293 27097300 154.56765
401 2017-08-04 156.07 157.40 155.69 156.39 152.27132 20559900 155.12044
402 2017-08-07 157.06 158.92 156.67 158.81 154.62761 21870300 156.73920
403 2017-08-08 158.60 161.83 158.27 160.08 155.86415 36205900 158.65472
404 2017-08-09 159.26 161.27 159.11 161.06 156.81833 26131500 159.06611
405 2017-08-10 159.90 160.00 154.63 155.32 151.82338 40804300 155.48446
406 2017-08-11 156.60 158.57 156.07 157.48 153.93475 26257100 156.19159
407 2017-08-14 159.32 160.21 158.75 159.85 156.25142 22122700 158.40381
408 2017-08-15 160.66 162.20 160.14 161.60 157.96199 29465500 160.10066
409 2017-08-16 161.94 162.51 160.15 160.95 157.32665 27671600 159.99554
410 2017-08-17 160.52 160.71 157.84 157.86 154.30621 27940600 157.61874
411 2017-08-18 157.86 159.50 156.72 157.50 153.95430 27428100 156.72477
412 2017-08-21 157.50 157.89 155.11 157.21 153.67084 26368500 155.55695
413 2017-08-22 158.23 160.00 158.02 159.78 156.18297 21604600 158.06766
414 2017-08-23 159.07 160.47 158.88 159.98 156.37846 19399100 158.57616
415 2017-08-24 160.43 160.74 158.55 159.27 155.68445 19818900 158.32482
416 2017-08-25 159.65 160.56 159.27 159.86 156.26115 25480100 158.69705
417 2017-08-28 160.14 162.00 159.93 161.47 157.83491 25966000 159.92164
418 2017-08-29 160.10 163.12 160.00 162.91 159.24251 29516900 160.78750
419 2017-08-30 163.80 163.89 162.61 163.35 159.67262 27269600 162.05754
420 2017-08-31 163.64 164.52 163.48 164.00 160.30797 26785100 162.76932
421 2017-09-01 164.80 164.94 163.63 164.05 160.35684 16591100 162.97562
422 2017-09-05 163.75 164.25 160.56 162.08 158.43118 29468500 161.08039
423 2017-09-06 162.71 162.99 160.52 161.91 158.26503 21651700 160.59168
424 2017-09-07 162.09 162.24 160.36 161.26 157.62965 21928500 160.07655
425 2017-09-08 160.86 161.15 158.53 158.63 155.05887 28611500 158.24629
426 2017-09-11 160.50 162.05 159.89 161.50 157.86426 31580800 159.93475
427 2017-09-12 162.61 163.96 158.77 160.86 157.23865 71714000 159.98955
428 2017-09-13 159.87 159.96 157.91 159.65 156.05588 44907400 157.97530
429 2017-09-14 158.99 159.40 158.09 158.28 154.71674 23760700 157.40224
430 2017-09-15 158.47 160.97 158.00 159.88 156.28072 49114600 158.41691
431 2017-09-18 160.11 160.50 158.00 158.67 155.09795 28269400 157.86598
432 2017-09-19 159.51 159.77 158.44 158.73 155.15660 20810600 157.78887
433 2017-09-20 157.90 158.26 153.83 156.07 152.55650 52951400 154.88217
434 2017-09-21 155.80 155.80 152.75 153.39 149.93683 37511700 152.82894
435 2017-09-22 151.54 152.27 150.56 151.89 148.47060 46645400 150.43353
436 2017-09-25 149.99 151.83 149.16 150.55 147.16075 44387300 149.38359
437 2017-09-26 151.78 153.92 151.69 153.14 149.69244 36660000 151.76748
438 2017-09-27 153.80 154.72 153.54 154.23 150.75794 25504200 153.00598
439 2017-09-28 153.89 154.28 152.70 153.28 149.82932 22005500 152.26977
440 2017-09-29 153.21 154.13 152.00 154.12 150.65037 26299800 152.26013
441 2017-10-02 154.26 154.45 152.72 153.81 150.34738 18698800 152.50579
442 2017-10-03 154.01 155.09 153.91 154.48 151.00227 16230300 153.33409
443 2017-10-04 153.63 153.86 152.46 153.48 150.02480 20163800 152.11493
444 2017-10-05 154.18 155.44 154.05 155.39 151.89178 21283800 153.79393
445 2017-10-06 154.97 155.49 154.56 155.30 151.80382 17407600 153.95127
446 2017-10-09 155.81 156.73 155.49 155.84 152.33166 16262900 154.85056
447 2017-10-10 156.06 158.00 155.10 155.90 152.39031 15617000 155.16344
448 2017-10-11 155.97 156.98 155.75 156.55 153.02570 16905600 155.25190
449 2017-10-12 156.35 157.37 155.73 156.00 152.48805 16125100 155.19601
450 2017-10-13 156.73 157.28 156.41 156.99 153.45577 16394200 155.71526
451 2017-10-16 157.90 160.00 157.65 159.88 156.28072 24121500 157.97690
452 2017-10-17 159.78 160.87 159.23 160.47 156.85744 18997300 158.98581
453 2017-10-18 160.42 160.71 159.60 159.76 156.16342 16374200 158.82448
454 2017-10-19 156.75 157.08 155.02 155.98 152.46852 42584200 154.85618
455 2017-10-20 156.61 157.75 155.96 156.25 152.73242 23974100 155.48081
456 2017-10-23 156.89 157.69 155.50 156.17 152.65422 21984300 155.28141
457 2017-10-24 156.29 157.42 156.20 157.10 153.56331 17757200 155.72777
458 2017-10-25 156.91 157.55 155.27 156.41 152.88884 21207100 155.23628
459 2017-10-26 157.23 157.83 156.78 157.41 153.86630 17000500 156.15877
460 2017-10-27 159.29 163.60 158.70 163.05 159.37936 44454200 160.55979
461 2017-10-30 163.89 168.07 163.72 166.72 162.96675 44700800 164.91892
462 2017-10-31 167.90 169.65 166.94 169.04 165.23451 36046800 167.27484
463 2017-11-01 169.87 169.94 165.61 166.89 163.13292 33637800 166.22764
464 2017-11-02 166.60 168.50 165.28 168.11 164.32546 41393400 166.03515
465 2017-11-03 174.00 174.26 171.12 172.50 168.61661 59398600 171.33220
466 2017-11-06 172.37 174.99 171.72 174.25 170.32721 34319500 172.34574
467 2017-11-07 173.91 175.25 173.60 174.81 170.87462 24361500 173.24154
468 2017-11-08 174.66 176.24 174.33 176.24 172.27241 24409500 174.28081
469 2017-11-09 175.11 176.10 173.14 175.88 171.92053 29482600 173.72018
470 2017-11-10 175.11 175.38 174.27 174.67 171.35153 25145500 173.66718
471 2017-11-13 173.50 174.50 173.40 173.97 170.66484 16982100 172.85494
472 2017-11-14 173.04 173.48 171.18 171.34 168.08479 24782500 170.91493
473 2017-11-15 169.97 170.32 168.38 169.08 165.86774 29158100 168.18925
474 2017-11-16 171.18 171.87 170.30 171.10 167.84938 23637500 170.00646
475 2017-11-17 171.04 171.39 169.64 170.15 166.91742 21899500 169.31581
476 2017-11-20 170.29 170.56 169.56 169.98 166.75064 16262400 168.95688
477 2017-11-21 170.78 173.70 170.78 173.14 169.85060 25131300 171.44353
478 2017-11-22 173.36 175.00 173.05 174.96 171.63603 25588900 173.22868
479 2017-11-24 175.10 175.50 174.65 174.97 171.64586 14026700 173.93195
480 2017-11-27 175.05 175.08 173.34 174.09 170.78255 20716800 173.06751
481 2017-11-28 174.30 174.87 171.86 173.07 169.78195 26428800 172.17065
482 2017-11-29 172.63 172.92 167.16 169.48 166.26015 41666400 168.78005
483 2017-11-30 170.43 172.14 168.44 171.85 168.58511 41527200 169.72170
484 2017-12-01 169.95 171.67 168.50 171.05 167.80031 39759300 169.32344
485 2017-12-04 172.48 172.62 169.63 169.80 166.57408 32542400 169.60803
486 2017-12-05 169.06 171.52 168.40 169.64 166.41710 27350200 168.77903
487 2017-12-06 167.50 170.20 166.46 169.01 165.79909 28560000 167.48636
488 2017-12-07 169.03 170.44 168.91 169.32 166.10319 25673300 168.48440
489 2017-12-08 170.49 171.00 168.82 169.37 166.15222 23355200 168.65741
490 2017-12-11 169.20 172.89 168.79 172.67 169.38954 35273800 170.35651
491 2017-12-12 172.15 172.39 171.46 171.70 168.43797 19409200 170.76266
492 2017-12-13 172.50 173.54 172.00 172.27 168.99713 23818400 171.51237
493 2017-12-14 172.40 173.13 171.65 172.22 168.94807 20476500 171.24269
494 2017-12-15 173.63 174.17 172.46 173.97 170.66484 40169300 172.43162
495 2017-12-18 174.88 177.20 174.86 176.42 173.06828 29421100 175.04276
496 2017-12-19 175.03 175.39 174.09 174.54 171.22401 27436400 173.56800
497 2017-12-20 174.87 175.42 173.25 174.35 171.03763 23475600 173.23588
498 2017-12-21 174.17 176.02 174.10 175.01 171.68507 20949900 173.93503
499 2017-12-22 174.68 175.42 174.50 175.01 171.68507 16114600 173.86836
500 2017-12-26 170.80 171.47 169.68 170.57 167.32945 33185500 169.49315
501 2017-12-27 170.10 170.78 169.71 170.60 167.35887 21498200 169.28296
502 2017-12-28 171.00 171.85 170.48 171.08 167.82976 16480200 170.05325
503 2017-12-29 170.52 170.59 169.22 169.23 166.01491 25884400 168.60830
504 2018-01-02 170.16 172.30 169.26 172.26 168.98732 25555900 170.18244
505 2018-01-03 172.53 174.55 171.96 172.23 168.95789 29517900 171.82263
506 2018-01-04 172.54 173.47 172.08 173.03 169.74271 22434600 171.76424
507 2018-01-05 173.44 175.37 173.05 175.00 171.67528 23660000 173.36509
508 2018-01-08 174.35 175.61 173.93 174.35 171.03763 20567800 173.52587
509 2018-01-09 174.55 175.06 173.41 174.33 171.01800 21584000 173.16267
510 2018-01-10 173.16 174.30 173.00 174.29 170.97876 23959900 172.75959
511 2018-01-11 174.59 175.49 174.49 175.28 171.94995 18667700 173.97665
512 2018-01-12 176.18 177.36 175.65 177.09 173.72557 25226000 175.57852
513 2018-01-16 177.90 179.39 176.14 176.19 172.84268 29565900 176.12423
514 2018-01-17 176.15 179.25 175.07 179.10 175.69739 34386800 176.67246
515 2018-01-18 179.37 180.10 178.25 179.26 175.85434 31193400 178.06812
516 2018-01-19 178.61 179.58 177.41 178.46 175.06955 32425100 177.35319
517 2018-01-22 177.30 177.78 176.60 177.00 173.63728 27108600 176.00576
518 2018-01-23 177.30 179.44 176.82 177.04 173.67651 32689100 176.64551
519 2018-01-24 177.25 177.30 173.20 174.22 170.91008 51105100 173.80336
520 2018-01-25 174.51 174.95 170.53 171.11 167.85916 41529000 171.11305
521 2018-01-26 172.00 172.00 170.06 171.51 168.25156 39143000 170.10385
522 2018-01-29 170.16 170.16 167.07 167.96 164.76901 50640400 167.33301
523 2018-01-30 165.53 167.37 164.70 166.97 163.79784 46048200 165.28928
524 2018-01-31 166.87 168.44 166.50 167.43 164.24908 32478900 166.39636
525 2018-02-01 167.17 168.62 166.76 167.78 164.59244 47230800 166.65748
526 2018-02-02 166.00 166.80 160.10 160.50 157.45075 86593800 161.45025
527 2018-02-05 159.10 163.88 156.00 156.49 153.51695 72738500 157.79899
528 2018-02-06 154.83 163.72 154.00 163.03 159.93266 68243800 159.21755
529 2018-02-07 163.09 163.40 159.07 159.54 156.50897 51608600 159.65966
530 2018-02-08 160.29 161.00 155.03 155.15 152.20239 54390500 156.07746
531 2018-02-09 157.07 157.89 150.24 156.41 154.06406 70672600 154.06469
532 2018-02-12 158.50 163.89 157.51 162.71 160.26956 60819500 160.55652
533 2018-02-13 161.95 164.75 161.65 164.34 161.87511 32549200 162.75837
534 2018-02-14 163.04 167.54 162.88 167.37 164.85965 40644900 165.09322
535 2018-02-15 169.79 173.09 169.00 172.99 170.39537 51147200 170.82846
536 2018-02-16 172.36 174.82 171.77 172.43 169.84376 40176100 172.14459
537 2018-02-20 172.05 174.26 171.42 171.85 169.27246 33930500 171.65082
538 2018-02-21 172.83 174.12 171.01 171.07 168.50417 37471600 171.21139
539 2018-02-22 171.80 173.95 171.71 172.50 169.91270 30991900 171.85757
540 2018-02-23 173.67 175.65 173.54 175.50 172.86771 33812400 174.01923
541 2018-02-26 176.35 179.39 176.21 178.97 176.28567 38162200 177.29523
542 2018-02-27 179.10 180.48 178.16 178.39 175.71439 38928100 178.11813
543 2018-02-28 179.26 180.62 178.05 178.12 175.44841 37782100 178.03947
544 2018-03-01 178.54 179.78 172.66 175.00 172.37521 48802000 174.93841
545 2018-03-02 172.80 176.30 172.45 176.21 173.56708 38454000 174.10569
546 2018-03-05 175.21 177.74 174.52 176.82 174.16792 28401400 175.47598
547 2018-03-06 177.91 178.25 176.13 176.67 174.02017 23788500 176.13339
548 2018-03-07 174.94 175.85 174.27 175.03 172.40477 31703500 174.17493
549 2018-03-08 175.48 177.12 175.07 176.94 174.28612 23774100 175.49204
550 2018-03-09 177.96 180.00 177.39 179.98 177.28052 32185200 178.22351
551 2018-03-12 180.29 182.39 180.21 181.72 178.99442 32207100 180.53147
552 2018-03-13 182.59 183.50 179.24 179.97 177.27066 31693500 180.00356
553 2018-03-14 180.32 180.52 177.81 178.44 175.76361 29368400 178.03120
554 2018-03-15 178.50 180.24 178.07 178.65 175.97046 22743800 178.09349
555 2018-03-16 178.65 179.12 177.62 178.02 175.34992 39404700 177.36330
556 2018-03-19 177.32 177.47 173.66 175.30 172.67073 33446800 174.60025
557 2018-03-20 175.24 176.80 174.94 175.24 172.61162 19649400 174.78387
558 2018-03-21 175.04 175.09 171.26 171.27 168.70116 36338100 171.68372
559 2018-03-22 170.00 172.68 168.60 168.85 166.31746 41490800 169.19915
560 2018-03-23 168.39 169.92 164.94 164.94 162.46609 41028800 165.77536
561 2018-03-26 168.07 173.10 166.44 172.77 170.17868 37541200 169.90623
562 2018-03-27 173.68 175.15 166.92 168.34 165.81511 40922600 169.29503
563 2018-03-28 167.25 170.02 165.19 166.48 163.98300 41668500 166.39767
564 2018-03-29 167.81 171.75 166.90 167.78 165.26350 38398500 167.97117
565 2018-04-02 166.64 168.94 164.47 166.68 164.18001 37586800 165.86334
566 2018-04-03 167.64 168.75 164.88 168.39 165.86435 30278000 166.49812
567 2018-04-04 164.88 172.01 164.77 171.61 169.03607 34605500 168.60536
568 2018-04-05 172.58 174.23 172.08 172.80 170.20821 26933200 172.17273
569 2018-04-06 170.97 172.48 168.20 168.38 165.85452 35005300 168.84484
570 2018-04-09 169.88 173.09 169.85 170.05 167.49947 29017700 170.14649
571 2018-04-10 173.00 174.00 171.53 173.25 170.65146 28408600 172.06049
572 2018-04-11 172.23 173.92 171.70 172.44 169.85362 22431600 171.82454
573 2018-04-12 173.41 175.00 173.04 174.14 171.52811 22889300 173.18937
574 2018-04-13 174.78 175.84 173.85 174.73 172.10927 25124300 173.93309
575 2018-04-16 175.03 176.19 174.83 175.82 173.18292 21578400 174.73431
576 2018-04-17 176.49 178.94 176.41 178.24 175.56662 26509000 176.97221
577 2018-04-18 177.81 178.82 176.88 177.84 175.17261 20754500 176.95754
578 2018-04-19 173.76 175.39 172.66 172.80 170.20821 34808800 172.75274
579 2018-04-20 170.60 171.22 165.43 165.72 163.23439 65491100 166.62813
580 2018-04-23 166.83 166.92 164.09 165.24 162.76160 36515500 164.59053
581 2018-04-24 165.67 166.33 161.22 162.94 160.49611 33692000 162.68204
582 2018-04-25 162.62 165.42 162.41 163.65 161.19545 28382100 163.00848
583 2018-04-26 164.12 165.73 163.37 164.22 161.75691 27963000 163.61897
584 2018-04-27 164.00 164.33 160.63 162.32 159.88541 35655800 161.61514
585 2018-04-30 162.13 167.26 161.84 165.26 162.78130 42427400 163.96043
586 2018-05-01 166.41 169.20 165.27 169.10 166.56370 53569400 167.01124
587 2018-05-02 175.23 177.75 173.80 176.57 173.92168 66539400 175.15723
588 2018-05-03 175.88 177.50 174.44 176.89 174.23686 34068200 175.39229
589 2018-05-04 178.25 184.25 178.17 183.83 181.07277 56201300 181.16426
590 2018-05-07 185.18 187.67 184.75 185.16 182.38281 42451400 184.93427
591 2018-05-08 184.99 186.22 183.67 186.05 183.25948 28402800 184.38316
592 2018-05-09 186.55 187.40 185.22 187.36 184.54984 23211200 185.72328
593 2018-05-10 187.74 190.37 187.65 190.04 187.18962 27989300 188.40320
594 2018-05-11 189.49 190.06 187.45 188.59 186.47769 26212200 187.99590
595 2018-05-14 189.01 189.53 187.86 188.15 186.04262 20778800 187.81087
596 2018-05-15 186.78 187.07 185.10 186.44 184.35178 23695200 185.50726
597 2018-05-16 186.07 188.46 186.00 188.18 186.07227 19183100 186.84409
598 2018-05-17 188.00 188.91 186.36 186.99 184.89561 17294000 186.72187
599 2018-05-18 187.19 187.81 186.13 186.31 184.22324 18297700 186.05441
600 2018-05-21 188.00 189.27 186.91 187.63 185.52844 18400800 187.23615
601 2018-05-22 188.38 188.88 186.78 187.16 185.06372 15240700 186.90791
602 2018-05-23 186.35 188.50 185.76 188.36 186.25026 20058400 186.83675
603 2018-05-24 188.77 188.84 186.21 188.15 186.04262 23234000 187.03087
604 2018-05-25 188.23 189.65 187.65 188.58 186.46780 17461000 187.92260
605 2018-05-29 187.60 188.75 186.87 187.90 185.79542 22514100 187.13847
606 2018-05-30 187.72 188.00 186.78 187.50 185.39989 18690500 186.72663
607 2018-05-31 187.22 188.23 186.14 186.87 184.77695 27482800 186.38231
608 2018-06-01 187.99 190.26 187.75 190.24 188.10922 23442500 188.70641
609 2018-06-04 191.64 193.42 191.35 191.83 189.68140 26266200 191.48380
610 2018-06-05 193.07 193.94 192.36 193.31 191.14482 21566000 192.48161
611 2018-06-06 193.63 194.08 191.92 193.98 191.80731 20933600 192.60244
612 2018-06-07 194.14 194.20 192.34 193.46 191.29315 21347200 192.61105
613 2018-06-08 191.17 192.00 189.77 191.70 189.55286 26656800 190.44095
614 2018-06-11 191.35 191.97 190.21 191.23 189.08813 18308500 190.42271
615 2018-06-12 191.39 192.61 191.15 192.28 190.12636 16911100 191.29545
616 2018-06-13 192.42 192.88 190.44 190.70 188.56406 21638400 190.62802
617 2018-06-14 191.55 191.57 190.22 190.80 188.66293 21610100 190.15098
618 2018-06-15 190.03 190.16 188.26 188.84 186.72488 61719200 188.38163
619 2018-06-18 187.88 189.22 187.20 188.74 186.62602 18484900 187.68201
620 2018-06-19 185.14 186.33 183.45 185.69 183.61017 33578500 184.46339
621 2018-06-20 186.35 187.20 185.73 186.50 184.41110 20628700 185.78036
622 2018-06-21 187.25 188.35 184.94 185.46 183.38275 25711900 185.55759
623 2018-06-22 186.12 186.15 184.70 184.92 182.84878 27200400 184.56626
624 2018-06-25 183.40 184.92 180.73 182.17 180.12961 31663100 181.92653
625 2018-06-26 182.99 186.53 182.54 184.43 182.36429 24569200 183.81143
626 2018-06-27 185.23 187.28 184.03 184.16 182.09732 25285300 184.46911
627 2018-06-28 184.10 186.21 183.80 185.50 183.42230 17365200 184.47744
628 2018-06-29 186.29 187.19 182.91 185.11 183.03668 22737700 184.37890
629 2018-07-02 183.82 187.30 183.42 187.18 185.08347 17731300 185.26782
630 2018-07-03 187.79 187.95 183.54 183.92 181.85998 13954800 184.44999
631 2018-07-05 185.26 186.41 184.28 185.40 183.32341 16604200 184.67114
632 2018-07-06 185.42 188.43 185.20 187.97 185.86465 17485200 186.49821
633 2018-07-09 189.50 190.68 189.30 190.58 188.44540 19756600 189.47513
634 2018-07-10 190.71 191.28 190.18 190.35 188.21797 15939100 189.89265
635 2018-07-11 188.50 189.78 187.61 187.88 185.77565 18831500 187.72188
636 2018-07-12 189.53 191.41 189.31 191.03 188.89037 18041100 189.87012
637 2018-07-13 191.08 191.84 190.90 191.33 189.18701 12513900 190.64233
638 2018-07-16 191.52 192.65 190.42 190.91 188.77171 15043100 190.61390
639 2018-07-17 189.75 191.87 189.20 191.45 189.30566 15534500 190.12522
640 2018-07-18 191.78 191.80 189.93 190.40 188.26741 16393400 189.99914
641 2018-07-19 189.69 192.55 189.69 191.88 189.73085 20286800 190.65695
642 2018-07-20 191.78 192.43 190.17 191.44 189.29578 20676200 190.63192
643 2018-07-23 190.68 191.96 189.56 191.61 189.46387 15989400 190.32796
644 2018-07-24 192.45 193.66 192.05 193.00 190.83830 18697900 192.18277
645 2018-07-25 193.06 194.85 192.43 194.82 192.63792 16709900 193.30597
646 2018-07-26 194.61 195.96 193.61 194.21 192.03476 19076000 193.86826
647 2018-07-27 194.99 195.19 190.10 190.98 188.84091 24024000 191.37697
648 2018-07-30 191.90 192.20 189.07 189.91 187.78291 21029500 189.68431
649 2018-07-31 190.30 192.14 189.34 190.29 188.15865 39373000 189.87955
650 2018-08-01 199.13 201.76 197.31 201.50 199.24309 67935700 199.43769
651 2018-08-02 200.58 208.38 200.35 207.39 205.06712 62404000 204.59904
652 2018-08-03 207.03 208.74 205.48 207.99 205.66042 33447400 206.62681
653 2018-08-06 208.00 209.25 207.07 209.07 206.72832 25425400 207.68277
654 2018-08-07 209.32 209.50 206.76 207.11 204.79027 25587400 207.01675
655 2018-08-08 206.05 207.81 204.52 207.25 204.92870 22525500 205.75290
656 2018-08-09 207.28 209.78 207.20 208.88 206.54044 23469200 207.84014
657 2018-08-10 207.36 209.10 206.67 207.53 205.92523 24611200 207.23175
658 2018-08-13 207.70 210.95 207.70 208.87 207.25488 25869100 208.63496
659 2018-08-14 210.16 210.56 208.26 209.75 208.12807 20748000 208.98269
660 2018-08-15 209.22 210.74 208.33 210.24 208.61427 28807600 209.22809
661 2018-08-16 211.75 213.81 211.47 213.32 211.67047 28500400 212.31682
662 2018-08-17 213.44 217.95 213.16 217.58 215.89752 35427000 215.66917
663 2018-08-20 218.10 219.18 215.11 215.46 213.79393 30287700 216.02797
664 2018-08-21 216.80 217.19 214.03 215.04 213.37717 26159800 214.86572
665 2018-08-22 214.10 216.36 213.84 215.05 213.38709 19018100 214.52903
666 2018-08-23 214.65 217.05 214.60 215.49 213.82369 18883200 215.15790
667 2018-08-24 216.60 216.90 215.11 216.16 214.48850 18476400 215.49950
668 2018-08-27 217.15 218.74 216.33 217.94 216.25475 20525100 217.10825
669 2018-08-28 219.01 220.54 218.92 219.70 218.00113 22776800 219.15371
670 2018-08-29 220.15 223.49 219.41 222.98 221.25575 27254800 221.38525
671 2018-08-30 223.25 228.26 222.40 225.03 223.28992 48793800 224.64997
672 2018-08-31 226.51 228.87 226.00 227.63 225.86981 43340100 226.91327
673 2018-09-04 228.41 229.18 226.63 228.36 226.59416 27390100 227.46805
674 2018-09-05 228.99 229.67 225.10 226.87 225.11568 33333000 226.62856
675 2018-09-06 226.23 227.35 221.30 223.10 221.37485 34290000 223.34162
676 2018-09-07 221.85 225.37 220.71 221.30 219.58876 37619800 221.88959
677 2018-09-10 220.95 221.85 216.47 218.33 216.64172 39516500 218.32058
678 2018-09-11 218.01 224.30 216.56 223.85 222.11905 35749000 220.99302
679 2018-09-12 224.94 225.00 219.84 221.07 219.36055 49278700 221.40018
680 2018-09-13 223.52 228.35 222.57 226.41 224.65926 41706400 225.19309
681 2018-09-14 225.75 226.84 222.52 223.84 222.10910 31999300 223.82303
682 2018-09-17 222.15 222.95 217.27 217.88 216.19521 37195100 218.80507
683 2018-09-18 217.79 221.85 217.12 218.24 216.55241 31571700 218.50747
684 2018-09-19 218.50 219.62 215.30 218.37 216.68141 27123800 217.20047
685 2018-09-20 220.24 222.28 219.15 220.03 218.32858 26608800 219.91953
686 2018-09-21 220.78 221.36 217.29 217.66 215.97691 96246700 218.20897
687 2018-09-24 216.82 221.26 216.63 220.79 219.08269 27693400 218.99090
688 2018-09-25 219.75 222.82 219.70 222.19 220.47188 24554400 220.99729
689 2018-09-26 221.00 223.75 219.76 220.42 218.71556 23984700 220.74185
690 2018-09-27 223.82 226.44 223.54 224.95 223.21053 30181200 224.39684
691 2018-09-28 224.79 225.84 224.02 225.74 223.99443 22929400 224.61814
692 2018-10-01 227.95 229.42 226.35 227.26 225.50267 23600800 227.09089
693 2018-10-02 227.25 230.00 226.63 229.28 227.50705 24788200 228.04568
694 2018-10-03 230.05 233.47 229.78 232.07 230.27548 28654800 231.17516
695 2018-10-04 230.78 232.35 226.73 227.99 226.22704 32042000 228.43568
696 2018-10-05 227.96 228.41 220.58 224.29 222.55563 33580500 223.84855
697 2018-10-08 222.21 224.80 220.20 223.77 222.03966 29663900 222.34655
698 2018-10-09 223.64 227.27 222.25 226.87 225.11568 26891000 224.87856
699 2018-10-10 225.46 226.35 216.05 216.36 214.68697 41990600 219.02899
700 2018-10-11 214.52 219.50 212.32 214.45 212.79172 53124400 214.87058
701 2018-10-12 220.42 222.88 216.84 222.11 220.39250 40337900 220.03750
702 2018-10-15 221.16 221.83 217.27 217.36 215.67923 30791000 218.25975
703 2018-10-16 218.93 222.99 216.76 222.15 220.43218 29184000 220.06072
704 2018-10-17 222.30 222.64 219.34 221.19 219.47961 22885400 220.48654
705 2018-10-18 217.86 219.74 213.00 216.02 214.34958 32581300 215.69653
706 2018-10-19 218.06 221.26 217.43 219.31 217.61415 33078700 218.76805
707 2018-10-22 219.79 223.36 218.94 220.65 218.94377 28792100 220.41459
708 2018-10-23 215.83 223.25 214.70 222.73 221.00769 38767800 219.65256
709 2018-10-24 222.60 224.23 214.54 215.09 213.42677 40925500 217.39892
710 2018-10-25 217.71 221.38 216.75 219.80 218.10036 29855800 218.74345
711 2018-10-26 215.90 220.19 212.67 216.30 214.62741 47258400 215.82914
712 2018-10-29 219.19 219.69 206.09 212.24 210.59883 45935500 212.12628
713 2018-10-30 211.15 215.18 209.27 213.30 211.65062 36660000 212.03354
714 2018-10-31 216.88 220.45 216.62 218.86 217.16762 38358900 218.07920
715 2018-11-01 219.05 222.36 216.81 222.22 220.50163 58323200 219.89054
716 2018-11-02 209.55 213.65 205.43 207.48 205.87561 91328700 208.31853
717 2018-11-05 204.30 204.39 198.17 201.59 200.03116 66163700 200.86372
718 2018-11-06 201.92 204.72 201.69 203.77 202.19432 31882900 202.86811
719 2018-11-07 205.97 210.06 204.13 209.95 208.32651 33424400 207.50550
720 2018-11-08 209.98 210.12 206.75 208.49 207.59966 25362600 208.15655
721 2018-11-09 205.55 206.01 202.25 204.47 203.59682 34365800 203.95227
722 2018-11-12 199.00 199.85 193.79 194.17 193.34079 51135500 195.66026
723 2018-11-13 191.63 197.18 191.45 192.23 191.40907 46882900 193.34635
724 2018-11-14 193.90 194.48 185.93 186.80 186.00227 60801000 188.80409
725 2018-11-15 188.39 191.97 186.90 191.41 190.59259 46478800 189.82086
726 2018-11-16 190.50 194.97 189.46 193.53 192.70354 36928300 192.37785
727 2018-11-19 190.00 190.70 184.99 185.86 185.06628 41925300 186.91876
728 2018-11-20 178.37 181.47 175.51 176.98 176.22420 67825200 177.73473
729 2018-11-21 179.73 180.27 176.55 176.78 176.02506 31124200 177.61502
730 2018-11-23 174.94 176.60 172.10 172.29 171.55423 23624000 173.41808
731 2018-11-26 174.24 174.95 170.26 174.62 173.87428 44998500 173.02809
732 2018-11-27 171.51 174.77 170.88 174.24 173.49591 41387400 173.04864
733 2018-11-28 176.73 181.29 174.93 180.94 180.16730 46062500 178.79576
734 2018-11-29 182.66 182.80 177.70 179.55 178.78323 41770000 179.76108
735 2018-11-30 180.29 180.33 177.03 178.58 177.81738 39531500 178.39246
736 2018-12-03 184.46 184.94 181.21 184.82 184.03073 40802500 183.39358
737 2018-12-04 180.95 182.39 176.27 176.69 175.93545 41344300 178.19849
738 2018-12-06 171.76 174.78 170.42 174.72 173.97386 43098400 173.05795
739 2018-12-07 173.49 174.49 168.30 168.49 167.77048 42281600 170.18683
740 2018-12-10 165.00 170.09 163.33 169.60 168.87573 62026000 167.43191
741 2018-12-11 171.66 171.79 167.00 168.63 167.90987 47281700 168.89995
742 2018-12-12 170.40 171.92 169.02 169.10 168.37787 35627700 169.77262
743 2018-12-13 170.49 172.57 169.55 170.95 170.21995 31898600 170.77999
744 2018-12-14 169.00 169.08 165.28 165.48 164.77331 40703700 166.37777
745 2018-12-17 165.45 168.35 162.73 163.94 163.23990 44287900 164.77330
746 2018-12-18 165.38 167.53 164.39 166.07 165.36081 33841500 165.76027
747 2018-12-19 166.00 167.45 159.09 160.89 160.20293 49047300 162.24764
748 2018-12-20 160.40 162.11 155.30 156.83 156.16026 64773000 157.85676
749 2018-12-21 156.86 158.16 149.63 150.73 150.08630 95744600 152.62544
750 2018-12-24 148.15 151.55 146.59 146.83 146.20297 37169200 148.11432
751 2018-12-26 148.30 157.23 146.72 157.17 156.49881 58582500 153.48294
752 2018-12-27 155.84 156.77 150.07 156.15 155.48315 53117100 154.10772
753 2018-12-28 157.50 158.52 154.55 156.23 155.56282 42291400 156.21094
754 2018-12-31 158.53 159.36 156.48 157.74 157.06638 35003500 157.63546
|
100 knock/01.ipynb | ###Markdown
Day 1
###Code
import pandas as pd
customer_master = pd.read_csv('csv/customer_master.csv')
customer_master.head()
item_master = pd.read_csv('csv/item_master.csv')
item_master.head()
transaction_1 = pd.read_csv('csv/transaction_1.csv')
transaction_1.head()
transaction_detail_1 = pd.read_csv('csv/transaction_detail_1.csv')
transaction_detail_1.head()
###Output
_____no_output_____
###Markdown
Day2
###Code
transaction_2 = pd.read_csv('csv/transaction_2.csv')
transaction = pd.concat([transaction_1, transaction_2], ignore_index = True)
transaction.head()
[*map(len, (transaction_1, transaction_2, transaction))]
transaction_detail_2 = pd.read_csv('csv/transaction_detail_2.csv')
transaction_detail = pd.concat([transaction_detail_1, transaction_detail_2], ignore_index = True)
transaction_detail.head()
###Output
_____no_output_____
###Markdown
Day3
###Code
join_data = pd.merge(transaction_detail, transaction[["transaction_id", "payment_date", "customer_id"]], on = "transaction_id", how = "left")
join_data.head()
[*map(len, (transaction_detail, transaction, join_data))]
###Output
_____no_output_____
###Markdown
Day4
###Code
join_data = pd.merge(join_data, customer_master, on = "customer_id", how = "left")
join_data = pd.merge(join_data, item_master, on = "item_id", how = "left")
join_data.head()
join_data["price"] = join_data["quantity"] * join_data["item_price"]
join_data[["quantity", "item_price", "price"]].head()
###Output
_____no_output_____
###Markdown
Day6
###Code
join_data["price"].sum(), transaction["price"].sum()
###Output
_____no_output_____
###Markdown
Day7 `isnull()` returns the missing values as a boolean mask
###Code
join_data.isnull().sum()
join_data.describe()
print(join_data["payment_date"].min())
print(join_data["payment_date"].max())
###Output
2019-02-01 01:36:57
2019-07-31 23:41:38
###Markdown
Day8
###Code
join_data.dtypes
join_data["payment_date"] = pd.to_datetime(join_data["payment_date"])
join_data["payment_month"] = join_data["payment_date"].dt.strftime("%Y%m")
join_data[["payment_date", "payment_month"]].head()
join_data.groupby("payment_month").sum()["price"]
join_data.groupby(["payment_month", "item_name"]).sum()[["price", "quantity"]]
pd.pivot_table(join_data, index = 'item_name', columns = 'payment_month', values = ['price', 'quantity'], aggfunc = 'sum')
###Output
_____no_output_____
###Markdown
Day10
###Code
graph_data = pd.pivot_table(join_data, index = 'payment_month', columns = 'item_name', values = 'price', aggfunc = 'sum')
graph_data.head()
import matplotlib.pyplot as plt
%matplotlib inline
for c in "ABCDE":
plt.plot(list(graph_data.index), graph_data[f"PC-{c}"], label = f'PC-{c}')
plt.legend()
###Output
_____no_output_____ |
Statistics for Classification.ipynb | ###Markdown
**Statistics for Classification**
###Code
import numpy as np
import matplotlib.pyplot as plt
import random
%matplotlib inline
###Output
_____no_output_____
###Markdown
**Logistic Regression**
###Code
np.random.seed(894288)
fg_index = np.linspace(0,100,6)
stock_index_change = (fg_index-50)*0.3 - np.sign((fg_index-50)*0.3)*8
plt.figure(figsize=(10,6))
plt.scatter(fg_index[stock_index_change > 0],
stock_index_change[stock_index_change > 0],
s=200,
marker=6,
label="Up")
plt.scatter(fg_index[stock_index_change < 0],
stock_index_change[stock_index_change < 0],
s=200,
marker=7,
label="Down")
plt.hlines(0,0,100,label="Neutral line")
plt.xlabel("Fear & Greedy Index",fontsize=20)
plt.ylabel("Stock Index Change",fontsize=20)
plt.legend(ncol=3);
slope, intercept = 0.1, 0
fig, ax1 = plt.subplots(figsize=(10,6))
ax1.scatter(fg_index[stock_index_change > 0],
(slope*fg_index + intercept)[stock_index_change > 0],
s=200,
marker=6,
label = "Up")
ax1.scatter(fg_index[stock_index_change < 0],
(slope*fg_index + intercept)[stock_index_change < 0],
s=200,
marker=7,
label = "Down")
ax1.plot(fg_index, slope*fg_index + intercept,
linewidth=2,
c="red",
label="Odds")
ax2 = ax1.twinx()
ax2.scatter(fg_index[stock_index_change > 0],
stock_index_change[stock_index_change > 0],
s=100,
marker=6,
label="Up")
ax2.scatter(fg_index[stock_index_change < 0],
stock_index_change[stock_index_change < 0],
s=100,
marker=7,
label="Down")
ax2.arrow(fg_index[2],stock_index_change[2]-0.5,0,-4.5,head_width=2, head_length=1, fc='k', ec='k',linewidth=2)
ax1.set_xlabel("Fear & Greedy Index",fontsize=20)
ax1.set_ylabel("Odds",fontsize=20)
ax2.set_ylabel("Stock Index Change",fontsize=20)
plt.legend(fontsize=20);
###Output
_____no_output_____
###Markdown
**Change intercept**
###Code
slope, intercept = 0.1, -5
fig, ax1 = plt.subplots(figsize=(10,6))
ax1.scatter(fg_index[stock_index_change > 0],
(slope*fg_index + intercept)[stock_index_change > 0],
s=200,
marker=6,
label = "Up")
ax1.scatter(fg_index[stock_index_change < 0],
(slope*fg_index + intercept)[stock_index_change < 0],
s=200,
marker=7,
label = "Down")
ax1.plot(fg_index, slope*fg_index + intercept,
linewidth=2,
c="red")
ax2 = ax1.twinx()
ax2.scatter(fg_index[stock_index_change > 0],
stock_index_change[stock_index_change > 0],
s=100,
marker=6,
label="Up")
ax2.scatter(fg_index[stock_index_change < 0],
stock_index_change[stock_index_change < 0],
s=100,
marker=7,
label="Down")
ax2.arrow(fg_index[2],stock_index_change[2]-0.5,0,-4.5,head_width=2, head_length=1, fc='k', ec='k',linewidth=2)
# ax1.axhline(0,linewidth=2,linestyle=":",label="Critical Odds value")
ax1.set_xlabel("Fear & Greedy Index",fontsize=20)
ax1.set_ylabel("Odds - 5",fontsize=20)
ax2.set_ylabel("Stock Index Change",fontsize=20)
plt.legend(fontsize=20);
###Output
_____no_output_____
###Markdown
**Logistic function**
###Code
def logistic(x):
return 1 / (1 + np.exp(-x))
def cal_shifted_odds(val, slope, intercept):
return val*slope + intercept
slope, intercept = 0.1, -5
fig, ax1 = plt.subplots(figsize=(10,6))
shifted_odds = cal_shifted_odds(fg_index,slope,intercept)
ax1.scatter(fg_index[stock_index_change > 0],
shifted_odds[stock_index_change > 0],
s=200,
marker=6,
label = "Up")
ax1.scatter(fg_index[stock_index_change < 0],
shifted_odds[stock_index_change < 0],
s=200,
marker=7,
label = "Down")
ax1.plot(fg_index, shifted_odds,
linewidth=2,
c="red")
ax2 = ax1.twinx()
ax2.scatter(fg_index[stock_index_change > 0],
logistic(shifted_odds)[stock_index_change > 0],
s=100,
marker=6,
label="Up")
ax2.scatter(fg_index[stock_index_change < 0],
logistic(shifted_odds)[stock_index_change < 0],
s=100,
marker=7,
label="Down")
fg_grids = np.linspace(0,100,100)
ax2.plot(fg_grids,
logistic(cal_shifted_odds(fg_grids,slope,intercept)),
linewidth=4,
linestyle=":",
c="green")
ax1.set_xlabel("Fear & Greedy Index",fontsize=20)
ax1.set_ylabel("Odds - 5",fontsize=20)
ax2.set_ylabel("Probability of Going Up",fontsize=20)
plt.legend(fontsize=20);
###Output
_____no_output_____
###Markdown
**Test positiveness**
###Code
slope, intercept, threshold = 0.1, -5, 0.5
fig, ax1 = plt.subplots(figsize=(10,6))
shifted_odds = cal_shifted_odds(fg_index,slope,intercept)
ax1.axhline(threshold,
linewidth=2,
c="red",
label="Threshold")
ax1.scatter(fg_index[stock_index_change > 0],
logistic(shifted_odds)[stock_index_change > 0],
s=100,
marker=6,
label="Up")
ax1.scatter(fg_index[stock_index_change < 0],
logistic(shifted_odds)[stock_index_change < 0],
s=100,
marker=7,
label="Down")
ax1.scatter(fg_index[logistic(cal_shifted_odds(fg_index,slope,intercept)) > threshold],
logistic(cal_shifted_odds(fg_index,slope,intercept))[logistic(cal_shifted_odds(fg_index,slope,intercept)) > threshold],
s=400,
facecolors='none',
edgecolors='b')
ax1.set_xlabel("Fear & Greedy Index",fontsize=20)
ax1.set_ylabel("Probability of Going Up",fontsize=20)
ax1.legend(fontsize=20);
slope, intercept, threshold = 0.1, -5, 0.8
fig, ax1 = plt.subplots(figsize=(10,6))
shifted_odds = cal_shifted_odds(fg_index,slope,intercept)
ax1.axhline(threshold,
linewidth=2,
c="red",
label="Threshold")
ax1.scatter(fg_index[stock_index_change > 0],
logistic(shifted_odds)[stock_index_change > 0],
s=100,
marker=6,
label="Up")
ax1.scatter(fg_index[stock_index_change < 0],
logistic(shifted_odds)[stock_index_change < 0],
s=100,
marker=7,
label="Down")
ax1.scatter(fg_index[logistic(cal_shifted_odds(fg_index,slope,intercept)) > threshold],
logistic(cal_shifted_odds(fg_index,slope,intercept))[logistic(cal_shifted_odds(fg_index,slope,intercept)) > threshold],
s=400,
facecolors='none',
edgecolors='b')
ax1.set_xlabel("Fear & Greedy Index",fontsize=20)
ax1.set_ylabel("Probability of Going Up",fontsize=20)
ax1.legend(fontsize=20);
###Output
_____no_output_____
###Markdown
**Maximize likelihood**
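In symbols, writing $p_i = \sigma(\mathrm{slope}\cdot x_i + \mathrm{intercept})$ for the predicted probability that the index goes up at Fear & Greed value $x_i$, the cells below compute the likelihood $L = \prod_{i:\,\mathrm{up}} p_i \cdot \prod_{i:\,\mathrm{down}} (1-p_i)$; fitting the logistic regression amounts to choosing the slope and intercept that make $L$ as large as possible.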
###Code
probs = logistic(cal_shifted_odds(fg_index,slope,intercept))
probs
np.prod(probs[stock_index_change>0])*np.prod(1-probs[stock_index_change<0])
###Output
_____no_output_____
###Markdown
**Another set of parameters**
###Code
probs = logistic(cal_shifted_odds(fg_index,
slope=0.11,intercept=-5.5))
np.prod(probs[stock_index_change>0])*np.prod(1-probs[stock_index_change<0])
from sklearn.linear_model import LogisticRegression
regressor = LogisticRegression(penalty="none",
solver="newton-cg").fit(fg_index.reshape(-1,1),
stock_index_change>0)
print("slope: ",regressor.coef_[0][0])
print("intercept: ",regressor.intercept_[0])
probs = logistic(cal_shifted_odds(fg_index,
slope=0.06070138,intercept=-3.03506894))
np.prod(probs[stock_index_change>0])*np.prod(1-probs[stock_index_change<0])
###Output
_____no_output_____
###Markdown
**Naïve Bayesian Classification from Scratch** **sample data**
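The classifier below rests on the naive Bayes assumption that the features are conditionally independent given the class, so that $P(\mathrm{risk} \mid x_1,\dots,x_n) \propto P(\mathrm{risk}) \prod_j P(x_j \mid \mathrm{risk})$: `build_probabilities` estimates the prior and the per-feature conditional probabilities from counts, and `predict` multiplies them together and normalizes.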
###Code
import pandas as pd
from collections import Counter
stroke_risk = pd.read_csv("strok_risk.csv")
stroke_risk
def build_probabilities(df,feature_columns:list, category_variable:str):
# build prior probabilities
prior_probability = Counter(df[category_variable])
# build conditional probabilities
conditional_probabilities = {}
for key in prior_probability:
conditional_probabilities[key] = {}
# for each category variable cases
for feature in feature_columns:
feature_kinds = set(np.unique(df[feature]))
feature_dict = Counter(df[df[category_variable]==key][feature])
for possible_feature in feature_kinds:
if possible_feature not in feature_dict:
feature_dict[possible_feature] = 0
total = sum(feature_dict.values())
for feature_level in feature_dict:
feature_dict[feature_level] /= total
conditional_probabilities[key] [feature] = feature_dict
return prior_probability, conditional_probabilities
prior_prob, conditional_prob = build_probabilities(stroke_risk,
feature_columns=["weight","high_oil_diet","smoking"],
category_variable="stroke_risk")
from pprint import pprint
pprint(conditional_prob)
###Output
{'high': {'high_oil_diet': Counter({'yes': 0.5, 'no': 0.5}),
'smoking': Counter({'yes': 1.0, 'no': 0.0}),
'weight': Counter({'high': 0.75, 'middle': 0.25, 'low': 0.0})},
'low': {'high_oil_diet': Counter({'no': 0.75, 'yes': 0.25}),
'smoking': Counter({'no': 0.75, 'yes': 0.25}),
'weight': Counter({'low': 0.5, 'middle': 0.5, 'high': 0.0})},
'middle': {'high_oil_diet': Counter({'yes': 0.6666666666666666,
'no': 0.3333333333333333}),
'smoking': Counter({'no': 0.6666666666666666,
'yes': 0.3333333333333333}),
'weight': Counter({'middle': 0.6666666666666666,
'low': 0.3333333333333333,
'high': 0.0})}}
###Markdown
**prediction function**
###Code
def predict(prior_prob, conditional_prob, feature_values:dict):
probs = {}
total = sum(prior_prob.values())
for key in prior_prob:
probs[key] = prior_prob[key]/total
for key in probs:
posterior_dict = conditional_prob[key]
for feature_name, feature_level in feature_values.items():
probs[key] *= posterior_dict[feature_name][feature_level]
total = sum(probs.values())
if total == 0:
print("Undetermined!")
else:
for key in probs:
probs[key]/= total
return probs
predict(prior_prob,conditional_prob,{"weight":"middle","high_oil_diet":"no","smoking":"yes"})
predict(prior_prob,conditional_prob,{"weight":"high","high_oil_diet":"no","smoking":"no"})
###Output
Undetermined!
###Markdown
**Overfitting and underfitting examples**
###Code
plt.figure(figsize=(10,6))
x_coor = [1,2,3,4,5,6,7]
y_coor = [3,8,5,7,10,9,15]
plt.scatter(x_coor,y_coor);
styles=[":","--","-"]
plt.figure(figsize=(10,6))
x = np.linspace(1,7,20)
for idx, degree in enumerate(range(1,6,2)):
coef = np.polyfit(x_coor,y_coor,degree)
y = np.polyval(coef,x)
plt.plot(x,y,
linewidth=4,
linestyle=styles[idx],
label="degree {}".format(str(degree)))
plt.scatter(x_coor,y_coor,
s=400,
label="Original Data",
marker="o");
plt.legend();
###Output
_____no_output_____
###Markdown
**Beyond the original data**
###Code
styles=[":","--","-"]
plt.figure(figsize=(10,6))
x = np.linspace(0,8,40)
for idx, degree in enumerate(range(1,6,2)):
coef = np.polyfit(x_coor,y_coor,degree)
y = np.polyval(coef,x)
plt.plot(x,y,
linewidth=4,
linestyle=styles[idx],
label="degree {}".format(str(degree)))
plt.scatter(x_coor,y_coor,
s=400,
label="Original Data",
marker="o");
plt.legend();
###Output
_____no_output_____
###Markdown
**Cross-validation**
###Code
stroke_risk = pd.read_csv("strok_risk.csv")
stroke_risk
def map_feature(feature_value, feature):
if feature == "weight" or feature == "stroke_risk":
if feature_value == "low":
return 0
elif feature_value == "middle":
return 1
else:
return 2
elif feature == "high_oil_diet" or feature == "smoking":
if feature_value == "yes":
return 1
else:
return 0
else:
print("No such feature: {}".format(feature))
stroke_risk.weight = stroke_risk.weight.map(lambda x: map_feature(x,"weight"))
stroke_risk.high_oil_diet = stroke_risk.high_oil_diet.map(lambda x: map_feature(x,"high_oil_diet"))
stroke_risk.smoking = stroke_risk.smoking.map(lambda x: map_feature(x,"smoking"))
stroke_risk.stroke_risk = stroke_risk.stroke_risk.map(lambda x: map_feature(x,"stroke_risk"))
stroke_risk
from sklearn.linear_model import LogisticRegressionCV
X = stroke_risk[["weight","high_oil_diet","smoking"]]
y = stroke_risk["stroke_risk"]
classifier = LogisticRegressionCV(cv=3,random_state=2020,multi_class="auto").fit(X,y)
classifier.get_params
###Output
_____no_output_____
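###Markdown
With `cv=3`, the rows are split into three folds; the estimator is repeatedly trained on two folds and validated on the remaining one to choose its regularization strength. A rough illustration of the splitting idea is sketched below (an added aside; `LogisticRegressionCV` performs its own splits internally).
###Code
from sklearn.model_selection import KFold

# Three train/validation splits over the rows of X (illustration only)
for train_idx, val_idx in KFold(n_splits=3).split(X):
    print(len(train_idx), "train rows /", len(val_idx), "validation rows")
###Output
_____no_output_____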
###Markdown
**Examine the shape of the input data**
###Code
X.values[:1, :]
###Output
_____no_output_____
###Markdown
**Predict the probability**
###Code
classifier.predict_proba(np.array([[1.5,0.5,2]]))
###Output
/opt/conda/lib/python3.8/site-packages/sklearn/base.py:450: UserWarning: X does not have valid feature names, but LogisticRegressionCV was fitted with feature names
warnings.warn(
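###Markdown
The warning above is only about input format: the model was fitted on a DataFrame (so it remembers the column names), while `predict_proba` received a bare NumPy array. One way to avoid it (an added aside, not part of the original notebook) is to wrap the query point in a DataFrame with the same columns:
###Code
classifier.predict_proba(pd.DataFrame([[1.5, 0.5, 2]], columns=X.columns))
###Output
_____no_output_____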
|
modules/02-data-organization-and-visualization/09-pandas-series-missing-values-and-concatenation.ipynb | ###Markdown
A NumPy array returns NaN (not a number) for mean() if there is a NaN value inside it. A pandas Series returns the mean() discarding the NaN values (if any exist).
###Code
numpyArr = np.array([1, 2, 3, np.nan]); numpyArr
numpyArr.mean()
pandasArr = pd.Series([1, 2, 3, np.nan]); pandasArr
pandasArr.mean()
###Output
_____no_output_____
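###Markdown
The difference above comes from the default `skipna=True` of `Series.mean()`; passing `skipna=False` reproduces the NumPy result, while NumPy's own `np.nanmean` ignores the NaN just as pandas does.
###Code
pandasArr.mean(skipna=False)   # nan, like the plain NumPy mean
np.nanmean(numpyArr)           # 2.0, ignoring the NaN
###Output
_____no_output_____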
###Markdown
Series Concatenation
###Code
serie_01 = pd.Series(np.random.rand(5)); serie_01
serie_02 = pd.Series(np.random.rand(5)); serie_02
serieConcat = pd.concat([serie_01, serie_02]); serieConcat
# The indexes aren't unique:
serieConcat[0]
# To fix it:
serieConcat.index = range(serieConcat.count()); serieConcat
# This re-indexing will generate missing values:
serieConcat.reindex([0, 6, 12, 18])
# There are some techniques to help us filling these values.
# The "fill_value" parameter:
serieConcat.reindex([0, 6, 12, 18], fill_value = 0)
# The "ffill()" method:
newSerie = serieConcat.reindex([0, 6, 12, 18])
newSerie.ffill()
# The "bfill()" method:
newSerie = serieConcat.reindex([0, 6, 12, 18])
newSerie[18] = 0.123456
newSerie.bfill()
# The "fillna()" method:
newSerie = serieConcat.reindex([0, 6, 12, 18])
newSerie.fillna(1)
###Output
_____no_output_____
###Markdown
Different Indexes
###Code
mySerie01 = pd.Series(np.random.randn(5)); mySerie01
mySerie02 = pd.Series(np.random.randn(5)); mySerie02
mySerie01 + mySerie02
# Changing mySerie02 indexes:
mySerie02.index = list(range(3, 8)); mySerie02
# Now, the series sum will generate missing values:
mySerie01 + mySerie02
# Re-indexing and filling values for both series to sum them:
mySerie01.reindex(range(10), fill_value = 0) + mySerie02.reindex(range(10), fill_value = 0)
###Output
_____no_output_____
###Markdown
Copying Series
###Code
mySerie = pd.Series(['A', 'B', 'C']); mySerie
mySerieCopy = mySerie.copy(); mySerieCopy
mySerieCopy[2] = 'Z'; mySerieCopy
mySerie
###Output
_____no_output_____
###Markdown
Pandas Map() Function
###Code
mySerie = pd.Series(range(3), index = ['A', 'B', 'C']); mySerie
mySerie.map(lambda number: number + 10)
# You can reference the indexes by numbers, even if they are shown as strings:
mySerie.map({0: 'Apple', 1: 'Banana', 2: 'Cherry'})
###Output
_____no_output_____ |
Weather Forecast.ipynb | ###Markdown
Before you start using this library, you need to get your API key by registering at https://darksky.net/dev/register and then collecting it from https://darksky.net/dev/account
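One small aside (not from the original notebook): rather than pasting the key into the code, you can keep it in an environment variable and read it at run time, for example:
###Code
import os

# DARKSKY_API_KEY is a hypothetical variable name -- export it in your shell first
API_KEY = os.environ.get('DARKSKY_API_KEY')
# then: with forecast(API_KEY, *COLLEGEPARK) as collegepark: ...
###Output
_____no_output_____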
###Code
## Replace the API_key
pip install darkskylib
from darksky import forecast
from datetime import date, timedelta
#Change the location of your own
COLLEGEPARK = 38.9897, 76.9378
weekday = date.today()
with forecast('API_Key', *COLLEGEPARK) as collegepark:
print(collegepark.daily.summary, end='\nDaily Temperature\n')
for day in collegepark.daily:
day = dict(day = date.strftime(weekday, '%a'),
sum = day.summary,
tempMin = day.temperatureMin,
tempMax = day.temperatureMax,
rain = day.icon
)
print('{day}: {sum} Temp range: {tempMin} - {tempMax} Rain:{rain}'.format(**day))
weekday += timedelta(days=1)
print(collegepark)
print (collegepark.daily)
###Output
<darksky.data.DataBlock object at 0x000002D8A73F79E8>
###Markdown
See https://darksky.net/dev/docs#data-point and collect any information you are interested in.
###Code
print (collegepark.apparentTemperature)
for day in collegepark.daily:
day = dict(day = date.strftime(weekday, '%a'),
sum = day.summary,
tempMin = day.temperatureMin,
tempMax = day.temperatureMax,
rain = day.icon
)
print('{day}: {sum} Temp range: {tempMin} - {tempMax} Minimum Temp. Rain: {rain}'.format(**day))
weekday += timedelta(days=1)
###Output
Fri: Mostly cloudy throughout the day. Temp range: 68.07 - 92.32 Minimum Temp. Rain: partly-cloudy-day
Sat: Mostly cloudy throughout the day. Temp range: 71.1 - 89.82 Minimum Temp. Rain: partly-cloudy-day
Sun: Mostly cloudy throughout the day. Temp range: 68.75 - 85.75 Minimum Temp. Rain: partly-cloudy-day
Mon: Mostly cloudy throughout the day. Temp range: 67.19 - 85.95 Minimum Temp. Rain: partly-cloudy-day
Tue: Partly cloudy throughout the day and breezy starting in the evening. Temp range: 64.56 - 88.73 Minimum Temp. Rain: wind
Wed: Mostly cloudy starting in the evening. Temp range: 68.96 - 91.22 Minimum Temp. Rain: partly-cloudy-night
Thu: Mostly cloudy throughout the day. Temp range: 69.95 - 89.51 Minimum Temp. Rain: partly-cloudy-day
Fri: Mostly cloudy throughout the day. Temp range: 69.55 - 91.87 Minimum Temp. Rain: partly-cloudy-day
|
PythonHW-1.2.4/pizza_shop/HW_pizza_shop.ipynb | ###Markdown
Pizza ShopYou work at a pizza restaurant, which is starting to accept orders online. You need toprovide a python function that will accept an arbitrary order as its arguments, and compute the correct price for the order.Your cost-calculator function should have four arguments:- pizzas- drinks- wings- couponA single pizza order is formed as a list of toppings. For example- A pizza with no toppings (other than cheese and sauce is: [] (an empty list)- A pizza with pepperoni and a double order of olives is : ["pepperoni", "olives", "olives"]*An arbitrary number of pizzas may be ordered, including no pizzas as all*Drinks come in as a named order (i.e. a keyword argument 'drinks'). If drinks are ordered,they are specified as a list of sizes (possible sizes: "small", "medium", "large", "tub"). For example, `drinks=["small", "small", "large"]` would indicate an order including two small drinks and a large drink. Wings come in as a named order as well (keyword argument 'wings'). If wings are ordered,they are specified as a list of integers (possible sizes: 10, 20, 40, 100). For example, `wings=[20]` would indicate a single order of 20-piece wings.A coupon may be specified as the keyword argument 'coupon'. It is will be a singlefloating point number between 0 and 1. This indicates the fraction of the *pre-tax*price that is to be subtracted. For example `coupon=.25` would indicate a 25%-off coupon.A 6.25% tax is applied to every order. The tax is computed on the total cost of theorder *before a coupon is applied*Round the price to the nearest cent, using the built-in function round. `round(x, 2)` will round `x` to two decimal places. PricesThe prices are as follows:**Pizza**- \$13.00**Toppings**- pepperoni : \$1.00- mushroom : \$0.50- olive : \$0.50- anchovy : \$2.00- ham : \$1.50**Drinks**- small : \$2.00- medium : \$3.00- large : \$3.50- tub : \$3.75**Wings**- 10 : \$5.00- 20 : \$9.00- 40 : \$17.50- 100 : \$48.00 ExamplesThe following is an order for a plain pizza, a ham and anchovy pizza, two "tub"-sizeddrinks, with a 10%-off coupon:```python>>>cost_calculator([], ["ham", "anchovy"], drinks=["tub", "tub"], coupon=0.1)35.61```This order consists only of a small drink.```python>>> cost_calculator(drinks=["small"])2.12```This is an order of two plain pizzas, a pizza with double-pepperoni, an order of a 10-piece and a 20-piece wings, and a small drink.```python>>> cost_calculator([], [], ["pepperoni", "pepperoni"], wings=[10, 20], drinks=["small"])60.56``` DetailsYou can assume that the front-end of the website will never pass your function erroneousorders. That is, you will never receive orders for items that do not exist noritems that contain typos.Consider defining individual functions responsible for computingthe cost of the pizzas, drinks, and wings, respectively. Have `cost_calculator`invoke these internally. Alternatively, you can read ahead about dictionaries and make nice use of these in this problem.Our `cost_calculator` signature is empty. Part of the assignment is to come up with thecorrect function signature!
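As a nudge toward the dictionary idea mentioned in the prompt, one possible (purely illustrative, ungraded) shape for the price tables is sketched below; all names are suggestions only.
###Code
# Illustrative price tables only -- values copied from the problem statement above
TOPPING_PRICES = {"pepperoni": 1.00, "mushroom": 0.50, "olive": 0.50, "olives": 0.50,
                  "anchovy": 2.00, "ham": 1.50}  # the prompt uses both "olive" and "olives"
DRINK_PRICES = {"small": 2.00, "medium": 3.00, "large": 3.50, "tub": 3.75}
WING_PRICES = {10: 5.00, 20: 9.00, 40: 17.50, 100: 48.00}

def one_pizza_cost(toppings):
    # base pizza plus the price of each listed topping
    return 13.00 + sum(TOPPING_PRICES[t] for t in toppings)
###Output
_____no_output_____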
###Code
# make sure to execute this cell so that your function is defined
# you must re-run this cell any time you make a change to this function
def cost_calculator(*pizzas, **sides):
price = 0.00
for pizza in pizzas:
price += 13.00
for topping in pizza:
if topping == 'pepperoni':
price += 1.00
elif topping == 'mushroom':
price += 0.50
elif topping == 'olive':
price += 0.50
elif topping == 'anchovy':
price += 2.00
elif topping == 'ham':
price += 1.50
for side in sides:
if side == "wings":
for wing in sides[side]:
if wing == 10:
price += 5.00
elif wing == 20:
price += 9.00
elif wing == 40:
price += 17.50
elif wing == 100:
price += 48.00
if side == "drinks":
for drink in sides[side]:
if drink == 'small':
price += 2.00
elif drink == 'medium':
price += 3.00
elif drink == 'large':
price += 3.50
elif drink == 'tub':
price += 3.75
if side == "coupon":
cprice = price * sides["coupon"]
price += price * 0.0625
price -= cprice
price = round(price, 2)
return price
break
price += price * 0.0625
price = round(price, 2)
return price
# Execute this cell to grade your work
from bwsi_grader.python.pizza_shop import grader
grader(cost_calculator)
###Output
============================== ALL TESTS PASSED! ===============================
Your submission code: bw376a5897c31ca763c949f9e3e3b48926ae5e8e0dfb0210f3f59d4485
================================================================================
|
agreg/Bitsets_en_OCaml.ipynb | ###Markdown
Table of Contents 1 BitSets in OCaml 1.1 Abstract type 1.2 Implementation 1.3 Examples 1.3.1 Tests of the unary operations 1.3.2 Tests of the binary operations 1.4 Comparison 1.4.1 Measuring an execution time 1.4.2 Test suite for bit_set_32 1.4.3 Test suite for bool array 1.4.4 Test suite for Set.Make(Int) 1.4.5 Measuring the execution times 1.5 Conclusion BitSets in OCaml *BitSets* are an efficient implementation for representing, storing and manipulating sets of *small* integers. With a single 32-bit integer, one can represent *any* subset of $\{0,\dots,31\}$. We use OCaml's [bitwise operations](caml.inria.fr/pub/docs/manual-ocaml/libref/Pervasives.htmlVALmin_int) to perform the basic set operations. This implementation is inspired by [this one](http://ocaml-lib.sourceforge.net/doc/BitSet.html). Abstract type I will do this quick and dirty, but one could either write a module with a hidden internal type, or use a record field `{ n : int }`. This approach is the simplest, but it exposes a bit of the internal workings (which one might want to avoid).
###Code
type bit_set_32 = int;;
let max_size_bit_set_32 = 32;;
###Output
_____no_output_____
###Markdown
With [Int64](http://caml.inria.fr/pub/docs/manual-ocaml/libref/Int64.html) one could do the same thing with 64-bit integers. Too lazy here. --- Implementation The sets are not dynamically mutable: as with a list, every function that modifies a `bit_set_32` returns a new `bit_set_32`.
###Code
let empty () : bit_set_32 = 0
;;
###Output
_____no_output_____
###Markdown
The singleton $\{i\}$ is encoded as $2^i$, i.e. a $1$ in the $i$-th bit position. In Python that is `1 << i` (or `2**i`), and in OCaml it is `1 lsl i`.
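For instance, in Python (purely to illustrate the encoding; this is not part of the OCaml implementation below):
###Code
i = 5
singleton = 1 << i        # 0b100000, i.e. the set {5}
(singleton >> i) & 1      # 1 -> 5 is in the set
(singleton >> 3) & 1      # 0 -> 3 is not
###Output
_____no_output_____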
###Code
let create (i : int) : bit_set_32 =
assert ((0 <= i) && (i < max_size_bit_set_32));
1 lsl i
;;
###Output
_____no_output_____
###Markdown
If we wanted to use the Python syntax, we could do this:
###Code
let ( << ) = ( lsl );;
let ( >> ) = ( lsr );;
###Output
_____no_output_____
###Markdown
Copying does not make much sense since the structure is not mutable... But fine.
###Code
let copy (b : bit_set_32) : bit_set_32 =
b + 0 (* enough ? *)
;;
let clone = copy;;
###Output
_____no_output_____
###Markdown
`set b n` adds `n` to the set `b`, and `unset b n` removes it (whether or not it was present).
###Code
let set (b : bit_set_32) (n : int) : bit_set_32 =
b lor (1 lsl n)
;;
let add = set;;
let unset (b : bit_set_32) (n : int) : bit_set_32 =
b land (lnot (1 lsl n))
;;
let remove = unset;;
###Output
_____no_output_____
###Markdown
We can combine `set` and `unset` to get:
###Code
let put (b : bit_set_32) (p : bool) (n : int) : bit_set_32 =
if p then set b n else unset b n
;;
###Output
_____no_output_____
###Markdown
These two other operations are fast:
###Code
let is_set (b : bit_set_32) (n : int) : bool =
let i = (b land (1 lsl n)) lsr n in
i = 1
;;
let contains_int = is_set;;
let is_in = is_set;;
let toggle (b : bit_set_32) (n : int) : bit_set_32 =
put b (not (is_set b n)) n
;;
###Output
_____no_output_____
###Markdown
Comparison and the equality test are straightforward.
###Code
let compare (b1 : bit_set_32) (b2 : bit_set_32) : int =
Pervasives.compare b1 b2
;;
let equals (b1 : bit_set_32) (b2 : bit_set_32) : bool =
b1 = b2
;;
###Output
_____no_output_____
###Markdown
One can try to be more efficient than this naive imperative implementation. Try it!
###Code
let count (b : bit_set_32) : int =
let s = ref 0 in
for n = 0 to max_size_bit_set_32 - 1 do
if is_set b n then incr s
done;
!s
;;
let length = count;;
###Output
_____no_output_____
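###Markdown
On the "try to be more efficient" remark above: one classic alternative to scanning all 32 positions is Kernighan's trick of repeatedly clearing the lowest set bit, which loops once per element of the set rather than 32 times. Sketched here in Python for brevity (an added aside; the notebook itself stays in OCaml):
###Code
def popcount(b):
    """Number of set bits of a non-negative word, via Kernighan's trick."""
    s = 0
    while b:
        b &= b - 1   # clears the lowest set bit
        s += 1
    return s

popcount(0b101101)   # 4
###Output
_____no_output_____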
###Markdown
To build the examples, we will also allow ourselves to convert a `bit_set_32` to an `int list`, and conversely to create a `bit_set_32` from an `int list`.
###Code
let bit_set_32_to_list (b : bit_set_32) : (int list) =
let list_of_b = ref [] in
for n = 0 to max_size_bit_set_32 - 1 do
if is_set b n then
list_of_b := n :: !list_of_b
done;
List.rev (!list_of_b)
;;
let bit_set_32_from_list (list_of_b : int list) : bit_set_32 =
let b = ref (empty()) in
List.iter (fun i -> b := add !b i) list_of_b;
!b
;;
let bit_set_32_iter (f : int -> unit) (b : bit_set_32) : unit =
List.iter f (bit_set_32_to_list b)
;;
###Output
_____no_output_____
###Markdown
To display the examples, we will also allow ourselves to convert a `bit_set_32` to a `string`.
###Code
let bit_set_32_to_string (b : bit_set_32) : string =
let list_of_b = bit_set_32_to_list b in
match List.length list_of_b with
| 0 -> "{}"
| 1 -> "{" ^ (string_of_int (List.hd list_of_b)) ^ "}"
| _ -> begin
let s = ref ("{" ^ (string_of_int (List.hd list_of_b))) in
List.iter (fun i -> s := !s ^ ", " ^ (string_of_int i)) (List.tl list_of_b);
!s ^ "}"
end
;;
let print_bit_set_32 (b : bit_set_32) : unit =
print_endline (bit_set_32_to_string b)
;;
###Output
_____no_output_____
###Markdown
Since the domain is fixed (to $\{0,\dots,31\}$), we can take the complement.
###Code
let complementaire (b : bit_set_32) : bit_set_32 =
lnot b
;;
###Output
_____no_output_____
###Markdown
The intersection, union, difference and symmetric difference operations are very easy.
###Code
let intersection (b1 : bit_set_32) (b2 : bit_set_32) : bit_set_32 =
b1 land b2
;;
let union (b1 : bit_set_32) (b2 : bit_set_32) : bit_set_32 =
b1 lor b2
;;
###Output
_____no_output_____
###Markdown
With union we can easily test whether `b1` is contained in `b2` ($b_1 \subset b_2 \equiv (b_1 \cup b_2) = b_2$)
###Code
let contains (b1 : bit_set_32) (b2 : bit_set_32) : bool =
(union b1 b2) = b2
;;
let difference (b1 : bit_set_32) (b2 : bit_set_32) : bit_set_32 =
intersection b1 (complementaire b2)
;;
let difference_sym (b1 : bit_set_32) (b2 : bit_set_32) : bit_set_32 =
b1 lxor b2
;;
###Output
_____no_output_____
###Markdown
--- Examples
###Code
print_bit_set_32 (empty());;
let b1 = bit_set_32_from_list [0; 12]
and b2 = bit_set_32_from_list [1; 3; 6]
and b3 = bit_set_32_from_list [0; 3; 6; 17]
;;
print_bit_set_32 b1;;
print_bit_set_32 b2;;
print_bit_set_32 b3;;
###Output
_____no_output_____
###Markdown
Tests of the unary operations Testing membership:
###Code
(is_in b1 3);;
(is_in b2 3);;
(is_in b3 3);;
(is_in b1 0);;
(is_in b2 0);;
(is_in b3 0);;
###Output
_____no_output_____
###Markdown
We can add a value:
###Code
print_bit_set_32 (add b1 20);;
print_bit_set_32 (add b2 20);;
print_bit_set_32 (add b3 20);;
###Output
{0, 12, 20}
###Markdown
Or remove one:
###Code
print_bit_set_32 (remove b1 3);;
print_bit_set_32 (remove b2 3);;
print_bit_set_32 (remove b3 3);;
length b1;;
length b2;;
length b3;;
print_bit_set_32 (complementaire b1);;
print_bit_set_32 (complementaire b2);;
print_bit_set_32 (complementaire b3);;
print_bit_set_32 (complementaire (union (union b1 b2) b3));;
###Output
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
###Markdown
Tests of the binary operations
###Code
print_bit_set_32 (union b1 b2);;
print_bit_set_32 (union b1 b3);;
print_bit_set_32 (union b2 b3);;
contains b1 b2;;
contains b1 b3;;
contains b1 (intersection b1 b3);;
contains (intersection b1 b3) b1;;
contains b1 (union b1 b3);;
contains b2 b3;;
print_bit_set_32 (intersection b1 b2);;
print_bit_set_32 (intersection b1 b3);;
print_bit_set_32 (intersection b2 b3);;
print_bit_set_32 (difference b1 b2);;
print_bit_set_32 (difference b1 b3);;
print_bit_set_32 (difference b2 b3);;
print_bit_set_32 (difference_sym b1 b2);;
print_bit_set_32 (difference_sym b1 b3);;
print_bit_set_32 (difference_sym b2 b3);;
###Output
{0, 1, 3, 6, 12}
###Markdown
--- Comparison We will try to compare our implementation with a naive implementation using `bool array` and one using the standard library's [`Set` module](http://caml.inria.fr/pub/docs/manual-ocaml/libref/Set.S.html). Measuring an execution time
###Code
let time (n : int) (f : unit -> 'a) : float =
let t = Sys.time() in
for _ = 0 to n-1 do
ignore (f ());
done;
let delta_t = Sys.time() -. t in
let t_moyen = delta_t /. (float_of_int n) in
Printf.printf " Temps en secondes: %fs\n" t_moyen;
flush_all ();
t_moyen
;;
Random.self_init();;
let random_int_0_31 () =
Random.int 32
;;
###Output
_____no_output_____
###Markdown
Test suite for `bit_set_32` Our test will consist of creating an empty set, adding random values 100 times in a row, removing others, and so on.
###Code
let test_bit_set_32 n n1 n2 () =
let b = ref (empty ()) in
for _ = 0 to n do
let nb_ajout = Random.int n1 in
let nb_retrait = Random.int n2 in
for i = 0 to nb_ajout + nb_retrait do
let n = random_int_0_31 () in
if i mod 2 = 0 then
b := add !b n
else
b := remove !b n;
done
done;
length !b
;;
test_bit_set_32 100 20 10 ();;
###Output
_____no_output_____
###Markdown
Test suite for `bool array` With `bool array`, we have the advantage of a dynamic structure.
###Code
let test_boolarray n n1 n2 () =
let b = Array.make max_size_bit_set_32 false in
for _ = 0 to n do
let nb_ajout = Random.int n1 in
let nb_retrait = Random.int n2 in
for i = 0 to nb_ajout + nb_retrait do
let n = random_int_0_31 () in
if i mod 2 = 0 then
b.(n) <- true
else
b.(n) <- false
done;
done;
Array.fold_left (+) 0 (Array.map (fun x -> if x then 1 else 0) b)
;;
test_boolarray 100 20 10 ();;
###Output
_____no_output_____
###Markdown
Test suite for `Set.Make(Int)`
###Code
module Int = struct
type t = int
let compare = compare
end;;
module Int32Set = Set.Make(Int);;
###Output
_____no_output_____
###Markdown
With `Set`, we have the advantage of a dynamic structure, but it is less convenient to manipulate.
###Code
let test_Int32Set n n1 n2 () =
let b = ref (Int32Set.empty) in
for _ = 0 to n do
let nb_ajout = Random.int n1 in
let nb_retrait = Random.int n2 in
for i = 0 to nb_ajout + nb_retrait do
let n = random_int_0_31 () in
if i mod 2 = 0 then
b := Int32Set.add n !b
else
b := Int32Set.remove n !b
done;
done;
Int32Set.cardinal !b
;;
test_Int32Set 100 20 10 ();;
###Output
_____no_output_____
###Markdown
Measuring the execution times We will run 500 repetitions of these random tests, each with 1000 rounds of adding 30 values and removing 20 values.
###Code
time 500 (test_bit_set_32 1000 30 20);;
time 500 (test_boolarray 1000 30 20);;
time 500 (test_Int32Set 1000 30 20);;
###Output
Temps en secondes: 0.011854s
###Markdown
For a second and last test, we will run 500 repetitions of these random tests, each with 500 rounds of adding 100 values and removing 110 values.
###Code
time 500 (test_bit_set_32 500 100 110);;
time 500 (test_boolarray 500 100 110);;
time 500 (test_Int32Set 500 100 110);;
###Output
Temps en secondes: 0.024973s
|
visualization/1.visualize_clinical_phase.ipynb | ###Markdown
The clinical status of the compounds in this dataset is visualized as a pie chart. The clinical status of the compounds was obtained from the Drug Repurposing Hub (https://clue.io)
###Code
# Read Broad's repurposing hub dataset
repurp = (
pd.read_csv('input/repurposing_drugs_20180907.txt',
delimiter='\t',
comment='!',
encoding='iso-8859-1',
usecols=['pert_iname', 'clinical_phase'])
)
# Read JUMP-Target1 metadata file
metadata = pd.read_csv('../metadata/moa/JUMP-Target-1_compound_metadata.tsv',
delimiter='\t',
usecols=['pert_iname', 'broad_sample'])
# Merge the two dataframes
clinical_phase_df = (
metadata.merge(repurp, on='pert_iname', how='inner')
.clinical_phase.value_counts()
.reset_index()
)
# Plot pie chart
fig = px.pie(data_frame=clinical_phase_df,
values='clinical_phase',
names='index',
color_discrete_sequence=px.colors.sequential.RdBu)
fig.update_layout(title='Clinical Phase status of JUMP-Target1 compounds')
fig.update_traces(textfont_size=20)
fig.show("png")
fig.write_image('figures/clinical_phase.png', width=640, height=480, scale=2)
###Output
_____no_output_____ |
NoteBooks/Curso de WebScraping/Unificado/web-scraping-master/Clases/Módulo 5_ Tesseract y Proxies/M5C1 - Proxies (script).ipynb | ###Markdown
Proxies Hiding the IP Many web servers, when they receive multiple requests from the same IP in a short time, block that IP to avoid overload and service problems. This can be a problem for scrapers, since they generate exactly this behaviour. To avoid being detected we would have to change our public IP address before each request, which would be extremely slow and in many cases impossible, or we can use a **proxy**. A proxy is an intermediary between whoever makes the request (our program) and whoever receives it (the server), which lets us mask the IP the request came from. Using a proxy, the web server will see that proxy's IP and not ours. While we cannot choose which IP address the request is made with, we can choose which proxy it goes through. The site www.cualesmiip.com lets you see your network's outgoing IP. If you are on a LAN, your local IP is probably something like 192.168.x.x, but the IP you go out to the world with, the one your ISP assigns to your router, will be different. Useful links: - https://free-proxy-list.net/ - [PySocks](https://pypi.org/project/PySocks/)
###Code
import requests
import re
def get_my_ip(url='http://www.cualesmiip.com/', proxies=None):
try:
r = requests.get(url=url, proxies=proxies)
except Exception as e:
print('Error haciendo la request', e)
return None
if r.status_code != 200:
print("Status Code:", r.status_code)
return None
regex = re.compile(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})')
my_ip = regex.findall(r.text)
return my_ip[0] if my_ip else None
get_my_ip()
proxy_dict = {'http':'http://200.255.122.170:8080',
'https':'https://200.255.122.170:8080'}
get_my_ip(proxies=proxy_dict)
socks_proxy_dict = {'http':'socks4://190.226.46.114:4145',
'https':'socks4://190.226.46.114:4145'}
get_my_ip(proxies=socks_proxy_dict)
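# --- Added sketch (not part of the original class notes): rotating through a proxy pool ---
# A common next step when scraping is to cycle through several proxies so that no single
# address repeats too often. PROXIES below simply reuses the two example addresses above.
import itertools

PROXIES = ['http://200.255.122.170:8080', 'socks4://190.226.46.114:4145']
proxy_pool = itertools.cycle(PROXIES)

def get_my_ip_rotating(url='http://www.cualesmiip.com/'):
    p = next(proxy_pool)  # next proxy in the cycle
    return get_my_ip(url, proxies={'http': p, 'https': p})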
###Output
_____no_output_____ |
DataScience/DS-Unit-1-Sprint-1-Data-Wrangling-and-Storytelling/module2-make-features/LS_DS_112_Make_Features_Assignment.ipynb | ###Markdown
Assignment:- Replicate the lesson code. - This means that if you haven't followed along already, type out the things that we did in class. Forcing your fingers to hit each key will help you internalize the syntax of what we're doing. Make sure you understand each line of code that you're writing, google things that you don't fully understand. - [Lambda Learning Method for DS - By Ryan Herr](https://docs.google.com/document/d/1ubOw9B3Hfip27hF2ZFnW3a3z9xAgrUDRReOEo-FHCVs/edit?usp=sharing)- Convert the `term` column from string to integer.- Make a column named `loan_status_is_great`. It should contain the integer 1 if `loan_status` is "Current" or "Fully Paid." Else it should contain the integer 0.- Make `last_pymnt_d_month` and `last_pymnt_d_year` columns.
###Code
# First thing to do is import necessary librarys for cleaning our Data
# and Creating Features
import pandas as pd
import numpy as np
###Output
_____no_output_____
###Markdown
------
###Code
# Take a look at the Ames Iowa Housing Dataset:
src_url = 'https://raw.githubusercontent.com/ryanleeallred/datasets/master/Ames%20Housing%20Data/train.csv'
# Read the data file from the src_url variable into a pandas DataFrame
iowa_housing_df = pd.read_csv(src_url)
# Display the first 10 rows to see if the DataFrame was made
print("Top 10 Rows of iowa_housing_df: ")
display(iowa_housing_df.head(10))
# Display the shape of iowa_housing_df
print("\niowa_housing_df shape: ")
display(iowa_housing_df.shape)
# Using the describe function to get a statistical description of my data
print("Basic Statistical Description: \n")
display(iowa_housing_df.describe())
# Just to show that I can get descriptions of specified columns:
print("\n\nSpecified Columns Description: \n")
display(iowa_housing_df[['OverallQual','YearBuilt']].describe())
# Displaying the data types of each column.
pd.set_option('display.max_rows', 150)
display(iowa_housing_df.dtypes)
# Let's take a look at the dtype of the 'BedroomAbvGr' column.
ihdf_type = iowa_housing_df['BedroomAbvGr'].dtype
# Just for fun I will use an if statement to check if its an integer.
if ihdf_type == 'int64' or ihdf_type == 'int32':
print("Yes it's an integer")
print("Data Type: ",ihdf_type)
# Another way to call columns is with a dot method.
iowa_housing_df.Fireplaces.head(10)
# I don't like calling columns this way, because it could possibly interfere
# with other predefined methods if they have the same name.
# Seems to be a like minded opinion amongst the forums
# Here is how to call specific columns using []
iowa_housing_df['Fireplaces'].head(10)
# I was curious to see which one would be interpreted faster so I ran the
# shell command '%timeit'
%timeit iowa_housing_df.Fireplaces.head(10)
%timeit iowa_housing_df['Fireplaces'].head(10)
print("Using df['col'] is faster on average by ", 51.9-47.9,"micro-seconds.")
# So if my previous opinion on not using '.' to select columns wasn't enough
# to convince you.
# Using '[]' is also faster, not by much, but still.
# Now let's see how selecting multiple columns looks.
# Instead of using a list '[]'
# I will use '[[col,col]]' to create a 2d array
iowa_housing_df[['Fireplaces', 'BedroomAbvGr']].sample(10)
# Looking at the first ten rows of the 'LotFrontage' column.
ihdflf = iowa_housing_df['LotFrontage']
print("Top 10 rows of iowa_housing_df['LotFrontage']: \n")
display(ihdflf.head(10))
# If statement to tell me if the dtype is a float or an int
if ihdflf.dtype == 'int64':
print("\nIt's an integer!")
elif ihdflf.dtype == 'float64':
print("\nIt's a float")
# Let's take a look at all possible valus in this column
display(iowa_housing_df['LotFrontage'].value_counts(dropna=False))
# By setting dropna to False it counts the NaN values as well.
# The reason that it is float dtype is, because if there is a NaN
# value amongst a column of integers it will read the NaN value as a float.
# To confirm this we can call the nan instance from the numpy library as a
# parameter for python built-in type function
type(np.nan) #---- Outputs 'float'
# Ways to check for missing values in pandas:
# 1. We can call the is null method on our dataframe to get all instances of
# a NaN value. Then call the sum() method to add up the amount of missing
# values.
print("Total NaN values: ",iowa_housing_df['LotFrontage'].isnull().sum())
# 2. We can call the isna() method also to get the same output.
print("\nTotal NaN values: ", iowa_housing_df['LotFrontage'].isna().sum())
###Output
Total NaN values: 259
Total NaN values: 259
###Markdown
------ Making New Features
###Code
# Now to make a smaller dataframe with a few specific column headers
# by passing a list of column headers inside of the square brackets
# Here we will use the copy() function to copy the selected columns to a new dataframe.
small_iowa_df = iowa_housing_df[['TotalBsmtSF','1stFlrSF','2ndFlrSF','SalePrice']].copy()
# Lets take a look at our new dataframe:
small_iowa_df.sample(10)
# It works!!
# Now lets make some new column for or small_iowa_df
# First we need to make a new colun name to describe what it will be.
# Here it will just be a total of a houses Square Footage
# So we will add our single floor SF columns together like so:
small_iowa_df['TotalSF'] = small_iowa_df['TotalBsmtSF']+small_iowa_df['1stFlrSF']+small_iowa_df['2ndFlrSF']
# Now lets display the results.
small_iowa_df.head(10)
# Confirmed that our new column 'TotalSF' is now part of the DataFrame
# Now to create another new column for our small_iowa_df
# This column will be the price/sqrft
small_iowa_df['Price/SF'] = small_iowa_df['SalePrice'] / small_iowa_df['TotalSF']
# Now lets display to confirm changes
small_iowa_df.head(10)
###Output
_____no_output_____
###Markdown
Questions About `small_iowa_df`:
- What does a high `Price/SF` say about a home that the square footage and price alone don't capture as directly?
  - Answer: It combines the two measures so we can see whether price and size are correlated (one trend we can observe is that the more `SqrFt`, the higher the `Price`). A high `Price/SF` suggests the home commands a premium for every square foot: location, quality, or condition is driving the price beyond size alone.
- What does a low `Price/SF` say about a home that the square footage and price alone don't directly capture?
  - Answer: A low `Price/SF` suggests the buyer is getting relatively more space per dollar, so something other than size is holding the price down.

------ Crosstabs
- Focus on categorical data
- **Categorical Data**
  - Categorical variables represent types of data which may be divided into groups.
  - **Examples:** race, sex, age group, education level, skill set, etc...
###Code
# Let's display our data frame columns by calling
# (iowa_housing_df.columns) instance.
iowa_housing_df.columns
# This gives us an object that is made up of a list of all the DataFrame
# headers.
# Now we can go and select some catagorical columns.
print("Here are some catagorical columns we can look at: \n")
iowa_housing_df[['LotShape', 'Alley', 'Street', 'SaleCondition']].sample(10)
# Here we will take a look at individule values of sales conditions
iowa_housing_df['SaleCondition'].value_counts()
# Now let's create a new categorical variable
# We can use the np.where function to do a conditional search,
# similar to SQL.
iowa_housing_df['NormalCondition'] = np.where(iowa_housing_df['SaleCondition']
== 'Normal', 1, 0)
# After we have made a column by getting all the 'Normal' values from 'SaleCondition'
# we can now view them.
display(iowa_housing_df['NormalCondition'].value_counts())
print("\n1 = (count of 'Normal' values)\n2 = (count of non normal values)")
# Check the value counts of 'LotShape'
iowa_housing_df['LotShape'].value_counts()
# Using the pandas .map() method we can narrow 4 values down to 3.
# .map() uses dict{key:value} to map the values
iowa_housing_df['LotShape3'] = iowa_housing_df['LotShape'].map({'Reg':'Regular',
'IR1':'Irregular',
'IR2':'Other',
'IR3':'Other'})
# Display changes
iowa_housing_df['LotShape3'].value_counts()
# Let's do value counts of another categorical variable
iowa_housing_df['Neighborhood'].value_counts()
# Let's take a look at another way to map categorical variables, shall we?
iowa_housing_df['top_neighbors']='All Others'
iowa_housing_df[['top_neighbors', 'Neighborhood']].head(10)
# Now we can do some mapping with .loc
iowa_housing_df.loc[iowa_housing_df['Neighborhood']=='NAmes', 'top_neighbors']='North Ames'
iowa_housing_df[['top_neighbors','Neighborhood']].sample(10)
# This can be done multiple times if the solution calls for it.
iowa_housing_df.loc[iowa_housing_df['Neighborhood']=='CollgCr','top_neighbors']='College Circle'
iowa_housing_df.loc[iowa_housing_df['Neighborhood']=='Gilbert','top_neighbors']='Favorite Neighborhood'
iowa_housing_df.loc[iowa_housing_df['Neighborhood']=='Somerst','top_neighbors']='Favorite Neighborhood'
# Now let's check to see if all the values were mapped correctly
iowa_housing_df[['top_neighbors','Neighborhood']].sample(10)
# Crosstab time! WOOHOO!!
pd.crosstab(iowa_housing_df['top_neighbors'], iowa_housing_df['LotShape3'])
# Lets do that again except using the margins= argument and set it to:
# margins=True to get a column that shows All counts
# Normalize index:
pd.crosstab(iowa_housing_df['top_neighbors'],iowa_housing_df['LotShape3'],
normalize='index', margins=True)
# Normalize column:
pd.crosstab(iowa_housing_df['top_neighbors'],iowa_housing_df['LotShape3'],
normalize='columns', margins=True)
# We can bar-graph some cross tab data to get a better look at it.
# Use crosstab to get some data:
crosstab_results = pd.crosstab(iowa_housing_df['top_neighbors'],
iowa_housing_df['LotShape3'])
# Now to plot the results with a bar graph:
display(crosstab_results.plot(kind='bar'));
# How about we use a Horizontal bar graph:
display(crosstab_results.plot(kind='barh'));
###Output
_____no_output_____
###Markdown
------ WORKING WITH STRINGS IN PANDAS
###Code
# Use bash to retrieve the zip file from the desired url:
! wget https://resources.lendingclub.com/LoanStats_201804.csv.zip
# Got a "404: Not Found" error when trying to get the file.
# Decided to attempt another way just to double check the response.
import requests
response = requests.get('https://resources.lendingclub.com/LoanStats_201804.csv.zip')
# Still getting 404 error
response
# I remember I downloaded these before from somewhere so i can just load the
# files from my local system.
from google.colab import files
uploaded = files.upload()
# Had to just use the GUI upload feature, this was going too slow
# Now that I have my csv file I'll read it in.
loans_df = pd.read_csv('LoanStats_2018Q4.csv')
# Display the loans data set
loans_df.head(10)
###Output
/usr/local/lib/python3.6/dist-packages/IPython/core/interactiveshell.py:2718: DtypeWarning: Columns (0,1,2,3,4,7,13,18,19,24,25,27,28,29,30,31,32,34,36,37,38,39,40,41,42,43,44,46,49,50,51,53,54,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,113,114,115,116,117,118,119,120,121,126,127,131,132,134,135,136,141,142,143) have mixed types.Specify dtype option on import or set low_memory=False.
interactivity=interactivity, compiler=compiler, result=result)
###Markdown
The extra rows at the top and bottom of the file have done two things:
1. The top row has made it so that the entire dataset is being interpreted as column headers.
2. The bottom two rows have been read into the 'id' column and are causing every column to have at least two `NaN` values in it.
###Code
# Here is one of the ways we can fix the header problem.
# By using the 'skiprows' parameter
# I used it with just the skiprows param to see what would happen.
loans_df = pd.read_csv('LoanStats_2018Q4.csv', skiprows=1)
# Now lets display the changes:
loans_df.tail()
# After looking at the bottom of the dataset there is a footer,
# luckily there's a parameter called 'skipfooter' and we will set it to 2
# since there are 2 footer rows.
# Also by skipping the footer we are able to address the extra NaNs in each column.
loans_df = pd.read_csv('LoanStats_2018Q4.csv', header=1,skipfooter=2)
loans_df
# Let's check out the shape of our dataframe
loans_df.shape
# Now lets sum up all of the null values by column and sort from least to greatest.
# First lets use pandas 'set_option' method.
# Ignore this link, it's for me when I use this notebook for notes.
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.set_option.html
pd.set_option('display.max_rows', 143)
# Use .sum() then sort all the values in a descending order
loans_df.isnull().sum().sort_values(ascending=False)
###Output
_____no_output_____
###Markdown
Question: Why might LendingClub have included columns in their dataset that are 100% blank?- Answer: Maybe they put them there as place holders to use as features at a later time for further data exploration.
###Code
# Let's drop an entire column.
loans_df.drop('url',axis=1,inplace=True)
# We could also, instead of using the inplace param, just save the result
# as a new variable
###Output
_____no_output_____
###Markdown
Clean up the `int_rate` column. When we're preparing a dataset for a machine learning model, we typically don't want to leave any string values in our dataset -- because it's hard to do math on words. Specifically, we have a column that represents a numeric value but currently doesn't have a numeric datatype. Let's look at the first 10 values of the `int_rate` column.
###Code
# Display the head of int_rate column.
loans_df['int_rate'].head(10)
# Shows it as an object type instead of a float.
# Look at a specific value from the int_rate column
loans_df['int_rate'][3]
# It looks like a string
# Now lets confirm that its a string:
type(loans_df['int_rate'][3])
###Output
_____no_output_____
###Markdown
Problems that we need to address with this column:
- String column that should be numeric
- Percent sign `%` included with the number
- Leading space at the beginning of the string

However, we're not going to try and write exactly the right code to fix this column in one go. We're going to methodically build up to the code that will help us address these problems.
###Code
# Lets start with just fixing a single string.
# If we can fix one, we can usually fix all of them
int_rate = ' 14.47%'
# 1. Remove the leading space
# ALT: int_rate = int_rate[1:] # Like this or by using the .strip() method
print("Stripping the blank space: ",int_rate.strip()) # To do it in place
# 2. Remove percent sign
# ALT: int_rate = int_rate[:-2]
print("\nStripping the '%' sign: ",int_rate.strip('%'))
# 3. Removing both:
print("\nStripping both '%' and lead space: ",int_rate.strip().strip('%'),'\n')
# 4. "Cast" the string value to a float
print(float('14.47'),'\n')
# Now lets call all of this together:
print("All together now: ",float(int_rate.strip().strip('%')))
# Let's make sure it's actually a float:
print(type(float(int_rate.strip().strip('%'))))
# We can write a function that can automate this process for us:
def autoFloat(val):
return float(val.strip().strip('%'))
# Now lets call this function and pass it our int_rate variable:
af_int_rate = autoFloat(int_rate) # stored the function call in a variable
# Check to see if the type was converted
print("Your object is now a ",type(af_int_rate))
# It's confirmed!!
# Using a 'for' loop we can apply this function to every cell.
df_cell_list = [] # Create an empty list.
for cell in loans_df['int_rate']: # Start a for loop to search through 'int_rate' column
df_cell_list.append(autoFloat(cell)) # Attach every modified cell to the end
# of our new list.
# Check the first 10 values of the list:
for i in df_cell_list[:10]:
print(i)
# Let's compare that to the original values:
list(loans_df['int_rate'][:10])
# Convert the new list to a column using pd.Series:
loans_df['int_rate_clean']=pd.Series(df_cell_list)
# Compare the new and old columns:
loans_df[['int_rate','int_rate_clean']].sample(10)
# Lets check the data type of our new column:
loans_df['int_rate_clean'].dtype
###Output
_____no_output_____
###Markdown
BUT WAIT!! THERE'S A BETTER WAY
###Code
# Improve all of this code with the .apply function!
loans_df['int_rate_better_clean'] = loans_df['int_rate'].apply(autoFloat)
# Comparisons of the columns:
loans_df[['int_rate','int_rate_clean','int_rate_better_clean']].sample(10)
###Output
_____no_output_____
###Markdown
Let's clean up the emp_title column!
- **First we'll try and diagnose how bad the problem is and what improvements we might be able to make.**
###Code
# Look at the top 20 rows
loans_df['emp_title'].head(20)
# Look at the top 20 values with the highest count in employment titles
loans_df['emp_title'].value_counts(dropna=False).head(20)
# Now lets check to see how many unique elements there are in 'emp_title':
# To do so we need to use '.unique()' method to get all the unique elements.
# Then call pythons built-in 'len()' function to obtain the length of our array.
# which is going to count all the unique elements for us.
print(len(loans_df['emp_title'].unique()))
# Another option, also an easier option would be to use '.nunique()' method:
print(loans_df['emp_title'].nunique())
# How often is the employment_title null?
# Lets have a look:
print("Null Sum: ",loans_df['emp_title'].isnull().sum())
###Output
43893
43892
Null Sum: 20947
###Markdown
What are some possible reasons as to why a person's employment title may have not been provided?
###Code
# We can try to recreate the problem to get to the conclusion.
# Create some examples that represent the cases that we want to clean up:
examples = ['owner', 'Supervisor', 'Project Manager', np.nan]
# Do that same function in individual steps
print("Data Type = ",type('Supervisor'))
display(isinstance('Supervisor',str))
print()
# What about the strip?
print(' supervisor'.title().strip(),"\n")
# Write a function to clean up these use cases and increase uniformity.
def cleanTitle(jobtitle):
if isinstance(jobtitle, str):
return (jobtitle.title().strip())
else:
return 'Unknown'
# Lets go ahead and test the function out.
print("cleanTitle() Test_1: ",cleanTitle(' superviSOR'),"\n")
print("cleanTitle() Test_2: ",cleanTitle(np.nan),"\n\n")
print('------------------------------------------------------------------------')
# How about a for loop to clean all of our titles? Yes, I think so!!
# Until I fully grasp numpy vectorization ;)
clean_list = []
for jobtitle in examples:
clean_list.append(cleanTitle(jobtitle))
print("Here is a list of cleaned titles: \n",clean_list)
print('------------------------------------------------------------------------')
# Now lets do the same thing as the for loop above useing list comprehension.
print("List comprehension : ",[cleanTitle(jobtitle) for jobtitle in examples])
print('------------------------------------------------------------------------')
print()
# Lets not forget about our handy dandy '.apply' method!!
loans_df['clean_title'] = loans_df['emp_title'].apply(cleanTitle)
# Check to see if it works:
clean_emp_title_table = loans_df[['emp_title', 'clean_title']]
display(clean_emp_title_table.sample(20))
# Let's take a look at the top 20 employment titles
clean_emp_title_table.head(20)
# How many different unique employment titles are there currently?
clean_emp_title_table['emp_title'].nunique()
# The answer is 43892
# How often is the employment_title null?
clean_emp_title_table['emp_title'].isnull().sum()
# The answer is 20947
###Output
_____no_output_____
###Markdown
Date Time Objects `.dt`
- pandas documentation
  - **to_datetime**
  - **Time/Date Components**: "These properties are accessible using the `.dt` accessor."

Many of the most useful date columns in this dataset have the suffix _d to indicate that they correspond to dates.
###Code
# List all columns in our dataset with '.columns'
loans_df.columns
# Check out one column
loans_df['issue_d'].head()
# Now lets use a for loop:
issue_date_list = []
for col in loans_df.columns:
if col.endswith('_d'): # <-----'.endswith() to match the end of a string.
issue_date_list.append(col)
# Display the list.
issue_date_list
# Now as list comprehension:
my_date_cols = [col for col in loans_df.columns if col.endswith('_d')]
my_date_cols
###Output
_____no_output_____
###Markdown
Lets take a look at the string format of the `issue_d` column
###Code
# dtype
type(loans_df['issue_d'][0])
###Output
_____no_output_____
###Markdown
Because this string format %m-%y is a common datetime format, we can just let Pandas detect this format and translate it to the appropriate datetime object.
###Code
# infer_datetime_format=True:
loans_df['new_issue_d'] = pd.to_datetime(loans_df['issue_d'], infer_datetime_format=True)
# Now lets check to see if this works:
loans_df[['issue_d', 'new_issue_d']].head()
# Check the type of the new data
type(loans_df['new_issue_d'][3])
print(loans_df['new_issue_d'].dtype)
loans_df['new_issue_d'].dtype
###Output
datetime64[ns]
###Markdown
Now we can see that the `issue_d` column has been changed to hold `datetime` objects. Let's look at one of the cells specifically to see what a datetime object looks like:
###Code
# Display row 1 to see that its dtype is showing correctly.
loans_df['new_issue_d'].head(1).values
# We can use the .dt accessor to now grab specific parts of the datetime object.
# Lets grab just the year from all of the cells in the issue_d column.
loans_df['year'] = loans_df['new_issue_d'].dt.year
loans_df[['issue_d','new_issue_d','year']].head()
# Now lets do the month.
loans_df['month'] = loans_df['new_issue_d'].dt.month
loans_df[['issue_d', 'new_issue_d', 'year', 'month']].head()
# Lets add the weekday now.
loans_df['weekday'] = loans_df['new_issue_d'].dt.weekday
# Now lets display all of our changes.
loans_df[['issue_d', 'new_issue_d','year','month','weekday']].sample(10)
# Now let's add these year and month values as new columns on our dataframe.
loans_df['new_pull'] = pd.to_datetime(loans_df['last_credit_pull_d'])
loans_df['month_pull'] = loans_df['new_pull'].dt.month
loans_df[['last_credit_pull_d','new_pull','month_pull']].head()
###Output
_____no_output_____
###Markdown
- Because all of these dates come from Q4 of 2018, the `issue_d` column isn't all that interesting. Let's look at the `earliest_cr_line` column, which is also a string, but that could be converted to datetime format.
- We're going to create a new column called `days_from_earliest_credit_to_issue`.
- It's a long column header, but think about how valuable this piece of information could be. This number will essentially indicate the length of a person's credit history, and if that is correlated with repayment or other factors it **could be a valuable predictor**
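A sketch of how that `days_from_earliest_credit_to_issue` column could be built, assuming `earliest_cr_line` parses with `pd.to_datetime` the same way the other date columns did:

```python
# convert the earliest credit line column from string to datetime
loans_df['earliest_cr_line'] = pd.to_datetime(loans_df['earliest_cr_line'], infer_datetime_format=True)

# length of credit history (in days) at the moment the loan was issued
loans_df['days_from_earliest_credit_to_issue'] = (
    loans_df['new_issue_d'] - loans_df['earliest_cr_line']
).dt.days
```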
###Code
# Lets do some date arithmetic to find the difference between 2 columns
loans_df['day_diff'] = loans_df['new_issue_d'] - loans_df['new_pull']
# Now print it to see if it worked
loans_df[['last_credit_pull_d','new_pull','day_diff']].tail()
# Lets find the oldest credit history that was involved in Q4 2018.
loans_df['day_diff'].value_counts().sort_index()
###Output
_____no_output_____
###Markdown
------ Stretch Goals

You can do more with the LendingClub or Instacart datasets.

LendingClub options:
- There's one other column in the dataframe with percent signs. Remove them and convert to floats. You'll need to handle missing values. (A sketch follows below.)
- Modify the `emp_title` column to replace titles with 'Other' if the title is not in the top 20.
- Take initiative and work on your own ideas!

Instacart options:
- Read [Instacart Market Basket Analysis, Winner's Interview: 2nd place, Kazuki Onodera](http://blog.kaggle.com/2017/09/21/instacart-market-basket-analysis-winners-interview-2nd-place-kazuki-onodera/), especially the **Feature Engineering** section. (Can you choose one feature from his bulleted lists, and try to engineer it with pandas code?)
- Read and replicate parts of [Simple Exploration Notebook - Instacart](https://www.kaggle.com/sudalairajkumar/simple-exploration-notebook-instacart). (It's the Python Notebook with the most upvotes for this Kaggle competition.)
- Take initiative and work on your own ideas!

You can uncomment and run the cells below to re-download and extract the Instacart data
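A possible sketch for the first two LendingClub options (hedged: the percent-sign column name `revol_util` is an assumption, and `clean_title` is the cleaned-up title column created earlier in this notebook):

```python
# Remove percent signs and convert to float; missing values simply stay NaN
loans_df['revol_util'] = loans_df['revol_util'].str.strip().str.strip('%').astype(float)

# Replace any employment title outside the top 20 with 'Other'
top_20 = loans_df['clean_title'].value_counts().head(20).index
loans_df['clean_title_top20'] = loans_df['clean_title'].where(
    loans_df['clean_title'].isin(top_20), 'Other')
```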
###Code
# I've decided to use the instacart data to force myself to use these concepts with
# a new data set to better retain the skills from the lecture.
!wget https://s3.amazonaws.com/instacart-datasets/instacart_online_grocery_shopping_2017_05_01.tar.gz
!tar --gunzip --extract --verbose --file=instacart_online_grocery_shopping_2017_05_01.tar.gz
%cd instacart_2017_05_01
# Now lets get a little fancy here:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# This 'color' variable will be used later to color our graphs.
color = sns.color_palette()
%matplotlib inline
# With this backend, the output of plotting commands is
# displayed inline within frontends like the Jupyter notebook, directly below
# the code cell that produced it.
# Creating a dataframe variable for each csv file in
# instacart_2017_05_01 directory.
################################################################################
# Read in 'aisles.csv' file:
aisles_df = pd.read_csv('aisles.csv')
# Display Data:
print("Aisles Data: ")
display(aisles_df.head(3))
print('=========================================================================')
# Read in 'departments.csv' fils:
departments_df = pd.read_csv('departments.csv')
# Display Data:
print("Department Data: ")
display(departments_df.head(3))
print('=========================================================================')
# Read in 'order_products_prior.csv' file:
order_products_prior_df = pd.read_csv('order_products__prior.csv')
# Display Data:
print("Ordered Products Prior Data: ")
display(order_products_prior_df.head(3))
print('=========================================================================')
# Read in 'order_products_train.csv' file:
order_products_train_df = pd.read_csv('order_products__train.csv')
# Display Data:
print("Ordered Products Train Data: ")
display(order_products_train_df.head(3))
print('=========================================================================')
# Read in 'orders.csv' file:
orders_df = pd.read_csv('orders.csv')
# Display Data:
print("Orders Data: ")
display(orders_df.head(3))
print('=========================================================================')
# Read in 'products.csv' file:
products_df = pd.read_csv('products.csv')
# Display Data:
print("Products Data: ")
display(products_df.head(3))
# Look at the top 5 rows of 'orders_df':
print("Orders Data")
display(orders_df.head())
# Look at the top 5 rows of 'order_products_prior_df':
# This Data will be used as test data for predictive models.
print("------------------------------------\nPrior Orders Made")
display(order_products_prior_df.head())
# Look at the top 5 rows of 'order_products_train':
# We will use train data for predictive models.
print("------------------------------------\nOrders Train Data")
display(order_products_train_df.head())
###Output
Orders Data
###Markdown
Things to note:
- `orders.csv` has all the information about the given order id, like:
  - The user.
  - Time of purchase.
  - Days since prior order.
###Code
# Let us first get the count of rows in each of the three sets.
# Create a variable for value counts of 'orders_df['eval_set]
count_series = orders_df['eval_set'].value_counts()
plt.figure(figsize=(12,8)) # Set the size of our figure display to 12 inches wide
# and 8 inches in height.
# Use 'seaborn' to create a bar plot.
# ----The alpha argument sets the transparency
sns.barplot(count_series.index, count_series.values, alpha=0.7)
# Name the y axis:
plt.ylabel('Number of Occurrences', fontsize=12)
# Name the x axis:
plt.xlabel('Eval set type', fontsize=12)
# Give the graph a title:
plt.title('Count of rows in each dataset', fontsize=15)
# Rotate just the x axis labels to display at desired angle:
plt.xticks(rotation=45)
# Display the graph to the screen:
plt.show()
# Create a function to get all the unique counts,
# that takes 'x' and runs it through np.unique():
def get_unique_count(x):
# Return the number of all the unque counts found
return len(np.unique(x))
# The first thing going on here is we are grouping the 'user_id' by the 'eval_set'
# Then we will use the 'aggregate()' method to use our function to get the unique value counts.
count_series = orders_df.groupby('eval_set')['user_id'].aggregate(get_unique_count)
# Display our results.
count_series
# We can see our 'train counts' made up of the last purchase a user made.
# And the 'test counts' there are only 75000, so now we need to predict the rest.
# Now let us validate the claim that 4 to 100 orders of a customer are given.
# Here we will use 'groupby()' method again, but instead we will group the
# 'order_number' by the 'user_id'.
count_series = orders_df.groupby('user_id')['order_number'].aggregate(np.max).reset_index()
count_series = count_series.order_number.value_counts()
plt.figure(figsize=(15,8))
sns.barplot(count_series.index, count_series.values, alpha=0.8, color=color[8])
plt.ylabel('Number of Occurrences', fontsize=12)
plt.xlabel('Maximum order number', fontsize=12)
plt.xticks(rotation=90)
plt.show()
plt.figure(figsize=(12,8))
sns.countplot(x='order_dow', data=orders_df, alpha=0.6, color=color[9])
plt.ylabel('Count',fontsize=12)
plt.xlabel('Day of week', fontsize=12)
plt.yticks(rotation=45)
plt.title('Frequency of order by week day', fontsize=15)
plt.show()
# I want to give names to the days of the week.
# To do so I made a dictionary 'dow_val_replacement',
# to set a day of the week to a corresponding number.
dow_val_replacement = {0:'Sunday',1:'Monday',2:'Tuesday',3:'Wednesday',4:'Thursday',5:'Friday',6:'Saturday'}
# Then creating a new variable to change the values of the 'order_dow' column,
# by using pandas 'map()' function which we learned about in the previous module.
orders_df['order_dow'] = orders_df['order_dow'].map(dow_val_replacement)
# Checking to see if the changes were mapped.
orders_df['order_dow'].head(10)
# Make a new feature of the day time and whether it was morning or night time.
days_times_df = orders_df[['order_dow','order_hour_of_day']].copy()
days_times_df.head()
def am_pm(hour_number):
    # convert a 24-hour clock value (0-23) to a 12-hour am/pm label
    if hour_number == 0:
        return '12am'
    elif hour_number < 12:
        return (str(hour_number)+'am')
    elif hour_number == 12:
        return '12pm'
    else:
        # e.g. 13 -> '1pm', 23 -> '11pm'
        return (str(hour_number-12)+'pm')
days_times_df['am/pm'] = orders_df['order_hour_of_day'].apply(am_pm).copy()
days_times_df.head()
sns.countplot(x='am/pm',data=days_times_df)
plt.xticks(rotation=65)
plt.xlabel('Time of Day');
###Output
_____no_output_____ |
Notebooks/Lec 21 - Confidence Interval.ipynb | ###Markdown
Tutorial: Confidence Intervals

Sample Mean vs. Population Mean

Sample means and population means are different. Generally, we want to know about a population mean, but we can only calculate a sample mean. We then want to use the sample mean to estimate the population mean. We use confidence intervals in an attempt to determine how accurately our sample mean estimates the population mean.

Confidence Interval

To really get a sense of how our sample mean relates to the population mean we need to compute a standard error. The standard error is a measure of the variance of the sample mean.

IMPORTANT: Computing a standard error involves assuming that the way you sample is unbiased, and that the data are normal and independent. If these conditions are violated, your standard error will be wrong. There are ways of testing for this and correcting.

The formula for standard error is:

$$SE = \frac{\sigma}{\sqrt{n}}$$

where $\sigma$ is the sample standard deviation and $n$ is the number of samples.

Assuming our data are normally distributed, we can use the standard error to compute our confidence interval. To do this we first set our desired confidence level, say 95%; we then determine how many standard deviations contain 95% of the mass. It turns out that 95% of the mass lies between -1.96 and 1.96 on a standard normal distribution. When the samples are large enough (generally > 30 is taken as a threshold) the Central Limit Theorem applies and normality can be safely assumed; if sample sizes are smaller, a safer approach is to use a $t$-distribution with appropriately specified degrees of freedom. The actual way to compute the values is by using a cumulative distribution function (CDF). If you are not familiar with CDFs, inverse CDFs, and their companion PDFs, you can read about them [here](https://en.wikipedia.org/wiki/Probability_density_function) and [here](https://en.wikipedia.org/wiki/Cumulative_distribution_function). Look [here](https://en.wikipedia.org/wiki/Student%27s_t-distribution) for information on the $t$-distribution. We can check the 95% number using one of the Python functions.

NOTE: Be careful when applying the Central Limit Theorem, however, as many datasets in finance are fundamentally non-normal and it is not safe to apply the theorem casually or without attention to subtlety.

We can visualize the 95% mass bounds here.
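First, the 1.96 figure mentioned above can be verified directly from the inverse CDF (a quick sketch):

```python
from scipy import stats

# 97.5th percentile of the standard normal: the central 95% of the mass
# therefore lies between roughly -1.96 and +1.96 standard deviations
print(stats.norm.ppf(0.975))
```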
###Code
# Set up the x axis
x = np.linspace(-5,5,100)
# Here's the normal distribution
y = stats.norm.pdf(x,0,1)
plt.plot(x,y)
# Plot our bounds
plt.vlines(-1.96, 0, 1, colors='r', linestyles='dashed')
plt.vlines(1.96, 0, 1, colors='r', linestyles='dashed')
# Shade the area
fill_x = np.linspace(-1.96, 1.96, 500)
fill_y = stats.norm.pdf(fill_x, 0, 1)
plt.fill_between(fill_x, fill_y)
plt.xlabel('$\sigma$')
plt.ylabel('Normal PDF');
###Output
_____no_output_____
###Markdown
Here's the trick

Now, rather than reporting our sample mean without any sense of the probability of it being correct, we can compute an interval and be much more confident that the population mean lies in that interval. To do this we take our sample mean $\mu$ and report $\left(\mu-1.96 SE , \mu+1.96SE\right)$. This works because, assuming normality, that interval will contain the population mean 95% of the time.

SUBTLETY: In any given case, the true value of the estimate and the bounds of the confidence interval are fixed. It is incorrect to say that "The national mean female height is between 63 and 65 inches with 95% probability," but unfortunately this is a very common misinterpretation. Rather, the 95% refers instead to the fact that over many computations of a 95% confidence interval, the true value will be in the interval in 95% of the cases (assuming correct calibration of the confidence interval, which we will discuss later). But in fact for a single sample and the single confidence interval computed from it, we have no way of assessing the probability that the interval contains the population mean. The visualization below demonstrates this.

In the code block below, there are two things to note. First, although the sample size is sufficiently large to assume normality, we're using a $t$-distribution, just to demonstrate how it is used. Second, the $t$-values needed (analogous to the $\pm1.96$ used above) are being calculated from the inverted cumulative density function, the ppf in scipy.stats. The $t$-distribution requires the extra parameter degrees of freedom (d.o.f.), which is the size of the sample minus one.
###Code
np.random.seed(8309)
n = 100 # number of samples to take
samples = [np.random.normal(loc=0, scale=1, size=100) for _ in range(n)]
fig, ax = plt.subplots(figsize=(10, 7))
for i in np.arange(1, n, 1):
sample_mean = np.mean(samples[i]) # calculate sample mean
se = stats.sem(samples[i]) # calculate sample standard error
h = se*stats.t.ppf((1+0.95)/2, len(samples[i])-1) # calculate t; 2nd param is d.o.f.
sample_ci = [sample_mean - h, sample_mean + h]
if ((sample_ci[0] <= 0) and (0 <= sample_ci[1])):
plt.plot((sample_ci[0], sample_ci[1]), (i, i), color='blue', linewidth=1);
plt.plot(np.mean(samples[i]), i, 'bo');
else:
plt.plot((sample_ci[0], sample_ci[1]), (i, i), color='red', linewidth=1);
plt.plot(np.mean(samples[i]), i, 'ro');
plt.axvline(x=0, ymin=0, ymax=1, linestyle='--', label = 'Population Mean');
plt.legend(loc='best');
plt.title('100 95% Confidence Intervals for mean of 0');
###Output
_____no_output_____
###Markdown
Further Reading

This is only a brief introduction; Wikipedia has excellent articles detailing these subjects in greater depth.

Let's go back to our heights example. Since the sample size is small, we'll use a $t$-test.
###Code
# standard error SE was already calculated
t_val = stats.t.ppf((1+0.95)/2, 9) # d.o.f. = 10 - 1
print 'sample mean height:', mean_height
print 't-value:', t_val
print 'standard error:', SE
print 'confidence interval:', (mean_height - t_val * SE, mean_height + t_val * SE)
###Output
sample mean height: 64.2990415407
t-value: 2.26215716274
standard error: 1.18897782627
confidence interval: (61.609386834663141, 66.988696246744738)
###Markdown
There is a built-in function in scipy.stats for computing the interval. Remember to specify the degrees of freedom.
###Code
print '99% confidence interval:', stats.t.interval(0.99, df=9,
loc=mean_height, scale=SE)
print '95% confidence interval:', stats.t.interval(0.95, df = 9,
loc=mean_height, scale=SE)
print '80% confidence interval:', stats.t.interval(0.8, df = 9,
loc=mean_height, scale=SE)
###Output
99% confidence interval: (60.43505913983995, 68.163023941567928)
95% confidence interval: (61.609386834663141, 66.988696246744738)
80% confidence interval: (62.654651037653949, 65.94343204375393)
###Markdown
Note that as your confidence increases, the interval necessarily widens. Assuming normality, there's also a built-in function that will compute our interval for us. This time you don't need to specify the degrees of freedom. Note that at a corresponding level of confidence, the interval calculated using the normal distribution is narrower than the interval calculated using the $t$-distribution.
###Code
print stats.norm.interval(0.99, loc=mean_height, scale=SE)
print stats.norm.interval(0.95, loc=mean_height, scale=SE)
print stats.norm.interval(0.80, loc=mean_height, scale=SE)
###Output
(61.236437614523354, 67.361645466884525)
(61.968687822794635, 66.629395258613243)
(62.775305146047593, 65.822777935360293)
###Markdown
What does this mean?

Confidence intervals allow us to set our desired confidence, and then report a range that will likely contain the population mean. The higher our desired confidence, the larger range we report. In general, one can never report a single point value, because the probability that any given point is the true population mean is incredibly small. Let's see how our intervals tighten as we change sample size.
###Code
np.random.seed(10)
sample_sizes = [10, 100, 1000]
for s in sample_sizes:
heights = np.random.normal(POPULATION_MU, POPULATION_SIGMA, s)
SE = np.std(heights) / np.sqrt(s)
print stats.norm.interval(0.95, loc=mean_height, scale=SE)
###Output
(61.968687822794635, 66.629395258613243)
(63.343692029946574, 65.254391051461297)
(64.00593339807287, 64.592149683335009)
###Markdown
Visualizing Confidence Intervals

Here is some code to visualize a confidence interval on a graph. Feel free to play around with it.
###Code
sample_size = 100
heights = np.random.normal(POPULATION_MU, POPULATION_SIGMA, sample_size)
SE = np.std(heights) / np.sqrt(sample_size)
(l, u) = stats.norm.interval(0.95, loc=np.mean(heights), scale=SE)
print (l, u)
plt.hist(heights, bins=20)
plt.xlabel('Height')
plt.ylabel('Frequency')
# Just for plotting
y_height = 5
plt.plot([l, u], [y_height, y_height], '-', color='r', linewidth=4, label='Confidence Interval')
plt.plot(np.mean(heights), y_height, 'o', color='r', markersize=10);
###Output
(63.588854219913536, 65.573463595434731)
###Markdown
Miscalibration and Violation of Assumptions

The computation of a standard deviation, standard error, and confidence interval all rely on certain assumptions. If these assumptions are violated then the 95% confidence interval will not necessarily contain the population parameter 95% of the time. We say that in this case the confidence interval is miscalibrated. Here is an example.

Example: Autocorrelated Data

If your data generating process is autocorrelated, then estimates of standard deviation will be wrong. This is because autocorrelated processes tend to produce more extreme values than normally distributed processes. This is due to new values being dependent on previous values: series that are already far from the mean are likely to stay far from the mean. To check this we'll generate some autocorrelated data according to the following process.

$$X_t = \theta X_{t-1} + \epsilon$$
$$\epsilon \sim \mathcal{N}(0,1)$$
###Code
def generate_autocorrelated_data(theta, mu, sigma, N):
# Initialize the array
X = np.zeros((N, 1))
for t in range(1, N):
# X_t = theta * X_{t-1} + epsilon
X[t] = theta * X[t-1] + np.random.normal(mu, sigma)
return X
X = generate_autocorrelated_data(0.5, 0, 1, 100)
plt.plot(X);
plt.xlabel('t');
plt.ylabel('X[t]');
###Output
_____no_output_____
###Markdown
It turns out that for larger sample sizes, you should see the sample mean asymptotically converge to zero. This is because the process is still centered around zero, but let's check if that's true. We'll vary the number of samples drawn, and look for convergence as we increase sample size.
###Code
sample_means = np.zeros(200-1)
for i in range(1, 200):
X = generate_autocorrelated_data(0.5, 0, 1, i * 10)
sample_means[i-1] = np.mean(X)
plt.bar(range(1, 200), sample_means);
plt.xlabel('Sample Size');
plt.ylabel('Sample Mean');
###Output
_____no_output_____
###Markdown
Definitely looks like there's some convergence, we can also check what the mean of the sample means is.
###Code
np.mean(sample_means)
###Output
_____no_output_____
###Markdown
Pretty close to zero. We could also derive symbolically that the mean is zero, but let's assume that we've convinced ourselves with the simple empirical analysis. Now that we know the population mean, we can check the calibration of confidence intervals. First we'll write two helper functions which compute a naive interval for some input data, and check whether the interval contains the true mean, 0.
###Code
def compute_unadjusted_interval(X):
T = len(X)
# Compute mu and sigma MLE
mu = np.mean(X)
sigma = np.std(X)
SE = sigma / np.sqrt(T)
# Compute the bounds
return stats.norm.interval(0.95, loc=mu, scale=SE)
# We'll make a function that returns true when the computed bounds contain 0
def check_unadjusted_coverage(X):
l, u = compute_unadjusted_interval(X)
# Check to make sure l <= 0 <= u
if l <= 0 and u >= 0:
return True
else:
return False
###Output
_____no_output_____
###Markdown
Now we'll run many trials, in each we'll sample some data, compute a confidence interval, and then check if the confidence interval contains the population mean. We'll keep a running tally, and we should expect to see 95% of the trials succeed if the intervals are calibrated correctly.
###Code
T = 100
trials = 500
times_correct = 0
for i in range(trials):
X = generate_autocorrelated_data(0.5, 0, 1, T)
if check_unadjusted_coverage(X):
times_correct += 1
print 'Empirical Coverage: ', times_correct/float(trials)
print 'Expected Coverage: ', 0.95
###Output
Empirical Coverage: 0.732
Expected Coverage: 0.95
|
word2vec_model.ipynb | ###Markdown
**Mount google drive**
###Code
from google.colab import drive
drive.mount('/content/drive')
###Output
Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True).
###Markdown
**Prerequisites**
###Code
import pandas as pd
import numpy as np
from itertools import groupby
import re
from gensim.models import Word2Vec
from sklearn.metrics import balanced_accuracy_score
###Output
_____no_output_____
###Markdown
**load dataset**
###Code
train_path = '/content/drive/MyDrive/Colab Notebooks/labeling_application/train_set.csv'
test_path = '/content/drive/MyDrive/Colab Notebooks/labeling_application/test_set.csv'
result_path = '/content/drive/MyDrive/Colab Notebooks/labeling_application/prediction3.csv'
model_path = '/content/drive/MyDrive/Colab Notebooks/labeling_application/word2vec.model'
stop_path = '/content/drive/MyDrive/Colab Notebooks/labeling_application/stop_word.csv'
# this function extract important word of each game
def prepare_string(string):
result = []
# remove html tags from string
TAG_RE = re.compile(r'<[^>]+>')
string = TAG_RE.sub('', string)
# remove punctuation mark
string = re.sub("[!()@.?؛:،-]",'', string)
strings=string.split()
# remove stop word
for s in strings :
s = s.strip()
if not s in stop_word.values and s!='' :
result.append(s)
return result
# load short list of stop word
stop_word = pd.read_csv(stop_path ,header = None, encoding = 'utf8')
# load train set
docs = []
id_docs = []
sentences = []
word_count = []
labels = []
train_data = pd.read_csv(train_path, encoding='utf8')
for d in train_data.values:
id_docs.append(d[0])
p_string = prepare_string(d[1])
docs.append(p_string)
sentences.extend(p_string)
word_count.append(len(p_string))
labels.append(d[2])
distinct_words = list(set(sentences))
# # split validation data
# id_docs_validation = id_docs[-150:]
# id_docs = id_docs[:-150]
# docs_validation = docs[-150:]
# docs = docs [:-150]
# word_count_validation = word_count[-150:]
# word_count = word_count[:-150]
# labels_validation = labels[-150:]
# labels = labels[:-150]
# load test set
docs_test = []
id_docs_test = []
test_data = pd.read_csv(test_path, encoding='utf8')
for d in test_data.values:
id_docs_test.append(d[0])
docs_test.append(prepare_string(d[1]))
# print(docs[0])
# print(id_docs_test[0])
###Output
_____no_output_____
###Markdown
**word2Vec Model**
###Code
# this func creates word2VecModel of given docs
def create_word2Vec_model(docs, len_model):
print("start creating word2Vec model with length word = " + str(len_model))
model = Word2Vec(docs, window=3, min_count=1, workers=8, size=len_model)
model.save(model_path+str(len_model))
print("model creation finished")
return
# this function loads the implemented word2vec model
def loadModel(len_model):
print("start loading models...")
return Word2Vec.load(model_path+str(len_model))
# this func is used to evaluate similarity between a query and all docs and return sorted docs
def findSimilar(query_terms, model):
results = []
new_query = []
for q in query_terms :
if q in model.wv.vocab : new_query.append(q)
counter=0
for doc in id_docs:
similarity = model.wv.n_similarity(new_query,new_docs[counter])
counter+=1
results.append((doc, similarity))
return sorted(results, key=lambda t: t[1], reverse=True)
# this func evaluates all given queries with given precisions (@k)
def evaluate_queries(queries, model,validation):
labels_prediction = []
temp = 0
for query in queries:
res = findSimilar(query, model)
labels_prediction.append(labels[res[0][0]])
# result = []
# for r in res[:10]:
# result.append(labels[r[0]])
# group = groupby(result)
# final_tag = max(group, key=lambda k: len(list(k[1])))
# labels_prediction.append(final_tag[0])
if validation :
gold = labels_validation[temp]
print("result word2vec retrieval : " + str(labels_prediction[-1])+" gold result : " + str(gold))
else :
print("evaluate result "+str(id_docs_test[temp])+" predicted label : "+str(labels_prediction[-1]))
dist_save=20
if temp% dist_save == 0 :
if (temp ==0):
data = { 'id' : id_docs_test[0] ,'label' : labels_prediction[:] }
df = pd.DataFrame(data)
df.to_csv(result_path, index=False)
else :
data = { 'id' : id_docs_test[temp-dist_save+1:temp+1] ,'label' : labels_prediction[temp-dist_save+1 :] }
df = pd.DataFrame(data)
df.to_csv(result_path, mode='a',header = None,index=False)
print(">>>> save result from the begining to data number "+str(id_docs_test[temp]))
temp += 1
if validation :
print(balanced_accuracy_score(labels_validation,labels_prediction))
return labels_prediction
len_model = 200
# # create Model
create_word2Vec_model(docs, len_model)
# load model
model = loadModel(len_model)
print("model with lentgh word = " + str(len_model))
# make data ready
new_docs = []
counter=0
for doc in id_docs:
new_doc = []
for word in docs[counter]:
if word in model.wv.vocab : new_doc.append(word)
counter+=1
new_docs.append(new_doc)
# evaluation
validation = False
labels_prediction = evaluate_queries(docs_test, model,validation)
print("evaluation finished... at last !")
###Output
start creating word2Vec model with length word = 200
model creation finished
start loading models...
model with lentgh word = 200
evaluate result 40000 predicted label : 6
evaluate result 40001 predicted label : 1
evaluate result 40002 predicted label : 5
evaluate result 40003 predicted label : 9
evaluate result 40004 predicted label : 3
evaluate result 40005 predicted label : 2
evaluate result 40006 predicted label : 2
evaluate result 40007 predicted label : 9
evaluate result 40008 predicted label : 9
evaluate result 40009 predicted label : 3
evaluate result 40010 predicted label : 9
evaluate result 40011 predicted label : 4
evaluate result 40012 predicted label : 2
evaluate result 40013 predicted label : 9
evaluate result 40014 predicted label : 5
evaluate result 40015 predicted label : 3
evaluate result 40016 predicted label : 2
evaluate result 40017 predicted label : 8
evaluate result 40018 predicted label : 2
evaluate result 40019 predicted label : 9
evaluate result 40020 predicted label : 3
evaluate result 40021 predicted label : 3
evaluate result 40022 predicted label : 5
evaluate result 40023 predicted label : 2
evaluate result 40024 predicted label : 6
evaluate result 40025 predicted label : 8
evaluate result 40026 predicted label : 2
evaluate result 40027 predicted label : 4
evaluate result 40028 predicted label : 4
evaluate result 40029 predicted label : 9
evaluate result 40030 predicted label : 9
evaluate result 40031 predicted label : 2
evaluate result 40032 predicted label : 9
evaluate result 40033 predicted label : 5
evaluate result 40034 predicted label : 9
|
PDS_jupyter/Assignments/01_Python_I_Assignment_Sam_Forman.ipynb | ###Markdown
Assignment 1 - Sam Forman

1. (1.0 point) Function return value

This exercise is to show you how function return values work. Run the code below to see what happens.
###Code
def f1(x):
print(x + 1)
def f2(x):
return(x + 1)
f1(4)
f2(4)
###Output
5
###Markdown
Each function should produce the same output. Now run the following code.
###Code
f1(4) + 1
f2(4) + 1
###Output
5
###Markdown
Explain what is happening and why you get this result.

This error happens because `f1` only prints the value and has no return statement, so it implicitly returns `None`. `f1(4) + 1` therefore tries to add `None + 1`, which raises a `TypeError`, while `f2(4) + 1` works because `f2` returns the integer 5.

2. (1.0 point) Function operation

Consider the following piece of code:

```python
def f(x):
    if x == 0 or x == 1:
        return x
    return f(x - 1) + f(x - 2)
```
###Code
def f(x):
if x == 0 or x == 1:
return x
return f(x - 1) + f(x - 2)
print(f(1))
print(f(2))
print(f(3))
print(f(4))
print(f(5))
print(f(10))
print(f(15))
###Output
1
1
2
3
5
55
610
###Markdown
Describe, in words, what this code does, and how it does it.

This function computes the x-th Fibonacci number recursively. It first checks whether the `x` passed to it is equal to 0 or 1; if so, that value is returned directly (the base cases). If not, the function returns the sum of itself run on x-1 and x-2, so each call keeps splitting the problem into the two preceding Fibonacci numbers until every branch of the recursion reaches a base case.

For what inputs will this function not behave as expected? What will happen?

Negative numbers (and non-integer values) never reach the base cases of 0 or 1, so the recursion never terminates and Python eventually raises a RecursionError. Also, because every call spawns two more calls, the function becomes extremely slow for even moderately large inputs.

3. (1.0 point) Defining a function

The following program plays the game of "Rock, Paper, and Scissors". Familiarize yourself with how it works. Try running the program.
###Code
from random import randint
valid_input = ["R", "P", "S"]
winners = [["R", "S"], ["S", "P"], ["P", "R"]]
print("You are playing a Rock, Paper, Scissors simulation.")
user_choice = input("Select R for rock, P for Paper, S for scissors, Q to quit\n> ")
user_choice = user_choice.upper()
while user_choice != 'Q':
if user_choice not in valid_input:
print("Error: invalid input -- " + user_choice)
break
computer_choice = valid_input[randint(0, len(valid_input)-1)]
print("Computer chooses " + computer_choice)
if user_choice == computer_choice:
print("Tie Game")
elif [user_choice, computer_choice] in winners:
print("You win!")
else:
print("Computer wins!")
user_choice = input("Select R for rock, P for Paper, S for scissors, Q to quit\n> ")
user_choice = user_choice.upper()
###Output
_____no_output_____
###Markdown
Recall how we define a function using `def`, and how we pass in parameters. Transform the code into a function called `rps` that takes two parameters: `user_choice` and `computer_choice`, with valid inputs `'R'`, `'P'`, and `'S'`. Your function should return `'D'` for a draw, `'W'` if `user_choice` wins, and `'L'` if `user_choice` loses.
###Code
### BEGIN SOLUTION
def rps(user_choice, computer_choice):
rps_valid_input = ["R", "P", "S"]
rps_winners = [["R", "S"], ["S", "P"], ["P", "R"]]
    if user_choice not in rps_valid_input or computer_choice not in rps_valid_input:
print("Error: invalid input -- " + user_choice)
return 0
# first assure that input is valid
#then replicate last part of the game where user_choice, comp_choice is compared to the list of winning values
#and if it matches return win, lose, draw accordingly.
if user_choice == computer_choice:
return("D")
elif [user_choice, computer_choice] in rps_winners:
return("W")
else:
return("L")
### END SOLUTION
from nose.tools import assert_equal
assert_equal(rps('R', 'R'), 'D')
assert_equal(rps('R', 'P'), 'L')
assert_equal(rps('R', 'S'), 'W')
assert_equal(rps('P', 'R'), 'W')
assert_equal(rps('P', 'P'), 'D')
assert_equal(rps('P', 'S'), 'L')
assert_equal(rps('S', 'R'), 'L')
assert_equal(rps('S', 'P'), 'W')
assert_equal(rps('S', 'S'), 'D')
###Output
_____no_output_____
###Markdown
4. (1.0 point) List function Given a list of non-empty tuples, return a list sorted in increasing order by the last element in each tuple. For example: `[(1, 7), (1, 3), (3, 4, 5), (2, 2)]` yields `[(2, 2), (1, 3), (3, 4, 5), (1, 7)]`
###Code
test_tuples = [(1, 7), (1, 3), (3, 4, 5), (2, 2)]
def sort_tuple_list(tuple_list):
'''Function to sort a list of non-empty tuples in increasing order by the last element in each tuple'''
### BEGIN SOLUTION
return(sorted(tuple_list, key= lambda tup : tup[-1]))
## key sources: https://wiki.python.org/moin/HowTo/Sorting
## https://docs.python.org/3/tutorial/controlflow.html
### END SOLUTION
print(sort_tuple_list(test_tuples))
from nose.tools import assert_equal
assert_equal(sort_tuple_list([(1, 3), (3, 2), (2, 1)]), [(2, 1), (3, 2), (1, 3)])
assert_equal(sort_tuple_list([(2, 3), (1, 2), (3, 1)]), [(3, 1), (1, 2), (2, 3)])
assert_equal(sort_tuple_list([(1, 7), (1, 3), (3, 4, 5), (2, 2)]), [(2, 2), (1, 3), (3, 4, 5), (1, 7)])
###Output
_____no_output_____
###Markdown
5. (1.0 point) List sort Given two lists sorted in increasing order, create and return a merged list of all the elements in sorted order. You may modify the passed in lists. Ideally, the solution should work in "linear" time, making a single pass of both lists.
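One way to do the single pass described above (a sketch; the solution cell below takes a simpler `sorted()`-based approach instead):

```python
def list_merge_linear(list1, list2):
    '''Single-pass merge of two already-sorted lists, O(len(list1) + len(list2)).'''
    merged = []
    i, j = 0, 0
    while i < len(list1) and j < len(list2):
        if list1[i] <= list2[j]:
            merged.append(list1[i])
            i += 1
        else:
            merged.append(list2[j])
            j += 1
    # one list is exhausted; whatever remains of the other is already sorted
    merged.extend(list1[i:])
    merged.extend(list2[j:])
    return merged
```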
###Code
def list_merge(list1, list2):
'''Given two lists sorted in increasing order, create and
return a merged list of all the elements in sorted orde
'''
return sorted(list1 + list2)
### BEGIN SOLUTION
### END SOLUTION
from nose.tools import assert_equal
assert_equal(list_merge(['aa', 'xx', 'zz'], ['bb', 'cc']), ['aa', 'bb', 'cc', 'xx', 'zz'])
assert_equal(list_merge(['aa', 'xx'], ['bb', 'cc', 'zz']), ['aa', 'bb', 'cc', 'xx', 'zz'])
assert_equal(list_merge(['aa', 'aa'], ['aa', 'bb', 'bb']), ['aa', 'aa', 'aa', 'bb', 'bb'])
list_merge(['aa', 'ccc'], ['aa', 'bb', 'bb'])
###Output
_____no_output_____
###Markdown
6. (1.0 point) Built-in Operator Imagine that Python doesn’t have the `!=` operator built in. Write a function that takes two parameters and gives the same result as the `!=` operator. Obviously, you cannot use `!=` within your function! Test if your code works by thinking of examples and making sure the output is the same for your new method as `!=` gives you.
###Code
rand_string = 'This String.'
print(sorted(set(rand_string)))
print(type(sorted(rand_string)), sorted(rand_string))
print(set(rand_string) - set(rand_string))  # lists don't support '-', but sets do
### BEGIN SOLUTION 1
def not_equal(val1, val2):
    ''' User supplied version of the != operator, implemented without using !='''
### BEGIN SOLUTION
# for numbers
if val1 == val2:
return False
else:
return True
### END SOLUTION 1
### BEGIN SOLUTION 2 with no != or ==
def not_equaltwo(val1, val2):
    ''' User supplied version of the != operator, implemented without using != or == '''
    try:
        # for comparable values, a != b exactly when a < b or a > b
        return val1 < val2 or val1 > val2
    except TypeError:
        # values of incomparable types (e.g. '3' and 3, or a tuple and a list) can't be equal
        return True
### END SOLUTION 2
from nose.tools import assert_true, assert_false
assert_true(not_equal(2,3))
assert_false(not_equal(3,3))
assert_true(not_equal('3',3))
assert_true(not_equal((1,2),[1,2]))
assert_false(not_equal((3,4),(3,4)))
###Output
_____no_output_____
###Markdown
7. (1.5 point) Quadratic Formula

A quadratic equation is a second order polynomial with three coefficients a, b, and c. It is given by:

$$ax^2 + bx + c = 0$$

The solutions to the equation are given by the formulas:

$$x_1 = \frac{-b + \sqrt{b^2 - 4ac}}{2a}$$

and

$$x_2 = \frac{-b - \sqrt{b^2 - 4ac}}{2a}$$

Write a function that computes the roots of a quadratic equation and returns them as a tuple. Your function should take three parameters, a, b, and c. Note that if

$$b^2 - 4ac < 0$$

then the roots will be complex. Hint: remember that Python has a built-in complex data type.
###Code
import cmath
import math
import numpy as np
print(math.sqrt(9))
print(complex(22/7))
def roots(a_in, b_in, c_in):
''' Computes the roots of the quadratic equation with coefficients a, b, c, and returns
them as the tuple (x1, x2)
'''
### BEGIN SOLUTION
a = complex(a_in)
b = complex(b_in)
c = complex(c_in)
x1 = ((-b + cmath.sqrt(b**2 - 4*a*c))/(2*a))
x2 = ((-b - cmath.sqrt(b**2 - 4*a*c))/(2*a))
return (x1, x2)
### END SOLUTION
from nose.tools import assert_equal
assert_equal(roots(1,2,3), ((-1+1.4142135623730951j), (-1-1.4142135623730951j)))
assert_equal(roots(0,0,4), (0j, 0j))
assert_equal(roots(-4,3,2), (-0.42539052967910607, 1.175390529679106))
###Output
_____no_output_____
###Markdown
8. (2.5 points) The Game of Nims

In this game, two players sit in front of a pile of 100 stones. They take turns, each removing between 1 and 5 stones (assuming there are at least 5 stones left in the pile). The person who removes the last stone(s) wins.

In this problem, you'll write a function to play this game. It may seem tricky, so break it down into parts. Like many programs, we have to use nested loops (one loop inside another). In the outermost loop, we want to keep playing until we are out of stones. Inside that, we want to keep alternating players. You have the option of either writing two blocks of code, or keeping a variable that tracks the current player.

We are going to use the `input` function to obtain the user's move. We might want to have an innermost loop that checks if the user's input is valid. Is it a number? Is it a valid number (e.g. between 1 and 5)? Are there enough stones in the pile to take off this many? If any of these answers are no, we should tell the user and re-ask them the question.

If you choose to write two blocks of code, the basic outline of the program should be something like this:

    while [pile is not empty]:
        while [player 1's answer is not valid]:
            [ask player 1]
        [execute player 1's move]
        [same as above for player 2]

Be careful with the validity checks. Specifically, we want to keep asking player 1 for their choice as long as their answer is not valid, BUT we want to make sure we ask them at least ONCE. So, for example, we will want to keep a variable that tracks whether their answer is valid, and set it to `False` initially.

There is no need to limit yourself to only one function! Use as many functions as you want in order to make the code more readable and modular.

When you're finished, test each other's programs by playing them!
###Code
def play_nims(pile, max_stones):
'''
An interactive two-person game; also known as Stones.
@param pile: the number of stones in the pile to start
@param max_stones: the maximum number of stones you can take in one turn
'''
# validate the game parameters, re-prompting until both are numbers;
# the corrected values are used below when the pile and turn size are set up
while not isinstance(max_stones, (int, float)):
try:
max_stones = int(input('Sorry, please enter a number for max turn size -> '))
except ValueError:
pass
while not isinstance(pile, (int, float)):
try:
pile = int(input('Sorry, please enter a number for total stones -> '))
except ValueError:
pass
## Basic structure of program (feel free to alter as you please):
# while [pile is not empty]:
# while [player 1's answer is not valid]:
# [ask player 1]
# [execute player 1's move]
#
# while [player 2's answer is not valid]:
# [ask player 2]
# [execute player 2's move]
#
# print "Game over"
### BEGIN SOLUTION
winning_player = 'unk'
total_pile = pile
turn_size = max_stones
while total_pile > turn_size:
#player1 turn
p1_turn = input("There are %d stones remaining. \nPlayer 1 choose how many stones to remove from the pile -> " %total_pile)
while True:
try:
int(p1_turn)
assert(int(p1_turn) <= turn_size)
break
except ValueError:
p1_turn = input("Sorry player 1, you must enter an interger -> ")
except AssertionError:
p1_turn = input("Sorry player 1, your turn must be less than the max turn size -> ")
p1_turn = int(p1_turn)
total_pile -= p1_turn
winning_player = 'Player 2'
if total_pile <= turn_size:
break
else:
pass
# player2 turn
p2_turn = input("There are %d stones remaining. \nPlayer 2 choose how many stones to remove from the pile -> " %total_pile)
while True:
try:
int(p2_turn)
assert(int(p2_turn) <= turn_size)
break
except ValueError:
p2_turn = input("Sorry player 2, you must enter an interger -> ")
except AssertionError:
p2_turn = input("Sorry player 2, your turn must be less than the max turn size -> ")
p2_turn = int(p2_turn)
total_pile -= p2_turn
winning_player = 'Player 1'
print('\n', winning_player, ' wins!')
### END SOLUTION
play_nims(20,5)
play_nims(20,5)
###Output
There are 20 stones remaining.
Player 1 choose how many stones to remove from the pile -> 5
There are 15 stones remaining.
Player 2 choose how many stones to remove from the pile -> 4
There are 11 stones remaining.
Player 1 choose how many stones to remove from the pile -> 5
There are 6 stones remaining.
Player 2 choose how many stones to remove from the pile -> 1
Player 1 wins!
notebooks/000_filter_for_differentially_localized_proteins.ipynb | ###Markdown
Drop second nucleoli fibrillar center
###Code
localization = localization.drop(columns=['Nucleoli_fibrillar_center.1'], errors='ignore')
len(localization.index.unique())
all_metadata_cols = set(['ensg_id', 'gene_name', 'uniprot_id', 'prest_id', 'aa_sequencing_sequence', 'tissue_name', 'antibody_id'])
metadata_cols = list(all_metadata_cols.difference([LOCALIZATION_INDEX]))
metadata = localization[metadata_cols]
metadata = metadata.drop_duplicates()
metadata = metadata.sort_index()
print(metadata.shape)
metadata.head()
metadata_multiple_seqs = metadata.groupby(level=0).filter(lambda x: len(x['aa_sequencing_sequence'].unique()) != 1)
print(metadata_multiple_seqs.shape)
metadata_multiple_seqs = metadata_multiple_seqs.sort_index()
print(len(metadata_multiple_seqs.index.unique()))
metadata_multiple_seqs.head()
localization.head()
cols = localization.columns.difference(metadata.columns).append(pd.Index(['tissue_name']))
cell_line_localization = localization[cols]
cell_line_localization = cell_line_localization.set_index('tissue_name', append=True)
cell_line_localization = cell_line_localization.sort_index()
cell_line_localization = cell_line_localization.astype(bool)
print(cell_line_localization.shape)
cell_line_localization.head()
cell_line_localization_tidy = cell_line_localization.replace(False, np.nan).stack().reset_index()
cell_line_localization_tidy = cell_line_localization_tidy.dropna()
cell_line_localization_tidy = cell_line_localization_tidy.drop(columns=[0])
cell_line_localization_tidy = cell_line_localization_tidy.rename(columns={"level_2": "cellular_component"})
print(cell_line_localization_tidy.shape)
cell_line_localization_tidy["level_a"] = cell_line_localization_tidy['cellular_component'].map(lambda x: x.lower())
cell_line_localization_tidy.head(10)
###Output
(50095, 3)
###Markdown
Add granularity
###Code
txt = os.path.join(input_folder, 'Annotation granularity levels.txt')
granularity = pd.read_table(txt)
print(granularity.shape)
granularity
granularity_v2 = granularity.applymap(lambda x: x.lower().replace(' ', '_').replace('(', '').replace(')', ''))
granularity_v2['Level B'] = granularity_v2['Level B'].replace('mtoc', 'microtubule_organizing_center')
granularity_v2
level_a_to_b = pd.Series(index=granularity_v2['Level A'], data=granularity_v2['Level B'].values)
level_a_to_b
level_a_to_c = pd.Series(index=granularity_v2['Level A'], data=granularity_v2['Level C'].values)
level_a_to_c
cell_line_localization_tidy['level_b'] = cell_line_localization_tidy['level_a'].map(level_a_to_b)
cell_line_localization_tidy['level_c'] = cell_line_localization_tidy['level_a'].map(level_a_to_c)
cell_line_localization_tidy.head(20)
###Output
_____no_output_____
###Markdown
SAVE tidy version of localization and metadata to file. Have the v17 version of the database, so put it in the name
###Code
csv = os.path.join(data_folder, 'v17_antibody_localization_tidy.csv')
cell_line_localization_tidy.to_csv(csv, index=False)
csv = os.path.join(data_folder, 'v17_antibody_metadata.csv')
metadata.to_csv(csv)
!ls -lha $data_folder
###Output
total 7.3M
drwxrwxr-x 2 ubuntu ubuntu 4.0K May 16 20:26 .
drwxrwxr-x 6 ubuntu ubuntu 4.0K May 16 20:09 ..
-rw-rw-r-- 1 ubuntu ubuntu 2.8M May 16 20:31 v17_antibody_localization_tidy.csv
-rw-rw-r-- 1 ubuntu ubuntu 4.5M May 16 20:31 v17_antibody_metadata.csv
###Markdown
Get only singly localized proteins. Many of the antibodies have multiple localizations per cell type
###Code
single_localization_per_celltype = cell_line_localization_tidy.groupby(LOCALIZATION_INDEX).filter(
lambda x: len(x['tissue_name']) == len(x['tissue_name'].unique()))
print(single_localization_per_celltype.shape)
single_localization_per_celltype.head(20)
###Output
(14579, 6)
###Markdown
How many antibodies ids have only one localization per cell?
###Code
len(single_localization_per_celltype[LOCALIZATION_INDEX].unique())
###Output
_____no_output_____
###Markdown
Remove antibodies tested in only one tissue
###Code
multiple_tissues_tested = single_localization_per_celltype.groupby(LOCALIZATION_INDEX).filter(lambda x: len(x['tissue_name']) > 1)
print(multiple_tissues_tested.shape)
print(len(multiple_tissues_tested[LOCALIZATION_INDEX].unique()))
multiple_tissues_tested.head()
###Output
(13553, 6)
5053
###Markdown
Find ENSG ids with more than one cellular component
###Code
multiple_tissues_tested.groupby(LOCALIZATION_INDEX).apply(lambda x: len(x['cellular_component'].unique())).head()
multiple_tissues_tested.groupby(LOCALIZATION_INDEX).apply(lambda x: len(x['cellular_component'])).head()
differential_cell_line_localization = multiple_tissues_tested.groupby(LOCALIZATION_INDEX).filter(
lambda x: len(x['cellular_component'].unique()) > 1)
print(differential_cell_line_localization.shape)
print(len(differential_cell_line_localization[LOCALIZATION_INDEX].unique()))
differential_cell_line_localization.head(20)
from collections import Counter
multiple_localization_names = differential_cell_line_localization.groupby(LOCALIZATION_INDEX).apply(lambda x: ' - '.join(sorted(x['cellular_component'].unique()))).reset_index()
multiple_localization_names = multiple_localization_names.rename(columns={0: 'cellular_components'})
multiple_localization_names.head()
Counter(multiple_localization_names['cellular_components'])
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(context='notebook')
%matplotlib inline
unique_locations = sorted(multiple_localization_names['cellular_components'].unique())
fig, ax = plt.subplots(figsize=(6, 12))
ax.set_xscale('log')
sns.countplot(y='cellular_components',
data=multiple_localization_names, order=unique_locations)
fig.tight_layout()
pdf = os.path.join(figure_folder, 'multiple_localizations_across_cell_lines.pdf')
fig.savefig(pdf)
###Output
_____no_output_____
###Markdown
Seems like a lot of within-order switching
###Code
type(('Nucleoli', 'Nucleoli fibrillar center'))
type({'Nucleoli', 'Nucleoli fibrillar center'})
{'Nuclear membrane'}
cellular_component_orders = {
'Nuclear membrane': {'Nuclear membrane',},
'Nucleoli': {'Nucleoli', 'Nucleoli fibrillar center'},
'Nucleoplasm': {'Nucleoplasm', 'Nuclear bodies', 'Nuclear speckles', 'Nucleus'},
'Actin filaments': {'Actin filaments', 'Focal adhesion sites'},
'Centrosome': {'Centrosome', 'Microtubule organizing center'},
'Cytosol': {'Aggresome', 'Cytoplasmic bodies', 'Cytosol', 'Rods & rings'},
'Intermediate filaments': {'Intermediate filaments'},
'Microtubules': {'Cleavage furrow', 'Cytokinetic bridge', 'Microtubule ends', 'Microtubules',
'Midbody', 'Midbody ring', 'Mitotic spindle'},
'Mitochondria': {'Mitochondria',},
'Endoplasmic reticulum': {'Endoplasmic reticulum',},
'Golgi apparatus': {'Golgi apparatus',},
'Plasma membrane': {'Cell junctions', 'Plasma membrane'},
'Secreted proteins': {'Secreted proteins',},
'Vesicles': {'Endosomes', 'Lipid droplets', 'Lysosomes', 'Peroxisomes', 'Vesicles'}
}
cellular_component_orders = {k: set(map(lambda x: x.lower().replace(' ', '_'), v))
for k, v in cellular_component_orders.items()}
cellular_component_orders
def count_cellular_component_orders(locations):
n_orders = 0
for name, subnames in cellular_component_orders.items():
if len(subnames.intersection(locations)) > 0:
n_orders += 1
return n_orders
differential_cell_line_localization['cellular_component_lower'] = differential_cell_line_localization['cellular_component'].str.lower()
n_orders = differential_cell_line_localization.groupby(LOCALIZATION_INDEX)['cellular_component_lower'].apply(count_cellular_component_orders)
n_orders.head()
n_orders.unique()
ids_with_three_orders = n_orders[n_orders == 3].index
for x in ids_with_three_orders:
print(x)
differential_cell_line_localization.query('@LOCALIZATION_INDEX in @ids_with_three_orders')
cross_order_differential = differential_cell_line_localization.groupby(LOCALIZATION_INDEX).filter(
lambda x: count_cellular_component_orders(x['cellular_component_lower']) > 1)
print(cross_order_differential.shape)
print(len(cross_order_differential[LOCALIZATION_INDEX].unique()))
cross_order_differential.head()
from collections import Counter
cross_order_names = cross_order_differential.groupby(LOCALIZATION_INDEX).apply(lambda x: ' - '.join(sorted(x['cellular_component'].unique()))).reset_index()
cross_order_names = cross_order_names.rename(columns={0: 'cellular_components'})
cross_order_names.head()
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(context='notebook')
%matplotlib inline
unique_locations = sorted(cross_order_names['cellular_components'].unique())
fig, ax = plt.subplots(figsize=(6, 12))
# ax.set_xscale('log')
sns.countplot(y='cellular_components',
data=cross_order_names, order=unique_locations)
fig.tight_layout()
pdf = os.path.join(figure_folder, 'multiple_localizations_across_cell_lines_cross_order.pdf')
fig.savefig(pdf)
# differential_cell_line_localization.query('ensg_id == "ENSG00000253537"')
differential_cell_line_localization = multiple_tissues_tested.groupby(LOCALIZATION_INDEX).filter(
lambda x: len(x['level_c'].unique()) > 1)
print(differential_cell_line_localization.shape)
print(len(differential_cell_line_localization[LOCALIZATION_INDEX].unique()))
differential_cell_line_localization.head(20)
# def check_differential_localization(df):
# return df.apply(lambda x: len(x.unique()) != 1)
# is_differentially_localized = cell_line_localization.groupby(level=0, axis=0).apply(check_differential_localization)
# print(is_differentially_localized.shape)
# is_differentially_localized.head()
# differential_localization = is_differentially_localized.loc[is_differentially_localized.any(axis=1), :]
# print(differential_localization.shape)
# differential_localization.head()
# Are all compartments fair game for differential localization?
# differential_localization.any()
# differential_localization.sum()
# differential_localization_na = differential_localization.replace(False, np.nan)
# differential_localization_na = differential_localization_na.dropna(how='all', axis=1)
# print(differential_localization_na.shape)
# differential_localization_na.head()
# differential_localization_tidy = differential_localization_na.unstack().reset_index()
# print(differential_localization_tidy.shape)
# differential_localization_tidy = differential_localization_tidy.dropna()
# differential_localization_tidy = differential_localization_tidy.rename(columns={'level_0': 'cellular_component'})
# differential_localization_tidy = differential_localization_tidy.drop(columns=[0])
# print(differential_localization_tidy.shape)
# differential_localization_tidy.head()
# differential_cellular_components = differential_localization_tidy.groupby('ensg_id').apply(lambda x: '|'.join(x['cellular_component']))
# print(differential_cellular_components.shape)
# differential_cellular_components.head()
###Output
_____no_output_____
###Markdown
Read official file from website
###Code
txt = os.path.join(input_folder, 'subcellular_location.tsv')
subcellular_localization = pd.read_table(txt, index_col=0)
print(subcellular_localization.shape)
subcellular_localization.head()
subcellular_localization_spatial_variation = subcellular_localization.dropna(subset=['Single-cell variation spatial'])
print(subcellular_localization_spatial_variation.shape)
subcellular_localization_spatial_variation.head()
###Output
_____no_output_____
Notebooks/RadarCOVID-Report/Daily/RadarCOVID-Report-2021-09-02.ipynb | ###Markdown
RadarCOVID-Report Data Extraction
###Code
import datetime
import json
import logging
import os
import shutil
import tempfile
import textwrap
import uuid
import matplotlib.pyplot as plt
import matplotlib.ticker
import numpy as np
import pandas as pd
import pycountry
import retry
import seaborn as sns
%matplotlib inline
current_working_directory = os.environ.get("PWD")
if current_working_directory:
os.chdir(current_working_directory)
sns.set()
matplotlib.rcParams["figure.figsize"] = (15, 6)
extraction_datetime = datetime.datetime.utcnow()
extraction_date = extraction_datetime.strftime("%Y-%m-%d")
extraction_previous_datetime = extraction_datetime - datetime.timedelta(days=1)
extraction_previous_date = extraction_previous_datetime.strftime("%Y-%m-%d")
extraction_date_with_hour = datetime.datetime.utcnow().strftime("%Y-%m-%d@%H")
current_hour = datetime.datetime.utcnow().hour
are_today_results_partial = current_hour != 23
###Output
_____no_output_____
###Markdown
Constants
###Code
from Modules.ExposureNotification import exposure_notification_io
spain_region_country_code = "ES"
germany_region_country_code = "DE"
default_backend_identifier = spain_region_country_code
backend_generation_days = 7 * 2
daily_summary_days = 7 * 4 * 3
daily_plot_days = 7 * 4
tek_dumps_load_limit = daily_summary_days + 1
###Output
_____no_output_____
###Markdown
Parameters
###Code
environment_backend_identifier = os.environ.get("RADARCOVID_REPORT__BACKEND_IDENTIFIER")
if environment_backend_identifier:
report_backend_identifier = environment_backend_identifier
else:
report_backend_identifier = default_backend_identifier
report_backend_identifier
environment_enable_multi_backend_download = \
os.environ.get("RADARCOVID_REPORT__ENABLE_MULTI_BACKEND_DOWNLOAD")
if environment_enable_multi_backend_download:
report_backend_identifiers = None
else:
report_backend_identifiers = [report_backend_identifier]
report_backend_identifiers
environment_invalid_shared_diagnoses_dates = \
os.environ.get("RADARCOVID_REPORT__INVALID_SHARED_DIAGNOSES_DATES")
if environment_invalid_shared_diagnoses_dates:
invalid_shared_diagnoses_dates = environment_invalid_shared_diagnoses_dates.split(",")
else:
invalid_shared_diagnoses_dates = []
invalid_shared_diagnoses_dates
###Output
_____no_output_____
###Markdown
COVID-19 Cases
###Code
report_backend_client = \
exposure_notification_io.get_backend_client_with_identifier(
backend_identifier=report_backend_identifier)
@retry.retry(tries=10, delay=10, backoff=1.1, jitter=(0, 10))
def download_cases_dataframe():
return pd.read_csv("https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv")
confirmed_df_ = download_cases_dataframe()
confirmed_df_.iloc[0]
confirmed_df = confirmed_df_.copy()
confirmed_df = confirmed_df[["date", "new_cases", "iso_code"]]
confirmed_df.rename(
columns={
"date": "sample_date",
"iso_code": "country_code",
},
inplace=True)
def convert_iso_alpha_3_to_alpha_2(x):
try:
return pycountry.countries.get(alpha_3=x).alpha_2
except Exception as e:
logging.info(f"Error converting country ISO Alpha 3 code '{x}': {repr(e)}")
return None
confirmed_df["country_code"] = confirmed_df.country_code.apply(convert_iso_alpha_3_to_alpha_2)
confirmed_df.dropna(inplace=True)
confirmed_df["sample_date"] = pd.to_datetime(confirmed_df.sample_date, dayfirst=True)
confirmed_df["sample_date"] = confirmed_df.sample_date.dt.strftime("%Y-%m-%d")
confirmed_df.sort_values("sample_date", inplace=True)
confirmed_df.tail()
confirmed_days = pd.date_range(
start=confirmed_df.iloc[0].sample_date,
end=extraction_datetime)
confirmed_days_df = pd.DataFrame(data=confirmed_days, columns=["sample_date"])
confirmed_days_df["sample_date_string"] = \
confirmed_days_df.sample_date.dt.strftime("%Y-%m-%d")
confirmed_days_df.tail()
def sort_source_regions_for_display(source_regions: list) -> list:
if report_backend_identifier in source_regions:
source_regions = [report_backend_identifier] + \
list(sorted(set(source_regions).difference([report_backend_identifier])))
else:
source_regions = list(sorted(source_regions))
return source_regions
report_source_regions = report_backend_client.source_regions_for_date(
date=extraction_datetime.date())
report_source_regions = sort_source_regions_for_display(
source_regions=report_source_regions)
report_source_regions
def get_cases_dataframe(source_regions_for_date_function, columns_suffix=None):
source_regions_at_date_df = confirmed_days_df.copy()
source_regions_at_date_df["source_regions_at_date"] = \
source_regions_at_date_df.sample_date.apply(
lambda x: source_regions_for_date_function(date=x))
source_regions_at_date_df.sort_values("sample_date", inplace=True)
source_regions_at_date_df["_source_regions_group"] = source_regions_at_date_df. \
source_regions_at_date.apply(lambda x: ",".join(sort_source_regions_for_display(x)))
source_regions_at_date_df.tail()
#%%
source_regions_for_summary_df_ = \
source_regions_at_date_df[["sample_date", "_source_regions_group"]].copy()
source_regions_for_summary_df_.rename(columns={"_source_regions_group": "source_regions"}, inplace=True)
source_regions_for_summary_df_.tail()
#%%
confirmed_output_columns = ["sample_date", "new_cases", "covid_cases"]
confirmed_output_df = pd.DataFrame(columns=confirmed_output_columns)
for source_regions_group, source_regions_group_series in \
source_regions_at_date_df.groupby("_source_regions_group"):
source_regions_set = set(source_regions_group.split(","))
confirmed_source_regions_set_df = \
confirmed_df[confirmed_df.country_code.isin(source_regions_set)].copy()
confirmed_source_regions_group_df = \
confirmed_source_regions_set_df.groupby("sample_date").new_cases.sum() \
.reset_index().sort_values("sample_date")
confirmed_source_regions_group_df = \
confirmed_source_regions_group_df.merge(
confirmed_days_df[["sample_date_string"]].rename(
columns={"sample_date_string": "sample_date"}),
how="right")
confirmed_source_regions_group_df["new_cases"] = \
confirmed_source_regions_group_df["new_cases"].clip(lower=0)
confirmed_source_regions_group_df["covid_cases"] = \
confirmed_source_regions_group_df.new_cases.rolling(7, min_periods=0).mean().round()
confirmed_source_regions_group_df = \
confirmed_source_regions_group_df[confirmed_output_columns]
confirmed_source_regions_group_df = confirmed_source_regions_group_df.replace(0, np.nan)
confirmed_source_regions_group_df.fillna(method="ffill", inplace=True)
confirmed_source_regions_group_df = \
confirmed_source_regions_group_df[
confirmed_source_regions_group_df.sample_date.isin(
source_regions_group_series.sample_date_string)]
confirmed_output_df = confirmed_output_df.append(confirmed_source_regions_group_df)
result_df = confirmed_output_df.copy()
result_df.tail()
#%%
result_df.rename(columns={"sample_date": "sample_date_string"}, inplace=True)
result_df = confirmed_days_df[["sample_date_string"]].merge(result_df, how="left")
result_df.sort_values("sample_date_string", inplace=True)
result_df.fillna(method="ffill", inplace=True)
result_df.tail()
#%%
result_df[["new_cases", "covid_cases"]].plot()
if columns_suffix:
result_df.rename(
columns={
"new_cases": "new_cases_" + columns_suffix,
"covid_cases": "covid_cases_" + columns_suffix},
inplace=True)
return result_df, source_regions_for_summary_df_
confirmed_eu_df, source_regions_for_summary_df = get_cases_dataframe(
report_backend_client.source_regions_for_date)
confirmed_es_df, _ = get_cases_dataframe(
lambda date: [spain_region_country_code],
columns_suffix=spain_region_country_code.lower())
###Output
_____no_output_____
###Markdown
Extract API TEKs
###Code
raw_zip_path_prefix = "Data/TEKs/Raw/"
base_backend_identifiers = [report_backend_identifier]
multi_backend_exposure_keys_df = \
exposure_notification_io.download_exposure_keys_from_backends(
backend_identifiers=report_backend_identifiers,
generation_days=backend_generation_days,
fail_on_error_backend_identifiers=base_backend_identifiers,
save_raw_zip_path_prefix=raw_zip_path_prefix)
multi_backend_exposure_keys_df["region"] = multi_backend_exposure_keys_df["backend_identifier"]
multi_backend_exposure_keys_df.rename(
columns={
"generation_datetime": "sample_datetime",
"generation_date_string": "sample_date_string",
},
inplace=True)
multi_backend_exposure_keys_df.head()
early_teks_df = multi_backend_exposure_keys_df[
multi_backend_exposure_keys_df.rolling_period < 144].copy()
early_teks_df["rolling_period_in_hours"] = early_teks_df.rolling_period / 6
early_teks_df[early_teks_df.sample_date_string != extraction_date] \
.rolling_period_in_hours.hist(bins=list(range(24)))
early_teks_df[early_teks_df.sample_date_string == extraction_date] \
.rolling_period_in_hours.hist(bins=list(range(24)))
multi_backend_exposure_keys_df = multi_backend_exposure_keys_df[[
"sample_date_string", "region", "key_data"]]
multi_backend_exposure_keys_df.head()
active_regions = \
multi_backend_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist()
active_regions
multi_backend_summary_df = multi_backend_exposure_keys_df.groupby(
["sample_date_string", "region"]).key_data.nunique().reset_index() \
.pivot(index="sample_date_string", columns="region") \
.sort_index(ascending=False)
multi_backend_summary_df.rename(
columns={"key_data": "shared_teks_by_generation_date"},
inplace=True)
multi_backend_summary_df.rename_axis("sample_date", inplace=True)
multi_backend_summary_df = multi_backend_summary_df.fillna(0).astype(int)
multi_backend_summary_df = multi_backend_summary_df.head(backend_generation_days)
multi_backend_summary_df.head()
def compute_keys_cross_sharing(x):
teks_x = x.key_data_x.item()
common_teks = set(teks_x).intersection(x.key_data_y.item())
common_teks_fraction = len(common_teks) / len(teks_x)
return pd.Series(dict(
common_teks=common_teks,
common_teks_fraction=common_teks_fraction,
))
multi_backend_exposure_keys_by_region_df = \
multi_backend_exposure_keys_df.groupby("region").key_data.unique().reset_index()
multi_backend_exposure_keys_by_region_df["_merge"] = True
multi_backend_exposure_keys_by_region_combination_df = \
multi_backend_exposure_keys_by_region_df.merge(
multi_backend_exposure_keys_by_region_df, on="_merge")
multi_backend_exposure_keys_by_region_combination_df.drop(
columns=["_merge"], inplace=True)
if multi_backend_exposure_keys_by_region_combination_df.region_x.nunique() > 1:
multi_backend_exposure_keys_by_region_combination_df = \
multi_backend_exposure_keys_by_region_combination_df[
multi_backend_exposure_keys_by_region_combination_df.region_x !=
multi_backend_exposure_keys_by_region_combination_df.region_y]
multi_backend_exposure_keys_cross_sharing_df = \
multi_backend_exposure_keys_by_region_combination_df \
.groupby(["region_x", "region_y"]) \
.apply(compute_keys_cross_sharing) \
.reset_index()
multi_backend_cross_sharing_summary_df = \
multi_backend_exposure_keys_cross_sharing_df.pivot_table(
values=["common_teks_fraction"],
columns="region_x",
index="region_y",
aggfunc=lambda x: x.item())
multi_backend_cross_sharing_summary_df
multi_backend_without_active_region_exposure_keys_df = \
multi_backend_exposure_keys_df[multi_backend_exposure_keys_df.region != report_backend_identifier]
multi_backend_without_active_region = \
multi_backend_without_active_region_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist()
multi_backend_without_active_region
exposure_keys_summary_df = multi_backend_exposure_keys_df[
multi_backend_exposure_keys_df.region == report_backend_identifier]
exposure_keys_summary_df.drop(columns=["region"], inplace=True)
exposure_keys_summary_df = \
exposure_keys_summary_df.groupby(["sample_date_string"]).key_data.nunique().to_frame()
exposure_keys_summary_df = \
exposure_keys_summary_df.reset_index().set_index("sample_date_string")
exposure_keys_summary_df.sort_index(ascending=False, inplace=True)
exposure_keys_summary_df.rename(columns={"key_data": "shared_teks_by_generation_date"}, inplace=True)
exposure_keys_summary_df.head()
###Output
/opt/hostedtoolcache/Python/3.8.11/x64/lib/python3.8/site-packages/pandas/core/frame.py:4110: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
return super().drop(
###Markdown
Dump API TEKs
###Code
tek_list_df = multi_backend_exposure_keys_df[
["sample_date_string", "region", "key_data"]].copy()
tek_list_df["key_data"] = tek_list_df["key_data"].apply(str)
tek_list_df.rename(columns={
"sample_date_string": "sample_date",
"key_data": "tek_list"}, inplace=True)
tek_list_df = tek_list_df.groupby(
["sample_date", "region"]).tek_list.unique().reset_index()
tek_list_df["extraction_date"] = extraction_date
tek_list_df["extraction_date_with_hour"] = extraction_date_with_hour
tek_list_path_prefix = "Data/TEKs/"
tek_list_current_path = tek_list_path_prefix + f"/Current/RadarCOVID-TEKs.json"
tek_list_daily_path = tek_list_path_prefix + f"Daily/RadarCOVID-TEKs-{extraction_date}.json"
tek_list_hourly_path = tek_list_path_prefix + f"Hourly/RadarCOVID-TEKs-{extraction_date_with_hour}.json"
for path in [tek_list_current_path, tek_list_daily_path, tek_list_hourly_path]:
os.makedirs(os.path.dirname(path), exist_ok=True)
tek_list_base_df = tek_list_df[tek_list_df.region == report_backend_identifier]
tek_list_base_df.drop(columns=["extraction_date", "extraction_date_with_hour"]).to_json(
tek_list_current_path,
lines=True, orient="records")
tek_list_base_df.drop(columns=["extraction_date_with_hour"]).to_json(
tek_list_daily_path,
lines=True, orient="records")
tek_list_base_df.to_json(
tek_list_hourly_path,
lines=True, orient="records")
tek_list_base_df.head()
###Output
_____no_output_____
###Markdown
Load TEK Dumps
###Code
import glob
def load_extracted_teks(mode, region=None, limit=None) -> pd.DataFrame:
extracted_teks_df = pd.DataFrame(columns=["region"])
file_paths = list(reversed(sorted(glob.glob(tek_list_path_prefix + mode + "/RadarCOVID-TEKs-*.json"))))
if limit:
file_paths = file_paths[:limit]
for file_path in file_paths:
logging.info(f"Loading TEKs from '{file_path}'...")
iteration_extracted_teks_df = pd.read_json(file_path, lines=True)
extracted_teks_df = extracted_teks_df.append(
iteration_extracted_teks_df, sort=False)
extracted_teks_df["region"] = \
extracted_teks_df.region.fillna(spain_region_country_code).copy()
if region:
extracted_teks_df = \
extracted_teks_df[extracted_teks_df.region == region]
return extracted_teks_df
daily_extracted_teks_df = load_extracted_teks(
mode="Daily",
region=report_backend_identifier,
limit=tek_dumps_load_limit)
daily_extracted_teks_df.head()
exposure_keys_summary_df_ = daily_extracted_teks_df \
.sort_values("extraction_date", ascending=False) \
.groupby("sample_date").tek_list.first() \
.to_frame()
exposure_keys_summary_df_.index.name = "sample_date_string"
exposure_keys_summary_df_["tek_list"] = \
exposure_keys_summary_df_.tek_list.apply(len)
exposure_keys_summary_df_ = exposure_keys_summary_df_ \
.rename(columns={"tek_list": "shared_teks_by_generation_date"}) \
.sort_index(ascending=False)
exposure_keys_summary_df = exposure_keys_summary_df_
exposure_keys_summary_df.head()
###Output
_____no_output_____
###Markdown
Daily New TEKs
###Code
tek_list_df = daily_extracted_teks_df.groupby("extraction_date").tek_list.apply(
lambda x: set(sum(x, []))).reset_index()
tek_list_df = tek_list_df.set_index("extraction_date").sort_index(ascending=True)
tek_list_df.head()
def compute_teks_by_generation_and_upload_date(date):
day_new_teks_set_df = tek_list_df.copy().diff()
try:
day_new_teks_set = day_new_teks_set_df[
day_new_teks_set_df.index == date].tek_list.item()
except ValueError:
day_new_teks_set = None
if pd.isna(day_new_teks_set):
day_new_teks_set = set()
day_new_teks_df = daily_extracted_teks_df[
daily_extracted_teks_df.extraction_date == date].copy()
day_new_teks_df["shared_teks"] = \
day_new_teks_df.tek_list.apply(lambda x: set(x).intersection(day_new_teks_set))
day_new_teks_df["shared_teks"] = \
day_new_teks_df.shared_teks.apply(len)
day_new_teks_df["upload_date"] = date
day_new_teks_df.rename(columns={"sample_date": "generation_date"}, inplace=True)
day_new_teks_df = day_new_teks_df[
["upload_date", "generation_date", "shared_teks"]]
day_new_teks_df["generation_to_upload_days"] = \
(pd.to_datetime(day_new_teks_df.upload_date) -
pd.to_datetime(day_new_teks_df.generation_date)).dt.days
day_new_teks_df = day_new_teks_df[day_new_teks_df.shared_teks > 0]
return day_new_teks_df
shared_teks_generation_to_upload_df = pd.DataFrame()
for upload_date in daily_extracted_teks_df.extraction_date.unique():
shared_teks_generation_to_upload_df = \
shared_teks_generation_to_upload_df.append(
compute_teks_by_generation_and_upload_date(date=upload_date))
shared_teks_generation_to_upload_df \
.sort_values(["upload_date", "generation_date"], ascending=False, inplace=True)
shared_teks_generation_to_upload_df.tail()
today_new_teks_df = \
shared_teks_generation_to_upload_df[
shared_teks_generation_to_upload_df.upload_date == extraction_date].copy()
today_new_teks_df.tail()
if not today_new_teks_df.empty:
today_new_teks_df.set_index("generation_to_upload_days") \
.sort_index().shared_teks.plot.bar()
generation_to_upload_period_pivot_df = \
shared_teks_generation_to_upload_df[
["upload_date", "generation_to_upload_days", "shared_teks"]] \
.pivot(index="upload_date", columns="generation_to_upload_days") \
.sort_index(ascending=False).fillna(0).astype(int) \
.droplevel(level=0, axis=1)
generation_to_upload_period_pivot_df.head()
new_tek_df = tek_list_df.diff().tek_list.apply(
lambda x: len(x) if not pd.isna(x) else None).to_frame().reset_index()
new_tek_df.rename(columns={
"tek_list": "shared_teks_by_upload_date",
"extraction_date": "sample_date_string",}, inplace=True)
new_tek_df.tail()
shared_teks_uploaded_on_generation_date_df = shared_teks_generation_to_upload_df[
shared_teks_generation_to_upload_df.generation_to_upload_days == 0] \
[["upload_date", "shared_teks"]].rename(
columns={
"upload_date": "sample_date_string",
"shared_teks": "shared_teks_uploaded_on_generation_date",
})
shared_teks_uploaded_on_generation_date_df.head()
estimated_shared_diagnoses_df = shared_teks_generation_to_upload_df \
.groupby(["upload_date"]).shared_teks.max().reset_index() \
.sort_values(["upload_date"], ascending=False) \
.rename(columns={
"upload_date": "sample_date_string",
"shared_teks": "shared_diagnoses",
})
invalid_shared_diagnoses_dates_mask = \
estimated_shared_diagnoses_df.sample_date_string.isin(invalid_shared_diagnoses_dates)
estimated_shared_diagnoses_df[invalid_shared_diagnoses_dates_mask] = 0
estimated_shared_diagnoses_df.head()
###Output
_____no_output_____
###Markdown
Hourly New TEKs
###Code
hourly_extracted_teks_df = load_extracted_teks(
mode="Hourly", region=report_backend_identifier, limit=25)
hourly_extracted_teks_df.head()
hourly_new_tek_count_df = hourly_extracted_teks_df \
.groupby("extraction_date_with_hour").tek_list. \
apply(lambda x: set(sum(x, []))).reset_index().copy()
hourly_new_tek_count_df = hourly_new_tek_count_df.set_index("extraction_date_with_hour") \
.sort_index(ascending=True)
hourly_new_tek_count_df["new_tek_list"] = hourly_new_tek_count_df.tek_list.diff()
hourly_new_tek_count_df["new_tek_count"] = hourly_new_tek_count_df.new_tek_list.apply(
lambda x: len(x) if not pd.isna(x) else 0)
hourly_new_tek_count_df.rename(columns={
"new_tek_count": "shared_teks_by_upload_date"}, inplace=True)
hourly_new_tek_count_df = hourly_new_tek_count_df.reset_index()[[
"extraction_date_with_hour", "shared_teks_by_upload_date"]]
hourly_new_tek_count_df.head()
hourly_summary_df = hourly_new_tek_count_df.copy()
hourly_summary_df.set_index("extraction_date_with_hour", inplace=True)
hourly_summary_df = hourly_summary_df.fillna(0).astype(int).reset_index()
hourly_summary_df["datetime_utc"] = pd.to_datetime(
hourly_summary_df.extraction_date_with_hour, format="%Y-%m-%d@%H")
hourly_summary_df.set_index("datetime_utc", inplace=True)
hourly_summary_df = hourly_summary_df.tail(-1)
hourly_summary_df.head()
###Output
_____no_output_____
###Markdown
Official Statistics
###Code
import requests
import pandas.io.json
official_stats_response = requests.get("https://radarcovid.covid19.gob.es/kpi/statistics/basics")
official_stats_response.raise_for_status()
official_stats_df_ = pandas.io.json.json_normalize(official_stats_response.json())
official_stats_df = official_stats_df_.copy()
official_stats_df["date"] = pd.to_datetime(official_stats_df["date"], dayfirst=True)
official_stats_df.head()
official_stats_column_map = {
"date": "sample_date",
"applicationsDownloads.totalAcummulated": "app_downloads_es_accumulated",
"communicatedContagions.totalAcummulated": "shared_diagnoses_es_accumulated",
}
accumulated_suffix = "_accumulated"
accumulated_values_columns = \
list(filter(lambda x: x.endswith(accumulated_suffix), official_stats_column_map.values()))
interpolated_values_columns = \
list(map(lambda x: x[:-len(accumulated_suffix)], accumulated_values_columns))
official_stats_df = \
official_stats_df[official_stats_column_map.keys()] \
.rename(columns=official_stats_column_map)
official_stats_df["extraction_date"] = extraction_date
official_stats_df.head()
official_stats_path = "Data/Statistics/Current/RadarCOVID-Statistics.json"
previous_official_stats_df = pd.read_json(official_stats_path, orient="records", lines=True)
previous_official_stats_df["sample_date"] = pd.to_datetime(previous_official_stats_df["sample_date"], dayfirst=True)
official_stats_df = official_stats_df.append(previous_official_stats_df)
official_stats_df.head()
official_stats_df = official_stats_df[~(official_stats_df.shared_diagnoses_es_accumulated == 0)]
official_stats_df.sort_values("extraction_date", ascending=False, inplace=True)
official_stats_df.drop_duplicates(subset=["sample_date"], keep="first", inplace=True)
official_stats_df.head()
official_stats_stored_df = official_stats_df.copy()
official_stats_stored_df["sample_date"] = official_stats_stored_df.sample_date.dt.strftime("%Y-%m-%d")
official_stats_stored_df.to_json(official_stats_path, orient="records", lines=True)
official_stats_df.drop(columns=["extraction_date"], inplace=True)
official_stats_df = confirmed_days_df.merge(official_stats_df, how="left")
official_stats_df.sort_values("sample_date", ascending=False, inplace=True)
official_stats_df.head()
official_stats_df[accumulated_values_columns] = \
official_stats_df[accumulated_values_columns] \
.astype(float).interpolate(limit_area="inside")
official_stats_df[interpolated_values_columns] = \
official_stats_df[accumulated_values_columns].diff(periods=-1)
official_stats_df.drop(columns="sample_date", inplace=True)
official_stats_df.head()
###Output
_____no_output_____
###Markdown
Data Merge
###Code
result_summary_df = exposure_keys_summary_df.merge(
new_tek_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
result_summary_df = result_summary_df.merge(
shared_teks_uploaded_on_generation_date_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
result_summary_df = result_summary_df.merge(
estimated_shared_diagnoses_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
result_summary_df = result_summary_df.merge(
official_stats_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
result_summary_df = confirmed_eu_df.tail(daily_summary_days).merge(
result_summary_df, on=["sample_date_string"], how="left")
result_summary_df.head()
result_summary_df = confirmed_es_df.tail(daily_summary_days).merge(
result_summary_df, on=["sample_date_string"], how="left")
result_summary_df.head()
result_summary_df["sample_date"] = pd.to_datetime(result_summary_df.sample_date_string)
result_summary_df = result_summary_df.merge(source_regions_for_summary_df, how="left")
result_summary_df.set_index(["sample_date", "source_regions"], inplace=True)
result_summary_df.drop(columns=["sample_date_string"], inplace=True)
result_summary_df.sort_index(ascending=False, inplace=True)
result_summary_df.head()
with pd.option_context("mode.use_inf_as_na", True):
result_summary_df = result_summary_df.fillna(0).astype(int)
result_summary_df["teks_per_shared_diagnosis"] = \
(result_summary_df.shared_teks_by_upload_date / result_summary_df.shared_diagnoses).fillna(0)
result_summary_df["shared_diagnoses_per_covid_case"] = \
(result_summary_df.shared_diagnoses / result_summary_df.covid_cases).fillna(0)
result_summary_df["shared_diagnoses_per_covid_case_es"] = \
(result_summary_df.shared_diagnoses_es / result_summary_df.covid_cases_es).fillna(0)
result_summary_df.head(daily_plot_days)
def compute_aggregated_results_summary(days) -> pd.DataFrame:
aggregated_result_summary_df = result_summary_df.copy()
aggregated_result_summary_df["covid_cases_for_ratio"] = \
aggregated_result_summary_df.covid_cases.mask(
aggregated_result_summary_df.shared_diagnoses == 0, 0)
aggregated_result_summary_df["covid_cases_for_ratio_es"] = \
aggregated_result_summary_df.covid_cases_es.mask(
aggregated_result_summary_df.shared_diagnoses_es == 0, 0)
aggregated_result_summary_df = aggregated_result_summary_df \
.sort_index(ascending=True).fillna(0).rolling(days).agg({
"covid_cases": "sum",
"covid_cases_es": "sum",
"covid_cases_for_ratio": "sum",
"covid_cases_for_ratio_es": "sum",
"shared_teks_by_generation_date": "sum",
"shared_teks_by_upload_date": "sum",
"shared_diagnoses": "sum",
"shared_diagnoses_es": "sum",
}).sort_index(ascending=False)
with pd.option_context("mode.use_inf_as_na", True):
aggregated_result_summary_df = aggregated_result_summary_df.fillna(0).astype(int)
aggregated_result_summary_df["teks_per_shared_diagnosis"] = \
(aggregated_result_summary_df.shared_teks_by_upload_date /
aggregated_result_summary_df.covid_cases_for_ratio).fillna(0)
aggregated_result_summary_df["shared_diagnoses_per_covid_case"] = \
(aggregated_result_summary_df.shared_diagnoses /
aggregated_result_summary_df.covid_cases_for_ratio).fillna(0)
aggregated_result_summary_df["shared_diagnoses_per_covid_case_es"] = \
(aggregated_result_summary_df.shared_diagnoses_es /
aggregated_result_summary_df.covid_cases_for_ratio_es).fillna(0)
return aggregated_result_summary_df
aggregated_result_with_7_days_window_summary_df = compute_aggregated_results_summary(days=7)
aggregated_result_with_7_days_window_summary_df.head()
last_7_days_summary = aggregated_result_with_7_days_window_summary_df.to_dict(orient="records")[1]
last_7_days_summary
aggregated_result_with_14_days_window_summary_df = compute_aggregated_results_summary(days=13)
last_14_days_summary = aggregated_result_with_14_days_window_summary_df.to_dict(orient="records")[1]
last_14_days_summary
###Output
_____no_output_____
###Markdown
Report Results
###Code
display_column_name_mapping = {
"sample_date": "Sample\u00A0Date\u00A0(UTC)",
"source_regions": "Source Countries",
"datetime_utc": "Timestamp (UTC)",
"upload_date": "Upload Date (UTC)",
"generation_to_upload_days": "Generation to Upload Period in Days",
"region": "Backend",
"region_x": "Backend\u00A0(A)",
"region_y": "Backend\u00A0(B)",
"common_teks": "Common TEKs Shared Between Backends",
"common_teks_fraction": "Fraction of TEKs in Backend (A) Available in Backend (B)",
"covid_cases": "COVID-19 Cases (Source Countries)",
"shared_teks_by_generation_date": "Shared TEKs by Generation Date (Source Countries)",
"shared_teks_by_upload_date": "Shared TEKs by Upload Date (Source Countries)",
"shared_teks_uploaded_on_generation_date": "Shared TEKs Uploaded on Generation Date (Source Countries)",
"shared_diagnoses": "Shared Diagnoses (Source Countries – Estimation)",
"teks_per_shared_diagnosis": "TEKs Uploaded per Shared Diagnosis (Source Countries)",
"shared_diagnoses_per_covid_case": "Usage Ratio (Source Countries)",
"covid_cases_es": "COVID-19 Cases (Spain)",
"app_downloads_es": "App Downloads (Spain – Official)",
"shared_diagnoses_es": "Shared Diagnoses (Spain – Official)",
"shared_diagnoses_per_covid_case_es": "Usage Ratio (Spain)",
}
summary_columns = [
"covid_cases",
"shared_teks_by_generation_date",
"shared_teks_by_upload_date",
"shared_teks_uploaded_on_generation_date",
"shared_diagnoses",
"teks_per_shared_diagnosis",
"shared_diagnoses_per_covid_case",
"covid_cases_es",
"app_downloads_es",
"shared_diagnoses_es",
"shared_diagnoses_per_covid_case_es",
]
summary_percentage_columns= [
"shared_diagnoses_per_covid_case_es",
"shared_diagnoses_per_covid_case",
]
###Output
_____no_output_____
###Markdown
Daily Summary Table
###Code
result_summary_df_ = result_summary_df.copy()
result_summary_df = result_summary_df[summary_columns]
result_summary_with_display_names_df = result_summary_df \
.rename_axis(index=display_column_name_mapping) \
.rename(columns=display_column_name_mapping)
result_summary_with_display_names_df
###Output
_____no_output_____
###Markdown
Daily Summary Plots
###Code
result_plot_summary_df = result_summary_df.head(daily_plot_days)[summary_columns] \
.droplevel(level=["source_regions"]) \
.rename_axis(index=display_column_name_mapping) \
.rename(columns=display_column_name_mapping)
summary_ax_list = result_plot_summary_df.sort_index(ascending=True).plot.bar(
title=f"Daily Summary",
rot=45, subplots=True, figsize=(15, 30), legend=False)
ax_ = summary_ax_list[0]
ax_.get_figure().tight_layout()
ax_.get_figure().subplots_adjust(top=0.95)
_ = ax_.set_xticklabels(sorted(result_plot_summary_df.index.strftime("%Y-%m-%d").tolist()))
for percentage_column in summary_percentage_columns:
percentage_column_index = summary_columns.index(percentage_column)
summary_ax_list[percentage_column_index].yaxis \
.set_major_formatter(matplotlib.ticker.PercentFormatter(1.0))
###Output
/opt/hostedtoolcache/Python/3.8.11/x64/lib/python3.8/site-packages/pandas/plotting/_matplotlib/tools.py:307: MatplotlibDeprecationWarning:
The rowNum attribute was deprecated in Matplotlib 3.2 and will be removed two minor releases later. Use ax.get_subplotspec().rowspan.start instead.
layout[ax.rowNum, ax.colNum] = ax.get_visible()
/opt/hostedtoolcache/Python/3.8.11/x64/lib/python3.8/site-packages/pandas/plotting/_matplotlib/tools.py:307: MatplotlibDeprecationWarning:
The colNum attribute was deprecated in Matplotlib 3.2 and will be removed two minor releases later. Use ax.get_subplotspec().colspan.start instead.
layout[ax.rowNum, ax.colNum] = ax.get_visible()
/opt/hostedtoolcache/Python/3.8.11/x64/lib/python3.8/site-packages/pandas/plotting/_matplotlib/tools.py:313: MatplotlibDeprecationWarning:
The rowNum attribute was deprecated in Matplotlib 3.2 and will be removed two minor releases later. Use ax.get_subplotspec().rowspan.start instead.
if not layout[ax.rowNum + 1, ax.colNum]:
/opt/hostedtoolcache/Python/3.8.11/x64/lib/python3.8/site-packages/pandas/plotting/_matplotlib/tools.py:313: MatplotlibDeprecationWarning:
The colNum attribute was deprecated in Matplotlib 3.2 and will be removed two minor releases later. Use ax.get_subplotspec().colspan.start instead.
if not layout[ax.rowNum + 1, ax.colNum]:
###Markdown
Daily Generation to Upload Period Table
###Code
display_generation_to_upload_period_pivot_df = \
generation_to_upload_period_pivot_df \
.head(backend_generation_days)
display_generation_to_upload_period_pivot_df \
.head(backend_generation_days) \
.rename_axis(columns=display_column_name_mapping) \
.rename_axis(index=display_column_name_mapping)
fig, generation_to_upload_period_pivot_table_ax = plt.subplots(
figsize=(12, 1 + 0.6 * len(display_generation_to_upload_period_pivot_df)))
generation_to_upload_period_pivot_table_ax.set_title(
"Shared TEKs Generation to Upload Period Table")
sns.heatmap(
data=display_generation_to_upload_period_pivot_df
.rename_axis(columns=display_column_name_mapping)
.rename_axis(index=display_column_name_mapping),
fmt=".0f",
annot=True,
ax=generation_to_upload_period_pivot_table_ax)
generation_to_upload_period_pivot_table_ax.get_figure().tight_layout()
###Output
_____no_output_____
###Markdown
Hourly Summary Plots
###Code
hourly_summary_ax_list = hourly_summary_df \
.rename_axis(index=display_column_name_mapping) \
.rename(columns=display_column_name_mapping) \
.plot.bar(
title=f"Last 24h Summary",
rot=45, subplots=True, legend=False)
ax_ = hourly_summary_ax_list[-1]
ax_.get_figure().tight_layout()
ax_.get_figure().subplots_adjust(top=0.9)
_ = ax_.set_xticklabels(sorted(hourly_summary_df.index.strftime("%Y-%m-%d@%H").tolist()))
###Output
_____no_output_____
###Markdown
Publish Results
###Code
github_repository = os.environ.get("GITHUB_REPOSITORY")
if github_repository is None:
github_repository = "pvieito/Radar-STATS"
github_project_base_url = "https://github.com/" + github_repository
display_formatters = {
display_column_name_mapping["teks_per_shared_diagnosis"]: lambda x: f"{x:.2f}" if x != 0 else "",
display_column_name_mapping["shared_diagnoses_per_covid_case"]: lambda x: f"{x:.2%}" if x != 0 else "",
display_column_name_mapping["shared_diagnoses_per_covid_case_es"]: lambda x: f"{x:.2%}" if x != 0 else "",
}
general_columns = \
list(filter(lambda x: x not in display_formatters, display_column_name_mapping.values()))
general_formatter = lambda x: f"{x}" if x != 0 else ""
display_formatters.update(dict(map(lambda x: (x, general_formatter), general_columns)))
daily_summary_table_html = result_summary_with_display_names_df \
.head(daily_plot_days) \
.rename_axis(index=display_column_name_mapping) \
.rename(columns=display_column_name_mapping) \
.to_html(formatters=display_formatters)
multi_backend_summary_table_html = multi_backend_summary_df \
.head(daily_plot_days) \
.rename_axis(columns=display_column_name_mapping) \
.rename(columns=display_column_name_mapping) \
.rename_axis(index=display_column_name_mapping) \
.to_html(formatters=display_formatters)
def format_multi_backend_cross_sharing_fraction(x):
if pd.isna(x):
return "-"
elif round(x * 100, 1) == 0:
return ""
else:
return f"{x:.1%}"
multi_backend_cross_sharing_summary_table_html = multi_backend_cross_sharing_summary_df \
.rename_axis(columns=display_column_name_mapping) \
.rename(columns=display_column_name_mapping) \
.rename_axis(index=display_column_name_mapping) \
.to_html(
classes="table-center",
formatters=display_formatters,
float_format=format_multi_backend_cross_sharing_fraction)
multi_backend_cross_sharing_summary_table_html = \
multi_backend_cross_sharing_summary_table_html \
.replace("<tr>","<tr style=\"text-align: center;\">")
extraction_date_result_summary_df = \
result_summary_df[result_summary_df.index.get_level_values("sample_date") == extraction_date]
extraction_date_result_hourly_summary_df = \
hourly_summary_df[hourly_summary_df.extraction_date_with_hour == extraction_date_with_hour]
covid_cases = \
extraction_date_result_summary_df.covid_cases.item()
shared_teks_by_generation_date = \
extraction_date_result_summary_df.shared_teks_by_generation_date.item()
shared_teks_by_upload_date = \
extraction_date_result_summary_df.shared_teks_by_upload_date.item()
shared_diagnoses = \
extraction_date_result_summary_df.shared_diagnoses.item()
teks_per_shared_diagnosis = \
extraction_date_result_summary_df.teks_per_shared_diagnosis.item()
shared_diagnoses_per_covid_case = \
extraction_date_result_summary_df.shared_diagnoses_per_covid_case.item()
shared_teks_by_upload_date_last_hour = \
extraction_date_result_hourly_summary_df.shared_teks_by_upload_date.sum().astype(int)
display_source_regions = ", ".join(report_source_regions)
if len(report_source_regions) == 1:
display_brief_source_regions = report_source_regions[0]
else:
display_brief_source_regions = f"{len(report_source_regions)} 🇪🇺"
def get_temporary_image_path() -> str:
return os.path.join(tempfile.gettempdir(), str(uuid.uuid4()) + ".png")
def save_temporary_plot_image(ax):
if isinstance(ax, np.ndarray):
ax = ax[0]
media_path = get_temporary_image_path()
ax.get_figure().savefig(media_path)
return media_path
def save_temporary_dataframe_image(df):
import dataframe_image as dfi
df = df.copy()
df_styler = df.style.format(display_formatters)
media_path = get_temporary_image_path()
dfi.export(df_styler, media_path)
return media_path
summary_plots_image_path = save_temporary_plot_image(
ax=summary_ax_list)
summary_table_image_path = save_temporary_dataframe_image(
df=result_summary_with_display_names_df)
hourly_summary_plots_image_path = save_temporary_plot_image(
ax=hourly_summary_ax_list)
multi_backend_summary_table_image_path = save_temporary_dataframe_image(
df=multi_backend_summary_df)
generation_to_upload_period_pivot_table_image_path = save_temporary_plot_image(
ax=generation_to_upload_period_pivot_table_ax)
###Output
[0902/231031.550971:ERROR:gpu_init.cc(441)] Passthrough is not supported, GL is swiftshader
###Markdown
Save Results
###Code
report_resources_path_prefix = "Data/Resources/Current/RadarCOVID-Report-"
result_summary_df.to_csv(
report_resources_path_prefix + "Summary-Table.csv")
result_summary_df.to_html(
report_resources_path_prefix + "Summary-Table.html")
hourly_summary_df.to_csv(
report_resources_path_prefix + "Hourly-Summary-Table.csv")
multi_backend_summary_df.to_csv(
report_resources_path_prefix + "Multi-Backend-Summary-Table.csv")
multi_backend_cross_sharing_summary_df.to_csv(
report_resources_path_prefix + "Multi-Backend-Cross-Sharing-Summary-Table.csv")
generation_to_upload_period_pivot_df.to_csv(
report_resources_path_prefix + "Generation-Upload-Period-Table.csv")
_ = shutil.copyfile(
summary_plots_image_path,
report_resources_path_prefix + "Summary-Plots.png")
_ = shutil.copyfile(
summary_table_image_path,
report_resources_path_prefix + "Summary-Table.png")
_ = shutil.copyfile(
hourly_summary_plots_image_path,
report_resources_path_prefix + "Hourly-Summary-Plots.png")
_ = shutil.copyfile(
multi_backend_summary_table_image_path,
report_resources_path_prefix + "Multi-Backend-Summary-Table.png")
_ = shutil.copyfile(
generation_to_upload_period_pivot_table_image_path,
report_resources_path_prefix + "Generation-Upload-Period-Table.png")
###Output
_____no_output_____
###Markdown
Publish Results as JSON
###Code
def generate_summary_api_results(df: pd.DataFrame) -> list:
api_df = df.reset_index().copy()
api_df["sample_date_string"] = \
api_df["sample_date"].dt.strftime("%Y-%m-%d")
api_df["source_regions"] = \
api_df["source_regions"].apply(lambda x: x.split(","))
return api_df.to_dict(orient="records")
summary_api_results = \
generate_summary_api_results(df=result_summary_df)
today_summary_api_results = \
generate_summary_api_results(df=extraction_date_result_summary_df)[0]
summary_results = dict(
backend_identifier=report_backend_identifier,
source_regions=report_source_regions,
extraction_datetime=extraction_datetime,
extraction_date=extraction_date,
extraction_date_with_hour=extraction_date_with_hour,
last_hour=dict(
shared_teks_by_upload_date=shared_teks_by_upload_date_last_hour,
shared_diagnoses=0,
),
today=today_summary_api_results,
last_7_days=last_7_days_summary,
last_14_days=last_14_days_summary,
daily_results=summary_api_results)
summary_results = \
json.loads(pd.Series([summary_results]).to_json(orient="records"))[0]
with open(report_resources_path_prefix + "Summary-Results.json", "w") as f:
json.dump(summary_results, f, indent=4)
###Output
_____no_output_____
###Markdown
Publish on README
###Code
with open("Data/Templates/README.md", "r") as f:
readme_contents = f.read()
readme_contents = readme_contents.format(
extraction_date_with_hour=extraction_date_with_hour,
github_project_base_url=github_project_base_url,
daily_summary_table_html=daily_summary_table_html,
multi_backend_summary_table_html=multi_backend_summary_table_html,
multi_backend_cross_sharing_summary_table_html=multi_backend_cross_sharing_summary_table_html,
display_source_regions=display_source_regions)
with open("README.md", "w") as f:
f.write(readme_contents)
###Output
_____no_output_____
###Markdown
Publish on Twitter
###Code
enable_share_to_twitter = os.environ.get("RADARCOVID_REPORT__ENABLE_PUBLISH_ON_TWITTER")
github_event_name = os.environ.get("GITHUB_EVENT_NAME")
if enable_share_to_twitter and github_event_name == "schedule" and \
(shared_teks_by_upload_date_last_hour or not are_today_results_partial):
import tweepy
twitter_api_auth_keys = os.environ["RADARCOVID_REPORT__TWITTER_API_AUTH_KEYS"]
twitter_api_auth_keys = twitter_api_auth_keys.split(":")
auth = tweepy.OAuthHandler(twitter_api_auth_keys[0], twitter_api_auth_keys[1])
auth.set_access_token(twitter_api_auth_keys[2], twitter_api_auth_keys[3])
api = tweepy.API(auth)
summary_plots_media = api.media_upload(summary_plots_image_path)
summary_table_media = api.media_upload(summary_table_image_path)
generation_to_upload_period_pivot_table_image_media = api.media_upload(generation_to_upload_period_pivot_table_image_path)
media_ids = [
summary_plots_media.media_id,
summary_table_media.media_id,
generation_to_upload_period_pivot_table_image_media.media_id,
]
if are_today_results_partial:
today_addendum = " (Partial)"
else:
today_addendum = ""
def format_shared_diagnoses_per_covid_case(value) -> str:
if value == 0:
return "–"
return f"≤{value:.2%}"
display_shared_diagnoses_per_covid_case = \
format_shared_diagnoses_per_covid_case(value=shared_diagnoses_per_covid_case)
display_last_14_days_shared_diagnoses_per_covid_case = \
format_shared_diagnoses_per_covid_case(value=last_14_days_summary["shared_diagnoses_per_covid_case"])
display_last_14_days_shared_diagnoses_per_covid_case_es = \
format_shared_diagnoses_per_covid_case(value=last_14_days_summary["shared_diagnoses_per_covid_case_es"])
status = textwrap.dedent(f"""
#RadarCOVID – {extraction_date_with_hour}
Today{today_addendum}:
- Uploaded TEKs: {shared_teks_by_upload_date:.0f} ({shared_teks_by_upload_date_last_hour:+d} last hour)
- Shared Diagnoses: ≤{shared_diagnoses:.0f}
- Usage Ratio: {display_shared_diagnoses_per_covid_case}
Last 14 Days:
- Usage Ratio (Estimation): {display_last_14_days_shared_diagnoses_per_covid_case}
- Usage Ratio (Official): {display_last_14_days_shared_diagnoses_per_covid_case_es}
Info: {github_project_base_url}#documentation
""")
status = status.encode(encoding="utf-8")
api.update_status(status=status, media_ids=media_ids)
###Output
_____no_output_____
Practices/Practice05_Indexing.ipynb | ###Markdown
Practice with indexing! **Remember:** * Use square brackets to index (`[]`) * You can index lists and strings * Python starts counting at zero! Let's start with our list of pets:
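For example, if `word = 'python'`, then `word[0]` is `'p'`, `word[2]` is `'t'`, and `word[0:3]` is `'pyt'`.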
###Code
# command Python to make a list of pets. Store it in pets
pets = ['dog','cat','turtle','hamster','goldfish','snake','rabbit']
# command Python to print pets
print(pets)
###Output
_____no_output_____
###Markdown
Use indexing to print out cat:
###Code
# command Python to print cat by indexing the pets list
###Output
_____no_output_____
###Markdown
Print the 5th element in your list. What is it? Be sure to check your answer!
###Code
# command Python to print the 5th element in pets
###Output
_____no_output_____
###Markdown
Get turtle, hamster, and goldfish from pets using indexing and store it as pets_subset:
###Code
# command Python to store turtle, hamster, and goldfish from pets into pets_subset
# command Python to print pets_subset
###Output
_____no_output_____
###Markdown
How can you make a list of the first and third elements of pets?
###Code
# make a list of the first and third elements of pets
###Output
_____no_output_____
###Markdown
Let's get the last element of pets (rabbit) and store it as my_pet. Then print it out to make sure you did it correctly!
###Code
# command Python to store the last element of pets in my_pet
# command Python to print my_pet
###Output
_____no_output_____
###Markdown
Print the first three letters of my_pet:
###Code
# command Python to print the first three letters of my_pet
###Output
_____no_output_____ |
LAB/LAB03-2.ipynb | ###Markdown
 Psi4 Lab 03: Introduction to the Potential Energy Function, Part 2
In this part of the lab, you will calculate the heat of formation of glyoxal using theoretical methods, following NIST's CCCBDB web site, http://cccbdb.nist.gov/enthformation.asp. Use B3LYP/6-31G* to perform geometry optimizations plus vibrational frequency calculations for CHOCHO, CH2O, CO, CH4, and CH3CHO. Following that suggestion, you will evaluate the reaction enthalpy along two different pathways:
* I. CHOCHO → CH2O + CO
* II. CHOCHO + CH4 → CH3CHO + CH2O

The second pathway is an isodesmic reaction (same number of each type of bond on each side of the reaction), with 2 C=O, 1 C-C, and 6 C-H bonds on both sides. Use the experimental gas-phase heats of formation at 1 atm and 298.15 K:
* $\Delta$Hf(CH2O, formaldehyde) = -25.98 kcal/mol
* $\Delta$Hf(CO) = -26.42 kcal/mol
* $\Delta$Hf(CH4) = -17.83 kcal/mol
* $\Delta$Hf(CH3CHO, acetaldehyde) = -40.80 kcal/mol

Compare your result to the experimental value at http://webbook.nist.gov/cgi/cbook.cgi?ID=C107222&Units=CAL&Mask=1Thermo-Gas (1 Hartree = 627.5096 kcal/mol) and comment on your findings.
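To make the bookkeeping explicit, this is the standard Hess's-law combination implied by the instructions above (it restates the setup rather than adding a new requirement). For pathway I,
$$\Delta H_{f}(\mathrm{CHOCHO}) = \Delta H_{f}(\mathrm{CH_2O}) + \Delta H_{f}(\mathrm{CO}) - \Delta H_{rxn,\mathrm{I}},$$
where $\Delta H_{rxn,\mathrm{I}} = H(\mathrm{CH_2O}) + H(\mathrm{CO}) - H(\mathrm{CHOCHO})$ is computed from your calculated total enthalpies. For the isodesmic pathway II, analogously,
$$\Delta H_{f}(\mathrm{CHOCHO}) = \Delta H_{f}(\mathrm{CH_3CHO}) + \Delta H_{f}(\mathrm{CH_2O}) - \Delta H_{f}(\mathrm{CH_4}) - \Delta H_{rxn,\mathrm{II}}.$$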
###Code
import psi4
psi4.set_memory('2 GB')
###Output
_____no_output_____
###Markdown
Glyoxal CHOCHO
###Code
# setup Geometry
chocho = psi4.geometry ("""
0 1
C -0.04756536 -0.12415443 -0.00737727
O 1.17975164 -0.12415443 -0.00737727
H -0.63971036 0.81524957 -0.00737727
C -0.86876150 -1.42693488 -0.00732319
O -2.09607850 -1.42693488 -0.00732319
H -0.27661650 -2.36633888 -0.00732319
""")
# setup calculation, optimization
psi4.core.set_output_file('chocho-b3lyp631gs.log', False)
chocho_eng = psi4.optimize('b3lyp/6-31G*', molecule=chocho)
print(chocho_eng)
print(chocho.to_string(dtype='xyz'))
# visualize geometry, copy the xyz between quotes
import fortecubeview
chochoxyz = """
C 0.407660294542 0.644384448798 -0.000004217415
O 1.617099171825 0.627665294831 -0.000056483843
H -0.196180241274 1.574707615761 -0.000009808357
C -0.407660294542 -0.644384448798 0.000004217415
O -1.617099171825 -0.627665294831 0.000056483843
H 0.196180241274 -1.574707615761 0.000009808357
"""
fortecubeview.geom(xyz=chochoxyz )
# perform frequency calculation
psi4.set_options({"normal_modes_write": True})
scf_e, scf_wfn = psi4.frequency('b3lyp/6-31G*', molecule=chocho, return_wfn=True)
# list of frequencies
freqs = scf_wfn.frequencies().to_array()
print(freqs)
# check normal mode file
!ls
# check the thermochemistry functions
with open('chocho-b3lyp631gs.log','r') as f:
lines = f.readlines()[-118:]
print(''.join(lines))
# visualize vibrational frequencies
fortecubeview.vib('chocho-b3lyp631gs.default.69.molden_normal_modes')
###Output
_____no_output_____
###Markdown
CH2O
###Code
# setup Geometry
ch2o = psi4.geometry ("""
0 1
C -1.93987007 0.37493965 -0.04196342
O -0.71255307 0.37493965 -0.04196342
H -2.53201507 1.31434365 -0.04196342
H -2.53201507 -0.56446435 -0.04192442
""")
# setup calculation, optimization
psi4.core.set_output_file('ch2o-b3lyp631gs.log', False)
ch2o_eng = psi4.optimize('b3lyp/6-31G*', molecule=ch2o)
print(ch2o_eng)
print(ch2o.to_string(dtype='xyz'))
# visualize geometry, copy the xyz between quotes
# NOTE: the xyz block below still holds the glyoxal coordinates from the earlier cell;
# paste the optimized CH2O geometry printed above (4 atoms: C, O, H, H) before running.
import fortecubeview
ch2oxyz = """
C 0.407660294542 0.644384448798 -0.000004217415
O 1.617099171825 0.627665294831 -0.000056483843
H -0.196180241274 1.574707615761 -0.000009808357
C -0.407660294542 -0.644384448798 0.000004217415
O -1.617099171825 -0.627665294831 0.000056483843
H 0.196180241274 -1.574707615761 0.000009808357
"""
fortecubeview.geom(xyz=ch2oxyz )
# perform frequency calculation
psi4.set_options({"normal_modes_write": True})
scf_e, scf_wfn = psi4.frequency('b3lyp/6-31G*', molecule=ch2o, return_wfn=True)
# list of frequencies
freqs = scf_wfn.frequencies().to_array()
print(freqs)
# check normal mode file
!ls
# check the thermochemistry functions
with open('ch2o-b3lyp631gs.log','r') as f:
lines = f.readlines()[-118:]
print(''.join(lines))
# visualize vibrational frequencies
fortecubeview.vib('ch2o-b3lyp631gs.default.96.molden_normal_modes')
###Output
_____no_output_____
###Markdown
CO
###Code
# setup Geometry
co = psi4.geometry ("""
0 1
C -1.93987007 0.37493965 -0.04196342
O -0.71255307 0.37493965 -0.04196342
""")
# setup calculation, optimization
psi4.core.set_output_file('co-b3lyp631gs.log', False)
co_eng = psi4.optimize('b3lyp/6-31G*', molecule=co)
print(co_eng)
print(co.to_string(dtype='xyz'))
# visualize geometry, copy the xyz between quotes
import fortecubeview
coxyz = """
C 0.407660294542 0.644384448798 -0.000004217415
O 1.617099171825 0.627665294831 -0.000056483843
"""
fortecubeview.geom(xyz=coxyz )
# perform frequency calculation
psi4.set_options({"normal_modes_write": True})
scf_e, scf_wfn = psi4.frequency('b3lyp/6-31G*', molecule=co, return_wfn=True)
# list of frequencies
freqs = scf_wfn.frequencies().to_array()
print(freqs)
# check normal mode file
!ls
# check the thermochemistry functions
with open('co-b3lyp631gs.log','r') as f:
lines = f.readlines()[-118:]
print(''.join(lines))
# visualize vibrational frequencies
fortecubeview.vib('co-b3lyp631gs.default.96.molden_normal_modes')
###Output
_____no_output_____
###Markdown
CH3CHO
###Code
# setup Geometry
ch3cho = psi4.geometry ("""
0 1
C -0.85078749 1.04206499 0.01113742
O 0.36675656 1.04821887 -0.14331527
H -1.43349725 1.97552776 0.16017491
C -1.67198363 -0.26071546 0.01119150
H -2.71558459 -0.02450152 0.01127730
H -1.43553841 -0.83140358 -0.86248391
H -1.43539896 -0.83141954 0.88481874
""")
# setup calculation, optimization
psi4.core.set_output_file('ch3cho-b3lyp631gs.log', False)
ch3cho_eng = psi4.optimize('b3lyp/6-31G*', molecule=ch3cho)
print(ch3cho_eng)
print(ch3cho.to_string(dtype='xyz'))
# visualize geometry, copy the xyz between quotes
import fortecubeview
ch3choxyz = """
C -0.85078749 1.04206499 0.01113742
O 0.36675656 1.04821887 -0.14331527
H -1.43349725 1.97552776 0.16017491
C -1.67198363 -0.26071546 0.01119150
H -2.71558459 -0.02450152 0.01127730
H -1.43553841 -0.83140358 -0.86248391
H -1.43539896 -0.83141954 0.88481874
"""
fortecubeview.geom(xyz=ch3choxyz )
# perform frequency calculation
psi4.set_options({"normal_modes_write": True})
scf_e, scf_wfn = psi4.frequency('b3lyp/6-31G*', molecule=ch3cho, return_wfn=True)
# list of frequencies
freqs = scf_wfn.frequencies().to_array()
print(freqs)
# check normal mode file
!ls
# check the thermochemistry functions
with open('ch3cho-b3lyp631gs.log','r') as f:
lines = f.readlines()[-118:]
print(''.join(lines))
# visualize vibrational frequencies
fortecubeview.vib('ch3cho-b3lyp631gs.default.96.molden_normal_modes')
###Output
_____no_output_____
###Markdown
CH4
###Code
# Setup Geometry
ch4 = psi4.geometry("""
0 1
C -5.1204447883 0.2129832513 0.0000000000
H -4.0504447883 0.2129832513 0.0000000000
H -5.4771075702 -0.7792879421 0.1819053015
H -5.4771124261 0.5515830994 -0.9502832941
H -5.4771137210 0.8666534100 0.7683765978
""")
# setup calculation, optimization
psi4.core.set_output_file('ch4-b3lyp631gs.log', False)
ch4_eng = psi4.optimize('b3lyp/6-31G*', molecule=ch4)
print(ch4_eng)
print(ch4.to_string(dtype='xyz'))
# visualize geometry
import fortecubeview
ch4xyz = """
C 0.000000238953 0.000000369154 0.000000000000
H 0.624381193333 -0.093461361896 0.892846398423
H 0.624381193333 -0.093461361896 -0.892846398423
H -0.492213077375 0.976467592553 0.000000000000
H -0.756552154462 -0.789549264213 0.000000000000
"""
fortecubeview.geom(xyz=ch4xyz )
# perform frequency calculation
psi4.set_options({"normal_modes_write": True})
scf_e, scf_wfn = psi4.frequency('b3lyp/6-31G*', molecule=ch4, return_wfn=True)
# list of frequencies
freqs = scf_wfn.frequencies().to_array()
print(freqs)
# check normal mode file
!ls
# check the thermochemistry functions
with open('ch4-b3lyp631gs.log','r') as f:
lines = f.readlines()[-118:]
print(''.join(lines))
# visualize vibrational frequencies
fortecubeview.vib('ch4-b3lyp631gs.default.51.molden_normal_modes')
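###Output
_____no_output_____
###Markdown
 Putting it together (sketch)
A minimal sketch of the Hess's-law arithmetic, reusing the electronic energies already stored above (`chocho_eng`, `ch2o_eng`, `co_eng`, `ch4_eng`, `ch3cho_eng`). This is an illustration only: for the graded answer you should replace these with the total enthalpies (electronic energy plus thermal correction at 298.15 K) reported in the thermochemistry section of each log file.
###Code
# Sketch of the Hess's-law estimate of dHf(glyoxal); not the final answer.
# Assumption: the *_eng variables from the optimizations above are still in memory;
# swap them for total enthalpies from the frequency logs for a proper result.
hartree_to_kcal = 627.5096
dHf_exp = {'CH2O': -25.98, 'CO': -26.42, 'CH4': -17.83, 'CH3CHO': -40.80}  # kcal/mol
# Pathway I: CHOCHO -> CH2O + CO
dH_rxn1 = (ch2o_eng + co_eng - chocho_eng) * hartree_to_kcal
dHf_1 = dHf_exp['CH2O'] + dHf_exp['CO'] - dH_rxn1
# Pathway II (isodesmic): CHOCHO + CH4 -> CH3CHO + CH2O
dH_rxn2 = (ch3cho_eng + ch2o_eng - chocho_eng - ch4_eng) * hartree_to_kcal
dHf_2 = dHf_exp['CH3CHO'] + dHf_exp['CH2O'] - dHf_exp['CH4'] - dH_rxn2
print(f"Pathway I : dHf(CHOCHO) ~ {dHf_1:.2f} kcal/mol")
print(f"Pathway II: dHf(CHOCHO) ~ {dHf_2:.2f} kcal/mol")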
###Output
_____no_output_____ |
05-Tidy-Data/Exercise-01-Pandas-and-Missing-Values-Part2.ipynb | ###Markdown
 Exercise (continued) - Working with Pandas and Seaborn
Now it's your turn to practice what we learned in the class. In this notebook we will play with some of the concepts we just learned, such as handling missing values, grouping, and aggregation. You have seen this dataset in the previous class, so the first part of the notebook doesn't require you to write any code.

We are working with the following dataset: `data/2017_StPaul_MN_Real_Estate.csv`. Let's call this DataFrame `houses`. We can import the data using a URL if the data file is located on the internet. This is a very convenient option since our file is located in my Github; however, this is not always the case, and in real life the data is not on a public URL. We will use the URL syntax as much as possible to avoid any local path issues:
###Code
import pandas as pd
url = 'https://raw.githubusercontent.com/soltaniehha/Business-Analytics/master/data/2017_StPaul_MN_Real_Estate.csv'
houses = pd.read_csv(url)
print("There are {} rows and {} columns.".format(houses.shape[0], houses.shape[1]))
###Output
There are 5000 rows and 74 columns.
###Markdown
This dataset has too many columns to study. To start, let's create a new dataset with a smaller number of attributes. To do this, use the following list, `subset_columns`:
###Code
subset_columns = ['streetaddress','STREETNAME', 'PostalCode', 'StateOrProvince', 'City', 'SchoolDistrictNumber',
'SalesClosePrice', 'LISTDATE', 'offmarketdate', 'LISTPRICE', 'LISTTYPE',
'OriginalListPrice', 'PricePerTSFT', 'DAYSONMARKET', 'ROOF',
'SQFTABOVEGROUND', 'RoomArea1', 'YEARBUILT']
df = houses[subset_columns].copy() # This will create an individual copy of the original DataFrame
# Adding a new column, sales_vs_list
df['sales_vs_list'] = (df['SalesClosePrice'] - df['LISTPRICE'])/df['LISTPRICE'] * 100
print("There are {} rows and {} columns.".format(df.shape[0], df.shape[1]))
df.head()
###Output
There are 5000 rows and 19 columns.
###Markdown
Use `describe()` to get a high level summary of the data:
###Code
df.describe()
###Output
_____no_output_____
###Markdown
Using `.info()` extract more info regarding the missing values and columns types:
###Code
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5000 entries, 0 to 4999
Data columns (total 19 columns):
streetaddress 5000 non-null object
STREETNAME 5000 non-null object
PostalCode 5000 non-null int64
StateOrProvince 5000 non-null object
City 5000 non-null object
SchoolDistrictNumber 5000 non-null object
SalesClosePrice 5000 non-null int64
LISTDATE 5000 non-null object
offmarketdate 5000 non-null object
LISTPRICE 5000 non-null int64
LISTTYPE 5000 non-null object
OriginalListPrice 5000 non-null int64
PricePerTSFT 5000 non-null float64
DAYSONMARKET 5000 non-null int64
ROOF 4235 non-null object
SQFTABOVEGROUND 5000 non-null int64
RoomArea1 4543 non-null object
YEARBUILT 5000 non-null int64
sales_vs_list 5000 non-null float64
dtypes: float64(2), int64(7), object(10)
memory usage: 742.3+ KB
###Markdown
From the outcome of `info()` we can see that columns *ROOF* and *RoomArea1* have some null values. We can first visually inspect the rows where for instance *ROOF* is missing and see if we find any common cause:
###Code
df[df['ROOF'].isnull()].head()
###Output
_____no_output_____
###Markdown
 Your Turn
Let's find out what possible values *ROOF* can take. We can do this by applying the `.unique()` function to the column of interest.
###Code
# Your answer goes here
###Output
_____no_output_____
###Markdown
 This is great, but wouldn't it be even cooler to see how popular these roofs are? Let's use a `groupby()` on our `df` DataFrame and count how many times each roof type was used in the dataset. The easy way to do this is to use the function `size()`, which will give you the number of elements in each group.

Hint: this will print a long list; you can use `.head(10)` to limit it.
###Code
# Your answer goes here
###Output
_____no_output_____
###Markdown
Now we will sort it to get the most common ones on top. Use `.sort_values()` (right after your aggregation function and before using `head()`. If you use it after head it will only sort among the limited ones that were printed which is not what we are looking for here):
###Code
# Your answer goes here
###Output
_____no_output_____
###Markdown
 Let's find out how many values are missing from *ROOF* (even though we can find out from `info()`).
1. Subset the column of interest by the following format: `DF['col1']`. This will give us a Series object.
2. Chain the `isnull()` function to this Series, i.e., `DF['col1'].isnull()`
3. Chain the `sum()` function to the previous step
###Code
# Your answer goes here
###Output
_____no_output_____
###Markdown
 Let's replace the null values with the most commonly used value in the dataset, "Asphalt Shingles". **Note:** This may or may not be the right thing to do depending on the problem you are solving. To do so we can use the `.fillna()` function, chained to the column subset `df['ROOF']`. Within the argument of this function you want to first pass the replacement to be used, here "Asphalt Shingles", and then `inplace=True`. If we don't set `inplace=True` it won't permanently fill the nulls in our DataFrame.
###Code
# Your answer goes here
###Output
_____no_output_____
###Markdown
Check if there are any nulls left in that column (similar to the cell above):
###Code
# Your answer goes here
###Output
_____no_output_____
###Markdown
Check out the `info()`:
###Code
# Your answer goes here
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5000 entries, 0 to 4999
Data columns (total 19 columns):
streetaddress 5000 non-null object
STREETNAME 5000 non-null object
PostalCode 5000 non-null int64
StateOrProvince 5000 non-null object
City 5000 non-null object
SchoolDistrictNumber 5000 non-null object
SalesClosePrice 5000 non-null int64
LISTDATE 5000 non-null object
offmarketdate 5000 non-null object
LISTPRICE 5000 non-null int64
LISTTYPE 5000 non-null object
OriginalListPrice 5000 non-null int64
PricePerTSFT 5000 non-null float64
DAYSONMARKET 5000 non-null int64
ROOF 5000 non-null object
SQFTABOVEGROUND 5000 non-null int64
RoomArea1 4543 non-null object
YEARBUILT 5000 non-null int64
sales_vs_list 5000 non-null float64
dtypes: float64(2), int64(7), object(10)
memory usage: 742.3+ KB
###Markdown
 We can see that the only column with nulls is `RoomArea1`. For the sake of practice let's handle it differently this time. We will drop any records that don't have a value for this column. We can do this using the `dropna()` function.

Do NOT use the option `inplace=True`; instead save the output into a new DataFrame called `df2`:
###Code
# Your answer goes here
###Output
_____no_output_____
###Markdown
Check out the `info()` on `df2`:
###Code
# Your answer goes here
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 4543 entries, 0 to 4999
Data columns (total 19 columns):
streetaddress 4543 non-null object
STREETNAME 4543 non-null object
PostalCode 4543 non-null int64
StateOrProvince 4543 non-null object
City 4543 non-null object
SchoolDistrictNumber 4543 non-null object
SalesClosePrice 4543 non-null int64
LISTDATE 4543 non-null object
offmarketdate 4543 non-null object
LISTPRICE 4543 non-null int64
LISTTYPE 4543 non-null object
OriginalListPrice 4543 non-null int64
PricePerTSFT 4543 non-null float64
DAYSONMARKET 4543 non-null int64
ROOF 4543 non-null object
SQFTABOVEGROUND 4543 non-null int64
RoomArea1 4543 non-null object
YEARBUILT 4543 non-null int64
sales_vs_list 4543 non-null float64
dtypes: float64(2), int64(7), object(10)
memory usage: 709.8+ KB
###Markdown
 `dropna()` removes all of the records that have any number of nulls. For other functionalities please check out the help function or go back to 02-Aggregation-and-Grouping.ipynb.

We could continue with `df2` but we weren't really planning on using `RoomArea1` for this analysis. In order to have a clean dataset let's just remove that column. The advantage would be that we get to keep all 5000 data points.

To do so we will use the `drop()` function.
1. Pass the name of the column to drop
2. Set parameter `axis=1`. This will indicate that the name that we passed is a column name and not a row name (index)
3. `inplace=True`
###Code
# Your answer goes here
###Output
_____no_output_____
###Markdown
Check out the `info()`:
###Code
# Your answer goes here
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5000 entries, 0 to 4999
Data columns (total 18 columns):
streetaddress 5000 non-null object
STREETNAME 5000 non-null object
PostalCode 5000 non-null int64
StateOrProvince 5000 non-null object
City 5000 non-null object
SchoolDistrictNumber 5000 non-null object
SalesClosePrice 5000 non-null int64
LISTDATE 5000 non-null object
offmarketdate 5000 non-null object
LISTPRICE 5000 non-null int64
LISTTYPE 5000 non-null object
OriginalListPrice 5000 non-null int64
PricePerTSFT 5000 non-null float64
DAYSONMARKET 5000 non-null int64
ROOF 5000 non-null object
SQFTABOVEGROUND 5000 non-null int64
YEARBUILT 5000 non-null int64
sales_vs_list 5000 non-null float64
dtypes: float64(2), int64(7), object(9)
memory usage: 703.2+ KB
###Markdown
 Okay! Now that we don't have any missing values in our DataFrame let's continue with some aggregation tasks.

Group our `df` by "City" and calculate min, max, and count for every group and every column.

Hint: use the `agg()` function and pass a list of the aggregation functions you need.
###Code
# Your answer goes here
###Output
_____no_output_____
###Markdown
 Too many values are being printed and it's really hard to read. Let's limit this by asking to show `SalesClosePrice` and `SQFTABOVEGROUND` only.

Hint: you can do this by passing `[['SalesClosePrice', 'SQFTABOVEGROUND']]` to groupby.
###Code
# Your answer goes here
###Output
_____no_output_____
###Markdown
 Use `describe()` and `groupby()` to get a high level summary of "LISTPRICE" for each "City".
1. Apply groupby and use the column you want to group with
2. Pass the name of the column you'd like `describe()` to describe!
3. Chain `describe()`
###Code
# Your answer goes here
import seaborn as sns
import matplotlib.pyplot as plt
sns.violinplot(x="City", y="SalesClosePrice", data=df, inner="quartile",
scale='count')
plt.xticks(rotation=90)
plt.title("Distribution of Closing Price for Different Cities")
sns.despine()
###Output
_____no_output_____
###Markdown
 This time it's your turn. Use the `boxplot()` function to plot box plots of `sales_vs_list` for each city.

Hint: the code is similar to the one above, without setting the `inner` and `scale` parameters.
###Code
# Your answer goes here
###Output
_____no_output_____
###Markdown
 Notice that we have modified the "inner" option of the plot above. It accepts the following values: {"box", "quartile", "point", "stick", None}. Try them to see the difference.

We have also changed the scale method, the method used to scale the width of each violin. These are the possible options: {"area", "count", "width"}. If ``area``, each violin will have the same area. If ``count``, the width of the violins will be scaled by the number of observations in that bin. If ``width``, each violin will have the same width.

Now let's use the `agg()` function to find the average `SalesClosePrice` for each `PostalCode` and the `count` for each group:
###Code
# Your answer goes here
###Output
_____no_output_____
###Markdown
 Let's use the `filter()` function to keep only the houses in zip codes (`PostalCode`) whose average `SalesClosePrice` is less than 250,000. Save the result to a new DataFrame and call it `df_inexpensive_zips`. Run the aggregate again:
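If you want a reminder of the pattern before writing your answer, here is a generic sketch on a tiny made-up frame (the names `toy`, `zip`, and `price` are invented for illustration and are not the housing columns):
```python
import pandas as pd

toy = pd.DataFrame({'zip': [1, 1, 2, 2], 'price': [100, 200, 400, 600]})
# keep only the rows belonging to groups whose mean price is below 300
cheap = toy.groupby('zip').filter(lambda g: g['price'].mean() < 300)
```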
###Code
# Your answer goes here
# Your answer goes here
df_inexpensive_zips.head(3)
# Your answer goes here
###Output
_____no_output_____
###Markdown
Use the `transform()` function on `df_inexpensive_zips` to create a new column called `SalesPriceNormalized` that shows the proportional value of a sold house to the most expensive house sold within the same zipcode:
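As before, here is a generic sketch of the `transform()` pattern on a made-up frame (invented names, not the housing data), in case you need it:
```python
import pandas as pd

toy = pd.DataFrame({'zip': [1, 1, 2, 2], 'price': [100, 200, 400, 600]})
# each price divided by the largest price within its own zip group
toy['price_norm'] = toy.groupby('zip')['price'].transform(lambda s: s / s.max())
```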
###Code
# Your answer goes here
###Output
/Users/msoltani/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:3: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
This is separate from the ipykernel package so we can avoid doing imports until
|
notebook/2018-07-26_parsch.ipynb | ###Markdown
 Parsch
Argyridou, Eliza, and John Parsch. 2018. "Regulation of the X Chromosome in the Germline and Soma of Drosophila Melanogaster Males." Genes 9 (5). https://doi.org/10.3390/genes9050242.

I just want to re-create some tables that they had in this paper.
###Code
import os
import sys
from pathlib import Path
from IPython.display import display, HTML, Markdown
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
# Project level imports
from larval_gonad.notebook import Nb
# Setup notebook
nbconfig = Nb.setup_notebook()
tpm = pd.read_parquet('../output/scrnaseq-wf/tpm.parquet')
tpm.head()
chroms = nbconfig.fbgn2chrom.copy()
chroms.replace({'chr2L': 'A', 'chr2R': 'A', 'chr3L': 'A', 'chr3R': 'A'}, inplace=True)
chroms = chroms.query('chrom != "chr4" & chrom != "chrM" & chrom != "chrY"').copy()
xgenes = chroms.query('chrom == "chrX"').index.tolist()
agenes = chroms.query('chrom == "A"').index.tolist()
tpmChrom = tpm.join(chroms, how='left')
tpmOn = (tpm > 0).join(chroms, how='left')
numGenesOn = tpmOn.groupby('chrom').sum().T
numGenesOn.columns = ['# A genes', '# X genes']
numGenesOn
medGenes = tpmChrom.groupby('chrom').median().T
medGenes.columns = ['A median', 'X median']
medGenes['X/A'] = medGenes['X median'] / medGenes['A median']
medGenes
from scipy.stats import mannwhitneyu
tpm.head()
def mann(dat):
    # one-sided Mann-Whitney U test: is X-linked expression lower than autosomal expression?
    _x = dat.reindex(xgenes).dropna()   # TPM values of X-linked genes in this cluster
    _a = dat.reindex(agenes).dropna()   # TPM values of autosomal genes in this cluster
    _, pval = mannwhitneyu(_x, _a, alternative='less')
    return pd.Series([np.round(pval, 4)], name=dat.name, index=['p-value'])
p_value = tpm.apply(mann).T
parsch1 = numGenesOn.join(medGenes).join(p_value)
parsch1 = parsch1.loc[nbconfig.CLUSTER_ORDER, ['# X genes', 'X median', '# A genes', 'A median', 'X/A', 'p-value']]
display(parsch1)
display(Markdown('p-value: One-sided Mann-Whiteney U'))
def prop_in_bin(dat):
    # bin the expression values into roughly log2-spaced TPM bins
    ct = pd.cut(dat, [2, 4, 8, 16, 32, 64, 128, 256, 99999999999], duplicates='drop')
    ct.name = f'bins'
    # count genes per (bin, chromosome class)
    cnts = chroms.join(ct, how='right').groupby([f'bins', 'chrom']).size()
    cnts.name = 'cnts'
    cnts = cnts.to_frame().reset_index()
    _x = cnts.query('chrom == "chrX"').copy()
    # expectation if the X-linked genes were spread uniformly over the occupied bins
    expectation = _x.cnts.sum() / _x.shape[0]
    _x[f'{dat.name}'] = _x['cnts'] / expectation
    return _x[['bins', f'{dat.name}']].set_index('bins', drop=True)
dfs = []
for i, col in tpm.iteritems():
dfs.append(prop_in_bin(col))
binned = pd.concat(dfs, axis=1).reset_index().melt(id_vars='bins')
def myplot(data, name, ax):
sns.barplot('bins', 'value', data=data, ax=ax)
ax.axhline(1, color='k', ls=':')
ax.text(0.5, .99, name, ha='center', va='top', fontdict={'size': 10}, transform=ax.transAxes)
ax.set_xlabel('')
ax.set_ylabel('')
plt.setp(ax.get_xticklabels(), rotation=45, ha='right')
from itertools import zip_longest
fig, axes = plt.subplots(3, 4, sharex=True, sharey=True, figsize=(10, 10), gridspec_kw={'wspace': 0.01, 'hspace': 0.01})
for ax, name in zip_longest(axes.flatten(), nbconfig.CLUSTER_ORDER):
if name is None:
ax.set_visible(False)
continue
dd = binned.query(f'variable == "{name}"')
myplot(dd, name, ax)
fig.text(0.05, 0.5, 'X-linked Genes (Observed/Expected)', rotation=90, transform=fig.transFigure, fontsize=12, ha='left', va='center')
fig.text(0.5, 0.01, 'Bins', transform=fig.transFigure, fontsize=12, ha='center', va='bottom')
###Output
_____no_output_____ |
materiais_pdf/Data_Science(curso_de_2019)/Machine Learning/Jupyter notebooks/Machine Learning 1 - Clust Hier.ipynb | ###Markdown
 Hierarchical clustering
Within Machine Learning, the data sets used in a model can have many different natures. Accordingly, the methods for grouping them are just as diverse as the character of each set: distinct characteristics demand distinct algorithms in order to achieve better predictions from the implemented model.

For example, imagine a situation in which we have information about a group of buyers and we want to split them according to their purchasing profile. Since the number of clusters is unknown, Hierarchical Clustering is an excellent option, because one of the outputs of this classification method is a dendrogram, an instrument capable of showing us the proximity relationships between the elements.

The hierarchical clustering algorithm can be employed in two ways:

1 – Agglomerative mode: the algorithm starts with every element of the matrix in its own group and, at each step, the two closest groups are merged. This process repeats until only a single group remains.

2 – Divisive mode: the inverse of the process above; the algorithm starts with a single group and keeps splitting it until only the isolated objects of the initial matrix remain.

The mode adopted in the classical literature, and the one we also adopt here, is the first.

Note that finding the two closest groups is done with one of the methods below:
• Single: the distance between two groups is measured by the minimum distance found between the objects that compose them.
• Complete: unlike the first method, the criterion is the maximum distance between the objects.
• Average: here, what is taken into account is the average distance between the objects.
• Centroid: the distance between the centroids of the two clusters is considered.
• Ward: if the two selected clusters were merged, measures how the total distance with respect to the original centroids would change.

Now that the basic theoretical foundations have been introduced, let's move to a practical example to consolidate them. We will work with a dataset containing information about shoppers (the dataset download is available in the drive together with the course material).
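Before turning to the shopper data, here is a tiny standalone sketch (made-up one-dimensional points, purely illustrative) of how the linkage criteria listed above can lead to different merge distances:
```python
import numpy as np
from scipy.cluster import hierarchy as shc

toy = np.array([[0.0], [0.4], [5.0], [5.3]])
for method in ('single', 'complete', 'average', 'ward'):
    Z = shc.linkage(toy, method=method)
    print(method, Z[-1, 2])  # distance at which the final merge happens
```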
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt  # library responsible for plotting the charts
import scipy.cluster.hierarchy as shc  # library responsible for building the dendrogram
from sklearn.cluster import AgglomerativeClustering  # library responsible for fitting the data
data = pd.read_csv("/home/murilo/Downloads/shopping_data.csv")
data.head()
###Output
_____no_output_____
###Markdown
 The dataset is composed of the columns CustomerID, Genre, Age, Annual Income (measured in thousands of reais) and Spending Score. Of these, we will restrict ourselves to the last two, which provide the inputs useful to our process. The first indicates annual income and the second the propensity to spend, with 0 as the minimum and 100 as the maximum.
###Code
infos = data.iloc[:,3 :5].values  # select the two aforementioned columns
plt.figure(figsize=(10,10))  # dimensions of the figure to be plotted
link = shc.linkage(infos, method='ward')  # merge the groups using the ward method
dendrogram = shc.dendrogram(link)  # build the dendrogram
plt.show()  # display it
###Output
_____no_output_____
###Markdown
 Something interesting to note in the dendrogram is that the length of the vertical lines represents the distance required for each merge. In other words, the ideal is to wait for long vertical lines to appear when choosing the number of clusters. For this dataset, this clearly happens after 5 groups are formed. With that number in hand, all that remains is to perform the clustering.
###Code
cluster = AgglomerativeClustering(n_clusters=5, affinity='euclidean', linkage='ward')
cluster.fit_predict(infos)
###Output
_____no_output_____
###Markdown
 As can be seen above, the function returns a list telling us the group to which each element of the matrix belongs, so let's plot the final chart with this division.
###Code
# the X axis represents annual income and the Y axis the propensity to spend
plt.scatter(infos[:,0], infos[:,1], c=cluster.labels_, cmap='rainbow')
plt.show()
###Output
_____no_output_____ |
dati_2017/05/2DOF.ipynb | ###Markdown
2 DOF System The data that describes our system and its loading,$$\boldsymbol p(t) = p_0\,\begin{Bmatrix}0\\1\end{Bmatrix}\sin 2\omega_0\,t$$
###Code
M = np.array(((2.0, 0.0), ( 0.0, 1.0)))
K = np.array(((3.0,-2.0), (-2.0, 2.0)))
p = np.array(( 0.0, 1.0))
w = 2.0
###Output
_____no_output_____
###Markdown
 Computing the eigenvalues and the eigenvectors
Using the `eigh` library function, specialized for Hermitian (symmetric) matrices. We also compute the _starred_ matrices.
###Code
evals, Psi = eigh(K, M)
Mstar = Psi.T@M@Psi
Kstar = Psi.T@K@Psi
pstar = Psi.T@p
###Output
_____no_output_____
###Markdown
The `@` operator stands, in this context, for matrix multiplication. We print the computed values.
###Code
print(evals,end='\n\n')
print(Psi,end='\n\n')
print(Mstar,end='\n\n')
print(Kstar,end='\n\n')
print(pstar,end='\n\n')
print(Mstar[0,1]*150*10E6*1000)
###Output
[ 0.31385934 3.18614066]
[[-0.54177432 -0.45440135]
[-0.64262055 0.76618459]]
[[ 1.00000000e+00 -5.55111512e-17]
[ -5.55111512e-17 1.00000000e+00]]
[[ 3.13859338e-01 5.55111512e-17]
[ 0.00000000e+00 3.18614066e+00]]
[-0.64262055 0.76618459]
-8.32667268469e-05
###Markdown
 Modal Response
The modal equation of motion, divided by $m$, is
$$ \ddot q_i + \Lambda^2_i\omega_0^2 q_i = \frac{p_0}{k}\frac{k}{m}\,p^\star_i \sin2\omega_0t.$$
The particular integral $\xi_i(t)$ is given by
$$\xi_i = \Delta_{st} \frac{\omega_0^2}{(\Lambda^2_i-4)\omega_0^2}p^\star_i\sin2\omega_0t.$$
Eventually the modal response, computed under the hypothesis of initial rest conditions, is
$$q_i = \frac{\Delta_{st}}{\Lambda^2_i-4}p^\star_i(\sin2\omega_0t-\beta\sin\Lambda_i\omega_0t).$$
###Code
L = np.sqrt(evals)                  # modal frequencies Lambda_i (in units of omega_0)
DAF = 1.0/(L**2-w**2)               # steady-state factor 1/(Lambda_i^2 - 4) of the particular integral
beta = w/L                          # ratio of the loading frequency to each modal frequency
t = np.linspace(0,60,601)[:,None]   # column vector of omega_0*t values, shaped for broadcasting
q = pstar*DAF*(np.sin(w*t)-beta*np.sin(L*t))  # modal responses for initial rest conditions
###Output
_____no_output_____
###Markdown
 The definition of the time vector is a bit involved: the trailing `[:, None]` turns `t` into a column vector, so that `np.sin(L*t)` broadcasts against the two modal frequencies and `q` gets one column per mode.
###Code
curves = plt.plot(t,q)
plt.legend(curves,['q1', 'q2'])
plt.title('Modal Response')
plt.xlabel('$\omega_0t$')
plt.ylabel('$q_i/\Delta_{st}$');
x = ([email protected]).T
curves = plt.plot(t, x)
plt.legend(curves,['x1', 'x2'])
plt.title('Structural Response')
plt.xlabel('$\omega_0t$')
plt.ylabel('$X_i/\Delta_{st}$');
%matplotlib inline
import matplotlib.pyplot as plt ; plt.style.use(['fivethirtyeight', '00_mplrc'])
import numpy as np
from scipy.linalg import eigh
np.set_printoptions(suppress=False, linewidth=120)
from IPython.display import HTML
HTML(open('00_custom.css').read())
###Output
_____no_output_____ |
notebooks/TensorFMRI_StripesNotebook.ipynb | ###Markdown
 Some preliminary code to visualize tensor-tensor products, the t-SVD, and projections.
###Code
# import modules
import numpy as np
from data.synthetic_data import stripes
import tensor.tensor_product_wrapper as tp
from utils.plotting_utils import montage_array, slice_subplots
import matplotlib.pyplot as plt
import similarity_metrics as sm
###Output
_____no_output_____
###Markdown
 Choose an available tensor-tensor product. Options for this notebook include:
* 'f' : facewise product (no transform along third dimension)
* 't' : t-product (fast Fourier transform along third dimension)
* 'c' : c-product (discrete cosine transform along third dimension)
###Code
# choose product type {'f', 't', 'c'}
prod_type = 't'
###Output
_____no_output_____
###Markdown
 Given a tensor-tensor product $\star_M$, the projection of a tensor $\mathcal{A}$ onto the space spanned by the lateral slices of a tensor $\mathcal{U}$ is
$$\mathcal{U} \star_M \mathcal{U}^\top \star_M \mathcal{A}$$
###Code
def projection(A, U, prod_type):
    # coefficients of A in the basis of lateral slices of U: U^T * A
    training_coeff = tp.ten_prod(tp.ten_tran(U, prod_type=prod_type), A, prod_type=prod_type)
    # map the coefficients back: U * (U^T * A)
    return tp.ten_prod(U, training_coeff, prod_type=prod_type)
###Output
_____no_output_____
###Markdown
 The data is composed of $n\times n$ images with four different possible "stripes" of the same width. Let ${\bf e} \in \mathbb{R}^n$ be the vector of all ones. Let ${\bf x}, {\bf y}\in \mathbb{R}^n$ be random vectors.
* 'vertical' : $\mathbf{x}\mathbf{e}^\top$
* 'horizontal' : $\mathbf{e}\mathbf{x}^\top$
* 'main_diagonal' : $\text{toeplitz}({\bf x}, {\bf y})$ (constant from top left to bottom right)
* 'off_diagonal' : $\text{rot90}(\text{toeplitz}({\bf x}, {\bf y}), 1)$ (constant from top right to bottom left)

The data is stored as an $n \times M\times n$ tensor where $M$ is the number of images.
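As a concrete illustration of the rank-one notation above (a standalone numpy sketch; the actual generator is `data.synthetic_data.stripes`, whose internals are not shown here):
```python
import numpy as np

n = 8
e = np.ones(n)
x = np.random.rand(n)
v_stripes = np.outer(x, e)   # x e^T, the 'vertical' pattern described above
h_stripes = np.outer(e, x)   # e x^T, the 'horizontal' pattern described above
print(v_stripes.shape, h_stripes.shape)  # both (8, 8)
```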
###Code
# for reproducibility
np.random.seed(20)
# load data
img_shape = (8, 8)
num_classes = 2
training_data, training_labels = stripes(num_images_per_class=100, img_shape=img_shape, num_classes=num_classes)
test_data, test_labels = stripes(num_images_per_class=9, img_shape=img_shape, num_classes=num_classes)
# permute such that lateral slices are the images
training_data = training_data.transpose(0, 2, 1)
test_data = test_data.transpose(0, 2, 1)
# visualize data
for i in range(num_classes):
slice_subplots(test_data[:, test_labels == i, :], axis=1, title='class ' + str(i))
plt.show()
print(test_data.shape)
print(test_labels)
# form local t-svd
num_classes = len(np.unique(training_labels))
k = 2
U = []
for i in range(num_classes):
u, _, _, _ = tp.ten_svd(training_data[:, training_labels == i, :], k, prod_type=prod_type)
U.append(u)
# visualizations
for i in range(num_classes):
slice_subplots(U[i], axis=1, title='basis elments for class ' + str(i))
plt.show()
# compute results on training and test data
training_error = np.zeros([num_classes, training_data.shape[1]])
test_error = np.zeros([num_classes, test_data.shape[1]])
for i in range(num_classes):
training_projection = projection(training_data, U[i], prod_type=prod_type)
training_error[i, :] = sm.frobenius_metric(training_data, training_projection, axis=1)
test_projection = projection(test_data, U[i], prod_type=prod_type)
test_error[i, :] = sm.frobenius_metric(test_data, test_projection, axis=1)
# classification
training_predicted_classes = np.argmin(training_error, axis=0).reshape(-1)
test_predicted_classes = np.argmin(test_error, axis=0).reshape(-1)
# results
training_num_correct = np.sum(training_predicted_classes == training_labels)
training_accuracy = training_num_correct / training_data.shape[1]
test_num_correct = np.sum(test_predicted_classes == test_labels)
test_accuracy = test_num_correct / test_data.shape[1]
print('train accuracy = %0.2f' % (100 * training_accuracy))
print('test accuracy = %0.2f' % (100 * test_accuracy))
# plot results
plt.figure()
for i in range(num_classes):
plt.semilogy(training_error[i, :], 'o', label=i)
plt.xlabel('image index')
plt.ylabel('distance score (lower is better)')
plt.legend()
plt.show()
# visualizations of projected tensors
for j in range(num_classes):
for i in range(num_classes):
A = projection(training_data[:, training_labels == j, :], U[i], prod_type=prod_type)
slice_subplots(A[:, :4, :], axis=1, title='projection of class ' + str(j) + ' onto basis for class ' + str(i))
plt.show()
###Output
_____no_output_____ |
Python/Cisco-PCAP-Python/Module2.ipynb | ###Markdown
Module 2
###Code
print("Hello, World!")
###Output
Hello, World!
###Markdown
 Function
- A function lets you evaluate a value.
- It can come from Python itself, from a module/library, or you can create your own.
- A function uses parentheses `()` to receive its argument(s):
```python
function_name(argument)
```
- Python requires that there cannot be more than one instruction in a line.
- Python uses the positional way (this name comes from the fact that the meaning of the argument is dictated by its position; e.g., the second argument will be outputted after the first, not the other way round).

How functions work in Python
- First, Python checks if the name specified is legal (it browses its internal data in order to find an existing function of that name; if this search fails, Python aborts the code);
- second, Python checks if the function's requirements for the number of arguments allow you to invoke the function in this way (e.g., if a specific function demands exactly two arguments, any invocation delivering only one argument will be considered erroneous, and will abort the code's execution);
- third, Python leaves your code for a moment and jumps into the function you want to invoke; of course, it takes your argument(s) too and passes it/them to the function;
- fourth, the function executes its code, causes the desired effect (if any), evaluates the desired result(s) (if any) and finishes its task;
- finally, Python returns to your code (to the place just after the invocation) and resumes its execution.
###Code
# print without argument will produce a newline
print("The itsy bitsy spider climbed up the waterspout.")
print()
print("Down came the rain and washed the spider out.")
# \n will give a newline also
print("The itsy bitsy spider\nclimbed up the waterspout.")
print()
print("Down came the rain\nand washed the spider out.")
# print multiple argument will join them together
print("The itsy bitsy spider" , "climbed up" , "the waterspout.")
print(1 , "climbed up" , "the waterspout.")
###Output
The itsy bitsy spider climbed up the waterspout.
1 climbed up the waterspout.
###Markdown
Any keyword arguments have to be put after the last positional argument (this is very important)
###Code
# using keyword
print("My name is", "Python.", end=" ")
print("Monty Python.")
print("My name is ", end="")
print("Monty Python.")
###Output
My name is Monty Python.
###Markdown
The keyword argument that can do this is named sep (like separator)
###Code
print("My", "name", "is", "Monty", "Python.", sep="-")
print("My", "name", "is", sep="_", end="*")
print("Monty", "Python.", sep="*", end="*\n")
print("Programming", "Essentials", "in", end="...", sep="***")
print("Python")
###Output
Programming***Essentials***in...Python
###Markdown
Python Literals- A literal is data whose values are determined by the literal itself.- You use literals to encode data and to put them into your code.- If you encode a literal and place it inside Python code, the form of the literal determines the representation (type) Python will use to store it in the memory. Integer
###Code
# Python allows you to use octal and hexadecimal numbers
print(0o123)
print(0x123)
print(1)
###Output
83
291
1
###Markdown
Float
###Code
print(.4)
print(0.4)
print(4.)
# using an exponent for long float numbers
print(3E8)
print(3E-8)
###Output
300000000.0
3e-08
###Markdown
String
###Code
print("I like \"Monty Python\"")
###Output
I like "Monty Python"
###Markdown
Boolean Value
###Code
print(True > False)
print(True < False)
###Output
True
False
###Markdown
 Exercise: Python Literals
###Code
print("\"I\'m\"\n\"\"learning\"\"\n\"\"\"Python\"\"\"")
print(type("Hello "), type("007"))
print(type("1.5"), type(2.0), type(528), type(False))
# the 0b prefix writes 1011 as a binary integer literal (value 11)
print(int(0b1011))
###Output
"I'm"
""learning""
"""Python"""
<class 'str'> <class 'str'>
<class 'str'> <class 'float'> <class 'int'> <class 'bool'>
11
###Markdown
Basic Operator
###Code
print('exponentiation depend on the input')
print(2 ** 3)
print(2 ** 3.)
print(2. ** 3)
print(2. ** 3.)
print('division always resulting float')
print(6 / 3)
print(6 / 3.)
print(6. / 3)
print(6. / 3.)
print('divisional operator depend on the input')
print(6 // 3)
print(6 // 3.)
print(6. // 3)
print(6. // 3.)
print('modulo operator depend on the input')
print(12 % 4.5)
print(12 % 5)
print('addition and substraction operator depend on the input')
print(-4 + 4)
print(-4. + 8)
print(-4 - 4)
print(4. - 8)
###Output
exponentiation depend on the input
8
8.0
8.0
8.0
division always resulting float
2.0
2.0
2.0
2.0
divisional operator depend on the input
2
2.0
2.0
2.0
modulo operator depend on the input
3.0
2
0
4.0
-8
-4.0
###Markdown
Operator binding
###Code
# hierarchy
print(2 + 3 * 5)
# module use left-sided binding
print(9 % 6 % 2)
# exponentiation use right binding
print(2 ** 2 ** 3) # 256 instead of 64
###Output
17
1
256
###Markdown
| Priority | Operator |        |
|:--------:|:--------:|:------:|
| 1        | +, -     | unary  |
| 2        | **       |        |
| 3        | *, /, %  |        |
| 4        | +, -     | binary |
###Code
print((5 * ((25 % 13) + 100) / (2 * 13)) // 2)
###Output
10.0
###Markdown
 Exercise: Python Operators
###Code
print((2 ** 4), (2 * 4.), (2 * 4))
print((-2 / 4), (2 / 4), (2 // 4), (-2 // 4))
print((2 % -4), (2 % 4), (2 ** 3 ** 2))
###Output
16 8.0 8
-0.5 0.5 0 -1
-2 2 512
###Markdown
 Variables
- Python offers special "boxes" (containers) to store results, and these boxes are called variables.
- Examples of legal variable names:
```python
# variable naming
MyVariable, i, t34, Exchange_Rate, counter, days_to_christmas, TheNameIsSoLongThatYouWillMakeMistakesWithIt, _
```
###Code
var = 1
account_balance = 1000.0
client_name = 'John Doe'
print(var, account_balance, client_name)
print(var)
var = 1
print(var)
var = var + 1
print(var)
a = 3.0
b = 4.0
c = (a ** 2 + b ** 2) ** 0.5
print("c =", c)
###Output
c = 5.0
###Markdown
 Exercise: Variables
###Code
#excercise 1
john = 1
mary = 1
adam = 1
totalApples = john + mary + adam
print("Total number of apples: ", totalApples)
# excercise 2
kilometers = 12.25
miles = 7.38
miles_to_kilometers = miles * 1.61
kilometers_to_miles = kilometers / 1.61
print(miles, "miles is", round(miles_to_kilometers, 2), "kilometers")
print(kilometers, "kilometers is", round(kilometers_to_miles, 2), "miles")
#excercise 3
x = 1
x = float(x)
y = 3*x**3 - 2*x**2 + 3*x - 1
print("y =", y)
###Output
Total number of apples: 3
7.38 miles is 11.88 kilometers
12.25 kilometers is 7.61 miles
###Markdown
Inputs
###Code
anything = input("Enter a number: ")
something = float(anything) ** 2.0
print(anything, "to the power of 2 is", something)
###Output
_____no_output_____
###Markdown
Concat
###Code
fnam = input("May I have your first name, please? ")
lnam = input("May I have your last name, please? ")
print("Thank you.")
print("\nYour name is " + fnam + " " + lnam + ".")
###Output
_____no_output_____
###Markdown
Replication
###Code
print("+" + 10 * "-" + "+")
print(("|" + " " * 10 + "|\n") * 5, end="")
print("+" + 10 * "-" + "+")
leg_a = float(input("Input first leg length: "))
leg_b = float(input("Input second leg length: "))
print("Hypotenuse length is " + str((leg_a**2 + leg_b**2) ** .5))
###Output
Hypotenuse length is 36.05551275463989
###Markdown
 Exercise: Input
###Code
# input a float value for variable a here
a = float(input("please input a number :"))
# input a float value for variable b here
b = float(input("please input another number :"))
# output the result of addition here
print("a + b = ",a+b)
# output the result of subtraction here
print("a - b = ",a-b)
# output the result of multiplication here
print("a * b = ",a*b)
# output the result of division here
print("a / b = ",a/b)
print("\nThat's all, folks!")
###Output
_____no_output_____
###Markdown
 Exercise: Operators and expressions
###Code
x = float(input("Enter value for x: "))
count = 0
# put your code here
def try_recursion(x, count):
if(count == 2):
result = x + (1 / x)
return result
else:
count+=1
result = x + (1 / try_recursion(x, count))
return result
y = 1 / try_recursion(x, count)
print("y =", y)
hour = int(input("Starting time (hours): "))
mins = int(input("Starting time (minutes): "))
dura = int(input("Event duration (minutes): "))
# put your code here
def try_compute(hour, mins, dura):
    # work in minutes since midnight, then wrap around a 24-hour day
    total = (hour * 60 + mins + dura) % (24 * 60)
    end_hour = total // 60
    end_min = total % 60
    return end_min, end_hour
y = try_compute(hour, mins, dura)
print(y[1], ':', y[0])
###Output
0 : 11
|
nbs/01_data_man.ipynb | ###Markdown
 Data management
> Create, from FT1 and FT2, a compact data set with photon and livetime info.

Overview

Fermi-LAT weekly data files are extracted from the [GSFC FTP server](https://heasarc.gsfc.nasa.gov/FTP/fermi/data/lat/weekly), with subfolders for the photon data, `photon`, and spacecraft data, `spacecraft`. It is [described here](http://fermi.gsfc.nasa.gov/ssc/data/access/).

The class `FermiData` downloads these to temporary files and constructs a dict for each week with contents
* photons: a table, one entry per selected photon, with columns converted with `get_ft1_data`:
  * run number (uint32, stored as a category by pandas)
  * time since the run start, in 2 $\mu$s intervals (uint32)
  * energy and event type (uint8)
  * position as HEALPix index (uint32)
* sc_data: a table, one entry per 30-s interval, with columns, all float32, converted with `get_ft2_info`:
  * start/stop time
  * S/C direction
  * zenith direction
* gti_times: an array of interleaved start/stop intervals
* file_date: modification date for the FT1 file at GSFC.

These dict objects, one per week, are saved in a folder.

A note about timing

A run is typically an orbit or less, at most 6 ks. Integerizing the offset from the run start in 32 bits leaves room for roughly 7e5 intervals per second over 6 ks, so the chosen 2 $\mu$s tick (5e5 intervals/s) fits comfortably.
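For orientation, a minimal sketch of what one saved weekly file contains when loaded by hand (the file name below is illustrative; the real files live under `config.datapath/'data_files'`):
```python
import pickle
from pathlib import Path

week_file = Path('wtlike_data/data_files/week_600.pkl')  # hypothetical example path
with open(week_file, 'rb') as f:
    week = pickle.load(f)
print(week.keys())             # tstart, photons, sc_data, gti_times, file_date
print(week['photons'].head())  # band, nest_index (or ring_index), run_id, trun
```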
###Code
from nbdev.showdoc import *
# export
import os, sys
import dateutil, datetime
from astropy.io import fits
from ftplib import FTP_TLS as FTP
import healpy
from pathlib import Path
import numpy as np
import pandas as pd
import pickle
from wtlike.config import Config, Timer, UTC, MJD
# export
def get_ft1_data( config, ft1_file):
"""
Read in a photon data (FT1) file, bin in energy and position to convert to a compact DataFrame
- `ft1_file` -- A monthly file generated by J. Ballet, or a weekly file from GSFC
Depends on config items
- `theta_cut, z_cut` -- selection criteria
- `ebins, etypes` -- define band index
- `nside, nest` -- define HEALPix binning
Returns a tuple with
- `tstart`, the start MET time
- DataFrame with columns
- `band` (uint8): energy band index*2 + 0,1 for Front/Back
- `nest_index` if nest else `ring_index` (uint32): HEALPIx index for the nside
- `run_id` (uint32) The run number, stored as a categorical uint32 array
- `trun` (unit32): time since run the id in 2 $\mu s$ units
- gti times as an interleaved start, stop array.
For the selected events above 100 MeV, this represents 9 bytes per photon, vs. 27.
"""
delta_t = config.offset_size
ebins = config.energy_edges
etypes = config.etypes
nside = config.nside
nest = config.nest
z_cut =config.z_max
theta_cut = np.degrees(np.arccos(config.cos_theta_max))
verbose = config.verbose
with fits.open(ft1_file) as ft1:
tstart = ft1[1].header['TSTART']
## GTI - setup raveled array function to make cut
gti_data= ft1['GTI'].data
# extract arrays for values of interest
data =ft1['EVENTS'].data
a,b = sorted(gti_data.START), sorted(gti_data.STOP)
gti_times = np.ravel(np.column_stack((a,b)))
if np.any(np.diff(gti_times)<0):
            raise Exception(f'Non-monotonic GTI found')
def apply_gti(time):
x = np.digitize(time, gti_times)
return np.bitwise_and(x,1).astype(bool)
# apply selections
sel = ((data['ENERGY'] > ebins[0]) &
(data['ZENITH_ANGLE'] < z_cut) &
(data['THETA'] < theta_cut))
dsel = data[sel]
# get the columns for output
glon, glat, energy, et, z, theta, time, ec =\
[dsel[x] for x in 'L B ENERGY EVENT_TYPE ZENITH_ANGLE THETA TIME EVENT_CLASS'.split()]
# generate event_type masks
et_mask={}
for ie in etypes:
et_mask[ie]= et[:,-1-ie]
if verbose>1:
total = sum(b)-sum(a)
fraction = total/(b[-1]-a[0])
print( f'FT1: {ft1_file.name}, GTI range {a[0]:.1f}-{b[-1]:.1f}: {len(data):,} photons'\
f'\n\tSelection E > {ebins[0]:.0f} MeV. theta<{theta_cut:.1f} and z<{z_cut} remove:'\
f' {100.- 100*len(dsel)/float(len(data)):.2f}%'
# f', GTI cut removes {sum(~gti_cut)}'
)
# event class -- turn into single int for later mask
# bits = np.array([1<<n for n in range(20)])
# def to_bin(x):
# return np.sum(bits[x[:20]])
# ec = [to_bin(row[20]) for row in ec
# pixelate direction
hpindex = healpy.ang2pix(nside, glon, glat, nest=nest, lonlat=True).astype(np.uint32)
hpname = 'nest_index' if nest else 'ring_index'
        # digitize energy and create band index including (front/back)
band_index = (2*(np.digitize(energy, ebins, )-1) + et_mask[1]).astype(np.uint8)
#
run_id = dsel['RUN_ID'].astype(np.uint32)
df = pd.DataFrame(
{ 'band' : band_index,
hpname : hpindex,
#'time' : (time-tstart).astype(np.float32), # the old time
'run_id': pd.Categorical(run_id),
'trun' : ((time-run_id)/delta_t).astype(np.uint32),
} )
if verbose>1:
print(f'\tReturning tstart={tstart:.0f}, {len(dsel):,} photons.')
return tstart, df, gti_times
show_doc(get_ft1_data, title_level=2)
# export
def get_ft2_info(config, filename,
gti=lambda t: True):
"""Process a FT2 file, with S/C history data, and return a summary DataFrame
Parameters:
* config -- verbose, cos_theta_max, z_max
* filename -- spacecraft (FT2) file
    * gti -- GTI object that checks for allowed intervals, in MJD units
    Returns: A DataFrame with fields consistent with GTI if specified
    * start, stop -- interval in MJD units
    * livetime -- sec
    * ra_scz, dec_scz -- spacecraft direction
* ra_zenith, dec_zenith -- local zenith
"""
# combine the files into a DataFrame with following fields besides START and STOP (lower case for column)
fields = ['LIVETIME','RA_SCZ','DEC_SCZ', 'RA_ZENITH','DEC_ZENITH']
with fits.open(filename) as hdu:
scdata = hdu['SC_DATA'].data
tstart, tstop = [float(hdu[0].header[field]) for field in ('TSTART','TSTOP') ]
if config.verbose>1:
print(f'FT2: {filename.name}, MET range {tstart:.1f}-{tstop:.1f},', end='')# {"not" if gti is None else ""} applying GTI')
# get times to check against MJD limits and GTI
start, stop = [MJD(np.array(scdata.START, float)),
MJD(np.array(scdata.STOP, float))]
# apply GTI to bin center (avoid edge effects?)
in_gti = gti(0.5*(start+stop))
if config.verbose>1:
s = sum(in_gti)
print(f' {len(start)} entries, {s} ({100*s/len(start):.1f}%) in GTI')
t = [('start', start[in_gti]), ('stop',stop[in_gti])]+\
[(field.lower(), np.array(scdata[field][in_gti],np.float32)) for field in fields ]
sc_data = pd.DataFrame(dict(t) )
return sc_data
show_doc(get_ft2_info, title_level=2)
# export
def filepaths(week):
"""Returns: A tuple with two elements for the week number, each with two triplets with:
ftp folder, ftp filename, local simple filename
"""
urls = []
for ftype, alias in [('spacecraft','ft2'), ('photon','ft1')]:
urls.append((
f'{ftype}',
f'lat_{ftype}_weekly_w{week:03d}_p{"305" if ftype=="photon" else "310" }_v001.fits',
f'week{week:03d}_{alias}.fits',
))
return urls
# export
class FermiData(dict):
""" Manage the full data set in weekly chunks
* Checking the current set of files at GSFC
* downloading a week at a time to a local tmp
* Converting to condensed format and saving to pickled dicts in wtlike_data
"""
ftp_site = 'heasarc.gsfc.nasa.gov'
ftp_path = 'fermi/data/lat/weekly'
local_path = '/tmp/from_gsfc'
def __init__(self, config=None):
""" Obtain list of the weekly FT1 and FT2 files at GSFC, Set up as a dict, with
            keys= week numbers, values=modification date strings
"""
self.config = config or Config()
self.wtlike_data_file_path = Path(self.config.datapath/'data_files')
assert self.wtlike_data_file_path.is_dir(), 'Data path invalid'
os.makedirs(self.local_path, exist_ok=True)
try:
with FTP(self.ftp_site) as ftp:
ftp.login()
ftp.prot_p()
ftp.cwd(self.ftp_path+'/photon') # or spacecraft
                # get modification time and type for all files in folder
parse_week = lambda fn: int(fn.split('_')[3][1:])
flist = ftp.mlsd(facts=['modify', 'type'])
self.fileinfo = sorted([(parse_week(name), fact['modify']) for name,fact in flist
if fact['type']=='file' and name.startswith('lat') ])
except Exception as msg:
raise Exception(f'FTP login to or download from {self.ftp_site} failed: {msg}')
self.update(self.fileinfo)
@property
def local_filedate(self):
""" the datetime object representing the last file date in local storage"""
from dateutil.parser import parse
weekly_folder = self.config.datapath/'data_files'
ff = sorted(list(weekly_folder.glob('*.pkl')))
if len(ff)==0:
print(f'No .pkl files found in {weekly_folder}', file=sys.stderr)
return None
wk = list(map(lambda f: int(os.path.splitext(f)[0][-3:]), ff))
lastweek = pickle.load(open(ff[-1],'rb'))
return dateutil.parser.parse(lastweek['file_date'])
@property
def gsfc_filedate(self):
return dateutil.parser.parse(list(self.values())[-1])
def download(self, week):
""" Download the given week to the tmp folder
"""
assert week in self, f'week {week} not found at FTP site'
with FTP(self.ftp_site) as ftp:
ftp.login()
ftp.prot_p()
for ftp_folder, ftp_filename, local_filename in filepaths(week):
ftp.cwd('/'+self.ftp_path+'/'+ftp_folder)
if self.config.verbose>0:
print(f'FermiData: {ftp_folder}/{ftp_filename} --> {local_filename}')
with open(f'{self.local_path}/{local_filename}', 'wb') as localfile:
ftp.retrbinary('RETR ' + ftp_filename, localfile.write)
def __str__(self):
return f'FermiData: {len(self.fileinfo)} week files at GSFC, from {self.fileinfo[0]} to {self.fileinfo[-1]}'
def in_temp(self):
"""return list of GSFC copied files in the local_path folder"""
names = [f.name for f in Path(self.local_path).glob('*')]
return names
def __call__(self, week, test=False, tries_left=3):
""" Process the given week:
* download from GSFC
* convert each
* save pickled dict summary
"""
assert week in self, f'week {week} not found at FTP site'
ff = filepaths(week)
ft1_file = Path(self.local_path)/ff[1][2]
ft2_file = Path(self.local_path)/ff[0][2]
if self.config.verbose>1:
print(f'FermiData: converting week {week}')
while tries_left>0:
try:
if not (ft1_file.exists() and ft2_file.exists()):
self.download(week)
tstart, photon_data, gti_times = get_ft1_data(self.config, ft1_file)
break
except Exception as e:
print(f'*** ERROR *** Failed to convert {ft1_file}: {e} download it again)')
os.unlink(ft1_file)
tries_left -=1
def apply_gti(time): # note MJD
x = np.digitize(time, MJD(gti_times))
return np.bitwise_and(x,1).astype(bool)
sc_data = get_ft2_info(self.config, ft2_file, apply_gti)
# finished with copies of FT1 and FT2 files: delete them
for file in (ft1_file,ft2_file):
os.unlink(file)
# package info into a dict for pickle
d = dict(tstart = tstart,
photons = photon_data,
sc_data = sc_data,
gti_times = gti_times,
file_date = self[week])
filename = self.wtlike_data_file_path/f'week_{week:03d}.pkl'
if filename.exists():
print(f'FermiData: replacing existing {filename}')
if not test:
with open(filename, 'wb') as out:
pickle.dump(d, out)
if self.config.verbose>0:
print(f'FermiData: Saved to {filename}')
if self.config.verbose>1:
print(photon_data.info())
def load_week(self, week):
"""Load a pickled week summary """
filename = self.wtlike_data_file_path/f'week_{week:03d}.pkl'
assert filename.exists(), f'File {filename} does not exist'
with open(filename, 'rb') as imp:
ret = pickle.load(imp)
return ret
def check_week(self, week):
"""Returns True if the local week needs updating"""
data = self.load_week(week)
if 'file_date' not in data:
return True
return data['file_date'] == self[week]
def needs_update(self, threshold=0):
""" Compare files on disk with the GSFC list and compile list that need to be downloaded
Check the file date of the last one on disk and include it if:
* it short and there is one or more GSFC weeks following it,
* It is the most recent week and is short by more than *threshold* days
"""
gg =self.wtlike_data_file_path.glob('*.pkl')
file_weeks= map(lambda n: int(n.name[5:8]), gg)
ondisk = np.array(list(file_weeks))
missing = list(set(self.keys()).difference(set(ondisk)))
last = ondisk[-1]
if last not in missing and not self.check_week( last):
delta = (self.gsfc_filedate -self.local_filedate).seconds/24/3600
if delta> threshold:
missing.append(last)
return missing
def process(self, days=1):
""" Download and process all weeks missing or needing an update, if within `days`
from the present
Return status: True if anything changed
"""
        # will use multiprocessing if len(todo)>1 and pool_size>1
todo = self.needs_update(days)
if len(todo)==0: return False
if self.config.pool_size >1 and len(todo)>1:
print('multitasking not applied yet', file=sys.stderr)
pass
list(map(self, todo))
return True
# fd = FermiData(); print(fd)
# fd.local_filedate, fd.gsfc_filedate
# filename = fd.local_files()[-1]
# with open(filename, 'rb') as imp:
# fs= pickle.load(imp)
# fs.keys()
# ff = fd.local_files()
# wk = list(map(lambda f: int(os.path.splitext(f)[0][-3:]), ff))
# # get gti for weeks
# tt = []
# for f in ff:
# with open(f, 'rb') as imp:
# fs = pickle.load(imp)
# gti = fs['gti_times']
# tt += [gti[0], gti[-1]]
# uu = MJD(np.array(tt)); uu
# import matplotlib.pyplot as plt
# from wtlike.config import first_data, day
# plt.plot( wk, (uu[0::2]-first_data)/7 -wk+9.7, '.')
# ft = lambda t: (t-first_data) / 7 + 9.7
# ft(54685.15), ft(59464.84)
# def find_week(self, mjd):
# """ find the week number that contains the MJD """
# # estimate that could be off on boundary -- assume not more than a week
# week= int((mjd-first_data) / 7 + 9.7 )
# x = self.load_week(week)
# # check with actual GTI info
# gti = x['gti_times']
# first, last = MJD(gti[0]), MJD(gti[-1])
# if mjd< first: return week-1
# elif mjd>last: return week+1
# return week
###Output
_____no_output_____
###Markdown
FermiData methods
###Code
show_doc(FermiData.download)
show_doc(FermiData.needs_update)
self = FermiData()
print(self, self.needs_update())
show_doc(FermiData.process)
show_doc(FermiData.check_week)
config = Config()
if config.valid:
self = FermiData(Config(verbose=1))
check =self.needs_update(0.5)
print(self, f'\n\tweek(s) needing update: {check}' )
#hide
# with Timer() as t:
# self.process(0.5)
# print(t)
###Output
_____no_output_____
###Markdown
check the weekly files
###Code
# export
def check_data(config=None):
"""
Return: sorted list of files, last week number, number of days in last week"""
if config is None: config=Config()
if config.valid:
weekly_folder = config.datapath/'data_files'
ff = sorted(list(weekly_folder.glob('*.pkl')))
if len(ff)==0:
print(f'No .pkl files found in {weekly_folder}', file=sys.stderr)
return
wk = list(map(lambda f: int(os.path.splitext(f)[0][-3:]), ff))
lastweek = pickle.load(open(ff[-1],'rb'))
file_date = lastweek['file_date']
gti = lastweek['gti_times'];
days = (gti[-1]-gti[0])/(24*3600)
if config.verbose>0:
print(f'Weekly folder "{weekly_folder}" contains {len(wk)} weeks.'\
f'\n\t Last week, # {wk[-1]}, has {days:.3f} days, ends at UTC {UTC(MJD(gti[-1]))}, filedate {file_date}' )
return ff, wk[-1], days
else:
print(f'Config not valid, {config.errors}', file=sys.stderr)
return None
#ret = check_data()
# export
def update_data(update_threshold=1, config=None):
"""Bring all of the local week data summaries up to date, downloading the missing ones from GSFC.
If the last one is the current week, check to see if it needs updating, by comparing the file date, in days,
from the last update with the current one at GSFC.
"""
ff = FermiData(config)
return ff.process(update_threshold)
#export
def get_week_files(config, week_range=None):
"""Return list of week files
- week_range [None] -- tuple with inclusive range. If None, get all
"""
data_folder = config.datapath/'data_files'
data_files = sorted(list(data_folder.glob('*.pkl')))
weeks = week_range or config.week_range
if week_range is not None:
slc = slice(*week_range) if type(week_range)==tuple else slice(week_range,week_range)
wk_table = pd.Series(data=[df for df in data_files],
index= [ int(df.name[-7:-4]) for df in data_files],
)
data_files = wk_table.loc[slc].values
if config.verbose>0:
q = lambda x: x if x is not None else ""
print(f'LoadData: Loading weeks[{q(slc.start)}:{q(slc.stop)}:{q(slc.step)}]', end='' if config.verbose<2 else '\n')
else:
if config.verbose>0: print(f'LoadData: loading all {len(data_files)} weekly files')
if len(data_files)==0:
msg = f'Specified week_range {week_range} produced no output. Note that week numbers start at 9.'
raise Exception(msg)
return data_files
# hide
from nbdev.export import notebook2script
notebook2script()
!date
###Output
Converted 00_config.ipynb.
Converted 01_data_man.ipynb.
Converted 02_effective_area.ipynb.
Converted 03_exposure.ipynb.
Converted 03_sources.ipynb.
Converted 04_load_data.ipynb.
Converted 04_simulation.ipynb.
Converted 05_source_data.ipynb.
Converted 06_poisson.ipynb.
Converted 07_loglike.ipynb.
Converted 08_cell_data.ipynb.
Converted 09_lightcurve.ipynb.
Converted 14_bayesian.ipynb.
Converted 90_main.ipynb.
Converted 99_presentation.ipynb.
Converted 99_tutorial.ipynb.
Converted index.ipynb.
Tue Oct 5 09:29:58 PDT 2021
|
password_with_requirement.ipynb | ###Markdown
###Code
import random
import string
def randomPassword(size):
all_chars = string.ascii_letters + string.digits + string.punctuation
password = ""
password += random.choice(string.ascii_lowercase)
password += random.choice(string.ascii_uppercase)
password += random.choice(string.digits)
password += random.choice(string.punctuation)
for i in range(size-4):
password += random.choice(all_chars)
return password
pass_len = int(input("What would be the password length? "))
print ("First Random Password is:", randomPassword(pass_len))
print ("Second Random Password is:", randomPassword(pass_len))
print ("Third Random Password is:", randomPassword(pass_len))
###Output
What would be the password length? 8
First Random Password is: cR3/id5V
Second Random Password is: jX2@J<N_
Third Random Password is: dA3%8,mC
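###Markdown
A possible refinement (an assumption, not part of the original exercise): because the first four characters above always appear in the order lowercase, uppercase, digit, punctuation, the result can be shuffled so the required character classes land in unpredictable positions.
###Code
import random
import string

def random_password_shuffled(size):
    # Same requirements as above, but shuffle so the guaranteed characters
    # are not always at the start of the password
    all_chars = string.ascii_letters + string.digits + string.punctuation
    chars = [
        random.choice(string.ascii_lowercase),
        random.choice(string.ascii_uppercase),
        random.choice(string.digits),
        random.choice(string.punctuation),
    ]
    chars += [random.choice(all_chars) for _ in range(size - 4)]
    random.shuffle(chars)
    return "".join(chars)

print("Shuffled Random Password is:", random_password_shuffled(8))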
|
2020/ode_numerico/ode_numerico.ipynb | ###Markdown
How to solve physics problems in real life? Nicolás Guarín-Zapata About me ([nicoguaro.github.io](nicoguaro.github.io)) Description Research Interests - Computational physics - Materials design Teaching - Computational modeling - Numerical methods - Advanced mathematics for engineers
###Code
# Hidden cell for code
import numpy as np
###Output
_____no_output_____ |
Learning Notebooks/BLU12 - Learning Notebook - Part 3 of 3 - Advanced Topics.ipynb | ###Markdown
BLU12 - Learning Notebook- Part 3 of 3 - Advanced Topics
###Code
import os
from collections import defaultdict
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
from surprise import Dataset, Reader
from surprise.model_selection import train_test_split
from surprise.prediction_algorithms import KNNBasic
from scipy.sparse import coo_matrix, dok_matrix
from make_data import make_data
from export_ratings import export_ratings
###Output
_____no_output_____
###Markdown
1 Implicit FeedbackMost times, RS algorithms ingest implicit feedback, i.e., unary ratings, to understand user preferences.In such cases, the unary data indicates whether a user $u \in U$ performed a given action (e.g., click, purchase).Afterward, this data is used on its own or combined with explicit ratings.In a way, ratings from unary data are ratings $r_{ui} \in S = \{1\}$, i.e., with a singleton or unit set of possible ratings $S$.Absent ratings $r_{ui} \notin R$ indicate that we have no information relating the user $u$ to the item $i$, just like before.(Perhaps the user purchased the item somewhere else, or the user didn't click the item because they didn't see it.)![collaborative_filtering_unary](../media/collaborative_filtering_unary.png)We make, however, some distinctions. 1.1 ExampleWe generated some fake unary data, using the [Faker](https://faker.readthedocs.io/en/master/) package.In `Learning Notebooks/make_data.py`, you find the `make_data()` function that generates two COO sparse matrices. This function is exactly the same as in the learning materials and exercises from [BLU11](https://github.com/LDSSA/batch2-blu11), so we don't repeat it.
###Code
users, items, clicks, purchases = make_data()
clicks
purchases
###Output
_____no_output_____
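###Markdown
The actual `make_data()` lives in `Learning Notebooks/make_data.py` and is deliberately not repeated here; the cell below is only a minimal, hypothetical sketch (with made-up sizes and event counts) of how unary events can be packed into a COO matrix.
###Code
# Minimal sketch, assuming 50 users, 50 items and 500 random unary events
from scipy.sparse import coo_matrix
from numpy.random import RandomState

rng = RandomState(42)
rows = rng.randint(0, 50, size=500)   # user indices
cols = rng.randint(0, 50, size=500)   # item indices
vals = np.ones(500)                   # unary ratings
sketch_clicks = coo_matrix((vals, (rows, cols)), shape=(50, 50))
sketch_clicks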
###Markdown
The data contains exactly 50 users and 50 items, i.e., $|U| = 50$ and $|I| = 50$.We include 500 clicks and 500 purchases for us to play with. 1.2 Duplicate EntriesFor starters, the user $u \in U$ can perform an action, i.e., implicitly rate, multiple times for the same item $i$.This violates the assumptions of the matrix $R$, so upstream consolidation is required, enforcing one rating $r_{ui}$ for each pair $(u, i) \in U \times I$.Again, let $A$ be the set of unary ratings, i.e., $a_{ui} \in S = \{1\}$, for user-item pairs $(u, i) \in U \times I$, which contains duplicate pairs.A common technique is to sum together duplicate entries, as:$$\sum\limits_{(u, i) \in U \times I} a_{ui}$$As we've seen in [BLU11](https://github.com/LDSSA/batch2-blu11), this is the default behavior when we convert from COO to CSR.
###Code
clicks_ = clicks.tocsr()
clicks_
###Output
_____no_output_____
###Markdown
The reduction from 500 to 460 stored elements in the matrix is due to the consolidation.We can confirm this by calling `.max()` on it.
###Code
clicks_.max()
purchases_ = purchases.tocsr()
purchases_
purchases_.max()
###Output
_____no_output_____
###Markdown
Another conventional technique is to use the logarithm of the sum, instead.$$\log{\sum\limits_{(u, i) \in U \times I} a_{ui}}$$The log transformation is particularly useful with right-skewed distributions, i.e., not centered, with a peak on the left and a tail on the right.(Imagine a user $u$ with few clicks on many items and many clicks on a few items, which is very common.)We can apply this quickly, if we so choose, by applying the logarithm element-wise on the resulting matrix.
###Code
clicks_.log1p()
purchases.log1p()
###Output
_____no_output_____
###Markdown
1.3 Inferring RatingsAlso, since we have multiple signals relating the user $u$ to item $i$, we have to consolidate them into a single rating.Different signals (e.g., impressions, clicks, purchases) have distinct signal-to-noise ratios and levels of intent and, thus, may require different weights.Consider the set $D$, containing all types of implicit feedback, e.g., $D = \{Click, Purchase\}$, with the associated weights $W$.We can compute the ratings $r_{ui}$, for $(u, i) \in U \times I$, as:$$r_{ui} = \sum\limits_{(u, i) \in U \times I} \Big(\sum\limits_{d \in D} w_d \cdot a_{ui}^d \Big)$$In our example, we attribute more relevance to purchases than clicks.(Please note that Python silently converts from COO to CSR, summing together duplicate entries by default.)
###Code
def make_ratings(c, p, w_c, w_p):
return w_c * c + w_p * p
ratings = make_ratings(clicks, purchases, .3, .7)
ratings
###Output
_____no_output_____
###Markdown
1.4 Exporting RatingsOnce we have final ratings, it's good practice to export them in long-form, using the `'uid,iid,rating'` convention.We can do this easily, by converting back to COO and using the `.row`, `.col` and `.data` attributes.
###Code
ratings_ = ratings.tocoo()
uid = np.array([users[row] for row in ratings_.row], dtype='O')
iid = np.array([items[col] for col in ratings_.col], dtype='O')
data = ratings_.data
###Output
_____no_output_____
###Markdown
For full implementation detail and NumPy nitty gritty, refer to `Learning Notebooks/export_ratings.py`.
###Code
export_ratings(users, items, ratings)
###Output
_____no_output_____
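###Markdown
A minimal sketch of what such an export helper might look like (the actual `Learning Notebooks/export_ratings.py` may differ): build a long-form DataFrame following the `uid,iid,rating` convention and write it to disk.
###Code
# Sketch only -- assumes the uid, iid and data arrays built above
def export_ratings_sketch(uid, iid, data, path='ratings_sketch.csv'):
    ratings_long = pd.DataFrame({'uid': uid, 'iid': iid, 'rating': data})
    ratings_long.to_csv(path, index=False)
    return ratings_long.head()

export_ratings_sketch(uid, iid, data)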
###Markdown
From here onwards, we can use all the RS techniques we have learned.(Including using the Surprise package.) 2 Generating top-*N* ListsOften, we task the RS with recommending a list $L_u$, containing $N$ items likely to be of interest to an active user $u$.This type of output is particularly frequent in the presence of implicit feedback and unary data, as ratings lose meaning *per se*.How can we generate such a list $L_u$, using Surprise?
###Code
dataset = Dataset.load_builtin('ml-100k')
R_train = dataset.build_full_trainset()
###Output
_____no_output_____
###Markdown
We will use the `KNNBasic` to generate predictions, with all the defaults.(This may take a few minutes.)
###Code
knn = KNNBasic()
knn.fit(R_train)
R_test = R_train.build_anti_testset()
R_pred = knn.test(R_test)
###Output
_____no_output_____
###Markdown
From the Surprise documentation, [this](https://surprise.readthedocs.io/en/stable/FAQ.htmlhow-to-get-the-top-n-recommendations-for-each-user) is the recommended way to extract a top-$N$ list for each user. (Slightly adapted, so that we can use it in the future).
###Code
def get_top_n(predictions, n=10):
top_n = defaultdict(list)
for uid, iid, true_r, est, _ in predictions:
top_n[uid].append((iid, est))
for uid, user_ratings in top_n.items():
user_ratings.sort(key=lambda x: x[1], reverse=True)
top_n[uid] = [x[0] for x in user_ratings[:n]]
return pd.DataFrame.from_dict(data=top_n, orient='index')
L = get_top_n(R_pred, n=10)
L.head()
###Output
_____no_output_____ |
first_pytorch.ipynb | ###Markdown
Tasks: 1. Add GPU
###Code
import torch
import numpy as np
import matplotlib.pyplot as plt
dtype = torch.float
device = torch.device("cpu")
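# Task 1 (Add GPU) -- a possible approach, not part of the original notebook:
# select CUDA when available and fall back to CPU, e.g.
#   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Tensors and weights would then be created on (or moved to) that device,
# e.g. x = x.to(device), y = y.to(device), w = w.to(device).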
N, D_in, H, D_out = 16, 4, 0, 1
# Create random input and output data
x_numpy = np.array( [[0., 0., 0., 1.],
[1., 0., 0., 1.],
[0., 1., 0., 1.],
[0., 0., 1., 1.],
[1., 1., 0., 1.],
[1., 0., 1., 1.],
[0., 1., 1., 1.],
[1., 1., 1., 1.],
[0., 0., 0., 0.],
[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 1., 0.],
[1., 1., 0., 0.],
[1., 0., 1., 0.],
[0., 1., 1., 0.],
[1., 1., 1., 0.]])
x = torch.from_numpy(x_numpy).float()
print(x)
y_numpy = np.array( [[1.],
[1.],
[1.],
[1.],
[1.],
[1.],
[1.],
[1.],
[0.],
[0.],
[0.],
[0.],
[0.],
[0.],
[0.],
[0.]])
y = torch.from_numpy(y_numpy).float()
w = torch.randn(D_in, D_out, device=device, dtype=dtype, requires_grad=True)
print(w)
learning_rate = 1e-4
loss_list = []
for t in range(5):
y_pred = x.mm(w)
loss = (y_pred - y).pow(2).sum()
loss_list.append(loss.item())
loss.backward()
with torch.no_grad():
w -= learning_rate * w.grad
w.grad.zero_()
plt.plot(loss_list, label = 'loss')
plt.legend()
plt.show()
###Output
_____no_output_____ |
notebooks/Module3-Pyhton-Programming-Fundamentals/PY0101EN-3.2_notebook_quizz_Loops.ipynb | ###Markdown
For Loops Use loops to print out the elements in the list A
###Code
A=[3,4,5]
for i in A:
print(i)
###Output
3
4
5
###Markdown
Click here for the solution: `for i in A: print(i)` While Loops Find the value of x that will print out the sequence 1, 2, ..., 10
###Code
x = 11
y=1
while(y != x):
print(y)
y=y+1
###Output
_____no_output_____ |
AutoEncoder_CAVE_new.ipynb | ###Markdown
Review Configspace
###Code
configspace.get_hyperparameters() # List of all hyperparameters with name, type, and choices/interval
configspace.get_hyperparameters()[0] # Indexing returns the hyperparameter at that position
configspace.get_hyperparameters()[0].name # Inspect a single attribute of this hyperparameter
configspace.get_hyperparameter_names() # List of the names of all hyperparameters
training.get_all_configs_combined() # List of all configurations
training.get_all_configs_combined()[0] # Returns the configuration at the given index
name = configspace.get_hyperparameters()[0].name
training.get_all_configs_combined()[0].get(name) # Get value of the configuration of the defined hyperparameter
###Output
_____no_output_____
###Markdown
Convert Data Case 1: * standardize continuous data * replace NaN with -1
###Code
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for hp in configspace.get_hyperparameters():
if type(hp) is CategoricalHyperparameter:
hp.choices = hp.choices + (-1234, )
values = [OneHotEncoder(categories='auto').fit((np.sort(np.array(hp.choices)).reshape((-1,1))))
if type(hp) is CategoricalHyperparameter
else (StandardScaler().fit(np.array([confi.get(hp.name) for confi in training.get_all_configs_combined()]).reshape(-1, 1))
if type(hp) in {UniformFloatHyperparameter, UniformIntegerHyperparameter, OrdinalHyperparameter}
else None)
for hp in configspace.get_hyperparameters()]
for i in range(len(values)):
if type(values[i]) == StandardScaler and type(values[i]) != OneHotEncoder:
pass
elif type(values[i]) == OneHotEncoder and type(values[i]) != StandardScaler:
pass
else:
print("Fehler")
config = training.get_all_configs_combined()[0]
for hp in configspace.get_hyperparameters():
if type(hp) is CategoricalHyperparameter:
print(hp.name, hp.choices)
# print(config)
# print(hp)
# OneHotEncoder(categories='auto').fit(np.vstack((np.sort(np.array(hp.choices)).reshape((-1,1)), [[-1]])))
#one_hot_encode(training.get_all_configs_combined()[0])
# one hot encoding
def one_hot_encode(config):
# Code from PhMueller
# Create array with one hot encoded values
result_vec = np.array([]).reshape((-1, 1)) # .astype(object)
for i, hp in enumerate(configspace.get_hyperparameters()):
val = np.array(config.get(hp.name)).reshape((-1, 1))
# print(val)
# case if categorical
if type(values[i]) is OneHotEncoder:
if val == [[None]]:
# val = np.array(['-1']).reshape((-1, 1))
val = np.array([['-1234']])
if len(result_vec) == 0:
result_vec = values[i].transform(val).toarray() # .astype(object)
else:
result_vec = np.hstack((result_vec,
values[i].transform(val).toarray()))
# if it is continous
else:
if val == [[None]]:
if len(result_vec) == 0:
result_vec = np.array([-1234]).reshape((-1, 1))
else:
result_vec = np.hstack((result_vec, [[-1234]]))
elif len(result_vec) == 0:
result_vec = values[i].transform(val)
else:
result_vec = np.hstack((result_vec,
values[i].transform(val)))
return result_vec
for i in range(len(values)):
if i == None:
print("Error")
convert_data = np.array([]).reshape((-1, 1))
for confi in range(len(training.config_ids)):
if confi % 500 == 0:
print(confi)
if len(convert_data) == 0:
convert_data = one_hot_encode(training.get_all_configs_combined()[confi])
continue
convert_data = np.vstack((convert_data, one_hot_encode(training.get_all_configs_combined()[confi])))
print(len(convert_data))
# [one_hot_encode(training.get_all_configs_combined()[confi]) for confi in range(len(training.config_ids))]
convert_data_transform = np.array([]).reshape((-1, 1))
for confi in range(len(transform.config_ids)):
if confi % 10 == 0:
print(confi)
if len(convert_data_transform) == 0:
convert_data_transform = one_hot_encode(transform.get_all_configs_combined()[confi])
continue
convert_data_transform = np.vstack((convert_data_transform, one_hot_encode(transform.get_all_configs_combined()[confi])))
print(len(convert_data_transform))
convert_data.shape[1] == convert_data_transform.shape[1]
np.save("convert_data.npy", convert_data)
convert_data.shape
np.load("convert_data.npy")
###Output
_____no_output_____
###Markdown
AutoEncoder
###Code
class Softmax_BA(nn.Module):
"""My own class with softmax and crossentropy to transform tensor back in original strucutre"""
__constants__ = ['dim']
def __init__(self, num_category, transform_list, confi, dim=None):
super(Softmax_BA, self).__init__()
self.num_cat = num_category
self.transform_list = transform_list
self.configspace = confi
self.dim = dim
def forward(self, x):
indexing = 0
x_ = x.clone()
softmax = nn.Softmax(dim=1)
for num in range(len(self.configspace.get_hyperparameters())):
if type(self.transform_list[num]) == OneHotEncoder:
x_[:, indexing:indexing+self.num_cat[num]] = softmax(x[:, indexing:indexing+self.num_cat[num]])
indexing += self.num_cat[num]
else:
indexing += 1
x = x_
return x# Variable(x.data, requires_grad=True)
class Autoencoder(nn.Module):
""" Our autoencoder class. """
def __init__(self, length, act_f, num_layers):
super(Autoencoder, self).__init__()
if act_f.lower() == 'relu':
self.act_f = torch.nn.ReLU()
else:
self.act_f = torch.nn.Tanh()
assert num_layers > 1
self.encoder_layer = nn.ModuleList(
[nn.Linear(int(length/(i+1)), int(length/(i+2))) for i in range(num_layers-1)]
)
self.encoder_layer.extend([nn.Linear(int(length/(num_layers)), 2)])
self.decoder_layer = nn.ModuleList(
[nn.Linear(2, int(length/(num_layers)))]
)
self.decoder_layer.extend(
[nn.Linear(int(length/(i+1)), int(length/(i))) for i in range(num_layers-1, 0, -1)]
)
def encoder(self, x):
for i, layer in enumerate(self.encoder_layer):
x = layer(x)
x = self.act_f(x) if i < len(self.encoder_layer) - 1 else x
return x
def decoder(self, x):
for i, layer in enumerate(self.decoder_layer):
x = layer(x)
x = self.act_f(x) if i < len(self.decoder_layer) - 1 else x
return x
def forward(self, x):
x = self.encoder(x)
x = self.decoder(x)
x = Softmax_BA(num_cat, values, configspace, dim=1)(x)
return x
def give_latent_image(self, x):
x = self.encoder(x)
return x
num_cat = []
for hp in configspace.get_hyperparameters():
if type(hp) == CategoricalHyperparameter:
num_cat.append(len(hp.choices))
else:
num_cat.append(False)
def train(model, X_train, X_test, num_epochs, learning_rate, weight_decay=1e-5, plot_interval=10, verbose=False):
loss_history = list()
test_loss_history = list()
# criterion = loss_function()
optimizer = torch.optim.Adam(model.parameters(),
lr=learning_rate,
weight_decay=weight_decay)
for epoch in range(num_epochs):
# Get a new batch of data, 64 key-value pairs in it
ids = np.random.choice(X_train.shape[0], 64, replace=False)
X_train = X_train[ids]
# Convert to torch tensor, usually you also need to convert to float as in here.
# X_train = torch.tensor(X_train).float()
# X_test = torch.tensor(X_test).float()
# Forward. Encodes and decodes and gives us the model's prediction.
# model() actually calls 'forward()'
output = model(X_train)
output_test = model(X_test)
# Calculate loss, defined above as mean squared error
loss = loss_function(output, X_train, num_cat)
loss_test = loss_function(output_test, X_test, num_cat)
# === The backpropagation
# Reset the gradients
optimizer.zero_grad()
# Calculate new gradients with backpropagation
loss.backward()
# Tune weights accoring to optimizer (it has the learnrate and weight decay as defined above)
optimizer.step()
# To do output stuff with loss and image, we have to detach() and convert back to numpy.
loss = loss.detach().numpy()
loss_test = loss_test.detach().numpy()
# Append to loss history
loss_history.append(loss)
test_loss_history.append(loss_test)
if verbose:
print('Epoch: ' + str(epoch) + ". Train loss: " + str(loss.item()) + " Test loss: " + str(loss_test.item()))
if epoch % plot_interval == 0 and epoch != 0:
print('First 5x5 Dimension of prediction \n ')
print(X_train[0, 22:31])
print(output[0, 22:31])
print("-"*100)
"""low_dim_train = model.give_latent_image(X_train)
low_dim_test = model.give_latent_image(X_test)
low_dim_train = low_dim_train.detach().numpy()
low_dim_test = low_dim_test.detach().numpy()
plt.scatter(low_dim_train[:, 0], low_dim_train[:, 1], s=10.0,label="train points")
plt.scatter(low_dim_test[:, 0], low_dim_test[:, 1], s=10.0,label="test points")
plt.legend()
plt.show()"""
return loss_history, test_loss_history, model
def test(trained_model, X, num_plot):
""" Test our autoencoder. """
for i in range(num_plot):
"""index = 0
for cats in num_cat:
if cats == False:
index += 1
continue
plt.bar(np.arange(cats), X[i][index:index+cats], label="true", alpha=0.3)
plt.bar(np.arange(cats), output[i][index:index+cats], label="prediction", alpha=0.3)
plt.legend()
plt.show()
index += cats
print("last index true: " + str(X[i][-1]) + ", prediction: " + str(output[i][-1]))"""
ids = np.random.choice(X.shape[0], 100)
X = X[ids]
X = torch.tensor(X).float()
output = trained_model(X)
loss = loss_function(output, X, num_cat)
loss = loss.detach().numpy()
X = X.detach().numpy()
output = output.detach().numpy()
print("Input: \n %s \n Output: \n %s" % (X[:2, 15:25], output[:2, 15:25]))
print("Train loss: " + str(loss.item()))
print("-" * 10)
import random
division = int(len(training.config_ids)* 0.75)
ids = np.arange(convert_data.shape[0])
np.random.shuffle(ids)
train_ids = ids[:division]
test_ids = ids[division:]
def cross_entropy_one_hot(input, target):
_, labels = target.max(dim=1)
return nn.CrossEntropyLoss()(input, labels)
def loss_function(label, predition, num_category):
indexing = 0
categorical_Loss = 0
mse = nn.MSELoss()
mse_Loss = 0
for num, hp in enumerate(configspace.get_hyperparameters()):
if type(hp) == CategoricalHyperparameter:
confi_pred = predition[:, indexing:indexing+num_category[num]]
confi_lable = label[:, indexing:indexing+num_category[num]]
categorical_Loss += cross_entropy_one_hot(confi_lable, confi_pred)
indexing += num_category[num]
else:
mse_Loss += mse(label[:, indexing], predition[:, indexing])
indexing += 1
#print("MSE: %s" % mse_Loss)
#print("CE: %s" % categorical_Loss)
return mse_Loss + categorical_Loss
# New model
model = Autoencoder(convert_data.shape[1], "tanh", 3)
# Train the model and return loss history
loss_history, test_loss_history, new_model = train(model,
X_train=torch.tensor(convert_data[train_ids]).float(),
X_test=torch.tensor(convert_data[test_ids]).float(),
num_epochs=1000,
learning_rate=1e-5,
weight_decay=1e-5,
plot_interval=100,
verbose=True)
# Plot the loss history. Careful: It's the train loss
plt.plot(loss_history, label="train")
plt.plot(test_loss_history, label="test")
plt.legend()
plt.show()
# Print the test loss and plot some example images
test(new_model, convert_data_transform, num_plot=100)
X = torch.tensor(convert_data).float()
Z = torch.tensor(convert_data_transform).float()
low_dim_rand = model.give_latent_image(X)
low_dim_rand = low_dim_rand.detach().numpy()
low_dim_local = model.give_latent_image(Z)
low_dim_local = low_dim_local.detach().numpy()
plt.scatter(low_dim_rand[:, 0], low_dim_rand[:, 1], s=10.0,label="random points")
plt.scatter(low_dim_local[:, 0], low_dim_local[:, 1], s=10.0,label="random points")
plt.show()
from ConfigSpace.read_and_write import json
with open('./config_space.json', 'w') as f:
f.write(json.write(configspace))
X = torch.tensor(convert_data).float()
low_dim = model.give_latent_image(X)
low_dim = low_dim.detach().numpy()
plt.scatter(low_dim[:, 0], low_dim[:, 1],) # label="local points")
# plt.legend()
plt.show()
def calculate_costvalue(dists, red_dists):
"""
Helper function to calculate the cost value, i.e. how large the difference between the distances in the embedding
and in the original space is.
Parameters
----------
dists: np.array, shape(n_samples, n_samples)
Matrix of the distances in the original space.
red_dists: np.array, shape(n_samples, k_dimensions)
Coordinates of the samples in the low-dimensional embedding space.
Returns
--------
costvalue: float
Costvalue of the distances of the two spaces.
costvalue = mean over all pairs i<j of (distance_high_space_ij - distance_low_space_ij)**2
"""
n_conf = dists.shape[0]
low_dists = euclidean_distances(red_dists)
costvalue = []
mean_actual = []
for i in range(n_conf - 1):
for j in range(i+1, n_conf):
costvalue.append((dists[i][j] - low_dists[i][j])**2)
mean_actual.append(low_dists[i][j])
mean_actual_value = sum(mean_actual) / len(mean_actual)
actual = [(mean_actual_value - dif)**2 for dif in mean_actual]
pred_actual = sum(costvalue)
rse = pred_actual / sum(actual)
costvalue = sum(costvalue) / len(costvalue)
print("costvalue")
print(costvalue)
print("rse")
print(rse)
return costvalue
# Softmax
m = nn.Softmax(dim=1)
test = torch.randn(2, 3)
output = m(test)
print(test)
print(output)
loss = nn.CrossEntropyLoss()
input = torch.randn(4, 1, requires_grad=True)
target = torch.empty(4, dtype=torch.long).random_(1)
output = loss(input, target)
output.backward()
input
torch.empty(4, dtype=torch.long).random_(1)
image = output.detach().numpy()
# image = image[0].reshape(image.shape[1])
plt.imshow(image)
###Output
_____no_output_____
###Markdown
Misc One-hot-encoder version with -1 for each one-hot dimension → a NaN for a categorical with 4 choices becomes [-1, -1, -1, -1]
###Code
# one hot encoding
def one_hot_encode(config):
# Code from PhMueller
# Create array with one hot encoded values
result_vec = np.array([]).reshape((-1, 1)) # .astype(object)
for i, name in enumerate(configspace.get_hyperparameter_names()):
val = np.array(config.get(name)).reshape((-1, 1))
# Case if this value is not given in the configuration
if val == [[None]]:
# Test, maybe this is not working
if len(result_vec) == 0 and type(configspace.get_hyperparameter(name)) == CategoricalHyperparameter:
cats = len(configspace.get_hyperparameters()[i].choices)
result_vec = np.array([-1] * cats).reshape((1, len(np.array([-1] * cats))))
elif len(result_vec) == 0 and type(configspace.get_hyperparameter(name)) != CategoricalHyperparameter:
result_vec = np.array([-1]).reshape((-1, 1))
elif len(result_vec) > 0 and type(configspace.get_hyperparameter(name)) == CategoricalHyperparameter:
cats = len(configspace.get_hyperparameters()[i].choices)
result_vec = np.hstack((result_vec, np.array([-1] * cats).reshape((1, len(np.array([-1] * cats))))))
else:
result_vec = np.hstack((result_vec, [[-1]]))
# case if categorical
elif type(values[i]) is OneHotEncoder:
if len(result_vec) == 0:
result_vec = values[i].transform(val).toarray() # .astype(object)
else:
result_vec = np.hstack((result_vec,
values[i].transform(val).toarray()))
# if it is one
else:
if len(result_vec) == 0:
result_vec = values[i].transform(val)
else:
result_vec = np.hstack((result_vec,
values[i].transform(val)))
return result_vec
oe = OneHotEncoder(categories='auto').fit(np.array([1,2,'-1']).reshape((-1,1)))
oe.categories_
oe.transform(np.array(1).reshape((-1, 1))).toarray()
###Output
/home/abj/anaconda3/envs/visHyp/lib/python3.5/site-packages/numpy/lib/arraysetops.py:518: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison
mask &= (ar1 != a)
/home/abj/anaconda3/envs/visHyp/lib/python3.5/site-packages/numpy/lib/arraysetops.py:522: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison
mask |= (ar1 == a)
|
_notebooks/2021-06-17-recostep-tutorial-offline-replayer-eval-recogym-small.ipynb | ###Markdown
Offline Replayer Evaluation II - Recogym small> Running recogym for offline simulation and evaluation with a small number of users and items- toc: true- badges: true- comments: true- categories: [bandit]- image: Environment setup
###Code
!pip install -q recogym
import numpy as np
from numpy.random.mtrand import RandomState
from scipy.special import logsumexp
import scipy
import pandas as pd
from scipy.stats.distributions import beta
from copy import deepcopy
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import svds
from itertools import chain
from sklearn.neighbors import NearestNeighbors
from IPython.display import display, HTML
from matplotlib.ticker import FormatStrFormatter
import gym, recogym
from recogym import env_1_args, Configuration
from recogym.agents import OrganicUserEventCounterAgent, organic_user_count_args
from recogym.agents.organic_count import OrganicCount, organic_count_args, to_categorical
from recogym import Configuration
from recogym.agents import Agent
from recogym.envs.observation import Observation
from recogym.agents import RandomAgent, random_args
from recogym import verify_agents, verify_agents_IPS
from recogym.evaluate_agent import plot_verify_agents, verify_agents_recall_at_k
from recogym.envs.session import OrganicSessions
from recogym.envs.context import DefaultContext
from recogym.envs.observation import Observation
import matplotlib.pyplot as plt
%matplotlib inline
P = 50 # Number of Products
U = 50 # Number of Users
# You can overwrite environment arguments here
env_1_args['random_seed'] = 42
env_1_args['num_products']= P
env_1_args['phi_var']=0.0
env_1_args['number_of_flips']=P//2
env_1_args['sigma_mu_organic'] = 0.1
env_1_args['sigma_omega']=0.05
# Initialize the gym for the first time by calling .make() and .init_gym()
env = gym.make('reco-gym-v1')
env.init_gym(env_1_args)
env.reset()
# Generate RecSys logs for U users
reco_log = env.generate_logs(U)
reco_log.head(20)
n_events = reco_log.shape[0]
n_organic = reco_log.loc[reco_log['z'] == 'organic'].shape[0]
print('Training on {0} organic and {1} bandit events'.format(n_organic, n_events - n_organic))
###Output
Training on 1117 organic and 3926 bandit events
###Markdown
Defining evaluation methods Traditional evaluation
###Code
def leave_one_out(reco_log, agent, last = False, N = 1, folds = 10):
# 1. Extract all organic events
reco_log = reco_log.loc[reco_log['z'] == 'organic']
# 2. For every user sequence - randomly sample out an item
hits = []
for _ in range(folds):
user_id = 0
history = []
session = OrganicSessions()
agent.reset()
for row in reco_log.itertuples():
# If we have a new user
if row.u != user_id:
if last:
# Sample out last item
index = len(history) - 1
else:
# Sample out a random item from the history
index = np.random.choice(len(history),
replace = False)
test = history[index]
train = history[:index] + history[index + 1:]
# 3. Recreate the user sequence without these items - Let the agent observe the incomplete sequence
for t, v in list(train):
session.next(DefaultContext(t, user_id), int(v))
# 4. Generate a top-N set of recommendations by letting the agent act
# TODO - For now only works for N = 1
try:
prob_a = agent.act(Observation(DefaultContext(t + 1, user_id), session), 0, False)['ps-a']
except:
prob_a = [1 / P] * P
# 5. Compute metrics checking whether the sampled test item is in the top-N
try:
hits.append(np.argmax(prob_a) == int(test[1]))
except:
hits.append(0)
# Reset variables
user_id = row.u
history = []
session = OrganicSessions()
agent.reset()
# Save the organic interaction to the running average for the session
history.append((row.t,row.v))
# Error analysis
mean_hits = np.mean(hits)
serr_hits = np.std(hits) / np.sqrt(len(hits))
low_bound = mean_hits - 1.96 * serr_hits
upp_bound = mean_hits + 1.96 * serr_hits
return mean_hits, low_bound, upp_bound
def verify_agents_traditional(reco_log, agents, last = False, N = 1, folds = 10):
# Placeholder DataFrame for result
stat = {
'Agent': [],
'0.025': [],
'0.500' : [],
'0.975': [],
}
# For every agent
for agent_id in agents:
# Compute HR@k
mean, low, upp = leave_one_out(reco_log, agents[agent_id], last = last, N = N, folds = folds)
stat['Agent'].append(agent_id)
stat['0.025'].append(low)
stat['0.500'].append(mean)
stat['0.975'].append(upp)
return pd.DataFrame().from_dict(stat)
###Output
_____no_output_____
###Markdown
Counterfactual evaluation
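The estimators implemented below, written out for reference (they match the code): $$\hat{V}_{\mathrm{IPS}} = \frac{1}{n}\sum_{i=1}^{n} r_i \frac{\pi_t(a_i \mid x_i)}{\pi_l(a_i \mid x_i)}, \qquad \hat{V}_{\mathrm{CIPS}} = \frac{1}{n}\sum_{i=1}^{n} r_i \min\!\left(\frac{\pi_t(a_i \mid x_i)}{\pi_l(a_i \mid x_i)},\, c\right), \qquad \hat{V}_{\mathrm{SNIPS}} = \frac{\sum_i r_i w_i}{\sum_i w_i}$$ where $r_i$ is the logged reward (click), $\pi_t$ and $\pi_l$ are the treatment and logging propensities, $w_i$ is their ratio, and $c$ is the clipping value.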
###Code
def compute_ips_weights(agent, reco_log):
# Placeholder for return values
rewards = [] # Labels for actions
t_props = [] # Treatment propensities
l_props = [] # Logging propensities
# For every logged interaction
user_id = 0
session = OrganicSessions()
agent.reset()
for row in reco_log.itertuples():
# If we have a new user
if row.u != user_id:
# Reset
session = OrganicSessions()
agent.reset()
user_id = row.u
# If we have an organic event
if row.z == 'organic':
session.next(DefaultContext(row.t, row.u), int(row.v))
else:
prob_a = agent.act(Observation(DefaultContext(row.t, row.u), session), 0, False)['ps-a']
rewards.append(row.c)
try:
t_props.append(prob_a[int(row.a)])
except:
t_props.append(0)
l_props.append(row.ps)
session = OrganicSessions()
return np.asarray(rewards), np.asarray(t_props), np.asarray(l_props)
def verify_agents_counterfactual(reco_log, agents, cap = 3):
# Placeholder DataFrame for results
IPS_stat = {
'Agent': [],
'0.025': [],
'0.500' : [],
'0.975': [],
}
CIPS_stat = {
'Agent': [],
'0.025': [],
'0.500' : [],
'0.975': [],
}
SNIPS_stat = {
'Agent': [],
'0.025': [],
'0.500' : [],
'0.975': [],
}
# For every agent
for agent_id in agents:
# Get the rewards and propensities
rewards, t_props, l_props = compute_ips_weights(agents[agent_id], reco_log)
# Compute the sample weights - propensity ratios
p_ratio = t_props / l_props
# Effective sample size for E_t estimate (from A. Owen)
n_e = len(rewards) * (np.mean(p_ratio) ** 2) / (p_ratio ** 2).mean()
n_e = 0 if np.isnan(n_e) else n_e
print("Effective sample size for agent {} is {}".format(str(agent_id), n_e))
# Critical value from t-distribution as we have unknown variance
alpha = .00125
cv = scipy.stats.t.ppf(1 - alpha, df = int(n_e) - 1)
###############
# VANILLA IPS #
###############
# Expected reward for pi_t
E_t = np.mean(rewards * p_ratio)
# Variance of the estimate
var = ((rewards * p_ratio - E_t) ** 2).mean()
stddev = np.sqrt(var)
# C.I. assuming unknown variance - use t-distribution and effective sample size
min_bound = E_t - cv * stddev / np.sqrt(int(n_e))
max_bound = E_t + cv * stddev / np.sqrt(int(n_e))
# Store result
IPS_stat['Agent'].append(agent_id)
IPS_stat['0.025'].append(min_bound)
IPS_stat['0.500'].append(E_t)
IPS_stat['0.975'].append(max_bound)
##############
# CAPPED IPS #
##############
# Cap ratios
p_ratio_capped = np.clip(p_ratio, a_min = None, a_max = cap)
# Expected reward for pi_t
E_t_capped = np.mean(rewards * p_ratio_capped)
# Variance of the estimate
var_capped = ((rewards * p_ratio_capped - E_t_capped) ** 2).mean()
stddev_capped = np.sqrt(var_capped)
# C.I. assuming unknown variance - use t-distribution and effective sample size
min_bound_capped = E_t_capped - cv * stddev_capped / np.sqrt(int(n_e))
max_bound_capped = E_t_capped + cv * stddev_capped / np.sqrt(int(n_e))
# Store result
CIPS_stat['Agent'].append(agent_id)
CIPS_stat['0.025'].append(min_bound_capped)
CIPS_stat['0.500'].append(E_t_capped)
CIPS_stat['0.975'].append(max_bound_capped)
##############
# NORMED IPS #
##############
# Expected reward for pi_t
E_t_normed = np.sum(rewards * p_ratio) / np.sum(p_ratio)
# Variance of the estimate
var_normed = np.sum(((rewards - E_t_normed) ** 2) * (p_ratio ** 2)) / (p_ratio.sum() ** 2)
stddev_normed = np.sqrt(var_normed)
# C.I. assuming unknown variance - use t-distribution and effective sample size
min_bound_normed = E_t_normed - cv * stddev_normed / np.sqrt(int(n_e))
max_bound_normed = E_t_normed + cv * stddev_normed / np.sqrt(int(n_e))
# Store result
SNIPS_stat['Agent'].append(agent_id)
SNIPS_stat['0.025'].append(min_bound_normed)
SNIPS_stat['0.500'].append(E_t_normed)
SNIPS_stat['0.975'].append(max_bound_normed)
return pd.DataFrame().from_dict(IPS_stat), pd.DataFrame().from_dict(CIPS_stat), pd.DataFrame().from_dict(SNIPS_stat)
###Output
_____no_output_____
###Markdown
Creating agents SVD agent
###Code
class SVDAgent(Agent):
def __init__(self, config, U = U, P = P, K = 5):
super(SVDAgent, self).__init__(config)
self.rng = RandomState(self.config.random_seed)
assert(P >= K)
self.K = K
self.R = csr_matrix((U,P))
self.V = np.zeros((P,K))
self.user_history = np.zeros(P)
def train(self, reco_log, U = U, P = P):
# Extract all organic user logs
reco_log = reco_log.loc[reco_log['z'] == 'organic']
# Generate ratings matrix for training, row-based for efficient row (user) retrieval
self.R = csr_matrix((np.ones(len(reco_log)),
(reco_log['u'],reco_log['v'])),
(U,P))
# Singular Value Decomposition
_, _, self.V = svds(self.R, k = self.K)
def observe(self, observation):
for session in observation.sessions():
self.user_history[session['v']] += 1
def act(self, observation, reward, done):
"""Act method returns an Action based on current observation and past history"""
self.observe(observation)
scores = self.user_history.dot(self.V.T).dot(self.V)
action = np.argmax(scores)
prob = np.zeros_like(scores)
prob[action] = 1.0
return {
**super().act(observation, reward, done),
**{
'a': action,
'ps': prob[action],
'ps-a': prob,
},
}
def reset(self):
self.user_history = np.zeros(P)
###Output
_____no_output_____
###Markdown
Item-KNN agent
###Code
class itemkNNAgent(Agent):
def __init__(self, config, U = U, P = P, k = 5, greedy = False, alpha = 1):
super(itemkNNAgent, self).__init__(config)
self.rng = RandomState(self.config.random_seed)
self.k = min(P,k)
self.greedy = greedy
self.alpha = alpha
self.Rt = csr_matrix((P,U))
self.user_history = np.zeros(P)
self.S = np.eye(P)
def train(self, reco_log, U = U, P = P):
# Extract all organic user logs
reco_log = reco_log.loc[reco_log['z'] == 'organic']
# Generate ratings matrix for training, row-based for efficient row (user) retrieval
self.R_t = csr_matrix((np.ones(len(reco_log)),
(reco_log['v'],reco_log['u'])),
(P,U))
# Set up nearest neighbours module
nn = NearestNeighbors(n_neighbors = self.k,
metric = 'cosine')
# Initialise placeholder for distances and indices
distances = []
indices = []
# Dirty fix for multiprocessing backend being unable to pickle large objects
nn.fit(self.R_t)
distances, indices = nn.kneighbors(self.R_t, return_distance = True)
# Precompute similarity matrix S
data = list(chain.from_iterable(1.0 - distances))
rows = list(chain.from_iterable([i] * self.k for i in range(P)))
cols = list(chain.from_iterable(indices))
# (P,P)-matrix with cosine similarities between items
self.S = csr_matrix((data,(rows, cols)), (P,P)).todense()
def observe(self, observation):
for session in observation.sessions():
self.user_history[session['v']] += 1
def act(self, observation, reward, done):
"""Act method returns an Action based on current observation and past history"""
self.observe(observation)
scores = self.user_history.dot(self.S).A1
if self.greedy:
action = np.argmax(scores)
prob = np.zeros_like(scores)
prob[action] = 1.0
else:
scores **= self.alpha
prob = scores / np.sum(scores)
action = self.rng.choice(self.S.shape[0], p = prob)
return {
**super().act(observation, reward, done),
**{
'a': action,
'ps': prob[action],
'ps-a': prob,
},
}
def reset(self):
self.user_history = np.zeros(P)
###Output
_____no_output_____
###Markdown
User-KNN agent
###Code
class userkNNAgent(Agent):
def __init__(self, config, U = U, P = P, k = 5, greedy = False, alpha = 1):
super(userkNNAgent, self).__init__(config)
self.rng = RandomState(self.config.random_seed)
self.k = min(P,k)
self.greedy = greedy
self.alpha = alpha
self.U = U
self.P = P
self.R = csr_matrix((U,P))
self.user_history = np.zeros(P)
self.nn = NearestNeighbors(n_neighbors = self.k, metric = 'cosine')
def train(self, reco_log, U = U, P = P):
# Extract all organic user logs
reco_log = reco_log.loc[reco_log['z'] == 'organic']
# Generate ratings matrix for training, row-based for efficient row (user) retrieval
self.R = csr_matrix((np.ones(len(reco_log)),
(reco_log['u'],reco_log['v'])),
(U,P))
# Fit nearest neighbours
self.nn.fit(self.R)
def observe(self, observation):
for session in observation.sessions():
self.user_history[session['v']] += 1
def act(self, observation, reward, done):
"""Act method returns an Action based on current observation and past history"""
self.observe(observation)
# Get neighbouring users based on user history
distances, indices = self.nn.kneighbors(self.user_history.reshape(1,-1))
scores = np.add.reduce([dist * self.R[idx,:] for dist, idx in zip(distances,indices)])
if self.greedy:
action = np.argmax(scores)
prob = np.zeros_like(scores)
prob[action] = 1.0
else:
scores **= self.alpha
prob = scores / np.sum(scores)
action = self.rng.choice(self.P, p = prob)
return {
**super().act(observation, reward, done),
**{
'a': action,
'ps': prob[action],
'ps-a': prob,
},
}
def reset(self):
self.user_history = np.zeros(P)
###Output
_____no_output_____
###Markdown
Agent initializations
###Code
# SVD Agent
SVD_agent = SVDAgent(Configuration(env_1_args), U, P, 30)
# item-kNN Agent
itemkNN_agent = itemkNNAgent(Configuration(env_1_args), U, P, 500, greedy = True)
# user-kNN Agent
userkNN_agent = userkNNAgent(Configuration(env_1_args), U, P, 20, greedy = True)
# Generalised Popularity agent
GPOP_agent = OrganicCount(Configuration({
**env_1_args,
'select_randomly': True,
}))
# Generalised Popularity agent
GPOP_agent_greedy = OrganicCount(Configuration({
**env_1_args,
'select_randomly': False,
}))
# Peronalised Popularity agent
PPOP_agent = OrganicUserEventCounterAgent(Configuration({
**organic_user_count_args,
**env_1_args,
'select_randomly': True,
}))
# Peronalised Popularity agent
PPOP_agent_greedy = OrganicUserEventCounterAgent(Configuration({
**organic_user_count_args,
**env_1_args,
'select_randomly': False,
}))
# Random Agent
random_args['num_products'] = P
RAND_agent = RandomAgent(Configuration({**env_1_args, **random_args,}))
SVD_agent.train(reco_log)
itemkNN_agent.train(reco_log)
userkNN_agent.train(reco_log)
###Output
_____no_output_____
###Markdown
Offline evaluation Generating test logs
###Code
%%time
# Placeholder for agents
agents = {
' Random': RAND_agent,
' Popular': GPOP_agent_greedy,
' User-pop': PPOP_agent,
' SVD': SVD_agent,
' User-kNN': userkNN_agent,
'Item-kNN': itemkNN_agent,
}
agent_ids = sorted(list(agents.keys()))#['SVD','GPOP','PPOP','RAND']
# Generate new logs, to be used for offline testing
n_test_users = 50 # U
test_log = env.generate_logs(n_test_users)
n_events = test_log.shape[0]
n_organic = test_log.loc[test_log['z'] == 'organic'].shape[0]
print('Testing on {0} organic and {1} bandit events'.format(n_organic, n_events - n_organic))
###Output
Organic Users: 0it [00:00, ?it/s]
Users: 100%|██████████| 50/50 [00:01<00:00, 39.77it/s]
###Markdown
(Util) helper function to plot barchart
###Code
def plot_barchart(result, title, xlabel, col = 'tab:red', figname = 'fig.eps', size = (6,2), fontsize = 12):
fig, axes = plt.subplots(figsize = size)
plt.title(title, size = fontsize)
n_agents = len(result)
yticks = np.arange(n_agents)
mean = result['0.500']
lower = result['0.500'] - result['0.025']
upper = result['0.975'] - result['0.500']
plt.barh(yticks,
mean,
height = .25,
xerr = (lower, upper),
align = 'center',
color = col,)
plt.yticks(yticks, result['Agent'], size = fontsize)
plt.xticks(size = fontsize)
plt.xlabel(xlabel, size = fontsize)
plt.xlim(.0,None)
plt.gca().xaxis.set_major_formatter(FormatStrFormatter('%.2f'))
plt.savefig(figname, bbox_inches = 'tight')
plt.show()
###Output
_____no_output_____
###Markdown
Leave-one-out evaluation
###Code
%%time
result_LOO = verify_agents_traditional(test_log, deepcopy(agents))
display(result_LOO)
plot_barchart(result_LOO, 'Evaluate on Organic Feedback', 'HR@1', 'tab:red', 'traditional_eval.eps')
###Output
/usr/local/lib/python3.7/dist-packages/recogym/agents/organic_user_count.py:51: RuntimeWarning: invalid value encountered in true_divide
action_proba = features / np.sum(features)
###Markdown
IPS Estimators
###Code
# Generate new logs, to be used for offline testing
test_log_ppop = env.generate_logs(n_test_users, agent = deepcopy(PPOP_agent))
test_log_ppop.head()
%%time
cap = 15
result_IPS, result_CIPS, result_SNIPS = verify_agents_counterfactual(test_log_ppop, deepcopy(agents), cap = cap)
display(result_IPS)
plot_barchart(result_IPS, 'IPS', 'CTR', 'tab:blue', 'bandit_eval_noclip.eps')
display(result_CIPS)
plot_barchart(result_CIPS, 'Clipped IPS', 'CTR', 'tab:blue', 'bandit_eval_clip{0}.eps'.format(cap))
###Output
/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:65: RuntimeWarning: invalid value encountered in double_scalars
/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:120: RuntimeWarning: invalid value encountered in double_scalars
###Markdown
A/B tests
###Code
n_test_users = 50 # U
agents = {
' Random': RAND_agent,
' Popular': GPOP_agent_greedy,
' User-pop': PPOP_agent,
' SVD': SVD_agent,
' User-kNN': userkNN_agent,
'Item-kNN': itemkNN_agent,
}
%%time
result_AB = verify_agents(env, n_test_users, deepcopy(agents))
display(result_AB)
plot_barchart(result_AB, 'A/B-test', 'CTR', 'tab:green', 'ABtest_eval.eps')
def combine_barchart(resultAB, resultCIPS, title, xlabel, figname = 'fig.eps', size = (10,8), fontsize = 12):
fig, axes = plt.subplots(figsize = size)
plt.title(title, size = fontsize)
n_agents = len(resultAB)
for i, (name, colour, result) in enumerate([('A/B-test', 'tab:green', result_AB),('CIPS', 'tab:blue', result_CIPS)]):
mean = result['0.500']
lower = result['0.500'] - result['0.025']
upper = result['0.975'] - result['0.500']
height = .25
yticks = [a + i * height for a in range(n_agents)]
plt.barh(yticks,
mean,
height = height,
xerr = (lower, upper),
align = 'edge',
label = name,
color = colour)
plt.yticks(yticks, result['Agent'], size = fontsize)
plt.xticks(size = fontsize)
plt.xlabel(xlabel, size = fontsize)
plt.legend(loc = 'lower right')
plt.xlim(.0,None)
plt.gca().xaxis.set_major_formatter(FormatStrFormatter('%.3f'))
plt.savefig(figname, bbox_inches = 'tight')
plt.show()
combine_barchart(result_AB, result_CIPS, 'Evaluate on Bandit Feedback', 'CTR', 'ABtest_CIPS.eps')
plot_barchart(result_LOO, 'Evaluate on Organic Feedback', 'HR@1', 'tab:red', 'traditional_eval.eps')
###Output
_____no_output_____ |
bmcs_beam/tension/time_dependent_cracking.ipynb | ###Markdown
Time dependent tensile response
###Code
%matplotlib widget
import matplotlib.pylab as plt
from bmcs_beam.tension.time_dependent_cracking import TimeDependentCracking
import sympy as sp
sp.init_printing()
import numpy as np
###Output
_____no_output_____
###Markdown
Single material point Time dependent function
###Code
TimeDependentCracking(T_prime_0 = 100).interact()
###Output
_____no_output_____
###Markdown
Time-dependent temperature evolution function Find a suitable continuous function that can represent the temperature evolution during hydration. Currently, a function of Weibull type has been chosen and transformed such that the peak value and the corresponding time can be specified as parameters.
###Code
t = sp.symbols('t', nonnegative=True)
T_m = sp.Symbol("T_m", positive = True)
T_s = sp.Symbol("T_s", positive = True)
omega_fn = 1 - sp.exp(-(t/T_s)**T_m)
T_prime_0 = sp.Symbol("T_prime_0", positive = True)
T_t = (1 - omega_fn) * T_prime_0 * t
###Output
_____no_output_____
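###Markdown
Written out, the expression assembled above reduces to the closed form (a direct substitution of $\omega(t)$, shown here for clarity): $$T(t) = \left(1 - \omega(t)\right)\, T^\prime_0\, t = T^\prime_0\, t\, \exp\!\left[-\left(\frac{t}{T_s}\right)^{T_m}\right]$$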
###Markdown
**Shape functions for temperature evolution**
###Code
T_t
T_prime_t = sp.simplify(T_t.diff(t))
T_prime_t
###Output
_____no_output_____
###Markdown
**Transform the shape function** to be able to explicitly specify the maximum temperature and the corresponding time
###Code
t_argmax_T = sp.Symbol("t_argmax_T")
T_s_sol = sp.solve( sp.Eq( sp.solve(T_prime_t,t)[0], t_argmax_T ), T_s)[0]
T_max = sp.Symbol("T_max", positive=True)
T_prime_0_sol = sp.solve(sp.Eq(T_t.subs(T_s, T_s_sol).subs(t, t_argmax_T), T_max),
T_prime_0)[0]
T_max_t = sp.simplify( T_t.subs({T_s: T_s_sol, T_prime_0: T_prime_0_sol}) )
T_max_t
get_T_t = sp.lambdify((t, T_prime_0, T_m, T_s), T_t)
get_T_max_t = sp.lambdify((t, T_max, t_argmax_T, T_m), T_max_t)
data = dict(T_prime_0=100, T_m=1, T_s=1)
_, ax = plt.subplots(1,1)
t_range = np.linspace(0,10,100)
plt.plot(t_range, get_T_t(t_range, **data));
plt.plot(t_range, get_T_max_t(t_range, 37, 1., 2));
###Output
_____no_output_____
###Markdown
Time dependent compressive strength **From Eurocode 2:** $s$ captures the effect of cement type on the time evolution of the compressive strength. It takes the values $s = 0.2$ for class R (rapid), $s = 0.25$ for class N (normal), and $s = 0.38$ for class S (slow).
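In formula form, as implemented in the cell below (with $t$ in days): $$\beta_{cc}(t) = \exp\!\left[s\left(1 - \sqrt{\frac{28}{t}}\right)\right]$$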
###Code
s = sp.Symbol("s", positive=True)
beta_cc = sp.exp( s * (1 - sp.sqrt(28/t)))
beta_cc
get_beta_cc = sp.lambdify((t, s), beta_cc )
_, ax = plt.subplots(1,1)
plt.plot(t_range, get_beta_cc(t_range, 0.2))
###Output
_____no_output_____
###Markdown
Compressive strength
###Code
f_cm_28 = sp.Symbol("f_cm28", positive=True)
f_cm_28
f_cm_t = beta_cc * f_cm_28
f_cm_t
get_f_cm_t = sp.lambdify((t, f_cm_28, s), f_cm_t)
###Output
_____no_output_____
###Markdown
Tensile strength
###Code
f_ctm = sp.Symbol("f_ctm", positive=True)
alpha_f = sp.Symbol("alpha_f", positive=True)
f_ctm_t = beta_cc * f_ctm
f_ctm_t
get_f_ctm_t = sp.lambdify((t, f_ctm, s), f_ctm_t)
###Output
_____no_output_____
###Markdown
Elastic modulus
###Code
E_cm_28 = sp.Symbol("E_cm28", positive=True)
E_cm_t = (f_cm_t / f_cm_28)**0.3 * E_cm_28
E_cm_t
get_E_cm_t = sp.lambdify((t, E_cm_28, s), E_cm_t)
###Output
_____no_output_____
###Markdown
Uncracked state - Specimen is clamped at both sides. Then $\varepsilon_\mathrm{app} = 0, \forall x \in \Omega$ - Then the matrix stress is given as\begin{align} \sigma^\mathrm{m}(x,t) = - E^\mathrm{m}(t) \cdot \alpha \int_0^t T^\prime(x,\theta)\, \mathrm{d}\theta\end{align}
###Code
alpha = sp.Symbol("alpha", positive=True )
eps_eff = alpha * T_max_t
dot_T_max_t = sp.simplify(T_max_t.diff(t))
dot_eps_eff = alpha * dot_T_max_t
dot_E_cm_t = E_cm_t.diff(t)
sig_t = E_cm_t * eps_eff
dot_sig_t = E_cm_t * dot_eps_eff + dot_E_cm_t * eps_eff
sp.simplify(dot_sig_t)
###Output
_____no_output_____
###Markdown
The integral cannot be resolved algebraically, so numerical integration is used
###Code
#sig2_t = sp.integrate(dot_sig_t, (t,0,t))
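# A possible numerical alternative (sketch with assumed parameter values, not part of
# the original notebook): substitute trial values for all free parameters, lambdify the
# stress rate in t only, and integrate it with a cumulative trapezoidal rule.
params = {T_max: 37, t_argmax_T: 1.0, T_m: 2, alpha: 1e-5, E_cm_28: 30000, s: 0.2}
get_dot_sig = sp.lambdify(t, dot_sig_t.subs(params), 'numpy')
t_grid = np.linspace(0.01, 10, 500)
rate = get_dot_sig(t_grid)
sig_num = np.concatenate([[0.0], np.cumsum(0.5 * (rate[1:] + rate[:-1]) * np.diff(t_grid))])
_ = plt.plot(t_grid, sig_num)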
###Output
_____no_output_____ |
Udemy_Courses_Analysis_Baybay1.ipynb | ###Markdown
Udemy Courses Analysis Analysis of data gathered from Udemy courses. The link is available at https://www.kaggle.com/andrewmvd/udemy-courses?select=udemy_courses.csv I. Importing modules and libraries For this step, I imported the numpy and pandas modules. After setting up the modules, the CSV file was also loaded.
###Code
import numpy as np
import pandas as pd
import os
import csv
os.chdir("C:\\Users\\maegr\Documents\Maymay\DataScience\modules")
#Opens the data file as df
with open("udemy_courses.csv", "r") as courses_df:
courses_df = pd.read_csv('udemy_courses.csv')
###Output
_____no_output_____
###Markdown
II. Describing the Courses Instead of getting the statistical values separately, the **describe** method was used so that it would be easier to analyze the data.
###Code
courses_df.describe()
###Output
_____no_output_____
###Markdown
Conclusions: 1. If someone is planning to take a Udemy course, it must be noted that the average price of Udemy courses is **66.049 USD**. Some of the courses are entirely **free**, while others could go as high as **200 USD**. 2. The average number of subscribers per course is **3197**. 3. Some courses have as many as **27455** reviews, while some courses don't have any reviews at all. 4. In order to finish a course, one must spend an average of **4.09 hours**. III. Amount of Courses by Subject The pie chart below describes the distribution of the number of courses per subject
###Code
courses_df['subject'].value_counts() / courses_df['subject'].value_counts().sum()
###Output
_____no_output_____
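###Markdown
The text above mentions a pie chart; a possible way to actually draw it (assuming matplotlib is available) is shown below.
###Code
# Hypothetical plotting cell -- not part of the original analysis
import matplotlib.pyplot as plt

courses_df['subject'].value_counts().plot.pie(autopct='%1.1f%%', figsize=(6, 6))
plt.ylabel('')
plt.show()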
###Markdown
Conclusions: 1. About 32.6% of the courses on Udemy are about Web Development, which makes it the most popular subject on the site. 2. Web Development is followed closely by Business Finance, with around 32.5%. 3. Musical Instruments comes third, while Graphic Design is last. IV. Standard Deviation and Mean Part II already discussed these values, but this is another way of getting the mean and standard deviation
###Code
print("Mean Values")
print( "Price: " + str(courses_df["price"].mean()) )
print( "Number of subscribers: "+ str(courses_df["num_subscribers"].mean()) )
print( "Number of reviews: "+ str(courses_df["num_reviews"].mean()) )
print( "Number of lectures: "+ str(courses_df["num_lectures"].mean()) )
print( "Content Duration: "+ str(courses_df["content_duration"].mean()) )
print("\nStandard Deviation")
print( "Price: " + str(courses_df["price"].std()) )
print( "Number of subscribers: "+ str(courses_df["num_subscribers"].std()) )
print( "Number of reviews: "+ str(courses_df["num_reviews"].std()) )
print( "Number of lectures: "+ str(courses_df["num_lectures"].std()) )
print( "Content Duration: "+ str(courses_df["content_duration"].std()) )
###Output
Mean Values
Price: 66.0494834148994
Number of subscribers: 3197.150625339859
Number of reviews: 156.25910821098424
Number of lectures: 40.108754758020666
Content Duration: 4.0945169476164605
Standard Deviation
Price: 61.00575547648037
Number of subscribers: 9504.117010438013
Number of reviews: 935.452044243373
Number of lectures: 50.38334552355022
Content Duration: 6.053840414790095
###Markdown
Conclusions: 1. Based on the table above, we can infer that the standard deviations of the price and the number of lectures are small; hence, these values are more closely distributed around the mean values gathered earlier. V. Maximum and Minimum Values Below are the maximum and minimum values of the given data
###Code
print( "Maximum Course Price: " + str(courses_df["price"].max()) )
print( "Minimum Course Price: " + str(courses_df["price"].min()) )
print( "\nMaximum Number of subscribers: "+ str(courses_df["num_subscribers"].max()) )
print( "Minimum Number of subscribers: "+ str(courses_df["num_subscribers"].min()) )
print( "\nMaximum Number of reviews: "+ str(courses_df["num_reviews"].max()) )
print( "Minimum Number of reviews: "+ str(courses_df["num_reviews"].min()) )
print( "\nMaximum Number of lectures: "+ str(courses_df["num_lectures"].max()) )
print( "Minimum Number of lectures: "+ str(courses_df["num_lectures"].min()) )
print( "\nMaximum Content Duration: "+ str(courses_df["content_duration"].max()) )
print( "Minimum Content Duration: "+ str(courses_df["content_duration"].min()) )
###Output
Maximum Course Price: 200
Minimum Course Price: 0
Maximum Number of subscribers: 268923
Minimum Number of subscribers: 0
Maximum Number of reviews: 27445
Minimum Number of reviews: 0
Maximum Number of lectures: 779
Minimum Number of lectures: 0
Maximum Content Duration: 78.5
Minimum Content Duration: 0.0
###Markdown
Conclusions: 1. The most expensive course costs 200 USD, while there are courses that are free. 2. The most subscribed course has 268923 subscribers. 3. The most reviewed course has 27445 reviews. 4. The largest number of lectures in a course is 779. Surprisingly, there are courses that do not have any lectures at all. 5. The maximum content duration of a course is 78.5 hours, while again, there are courses that have 0 content duration. VI. Most Popular Paid Courses The table below shows the most popular paid courses on Udemy based on the number of subscribers
###Code
paid_courses_df = courses_df.query("price != 0")
top25_paid = paid_courses_df.sort_values("num_subscribers", ascending=False)[0:25].sort_values("num_subscribers", ascending=False)
top25_paid
###Output
_____no_output_____
###Markdown
Conclusions: 1. The most popular paid course is "The Web Developer Bootcamp". 2. 21 out of the 25 most popular paid courses are about Web Development. From this and the conclusion made earlier, it is possible that many Udemy users are interested in learning about Web Development. 3. The most popular paid courses in Musical Instruments are about learning to play the piano and the guitar. VII. Most Popular Free Courses
###Code
free_courses_df = courses_df.query("price == 0")
top25_free = free_courses_df.sort_values("num_subscribers", ascending=False)[0:25].sort_values("num_subscribers", ascending=False)
top25_free
###Output
_____no_output_____
###Markdown
Conclusions:1. The most popular free course is "Learn HTML5 Programming from scratch". 2. 21 out of the 25 most popular free courses are about Web Development. 3. The most popular free course in musical instruments is about learning to play the electric guitar, titled "Free Beginner Electric Guitar Lessons". VIII. Highest Reviewed Course
###Code
num_reviews_df = courses_df.query("num_reviews != 0")
top25_reviewed = num_reviews_df.sort_values("num_reviews", ascending=False)[0:25].sort_values("num_reviews", ascending=True).reset_index(drop=True).reset_index()
top25_reviewed.max()
###Output
_____no_output_____ |
Feature Cleaning and First Models.ipynb | ###Markdown
EDA
###Code
df.head()
df.shape
df.columns.values
df.info()
df.describe(include='all')
#Histogram to check distribution and skewness
l= ['Zero_Crossings', 'Duration', 'Amp_range', 'Avg_amp', 'Freq_range',
'Pulses_per_Sec', 'Partials', 'MFCC', 'Spectral Rolloff']
plt.figure(figsize=(15,20))
for i in range(len(l)):
plt.subplot(5,2,i+1)
sns.histplot(df[l[i]],kde=True)
plt.show()
#Boxplot to check for outliers
l= ['Zero_Crossings', 'Duration', 'Amp_range', 'Avg_amp', 'Freq_range',
'Pulses_per_Sec', 'Partials', 'MFCC', 'Spectral Rolloff']
plt.figure(figsize=(20,25))
for i in range(0,len(l)):
plt.subplot(5,2,i+1)
sns.set_style('whitegrid')
sns.boxplot(df[l[i]],color='green',orient='h')
plt.tight_layout()
plt.show()
#Quality correlation matrix
k = 9 #number of variables for heatmap
cols = df.corr().nlargest(k, 'Amp_range')['Amp_range'].index
cm = df[cols].corr()
plt.figure(figsize=(10,6))
sns.heatmap(cm, annot=True, cmap = 'coolwarm')
df['Call'].value_counts()
df['Species'].value_counts()
###Output
_____no_output_____
###Markdown
Data Cleansing
###Code
df['Call'].unique()
clean = {'unknown':np.NaN, 'growl?': 'growl','Growl':'growl', 'growl ':'growl', 'hiss?':'hiss', 'Hiss':'hiss',
'Sharp Hiss':'hiss', 'purr sequence': 'purr', 'Loud rumble/roar':'roar', 'call?':'call', 'main call':'call',
'call sequence':'call', 'roar or call':'roar', 'roar?':'roar', 'purr sequence':'purr', ' roar':'roar', 'hiss ':'hiss',
'mew?':'mew', 'Call sequence(possible mew)':'call', 'call sequence?':'call', 'single call?':'call',
'grow/hiss':'growl/hiss'}
df.replace(clean, inplace = True)
df['Call'].unique()
df['Age'].unique()
clean2 = {'A':'Adult','Adult ':'Adult', 'Juvenile ':'Juvenile', 'juvenile':'Juvenile'}
df.replace(clean2, inplace = True)
df['Age'].fillna('Unknown', inplace = True)
df['Age'].unique()
df['Sex'].unique()
clean3 = {'Female ':'Female','F':'Female', 'M':'Male','male ':'Male', 'P':'Pair', 'Pair (Unknown)':'Pair', 'G':'Group', 'G (1 M and 2F)':'Group'}
df.replace(clean3, inplace = True)
df['Sex'].fillna('Unknown', inplace = True)
df['Sex'].unique()
df.describe(include='object')
###Output
_____no_output_____
###Markdown
Standardize Continuous Features
###Code
continuous = ['Zero_Crossings', 'Duration', 'Amp_range', 'Avg_amp', 'Freq_range',
'Pulses_per_Sec', 'Partials', 'MFCC', 'Spectral Rolloff']
scaler = StandardScaler()
for var in continuous:
df[var] = df[var].astype('float64')
df[var] = scaler.fit_transform(df[var].values.reshape(-1, 1))
df.describe(include='float64')
#Save new clean data to new CSV
df.to_csv('features_cleaned.csv', index=False)
###Output
_____no_output_____
###Markdown
Convert categorical variables into dummy/indicator variables
###Code
categorical = ['Sex', 'Age', 'Species']
for var in categorical:
df = pd.concat([df, pd.get_dummies(df[var], prefix=var)], axis=1)
del df[var]
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 345 entries, 0 to 344
Data columns (total 24 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Zero_Crossings 345 non-null float64
1 Duration 345 non-null float64
2 Amp_range 345 non-null float64
3 Avg_amp 345 non-null float64
4 Freq_range 345 non-null float64
5 Pulses_per_Sec 345 non-null float64
6 Partials 345 non-null float64
7 MFCC 345 non-null float64
8 Spectral Rolloff 345 non-null float64
9 Call 331 non-null object
10 Sex_Female 345 non-null uint8
11 Sex_Group 345 non-null uint8
12 Sex_Male 345 non-null uint8
13 Sex_Pair 345 non-null uint8
14 Sex_Unknown 345 non-null uint8
15 Age_Adult 345 non-null uint8
16 Age_Juvenile 345 non-null uint8
17 Age_Unknown 345 non-null uint8
18 Species_A. jubatus 345 non-null uint8
19 Species_Bobcat 345 non-null uint8
20 Species_Caracal Carcal 345 non-null uint8
21 Species_Domestic Cat 345 non-null uint8
22 Species_L. lynx 345 non-null uint8
23 Species_Ocelot 345 non-null uint8
dtypes: float64(9), object(1), uint8(14)
memory usage: 31.8+ KB
###Markdown
Vocalization Classification Splitting data
###Code
X = df[pd.notnull(df['Call'])].drop(['Call'], axis=1)
y = df[pd.notnull(df['Call'])]['Call']
print(X.shape)
print(y.shape)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.30)
print(X_train.shape)
print(X_test.shape)
print('Call values for Data')
print(df['Call'].value_counts())
print('\n')
print('Call values for Training')
print(y_train.value_counts())
print('\n')
print('Call values for Testing')
print(y_test.value_counts())
print('Calls trained for but not tested for')
print(set(np.unique(y_train))-set(np.unique(y_test)))
print('Calls test for but not trained for')
print(set(np.unique(y_test))-set(np.unique(y_train)))
###Output
Calls trained for but not tested for
{'growl/hiss'}
Calls test for but not trained for
set()
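###Markdown
An added suggestion (not in the original notebook): a stratified split keeps every call type in both subsets; note that `stratify` requires each class to have at least two samples, so rare labels such as 'growl/hiss' would need to be merged or dropped first.
###Code
# Added sketch: stratified train/test split so each call type appears in both splits
from sklearn.model_selection import train_test_split

X_tr_s, X_te_s, y_tr_s, y_te_s = train_test_split(
    X, y, test_size=0.30, stratify=y, random_state=0)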
###Markdown
XGBoost
###Code
parameters = dict(
objective='multi:softprob',
random_state = 30,
max_depth=9,
learning_rate=0.01,
subsample=0.8,
colsample_bytree=0.4,
tree_method='gpu_hist')
#eval_metric='mlogloss'
clf = XGBClassifier(**parameters, n_estimators=1200)
clf.fit(X_train, y_train)
clf.score(X_train, y_train)
clf.score(X_test,y_test)
y_pred = clf.predict(X_test)
print('1. Tested Calls')
print(np.unique(y_test))
print('2. Predicted Calls')
print(np.unique(y_pred))
print('3. Not tested for but predicted')
print(set(np.unique(y_pred))-set(np.unique(y_test)))
print('4. Tested for but not predicted')
print(set(np.unique(y_test))-set(np.unique(y_pred)))
names = sorted(list(set(np.unique(y_test)).union(set(y_pred))))
cnf = confusion_matrix(y_test, y_pred)
fig, ax = plt.subplots(figsize=(8,4))
# Normalise
cnf = cnf.astype('float')/cnf.sum(axis=1)[:, np.newaxis]
sns.heatmap(cnf, annot=True, fmt='.1%', xticklabels=names, yticklabels=names,cmap= "YlOrBr")
plt.title('XGBoost')
ax.xaxis.set_label_position('top')
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show(block=False)
print('Accuracy',clf.score(X_test, y_test))
plot_importance(clf)
figsize=(5,10)
###Output
_____no_output_____
###Markdown
SVM
###Code
clf_svc = SVC()
clf_svc.fit(X_train, y_train)
clf_svc.score(X_train, y_train)
clf_svc.score(X_test, y_test)
y_pred_svc=clf_svc.predict(X_test)
print('1. Tested Calls')
print(np.unique(y_test))
print('2. Predicted Calls')
print(np.unique(y_pred_svc))
print('3. Not tested for but predicted')
print(set(np.unique(y_pred_svc))-set(np.unique(y_test)))
print('4. Tested for but not predicted')
print(set(np.unique(y_test))-set(np.unique(y_pred_svc)))
names_svc = sorted(list(set(np.unique(y_test)).union(set(y_pred_svc))))
cnf = confusion_matrix(y_test, y_pred_svc)
fig, ax = plt.subplots(figsize=(8,4))
# Normalise
cnf = cnf.astype('float')/cnf.sum(axis=1)[:, np.newaxis]
sns.heatmap(cnf, annot=True, fmt='.1%', xticklabels=names_svc, yticklabels=names_svc,cmap= "YlOrBr")
plt.title('SVM Confusion Matrix')
ax.xaxis.set_label_position('top')
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show(block=False)
print('Accuracy',clf_svc.score(X_test, y_test))
###Output
_____no_output_____
###Markdown
Naive Bayes
###Code
gnb = GaussianNB()
gnb.fit(X_train, y_train)
gnb.score(X_train, y_train)
gnb.score(X_test, y_test)
y_pred_nb = gnb.predict(X_test)
print('1. Tested Calls')
print(np.unique(y_test))
print('2. Predicted Calls')
print(np.unique(y_pred_nb))
print('3. Not tested for but predicted')
print(set(np.unique(y_pred_nb))-set(np.unique(y_test)))
print('4. Tested for but not predicted')
print(set(np.unique(y_test))-set(np.unique(y_pred_nb)))
names_nb = sorted(list(set(np.unique(y_test)).union(set(y_pred_nb))))
cnf = confusion_matrix(y_test, y_pred_nb)
fig, ax = plt.subplots(figsize=(8,4))
# Normalise
cnf = cnf.astype('float')/cnf.sum(axis=1)[:, np.newaxis]
sns.heatmap(cnf, annot=True, fmt='.1%', xticklabels=names_nb, yticklabels=names_nb,cmap= "YlOrBr")
plt.title('Naive Bayes Confusion Matrix')
ax.xaxis.set_label_position('top')
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show(block=False)
print('Accuracy',gnb.score(X_test, y_test))
###Output
_____no_output_____
###Markdown
Logistic Regression
###Code
lr = LogisticRegression(solver='liblinear', multi_class='ovr')
lr.fit(X_train,y_train)
lr.score(X_train, y_train)
lr.score(X_test, y_test)
y_pred_lr = lr.predict(X_test)
print('1. Tested Calls')
print(np.unique(y_test))
print('2. Predicted Calls')
print(np.unique(y_pred_lr))
print('3. Not tested for but predicted')
print(set(np.unique(y_pred_lr))-set(np.unique(y_test)))
print('4. Tested for but not predicted')
print(set(np.unique(y_test))-set(np.unique(y_pred_lr)))
names_lr = sorted(list(set(np.unique(y_test)).union(set(y_pred_lr))))
cnf = confusion_matrix(y_test, y_pred_lr)
fig, ax = plt.subplots(figsize=(8,4))
# Normalise
cnf = cnf.astype('float')/cnf.sum(axis=1)[:, np.newaxis]
sns.heatmap(cnf, annot=True, fmt='.1%', xticklabels=names_lr, yticklabels=names_lr,cmap= "YlOrBr")
plt.title('Logistic Regression')
ax.xaxis.set_label_position('top')
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show(block=False)
print('Accuracy',lr.score(X_test, y_test))
###Output
_____no_output_____
###Markdown
KNN
###Code
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
knn.score(X_train, y_train)
knn.score(X_test, y_test)
y_pred_knn = knn.predict(X_test)
print('1. Tested Calls')
print(np.unique(y_test))
print('2. Predicted Calls')
print(np.unique(y_pred_knn))
print('3. Not tested for but predicted')
print(set(np.unique(y_pred_knn))-set(np.unique(y_test)))
print('4. Tested for but not predicted')
print(set(np.unique(y_test))-set(np.unique(y_pred_knn)))
names_knn = sorted(list(set(np.unique(y_test)).union(set(y_pred_knn))))
cnf = confusion_matrix(y_test, y_pred_knn)
fig, ax = plt.subplots(figsize=(8,4))
#Normalise
cnf = cnf.astype('float')/cnf.sum(axis=1)[:, np.newaxis]
sns.heatmap(cnf, annot=True, fmt='.1%', xticklabels=names_knn, yticklabels=names_knn,cmap= "YlOrBr")
plt.title('KNN')
ax.xaxis.set_label_position('top')
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show(block=False)
print('Accuracy',knn.score(X_test, y_test))
###Output
_____no_output_____
###Markdown
Decision Tree Classifier
###Code
cart = DecisionTreeClassifier()
cart.fit(X_train, y_train)
cart.score(X_train, y_train)
cart.score(X_test, y_test)
y_pred_cart = cart.predict(X_test)
print('1. Tested Calls')
print(np.unique(y_test))
print('2. Predicted Calls')
print(np.unique(y_pred_cart))
print('3. Not tested for but predicted')
print(set(np.unique(y_pred_cart))-set(np.unique(y_test)))
print('4. Tested for but not predicted')
print(set(np.unique(y_test))-set(np.unique(y_pred_cart)))
names_cart = sorted(list(set(np.unique(y_test)).union(set(y_pred_cart))))
cnf = confusion_matrix(y_test, y_pred_cart)
fig, ax = plt.subplots(figsize=(8,4))
# Normalise
cnf = cnf.astype('float')/cnf.sum(axis=1)[:, np.newaxis]
sns.heatmap(cnf, annot=True, fmt='.1%', xticklabels=names_cart, yticklabels=names_cart,cmap= "YlOrBr")
plt.title('Decision Tree Classifier')
ax.xaxis.set_label_position('top')
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show(block=False)
print('Accuracy',cart.score(X_test, y_test))
###Output
_____no_output_____
###Markdown
Random Forest Classifier
###Code
rf = RandomForestClassifier()
rf.fit(X_train, y_train)
rf.score(X_train, y_train)
rf.score(X_test, y_test)
y_pred_rf = cart.predict(X_test)
print('1. Tested Calls')
print(np.unique(y_test))
print('2. Predicted Calls')
print(np.unique(y_pred_rf))
print('3. Not tested for but predicted')
print(set(np.unique(y_pred_rf))-set(np.unique(y_test)))
print('4. Tested for but not predicted')
print(set(np.unique(y_test))-set(np.unique(y_pred_rf)))
names_rf = sorted(list(set(np.unique(y_test)).union(set(y_pred_rf))))
cnf = confusion_matrix(y_test, y_pred_rf)
fig, ax = plt.subplots(figsize=(8,4))
# Normalise
cnf = cnf.astype('float')/cnf.sum(axis=1)[:, np.newaxis]
sns.heatmap(cnf, annot=True, fmt='.1%', xticklabels=names_rf, yticklabels=names_rf,cmap= "YlOrBr")
plt.title('Random Forest')
ax.xaxis.set_label_position('top')
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show(block=False)
print('Accuracy',rf.score(X_test, y_test))
#The data is unbalanced, this could be fixed by updating the class weights
#Or getting more varied data
df['Call'].value_counts()
###Output
_____no_output_____
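###Markdown
A minimal sketch of the class-weight idea mentioned in the comment above (added; it reuses the existing call-classification split and assumes scikit-learn is available):
###Code
# Added sketch: counteract the imbalance in 'Call' with balanced class weights
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.class_weight import compute_class_weight

call_classes = np.unique(y_train)
call_weights = compute_class_weight(class_weight='balanced', classes=call_classes, y=y_train)
print(dict(zip(call_classes, np.round(call_weights, 2))))

# Most sklearn classifiers accept this directly, e.g.:
rf_balanced = RandomForestClassifier(class_weight='balanced')
rf_balanced.fit(X_train, y_train)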
###Markdown
Species Classification
###Code
df2= pd.read_csv('features_cleaned.csv')
df2.head()
df2.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 345 entries, 0 to 344
Data columns (total 13 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Zero_Crossings 345 non-null float64
1 Duration 345 non-null float64
2 Amp_range 345 non-null float64
3 Avg_amp 345 non-null float64
4 Freq_range 345 non-null float64
5 Pulses_per_Sec 345 non-null float64
6 Partials 345 non-null float64
7 MFCC 345 non-null float64
8 Spectral Rolloff 345 non-null float64
9 Sex 345 non-null object
10 Age 345 non-null object
11 Species 345 non-null object
12 Call 331 non-null object
dtypes: float64(9), object(4)
memory usage: 35.2+ KB
###Markdown
Convert categorical variables into dummy/indicator variables
###Code
categorical = ['Sex', 'Age', 'Call']
for var in categorical:
df2 = pd.concat([df2, pd.get_dummies(df2[var], prefix=var)], axis=1)
del df2[var]
df2.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 345 entries, 0 to 344
Data columns (total 25 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Zero_Crossings 345 non-null float64
1 Duration 345 non-null float64
2 Amp_range 345 non-null float64
3 Avg_amp 345 non-null float64
4 Freq_range 345 non-null float64
5 Pulses_per_Sec 345 non-null float64
6 Partials 345 non-null float64
7 MFCC 345 non-null float64
8 Spectral Rolloff 345 non-null float64
9 Species 345 non-null object
10 Sex_Female 345 non-null uint8
11 Sex_Group 345 non-null uint8
12 Sex_Male 345 non-null uint8
13 Sex_Pair 345 non-null uint8
14 Sex_Unknown 345 non-null uint8
15 Age_Adult 345 non-null uint8
16 Age_Juvenile 345 non-null uint8
17 Age_Unknown 345 non-null uint8
18 Call_call 345 non-null uint8
19 Call_call/growl 345 non-null uint8
20 Call_growl 345 non-null uint8
21 Call_growl/hiss 345 non-null uint8
22 Call_hiss 345 non-null uint8
23 Call_purr 345 non-null uint8
24 Call_roar 345 non-null uint8
dtypes: float64(9), object(1), uint8(15)
memory usage: 32.1+ KB
###Markdown
Splitting data
###Code
X = df2[pd.notnull(df2['Species'])].drop(['Species'], axis=1)
y = df2[pd.notnull(df2['Species'])]['Species']
print(X.shape)
print(y.shape)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.30)
print(X_train.shape)
print(X_test.shape)
print('Species values for Data')
print(df2['Species'].value_counts())
print('\n')
print('Species values for Training')
print(y_train.value_counts())
print('\n')
print('Species values for Testing')
print(y_test.value_counts())
print('Species trained for but not tested for')
print(set(np.unique(y_train))-set(np.unique(y_test)))
print('Species test for but not trained for')
print(set(np.unique(y_test))-set(np.unique(y_train)))
###Output
Species trained for but not tested for
set()
Species test for but not trained for
set()
###Markdown
XGBoost
###Code
parameters = dict(
objective='multi:softprob',
random_state = 30,
max_depth=9,
learning_rate=0.01,
subsample=0.8,
colsample_bytree=0.4,
tree_method='gpu_hist')
#eval_metric='mlogloss'
clf = XGBClassifier(**parameters, n_estimators=1200)
clf.fit(X_train, y_train)
clf.score(X_train, y_train)
clf.score(X_test,y_test)
y_pred = clf.predict(X_test)
print('1. Tested Species')
print(np.unique(y_test))
print('2. Predicted Species')
print(np.unique(y_pred))
print('3. Not tested for but predicted')
print(set(np.unique(y_pred))-set(np.unique(y_test)))
print('4. Tested for but not predicted')
print(set(np.unique(y_test))-set(np.unique(y_pred)))
names = sorted(list(set(np.unique(y_test)).union(set(y_pred))))
cnf = confusion_matrix(y_test, y_pred)
fig, ax = plt.subplots(figsize=(8,4))
# Normalise
cnf = cnf.astype('float')/cnf.sum(axis=1)[:, np.newaxis]
sns.heatmap(cnf, annot=True, fmt='.1%', xticklabels=names, yticklabels=names,cmap= "YlOrBr")
plt.title('XGBoost')
ax.xaxis.set_label_position('top')
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show(block=False)
print('Accuracy',clf.score(X_test, y_test))
plot_importance(clf)
figsize=(8,4)
###Output
_____no_output_____
###Markdown
SVM
###Code
clf_svc = SVC()
clf_svc.fit(X_train, y_train)
clf_svc.score(X_train, y_train)
clf_svc.score(X_test, y_test)
y_pred_svc=clf_svc.predict(X_test)
print('1. Tested Species')
print(np.unique(y_test))
print('2. Predicted Species')
print(np.unique(y_pred_svc))
print('3. Not tested for but predicted')
print(set(np.unique(y_pred_svc))-set(np.unique(y_test)))
print('4. Tested for but not predicted')
print(set(np.unique(y_test))-set(np.unique(y_pred_svc)))
names_svc = sorted(list(set(np.unique(y_test)).union(set(y_pred_svc))))
cnf = confusion_matrix(y_test, y_pred_svc)
fig, ax = plt.subplots(figsize=(8,4))
# Normalise
cnf = cnf.astype('float')/cnf.sum(axis=1)[:, np.newaxis]
sns.heatmap(cnf, annot=True, fmt='.1%', xticklabels=names_svc, yticklabels=names_svc,cmap= "YlOrBr")
plt.title('SVM Confusion Matrix')
ax.xaxis.set_label_position('top')
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show(block=False)
print('Accuracy',clf_svc.score(X_test, y_test))
###Output
_____no_output_____
###Markdown
Naive Bayes
###Code
gnb = GaussianNB()
gnb.fit(X_train, y_train)
gnb.score(X_train, y_train)
gnb.score(X_test, y_test)
y_pred_nb = gnb.predict(X_test)
print('1. Tested Species')
print(np.unique(y_test))
print('2. Predicted Species')
print(np.unique(y_pred_nb))
print('3. Not tested for but predicted')
print(set(np.unique(y_pred_nb))-set(np.unique(y_test)))
print('4. Tested for but not predicted')
print(set(np.unique(y_test))-set(np.unique(y_pred_nb)))
names_nb = sorted(list(set(np.unique(y_test)).union(set(y_pred_nb))))
cnf = confusion_matrix(y_test, y_pred_nb)
fig, ax = plt.subplots(figsize=(8,4))
# Normalise
cnf = cnf.astype('float')/cnf.sum(axis=1)[:, np.newaxis]
sns.heatmap(cnf, annot=True, fmt='.1%', xticklabels=names_nb, yticklabels=names_nb,cmap= "YlOrBr")
plt.title('Naive Bayes Confusion Matrix')
ax.xaxis.set_label_position('top')
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show(block=False)
print('Accuracy',gnb.score(X_test, y_test))
###Output
_____no_output_____
###Markdown
Logistic Regression
###Code
lr = LogisticRegression(solver='liblinear', multi_class='ovr')
lr.fit(X_train,y_train)
lr.score(X_train, y_train)
lr.score(X_test, y_test)
y_pred_lr = lr.predict(X_test)
print('1. Tested Species')
print(np.unique(y_test))
print('2. Predicted Species')
print(np.unique(y_pred_lr))
print('3. Not tested for but predicted')
print(set(np.unique(y_pred_lr))-set(np.unique(y_test)))
print('4. Tested for but not predicted')
print(set(np.unique(y_test))-set(np.unique(y_pred_lr)))
names_lr = sorted(list(set(np.unique(y_test)).union(set(y_pred_lr))))
cnf = confusion_matrix(y_test, y_pred_lr)
fig, ax = plt.subplots(figsize=(8,4))
# Normalise
cnf = cnf.astype('float')/cnf.sum(axis=1)[:, np.newaxis]
sns.heatmap(cnf, annot=True, fmt='.1%', xticklabels=names_lr, yticklabels=names_lr,cmap= "YlOrBr")
plt.title('Logistic Regression')
ax.xaxis.set_label_position('top')
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show(block=False)
print('Accuracy',lr.score(X_test, y_test))
###Output
_____no_output_____
###Markdown
KNN
###Code
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
knn.score(X_train, y_train)
knn.score(X_test, y_test)
y_pred_knn = knn.predict(X_test)
print('1. Tested Species')
print(np.unique(y_test))
print('2. Predicted Species')
print(np.unique(y_pred_knn))
print('3. Not tested for but predicted')
print(set(np.unique(y_pred_knn))-set(np.unique(y_test)))
print('4. Tested for but not predicted')
print(set(np.unique(y_test))-set(np.unique(y_pred_knn)))
names_knn = sorted(list(set(np.unique(y_test)).union(set(y_pred_knn))))
cnf = confusion_matrix(y_test, y_pred_knn)
fig, ax = plt.subplots(figsize=(8,4))
#Normalise
cnf = cnf.astype('float')/cnf.sum(axis=1)[:, np.newaxis]
sns.heatmap(cnf, annot=True, fmt='.1%', xticklabels=names_knn, yticklabels=names_knn,cmap= "YlOrBr")
plt.title('KNN')
ax.xaxis.set_label_position('top')
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show(block=False)
print('Accuracy',knn.score(X_test, y_test))
###Output
_____no_output_____
###Markdown
Decision Tree Classifier
###Code
cart = DecisionTreeClassifier()
cart.fit(X_train, y_train)
cart.score(X_train, y_train)
cart.score(X_test, y_test)
y_pred_cart = cart.predict(X_test)
print('1. Tested Species')
print(np.unique(y_test))
print('2. Predicted Species')
print(np.unique(y_pred_cart))
print('3. Not tested for but predicted')
print(set(np.unique(y_pred_cart))-set(np.unique(y_test)))
print('4. Tested for but not predicted')
print(set(np.unique(y_test))-set(np.unique(y_pred_cart)))
names_cart = sorted(list(set(np.unique(y_test)).union(set(y_pred_cart))))
cnf = confusion_matrix(y_test, y_pred_cart)
fig, ax = plt.subplots(figsize=(8,4))
# Normalise
cnf = cnf.astype('float')/cnf.sum(axis=1)[:, np.newaxis]
sns.heatmap(cnf, annot=True, fmt='.1%', xticklabels=names_cart, yticklabels=names_cart,cmap= "YlOrBr")
plt.title('Decision Tree Classifier')
ax.xaxis.set_label_position('top')
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show(block=False)
print('Accuracy',cart.score(X_test, y_test))
###Output
_____no_output_____
###Markdown
Random Forest Classifier
###Code
rf = RandomForestClassifier()
rf.fit(X_train, y_train)
rf.score(X_train, y_train)
rf.score(X_test, y_test)
y_pred_rf = cart.predict(X_test)
print('1. Tested Species')
print(np.unique(y_test))
print('2. Predicted Species')
print(np.unique(y_pred_rf))
print('3. Not tested for but predicted')
print(set(np.unique(y_pred_rf))-set(np.unique(y_test)))
print('4. Tested for but not predicted')
print(set(np.unique(y_test))-set(np.unique(y_pred_rf)))
names_rf = sorted(list(set(np.unique(y_test)).union(set(y_pred_rf))))
cnf = confusion_matrix(y_test, y_pred_rf)
fig, ax = plt.subplots(figsize=(8,4))
# Normalise
cnf = cnf.astype('float')/cnf.sum(axis=1)[:, np.newaxis]
sns.heatmap(cnf, annot=True, fmt='.1%', xticklabels=names_rf, yticklabels=names_rf,cmap= "YlOrBr")
plt.title('Random Forest')
ax.xaxis.set_label_position('top')
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show(block=False)
print('Accuracy',rf.score(X_test, y_test))
###Output
_____no_output_____ |
20202/dm_20202_0601_overfitting.ipynb | ###Markdown
###Code
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
np.random.seed(1981)
n_samples = 30
degrees = [1, 15, 3, 5]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples)*0.1
plt.figure(figsize=(14, 14))
for i in range(len(degrees)):
ax = plt.subplot(2, 2, i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i], include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="Real Function")
plt.scatter(X, y, label="Training points")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degrees %d"%(degrees[i]))
plt.show()
###Output
_____no_output_____ |
Notas_R/Notas_AdminActiva/Notas_AdministracionActiva.ipynb | ###Markdown
Microstructure and Trading Systems Notes on Active Management
###Code
# Remove all objects from the "Environment"
rm(list = ls())
# number of zeros accepted before a figure is expressed in scientific notation
options("scipen"=100, "digits"=4)
# Load the libraries to be used
#suppressMessages(library(Quandl)) # Download prices
#suppressMessages(library(ROI)) # Portfolio optimization
#suppressMessages(library(knitr)) # Documentation + code options
#suppressMessages(library(xlsx)) # Read XLSx files
#suppressMessages(library(kableExtra)) # HTML tables
#suppressMessages(library(PortfolioAnalytics)) # Modern Portfolio Theory
options(knitr.table.format = "html")
tk <- as.data.frame(read.csv(file = "IAK_holdings.csv",header = FALSE, sep = ","))
cs <- c("date", "adj_close")
class(tk)
tk
###Output
_____no_output_____ |
examples/2_Rejection_ABC_closer_look.ipynb | ###Markdown
[![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/eth-cscs/abcpy/master?filepath=examples%2FRejection_ABC_closer_look.ipynb) A closer look at Rejection ABCIn this notebook, we give some insights on how Rejection ABC (and ABC in general) works, using `ABCpy`. Approximate Bayesian Computation (ABC) Approximate Bayesian Computation is a set of methods that allow one to find the 'best' parameters of a scientific model with respect to observations from the real world. More specifically, ABC sits in the set of Bayesian inference methods; therefore, it provides the user not only with a point estimate of parameter values, but with a _posterior_ distribution quantifying uncertainty.To infer the parameters of a model using ABC, three basic ingredients are required:- A model is required that, given some input parameters, can generate synthetic observations- Some prior knowledge about the input parameters is required (a Uniform distribution over the parameter space is always possible)- A discrepancy function is required that quantifies how similar two sets of observations (real and synthetic) are. Here, we will use the simple Euclidean distance between observations.**Note: we do not need the likelihood function of the bi-variate normal distribution!**In this model, we will consider a setup in which a scientist measures the height and weight of a set of people and wants to use a statistical model to describe them; moreover, she also wants to find the posterior distribution over the parameters.
###Code
from math import cos, sin, pi
import matplotlib.mlab as mlab
import numpy as np
import scipy
from matplotlib import gridspec, pyplot as plt
from numpy.linalg import inv
from scipy.stats import multivariate_normal
from abcpy.probabilisticmodels import ProbabilisticModel, Continuous, InputConnector
from abcpy.continuousmodels import Uniform
from abcpy.statistics import Identity
from abcpy.distances import Euclidean
from abcpy.inferences import RejectionABC
from abcpy.backends import BackendDummy as Backend
%matplotlib inline
###Output
_____no_output_____
###Markdown
Let us define the model we will consider; this is specifically a bivariate normal model, in which the covariance matrix is defined in the following way (method `get_cov`): - the standard deviations s1 and s2 are used to define a diagonal covariance matrix - then, a rotation matrix corresponding to the angle alpha is used to rotate that into a correlated covariance matrix. Essentially, then, s1 and s2 are the standard deviations of the final bivariate normal along the directions in which the two components are uncorrelated. This is related to eigendecomposition, but that is not the main point here. We use the `ABCpy` API to define the model:
###Code
class BivariateNormal(ProbabilisticModel, Continuous):
def __init__(self, parameters, name='BivariateNormal'):
# We expect input of type parameters = [m1, m2, s1, s2, alpha]
if not isinstance(parameters, list):
raise TypeError('Input of Normal model is of type list')
if len(parameters) != 5:
raise RuntimeError('Input list must be of length 5, containing [m1, m2, s1, s2, alpha].')
input_connector = InputConnector.from_list(parameters)
super().__init__(input_connector, name)
def _check_input(self, input_values):
# Check whether input has correct type or format
if len(input_values) != 5:
raise ValueError('Number of parameters of BivariateNormal model must be 5.')
# Check whether input is from correct domain
m1 = input_values[0]
m2 = input_values[1]
s1 = input_values[2]
s2 = input_values[3]
alpha = input_values[4]
if s1 < 0 or s2 < 0:
return False
return True
def _check_output(self, values):
        if not isinstance(values, np.ndarray):
raise ValueError('This returns a bivariate array')
if values.shape[0] != 2:
raise RuntimeError('The size of the output has to be 2.')
return True
def get_output_dimension(self):
return 2
def forward_simulate(self, input_values, k, rng=np.random.RandomState()):
# Extract the input parameters
m1 = input_values[0]
m2 = input_values[1]
s1 = input_values[2]
s2 = input_values[3]
alpha = input_values[4]
mean = np.array([m1, m2])
cov = self.get_cov(s1, s2, alpha)
obs_pd = multivariate_normal(mean=mean, cov=cov)
vector_of_k_samples = obs_pd.rvs(k)
# Format the output to obey API
result = [np.array([x]) for x in vector_of_k_samples]
return result
def get_cov(self, s1, s2, alpha):
"""Function to generate a covariance bivariate covariance matrix; it starts from considering a
diagonal covariance matrix with standard deviations s1, s2 and then applies the rotation matrix with
angle alpha. """
r = np.array([[cos(alpha), -sin(alpha)], [sin(alpha), cos(alpha)]]) # Rotation matrix
e = np.array([[s1, 0], [0, s2]]) # Eigenvalue matrix
rde = np.dot(r, e)
rt = np.transpose(r)
cov = np.dot(rde, rt)
return cov
###Output
_____no_output_____
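###Markdown
In equation form, the covariance returned by `get_cov` above is the rotated diagonal matrix $\Sigma = R(\alpha)\,\mathrm{diag}(s_1, s_2)\,R(\alpha)^\top$, with rotation matrix $R(\alpha) = \begin{pmatrix}\cos\alpha & -\sin\alpha\\ \sin\alpha & \cos\alpha\end{pmatrix}$, so the angle $\alpha$ controls how correlated the two components are.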
###Markdown
Next, we define some help functions for plots:
###Code
def plot_dspace(ax, sl, marker, color):
"""Plot the data in 'sl' on 'ax';"""
ax.set_xlim(100,220)
ax.set_ylim(30,150)
ax.set_xlabel('Height in cm')
ax.set_ylabel('Weigth in kg')
for samples in sl:
ax.plot(samples[:,0], samples[:,1], marker, c=color)
def plot_pspace(ax_means, ax_vars, ax_angle, m1, m2, s1, s2, alpha, color):
"""Plot parameter space. m1 and m2 are the means of the height and weight respectively, while s1, s2 are
two standard deviations for the eigenvalue normal components. Finally, alpha is the angle that determines the
amount of rotation applied to the two independent components to get the covariance matrix."""
ax_means.set_xlabel('Mean of height')
ax_means.set_ylabel('Mean of weight')
ax_means.set_xlim(120,200)
ax_means.set_ylim(50,150)
ax_means.plot(m1, m2, 'o', c=color)
ax_vars.set_xlabel('Standard deviation 1')
ax_vars.set_ylabel('Standard deviation 2')
ax_vars.set_xlim(0,100)
ax_vars.set_ylim(0,100)
ax_vars.plot(s1, s2, 'o', c=color)
ax_angle.set_xlabel('Rotation angle')
ax_angle.set_xlim(0, pi/2)
ax_angle.set_yticks([])
ax_angle.plot(np.linspace(0, pi, 10), [0]*10, c='black', linewidth=0.2)
ax_angle.plot(alpha, 0, 'o', c=color)
def plot_all(axs, m1, m2, s1, s2, alpha, color, marker, model, k):
"""Function plotting pameters, generating data from them and plotting data too. It uses the model
to generate k samples from the provided set of parameters.
m1 and m2 are the means of the height and weight respectively, while s1, s2 are
two standard deviations for the eigenvalue normal components. Finally, alpha is the angle that determines the
amount of rotation applied to the two independent components to get the covariance matrix.
"""
ax_pspace_means, ax_pspace_vars, ax_pspace_angle, ax_dspace = axs
plot_pspace(ax_pspace_means, ax_pspace_vars, ax_pspace_angle, m1, m2, s1, s2, alpha, color)
samples = model.forward_simulate([m1, m2, s1, s2, alpha], k)
plot_dspace(ax_dspace, samples, marker, color)
###Output
_____no_output_____
###Markdown
Define now the probabilistic model; we put uniform priors on the parameters:
###Code
m1 = Uniform([[120], [200]], name="Mean_height")
m2 = Uniform([[50], [150]], name="Mean_weigth")
s1 = Uniform([[0], [100]], name="sd_1")
s2 = Uniform([[0], [100]], name="sd_2")
alpha = Uniform([[0], [pi/2]], name="alpha")
bivariate_normal = BivariateNormal([m1, m2, s1, s2, alpha])
###Output
_____no_output_____
###Markdown
Assume now that the scientist obtained an observation, from field data, that was generated by the model with a specific set of parameters `obs_par`; this is of course fictitious, but we take this assumption in order to check whether we are able to recover decently the actual model parameters we used.
###Code
obs_par = np.array([175, 75, 90, 35, pi/4.])
obs = bivariate_normal.forward_simulate(obs_par, 100)
fig_obs = plt.figure(dpi=300)
fig_obs.set_size_inches(9,9)
ax_obs = fig_obs.add_subplot(111)
ax_obs.set_title('Observations')
plot_dspace(ax_obs, obs, 'x', 'C0')
###Output
_____no_output_____
###Markdown
Rejection ABCThis is the most fundamental algorithm for ABC; it works in four steps:Repeat:1. draw a parameter sample theta from the prior2. generate synthetic observations from the model using theta3. compute the distance between observed and synthetic data4. if the distance is smaller than a threshold, add theta to accepted parametersAnd the loop continues until enough parameter values are accepted. The output is a set of accepted parameters, that resembles the parameters 'true' (posterior) distribution.![Rejection ABC image](https://github.com/eth-cscs/abcpy/raw/master/doc/source/ABC_rejection.png) RejectionABC in FiguresWe will now display the observations generated from the model for a set of parameter values; specifically, we consider 4 different sets of parameter values (corresponding to the four different colors) which are displayed in the left hand side set of plot; the corresponding observations are of the same color in the right plot; in the latter, we also show the observation (blue).
###Code
np.random.seed(0)
fig_sim = plt.figure(dpi=150)
fig_sim.set_size_inches(19, 9)
gs = gridspec.GridSpec(1, 2, width_ratios=[1,1], height_ratios=[1])
gs_pspace = gridspec.GridSpecFromSubplotSpec(2, 2, subplot_spec=gs[0,0], width_ratios=[1, 1], height_ratios=[4,1])
ax_pspace_means = plt.subplot(gs_pspace[0,0])
ax_pspace_vars = plt.subplot(gs_pspace[0,1])
ax_pspace_angle = plt.subplot(gs_pspace[1,:])
ax_dspace = plt.subplot(gs[0,1])
axs = (ax_pspace_means, ax_pspace_vars, ax_pspace_angle, ax_dspace)
#plot_dspace(ax_dspace, [obs], 'x', 'C0')
plot_all(axs, 130,110,95,50,pi/5, 'C1', 'x', bivariate_normal, 100)
plot_all(axs, 170,80,60,5,0.3, 'C2', 'x', bivariate_normal, 100)
plot_all(axs, 135,55,10,70,1.3, 'C3', 'x', bivariate_normal, 100)
plot_all(axs, 190,120,21,21,pi/3., 'C4', 'x', bivariate_normal, 100)
plot_dspace(ax_dspace, obs, 'X', 'C0')
###Output
_____no_output_____
###Markdown
The idea of ABC is the following: similar data sets come from similar sets of parameters. For this reason, to obtain the best parameter values which fit the observation, we will compare the observation with the synthetic data for different choices of parameters, for instance, above you can see that the green dataset is a better match for the observation than the others. Let us now generate some samples from the prior and see how well they fit the observation:
###Code
n_prior_samples = 100
params_prior = np.zeros((n_prior_samples,5))
for i in range(n_prior_samples):
m1_val = m1.forward_simulate([[120], [200]], k=1)
m2_val = m2.forward_simulate([[50], [150]], k=1)
s1_val = s1.forward_simulate([[0], [100]], k=1)
s2_val = s2.forward_simulate([[0], [100]], k=1)
alpha_val = alpha.forward_simulate([[0], [pi / 2]], k=1)
params_prior[i] = np.array([m1_val, m2_val, s1_val, s2_val, alpha_val]).squeeze()
np.random.seed(0)
fig_abc1 = plt.figure(dpi=150)
fig_abc1.set_size_inches(19, 9)
gs = gridspec.GridSpec(1, 2, width_ratios=[1,1], height_ratios=[1])
gs_pspace = gridspec.GridSpecFromSubplotSpec(2, 2, subplot_spec=gs[0,0], width_ratios=[1, 1], height_ratios=[4,1])
ax_pspace_means = plt.subplot(gs_pspace[0,0])
ax_pspace_vars = plt.subplot(gs_pspace[0,1])
ax_pspace_angle = plt.subplot(gs_pspace[1,:])
ax_dspace = plt.subplot(gs[0,1])
axs = (ax_pspace_means, ax_pspace_vars, ax_pspace_angle, ax_dspace)
for i in range(0, n_prior_samples):
plot_all(axs, params_prior[i,0], params_prior[i,1], params_prior[i,2], params_prior[i,3], params_prior[i,4],
'C1', '.', bivariate_normal, k=100)
plot_pspace(ax_pspace_means, ax_pspace_vars, ax_pspace_angle, *obs_par, color="C0")
plot_dspace(ax_dspace, obs, 'X', 'C0')
###Output
_____no_output_____
###Markdown
Above, the blue dot represent the parameter values which originated the observation, while the orange parameter values are the ones sampled from the prior; the corresponding synthetic datasets are shown as orange clouds of dots, while the observation is shown as blue crosses. InferenceNow, let's perform inference with Rejection ABC to get some approximate posterior samples:
###Code
statistics_calculator = Identity()
distance_calculator = Euclidean(statistics_calculator)
backend = Backend()
sampler = RejectionABC([bivariate_normal], [distance_calculator], backend, seed=1)
###Output
_____no_output_____
###Markdown
Sampling may take a while. It will take longer the more you decrease the threshold epsilon or increase the number of samples.
###Code
n_samples = 100 # number of posterior samples we aim for
n_samples_per_param = 100 # number of simulations for each set of parameter values
journal = sampler.sample([obs], n_samples, n_samples_per_param, epsilon=15)
print(journal.number_of_simulations)
###Output
[3732]
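###Markdown
For reference, the accept/reject loop that `sampler.sample` performs can be sketched in plain Python as below; this is an illustrative simplification, not ABCpy's actual implementation, and `sample_prior`, `simulate` and `distance` are hypothetical stand-ins for the prior, the model and the discrepancy function.
###Code
# Illustrative sketch of the Rejection ABC loop described earlier (hypothetical helpers)
def rejection_abc(sample_prior, simulate, distance, obs, n_samples, epsilon):
    accepted, n_sim = [], 0
    while len(accepted) < n_samples:
        theta = sample_prior()                    # 1. draw parameters from the prior
        synthetic = simulate(theta)               # 2. generate synthetic observations
        n_sim += 1
        if distance(obs, synthetic) < epsilon:    # 3. compare with the observed data
            accepted.append(theta)                # 4. keep theta if close enough
    return accepted, n_sim                        # posterior samples and simulation count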
###Markdown
Now, we will produce a plot similar to the above one for the prior but starting from the posterior samples.
###Code
posterior_samples = np.array(journal.get_accepted_parameters()).squeeze()
np.random.seed(0)
fig_abc1 = plt.figure(dpi=150)
fig_abc1.set_size_inches(19, 9)
gs = gridspec.GridSpec(1, 2, width_ratios=[1,1], height_ratios=[1])
gs_pspace = gridspec.GridSpecFromSubplotSpec(2, 2, subplot_spec=gs[0,0], width_ratios=[1, 1], height_ratios=[4,1])
ax_pspace_means = plt.subplot(gs_pspace[0,0])
ax_pspace_vars = plt.subplot(gs_pspace[0,1])
ax_pspace_angle = plt.subplot(gs_pspace[1,:])
ax_dspace = plt.subplot(gs[0,1])
axs = (ax_pspace_means, ax_pspace_vars, ax_pspace_angle, ax_dspace)
for i in range(0, n_samples):
plot_all(axs, posterior_samples[i,0], posterior_samples[i,1], posterior_samples[i,2], posterior_samples[i,3],
posterior_samples[i,4], 'C1', '.', bivariate_normal, k=100)
plot_pspace(ax_pspace_means, ax_pspace_vars, ax_pspace_angle, *obs_par, color="C0")
plot_dspace(ax_dspace, obs, 'X', 'C0')
###Output
_____no_output_____ |
1_mosaic_data_attention_experiments/3_stage_wise_training/alternate_minimization/on CIFAR data/old_notebooks/alternate_focus_first_classify_later_RMSprop_scheduling_every_1.ipynb | ###Markdown
load mosaic data
###Code
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=10, shuffle=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=10, shuffle=False)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
foreground_classes = {'plane', 'car', 'bird'}
#foreground_classes = {'bird', 'cat', 'deer'}
background_classes = {'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'}
#background_classes = {'plane', 'car', 'dog', 'frog', 'horse','ship', 'truck'}
fg1,fg2,fg3 = 0,1,2
dataiter = iter(trainloader)
background_data=[]
background_label=[]
foreground_data=[]
foreground_label=[]
batch_size=10
for i in range(5000):
images, labels = dataiter.next()
for j in range(batch_size):
if(classes[labels[j]] in background_classes):
img = images[j].tolist()
background_data.append(img)
background_label.append(labels[j])
else:
img = images[j].tolist()
foreground_data.append(img)
foreground_label.append(labels[j])
foreground_data = torch.tensor(foreground_data)
foreground_label = torch.tensor(foreground_label)
background_data = torch.tensor(background_data)
background_label = torch.tensor(background_label)
def create_mosaic_img(bg_idx,fg_idx,fg):
"""
bg_idx : list of indexes of background_data[] to be used as background images in mosaic
fg_idx : index of image to be used as foreground image from foreground data
fg : at what position/index foreground image has to be stored out of 0-8
"""
image_list=[]
j=0
for i in range(9):
if i != fg:
image_list.append(background_data[bg_idx[j]])#.type("torch.DoubleTensor"))
j+=1
else:
image_list.append(foreground_data[fg_idx])#.type("torch.DoubleTensor"))
  label = foreground_label[fg_idx]-fg1 # subtract fg1 so the foreground classes map to labels 0,1,2
#image_list = np.concatenate(image_list ,axis=0)
image_list = torch.stack(image_list)
return image_list,label
desired_num = 30000
mosaic_list_of_images =[] # list of mosaic images, each mosaic image is saved as list of 9 images
fore_idx =[] # list of indexes at which foreground image is present in a mosaic image i.e from 0 to 9
mosaic_label=[] # label of mosaic image = foreground class present in that mosaic
for i in range(desired_num):
np.random.seed(i)
bg_idx = np.random.randint(0,35000,8)
fg_idx = np.random.randint(0,15000)
fg = np.random.randint(0,9)
fore_idx.append(fg)
image_list,label = create_mosaic_img(bg_idx,fg_idx,fg)
mosaic_list_of_images.append(image_list)
mosaic_label.append(label)
class MosaicDataset(Dataset):
"""MosaicDataset dataset."""
def __init__(self, mosaic_list_of_images, mosaic_label, fore_idx):
"""
Args:
csv_file (string): Path to the csv file with annotations.
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.mosaic = mosaic_list_of_images
self.label = mosaic_label
self.fore_idx = fore_idx
def __len__(self):
return len(self.label)
def __getitem__(self, idx):
return self.mosaic[idx] , self.label[idx], self.fore_idx[idx]
batch = 250
msd = MosaicDataset(mosaic_list_of_images, mosaic_label , fore_idx)
train_loader = DataLoader( msd,batch_size= batch ,shuffle=True)
test_images =[] #list of mosaic images, each mosaic image is saved as list of 9 images
fore_idx_test =[] #list of indexes at which foreground image is present in a mosaic image
test_label=[] # label of mosaic image = foreground class present in that mosaic
for i in range(10000):
np.random.seed(i+30000)
bg_idx = np.random.randint(0,35000,8)
fg_idx = np.random.randint(0,15000)
fg = np.random.randint(0,9)
fore_idx_test.append(fg)
image_list,label = create_mosaic_img(bg_idx,fg_idx,fg)
test_images.append(image_list)
test_label.append(label)
test_data = MosaicDataset(test_images,test_label,fore_idx_test)
test_loader = DataLoader( test_data,batch_size= batch ,shuffle=False)
###Output
_____no_output_____
###Markdown
models
###Code
class Module1(nn.Module):
def __init__(self):
super(Module1, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
self.fc4 = nn.Linear(10,1)
def forward(self, z):
x = torch.zeros([batch,9],dtype=torch.float64)
y = torch.zeros([batch,3, 32,32], dtype=torch.float64)
x,y = x.to("cuda"),y.to("cuda")
for i in range(9):
x[:,i] = self.helper(z[:,i])[:,0]
x = F.softmax(x,dim=1) # alphas
for i in range(9):
x1 = x[:,i]
y = y + torch.mul(x1[:,None,None,None],z[:,i])
return y , x
def helper(self,x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = self.fc4(x)
return x
class Module2(nn.Module):
def __init__(self):
super(Module2, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
self.fc4 = nn.Linear(10,3)
    def forward(self,y): # y: batch of attention-averaged mosaic images produced by Module1
y1 = self.pool(F.relu(self.conv1(y)))
y1 = self.pool(F.relu(self.conv2(y1)))
y1 = y1.view(-1, 16 * 5 * 5)
y1 = F.relu(self.fc1(y1))
y1 = F.relu(self.fc2(y1))
y1 = F.relu(self.fc3(y1))
y1 = self.fc4(y1)
return y1
def calculate_attn_loss(dataloader,what,where,criter):
what.eval()
where.eval()
r_loss = 0
alphas = []
lbls = []
pred = []
fidices = []
correct = 0
tot = 0
with torch.no_grad():
for i, data in enumerate(dataloader, 0):
inputs, labels,fidx = data
lbls.append(labels)
fidices.append(fidx)
inputs = inputs.double()
inputs, labels = inputs.to("cuda"),labels.to("cuda")
avg,alpha = where(inputs)
outputs = what(avg)
_, predicted = torch.max(outputs.data, 1)
correct += sum(predicted == labels)
tot += len(predicted)
pred.append(predicted.cpu().numpy())
alphas.append(alpha.cpu().numpy())
loss = criter(outputs, labels)
r_loss += loss.item()
alphas = np.concatenate(alphas,axis=0)
pred = np.concatenate(pred,axis=0)
lbls = np.concatenate(lbls,axis=0)
fidices = np.concatenate(fidices,axis=0)
#print(alphas.shape,pred.shape,lbls.shape,fidices.shape)
analysis = analyse_data(alphas,lbls,pred,fidices)
  return r_loss/(i+1),analysis,correct.item(),tot,correct.item()/tot # average over all batches
def analyse_data(alphas,lbls,predicted,f_idx):
'''
analysis data is created here
'''
batch = len(predicted)
amth,alth,ftpt,ffpt,ftpf,ffpf = 0,0,0,0,0,0
for j in range (batch):
focus = np.argmax(alphas[j])
if(alphas[j][focus] >= 0.5):
amth +=1
else:
alth +=1
if(focus == f_idx[j] and predicted[j] == lbls[j]):
ftpt += 1
elif(focus != f_idx[j] and predicted[j] == lbls[j]):
ffpt +=1
elif(focus == f_idx[j] and predicted[j] != lbls[j]):
ftpf +=1
elif(focus != f_idx[j] and predicted[j] != lbls[j]):
ffpf +=1
#print(sum(predicted==lbls),ftpt+ffpt)
return [ftpt,ffpt,ftpf,ffpf,amth,alth]
torch.manual_seed(1234)
where_net = Module1().double()
where_net = where_net.to("cuda")
# print(net.parameters)
torch.manual_seed(1234)
what_net = Module2().double()
what_net = what_net.to("cuda")
###Output
_____no_output_____
###Markdown
training
###Code
# instantiate optimizer
optimizer_where = optim.RMSprop(where_net.parameters(),lr =0.001)#,momentum=0.9)#,nesterov=True)
optimizer_what = optim.RMSprop(what_net.parameters(), lr=0.001)#,momentum=0.9)#,nesterov=True)
scheduler_where = optim.lr_scheduler.ReduceLROnPlateau(optimizer_where, mode='min', factor=0.5, patience=3,min_lr=5e-6,verbose=True)
scheduler_what = optim.lr_scheduler.ReduceLROnPlateau(optimizer_what, mode='min', factor=0.5, patience=3,min_lr=5e-6, verbose=True)
criterion = nn.CrossEntropyLoss()
acti = []
analysis_data_tr = []
analysis_data_tst = []
loss_curi_tr = []
loss_curi_tst = []
epochs = 130
every_what_epoch = 1
# calculate zeroth epoch loss and FTPT values
running_loss,anlys_data,correct,total,accuracy = calculate_attn_loss(train_loader,what_net,where_net,criterion)
print('training epoch: [%d ] loss: %.3f correct: %.3f, total: %.3f, accuracy: %.3f' %(0,running_loss,correct,total,accuracy))
loss_curi_tr.append(running_loss)
analysis_data_tr.append(anlys_data)
running_loss,anlys_data,correct,total,accuracy = calculate_attn_loss(test_loader,what_net,where_net,criterion)
print('test epoch: [%d ] loss: %.3f correct: %.3f, total: %.3f, accuracy: %.3f' %(0,running_loss,correct,total,accuracy))
loss_curi_tst.append(running_loss)
analysis_data_tst.append(anlys_data)
# training starts
for epoch in range(epochs): # loop over the dataset multiple times
ep_lossi = []
running_loss = 0.0
what_net.train()
where_net.train()
if ((epoch) % (every_what_epoch*2) ) <= every_what_epoch-1 :
print(epoch+1,"updating where_net, what_net is freezed")
print("--"*40)
elif ((epoch) % (every_what_epoch*2)) > every_what_epoch-1 :
print(epoch+1,"updating what_net, where_net is freezed")
print("--"*40)
for i, data in enumerate(train_loader, 0):
# get the inputs
inputs, labels,_ = data
inputs = inputs.double()
inputs, labels = inputs.to("cuda"),labels.to("cuda")
# zero the parameter gradients
optimizer_where.zero_grad()
optimizer_what.zero_grad()
# forward + backward + optimize
avg, alpha = where_net(inputs)
outputs = what_net(avg)
loss = criterion(outputs, labels)
# print statistics
running_loss += loss.item()
loss.backward()
if ((epoch) % (every_what_epoch*2) ) <= every_what_epoch-1 :
optimizer_where.step()
elif ( (epoch) % (every_what_epoch*2)) > every_what_epoch-1 :
optimizer_what.step()
running_loss_tr,anls_data,correct,total,accuracy = calculate_attn_loss(train_loader,what_net,where_net,criterion)
analysis_data_tr.append(anls_data)
loss_curi_tr.append(running_loss_tr) #loss per epoch
print('training epoch: [%d ] loss: %.3f correct: %.3f, total: %.3f, accuracy: %.3f' %(epoch+1,running_loss_tr,correct,total,accuracy))
running_loss_tst,anls_data,correct,total,accuracy = calculate_attn_loss(test_loader,what_net,where_net,criterion)
analysis_data_tst.append(anls_data)
loss_curi_tst.append(running_loss_tst) #loss per epoch
print('test epoch: [%d ] loss: %.3f correct: %.3f, total: %.3f, accuracy: %.3f' %(epoch+1,running_loss_tst,correct,total,accuracy))
if running_loss_tr<=0.01:
break
if ((epoch) % (every_what_epoch*2) ) <= every_what_epoch-1 :
scheduler_what.step(running_loss_tst)
elif ( (epoch) % (every_what_epoch*2)) > every_what_epoch-1 :
scheduler_where.step(running_loss_tst)
print('Finished Training run ')
analysis_data_tr = np.array(analysis_data_tr)
analysis_data_tst = np.array(analysis_data_tst)
fig = plt.figure(figsize = (12,8) )
vline_list = np.arange(every_what_epoch, epoch + every_what_epoch, every_what_epoch )
# train_loss = np.random.randn(340)
# test_loss = np.random.randn(340)
epoch_list = np.arange(0, epoch+2)
plt.plot(epoch_list,loss_curi_tr, label='train_loss')
plt.plot(epoch_list,loss_curi_tst, label='test_loss')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("CE Loss")
plt.vlines(vline_list,min(min(loss_curi_tr),min(loss_curi_tst)), max(max(loss_curi_tst),max(loss_curi_tr)),linestyles='dotted')
plt.title("train loss vs test loss")
plt.show()
fig.savefig("train_test_loss_plot.pdf")
analysis_data_tr
analysis_data_tr = np.array(analysis_data_tr)
analysis_data_tst = np.array(analysis_data_tst)
columns = ["epochs", "argmax > 0.5" ,"argmax < 0.5", "focus_true_pred_true", "focus_false_pred_true", "focus_true_pred_false", "focus_false_pred_false" ]
df_train = pd.DataFrame()
df_test = pd.DataFrame()
df_train[columns[0]] = np.arange(0,epoch+2)
df_train[columns[1]] = analysis_data_tr[:,-2]
df_train[columns[2]] = analysis_data_tr[:,-1]
df_train[columns[3]] = analysis_data_tr[:,0]
df_train[columns[4]] = analysis_data_tr[:,1]
df_train[columns[5]] = analysis_data_tr[:,2]
df_train[columns[6]] = analysis_data_tr[:,3]
df_test[columns[0]] = np.arange(0,epoch+2)
df_test[columns[1]] = analysis_data_tst[:,-2]
df_test[columns[2]] = analysis_data_tst[:,-1]
df_test[columns[3]] = analysis_data_tst[:,0]
df_test[columns[4]] = analysis_data_tst[:,1]
df_test[columns[5]] = analysis_data_tst[:,2]
df_test[columns[6]] = analysis_data_tst[:,3]
df_train
df_test
plt.figure(figsize=(12,8))
plt.plot(df_train[columns[0]],df_train[columns[1]], label='argmax > 0.5')
plt.plot(df_train[columns[0]],df_train[columns[2]], label='argmax < 0.5')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("training data")
plt.title("On Training set")
plt.vlines(vline_list,min(min(df_train[columns[1]]),min(df_train[columns[2]])), max(max(df_train[columns[1]]),max(df_train[columns[2]])),linestyles='dotted')
plt.show()
plt.figure(figsize=(12,8))
plt.plot(df_train[columns[0]],df_train[columns[3]], label ="focus_true_pred_true ")
plt.plot(df_train[columns[0]],df_train[columns[4]], label ="focus_false_pred_true ")
plt.plot(df_train[columns[0]],df_train[columns[5]], label ="focus_true_pred_false ")
plt.plot(df_train[columns[0]],df_train[columns[6]], label ="focus_false_pred_false ")
plt.title("On Training set")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("training data")
plt.vlines(vline_list,min(min(df_train[columns[3]]),min(df_train[columns[4]]),min(df_train[columns[5]]),min(df_train[columns[6]])), max(max(df_train[columns[3]]),max(df_train[columns[4]]),max(df_train[columns[5]]),max(df_train[columns[6]])),linestyles='dotted')
plt.show()
plt.figure(figsize=(12,8))
plt.plot(df_test[columns[0]],df_test[columns[1]], label='argmax > 0.5')
plt.plot(df_test[columns[0]],df_test[columns[2]], label='argmax < 0.5')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("training data")
plt.title("On Training set")
plt.vlines(vline_list,min(min(df_test[columns[1]]),min(df_test[columns[2]])), max(max(df_test[columns[1]]),max(df_test[columns[2]])),linestyles='dotted')
plt.show()
plt.figure(figsize=(12,8))
plt.plot(df_test[columns[0]],df_test[columns[3]], label ="focus_true_pred_true ")
plt.plot(df_test[columns[0]],df_test[columns[4]], label ="focus_false_pred_true ")
plt.plot(df_test[columns[0]],df_test[columns[5]], label ="focus_true_pred_false ")
plt.plot(df_test[columns[0]],df_test[columns[6]], label ="focus_false_pred_false ")
plt.title("On Training set")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("training data")
plt.vlines(vline_list,min(min(df_test[columns[3]]),min(df_test[columns[4]]),min(df_test[columns[5]]),min(df_test[columns[6]])), max(max(df_test[columns[3]]),max(df_test[columns[4]]),max(df_test[columns[5]]),max(df_test[columns[6]])),linestyles='dotted')
plt.show()
###Output
_____no_output_____ |
TallerPython.ipynb | ###Markdown
Introduction to Google Colab--- Colaboratory is a free online Google platform for running Jupyter Notebooks https://jupyter.org/ It integrates with Google Drive. To mount a Drive account different from the currently signed-in session:
###Code
from google.colab import drive
drive.mount('/gdrive')
%cd /gdrive/My\ Drive/Taller
%cd
%cd ..
drive.flush_and_unmount()
###Output
/root
###Markdown
If the same Drive account is mounted, change to a specific folder by giving its path
###Code
from google.colab import drive
drive.mount('/content/drive')
%cd content/drive/MyDrive/Taller
###Output
Mounted at /content/drive
###Markdown
Get the current working directory inside the machine's folder structure
###Code
!pwd
###Output
/content/drive/MyDrive/Taller
###Markdown
List the files inside the current folder
###Code
!ls
###Output
test.png
###Markdown
Display the image test.png. Other console commands: https://www.hostinger.co/tutoriales/linux-comandos
###Code
from IPython.display import Image
Image('test.png')
###Output
_____no_output_____
###Markdown
Basic Operations--- Addition
###Code
O_sum = 3 + 11
O_sum += 5
O_sum
###Output
_____no_output_____
###Markdown
Multiplication
###Code
O_mult = 3 * 10
O_mult *= 3
O_mult
###Output
_____no_output_____
###Markdown
Division
###Code
O_div = 7 / 10
O_div
###Output
_____no_output_____
###Markdown
Exponentiation
###Code
O_exp = 2 ** 6
O_exp
###Output
_____no_output_____
###Markdown
Modulo
###Code
O_mod = 20 % 3
O_mod
###Output
_____no_output_____
###Markdown
Integer quotient (floor division)
###Code
O_coci = 20 // 3
O_coci
###Output
_____no_output_____
###Markdown
Comparison operations
###Code
mi_boolean = 2 == 3
mi_boolean
mi_boolean = 'hola' != "hola"
mi_boolean
mi_boolean = 34 < 10
mi_boolean
mi_boolean = 35 >= 35
mi_boolean
mi_boolean = 35 == 35 and 2 > 10
mi_boolean
mi_boolean = 14 <= 15 or 16 > 20
mi_boolean
mi_boolean = not 'hola' != "hola"
mi_boolean
###Output
_____no_output_____
###Markdown
String Variables (alphanumeric)--- String Both double quotes " " and single quotes ' ' can be used, and either will be interpreted as the string type
###Code
mensaje = 'Hola mundo'
print(type(mensaje))
mensaje = "Hola mundo"
print(type(mensaje))
###Output
<class 'str'>
<class 'str'>
###Markdown
Concatenating strings
###Code
mensaje += '\nBienvenidos'
print(mensaje)
###Output
Hola mundo
Bienvenidos
###Markdown
Replicating strings
###Code
mensaje = mensaje + '\n'*3 + 'Hello world '*2
print(mensaje)
###Output
Hola mundo
Bienvenidos
Hello world Hello world
###Markdown
Getting input from the user
###Code
x = input()
print(x)
type(x)
###Output
56
56
###Markdown
String format
###Code
mensaje = 'El nombre de la ciudad es {} del pais {}'.format('Bogota', 'Colombia')
mensaje
###Output
_____no_output_____
###Markdown
Collection types--- Tuple Empty tuple
###Code
mi_tuple = ()
mi_tuple
###Output
_____no_output_____
###Markdown
Multiple types of values can be stored in a tuple
###Code
mi_tuple = (1, 2, 'hola')
mi_tuple
###Output
_____no_output_____
###Markdown
The [ ] brackets are used to access the elements of a tuple, starting from element 0 *onward*
###Code
numero1 = mi_tuple[0]
numero1
###Output
_____no_output_____
###Markdown
Accessing multiple elements
###Code
print(mi_tuple[0:3:2])
###Output
_____no_output_____
###Markdown
List Empty list
###Code
mi_lista = []
mi_lista
###Output
_____no_output_____
###Markdown
Adding elements to a list
###Code
mi_lista.append('Hola')
mi_lista.append('Mundo')
mi_lista
###Output
_____no_output_____
###Markdown
List with 3 string elements
###Code
mi_lista = ['Andres', 'Andrea', 'Karen']
print(mi_lista)
print(len(mi_lista)) # len(list) returns the size of a list
mi_lista[0]
###Output
_____no_output_____
###Markdown
List with float and string elements
###Code
mi_lista = [4.5, 'hola']
print(type(mi_lista[0]))
print(type(mi_lista[1]))
mi_lista
###Output
_____no_output_____
###Markdown
Dictionaries
###Code
diccionario = {
"Andres": [24, 173],
"Andrea": [25, 175],
1: 123
}
diccionario['Andres']
lista = diccionario.get("Andrea")
print(lista, type(lista))
diccionario[1]
diccionario.pop(1)
diccionario['Alex'] = [21, 124]
diccionario
diccionario.clear()
diccionario
###Output
_____no_output_____
###Markdown
Control Structures--- The boolean class
###Code
mi_boolean = True
mi_boolean
mi_boolean = not(mi_boolean)
mi_boolean
booleano = "Andres" in diccionario
booleano
###Output
_____no_output_____
###Markdown
If, Else, and Elif statements
###Code
a = 3
if a < 10:
print('Menor que 10')
if a > 10:
print('Mayor que 10')
else:
print('Menor que 10')
a = float(input())
if a == 10:
print('Igual que 10')
elif a > 10:
print('Mayor que 10')
else:
print('Menor que 10')
###Output
1564
Mayor que 10
###Markdown
For The keyword in is used to iterate over each of the elements of a list
###Code
lista = [0, 1, 2, 3, 4, 5]
for i in lista:
print(i)
lista = ['Andres', 'Andrea', 'Felipe']
for i in lista:
print(i)
###Output
Andres
Andrea
Felipe
###Markdown
Using range
###Code
for i in range(0, 6, 1):
print(i)
lista1 = [1 ,2, 3, 4, 5]
lista2 = ['a', 'b', 'c', 'd', 'e']
lista3 = [1.73, 1.86, 1.84, 1.62, 1.70]
for i, j, k in zip(lista1, lista2, lista3):
print(i, j, k)
###Output
1 a 1.73
2 b 1.86
3 c 1.84
4 d 1.62
5 e 1.7
###Markdown
For-else is used to perform actions in case no "break" was executed
###Code
lista1 = [1 ,2, 3, 4, 5]
lista2 = ['a', 'b', 'c', 'd', 'e']
lista3 = [1.73, 1.86, 1.84, 1.62, 1.70]
numero = 3
for i, j, k in zip(lista1, lista2, lista3):
print(i, j, k)
if numero <= 1:
break
numero -= 1
else:
print('Todos los elementos fueron impresos')
###Output
1 a 1.73
2 b 1.86
3 c 1.84
###Markdown
While
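The code cell below only prints two strings; a minimal sketch of an actual while loop (an illustrative example, not part of the original notebook) would be:

```python
# count down from 3 to 1 using a while loop
counter = 3
while counter > 0:
    print(counter)
    counter -= 1
```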
###Code
print('hola')
print('funciona?')
###Output
funciona?
###Markdown
Debugging in Jupyter Notebook Debugging after an error
###Code
a = 14
b = 5
b -= (a + 1)/3
Division = a / b
Division
%debug
###Output
_____no_output_____
###Markdown
Debugging and breakpoints To run the code step by step we create a Code_debug function and use the IPython debugger library
###Code
def Code_debug():
from IPython.core.debugger import set_trace
set_trace() # Se crea un breakpoint
a = 14
b = 5
b -= (a + 1)/3
Division = a / b
Code_debug()
###Output
_____no_output_____
###Markdown
Debugging functions
###Code
from IPython.core.debugger import set_trace
def Funcion1(a=1):
set_trace()
b = a ** 10
c = a / b
return c
Funcion1()
###Output
_____no_output_____
###Markdown
Numpy and Sympy Libraries Functions
###Code
import numpy as np
def f(x):
return np.sqrt(x + 2)
x = np.array([-2, -1, 0, 2, 4, 6]) # Creating the vector of x values
y = f(x)
list(zip(x, y))
###Output
_____no_output_____
###Markdown
Derivatives
###Code
from sympy import Derivative, diff, simplify, Symbol
x = Symbol('x') # Creating the symbol x.
fx = (2*x + 1)*(x**3 + 2)
dx = Derivative(fx, x).doit()
dx
# simplifying the results
simplify(dx)
# Second-order derivative via the 3rd argument.
Derivative(fx, x, 2).doit()
# Computing the derivative of (3x + 1) / (2x)
fx = (3*x + 1) / (2*x)
dx = Derivative(fx, x).doit()
simplify(dx)
# the diff function gives the result directly
simplify(diff(fx, x))
# with the subs method we substitute the value of x
# to obtain the numeric result, e.g. x = 1.
diff(fx, x).subs(x, 1)
###Output
_____no_output_____
###Markdown
Integrals
###Code
from sympy import Integral, integrate
fx = x**3 - 6*x
dx = Integral(fx, x).doit()
dx
# the integrate function gives the same result
integrate(fx, x)
# Computing the definite integral over [0, 3]
Integral(fx, (x, 0, 3)).doit()
###Output
_____no_output_____ |
cp4s-notebooks/udi-examples/udi_clustering_processes.ipynb | ###Markdown
Load CP4S Data
###Code
!pip install matplotlib
!pip install sklearn
!pip install git+https://github.com/IBM/ibm-cp4s-client.git
from cp4s.client import CP4S
from os import environ as env
ac = CP4S(url=env['CP4S_API_ENDPOINT'],
username=env['CP4S_APIKEY_USERNAME'],
password=env['CP4S_APIKEY_PASSWORD'])
mdf = ac.search_df(
query="[ipv4-addr:value = '127.0.0.1']",
configs="all")
###Output
_____no_output_____
###Markdown
Interactive analysis
###Code
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.dates as md
from matplotlib import pyplot as plt
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from datetime import datetime
from sklearn.ensemble import IsolationForest
# method to extract child count
def getChildCount(row):
value=0
for x in new_df.index:
if row['process_pid']==new_df['process_parent_pid'][x]:
value=value+1
return value
# drop and rename
File1=mdf.drop(columns=['domain_name','process_binary_name','process_creator_user_ref','process_opened_connection_binary_hashes_md5','process_opened_connection_binary_name','process_opened_connection_command_line','process_opened_connection_created','process_opened_connection_creator_user_ref', 'process_opened_connection_name','process_opened_connection_opened_connection_','process_opened_connection_parent_name','process_opened_connection_parent_pid', 'process_opened_connection_pid','process_opened_connection_src_addr','process_parent_binary_hashes_md5', 'process_parent_binary_name'])
new_df=File1.rename(columns={'process_creator_user_user_id':'proc_username','process_opened_connection_count':'proc_netconn_count','process_parent_name':'parent_name','user_account_user_id':'proc_hostname','process_binary_hashes_md5':'proc_md5','process_command_line':'proc_cmdline'})
# add child count and duration
new_df['proc_child_count'] = new_df.apply(getChildCount, axis=1)
new_df['duration']=(pd.to_datetime(new_df['last_observed']))-(pd.to_datetime(new_df['first_observed']))
# drop more
new_df=new_df.drop(columns=['created_by_ref','first_observed','id','last_observed','network_traffic_src_addr','process_created','tod','cmd_len', 'network_traffic_dst_addr' ,'process_parent_pid', 'process_pid' ,'proc_hostname','process_opened_connection_dst_addr'])
# create dictionary to store count of unique txts in each column
def CreateCountDict():
FinalDict={}
cols=['proc_username','proc_cmdline','proc_md5','parent_name','proc_child_count','proc_netconn_count','process_name']
for x in cols:
dict1=(pd.DataFrame(new_df[x].value_counts())).to_dict()
FinalDict.update(dict1)
return FinalDict
# get the desired representation of data
def CountNormRepresntation(ProcessData):
ProcessDataC=ProcessData.copy(deep=False)
totalLength=len(ProcessDataC.index)
cols=['proc_username','proc_cmdline','proc_md5','parent_name','proc_child_count','proc_netconn_count','process_name']
for x in cols:
y=ProcessDataC[x].unique()
for i in y:
ProcessDataC[x]=ProcessDataC[x].replace(i,FinalDict_x[x][i])
return ProcessDataC
# replace unknown by label Unk
new_df=new_df.fillna("UnK")
# create dictionary and final data form
FinalDict_x=CreateCountDict()
ProcessDataC=CountNormRepresntation(new_df)
# normalize the data
cols_to_norm = ['proc_username','proc_cmdline','proc_md5','parent_name','process_name','proc_netconn_count','proc_child_count']
ProcessDataC[cols_to_norm] = ProcessDataC[cols_to_norm].apply(lambda x: (x - x.mean()) / (x.std()))
# remove the cols are not adding any info as same value
ProcessDataC=ProcessDataC.drop(columns=['proc_netconn_count','proc_child_count','duration'])
# pca for visualisation
pca = PCA(n_components=2)
datanew = pca.fit_transform(ProcessDataC)
# standardize these 2 new features
min_max_scaler = preprocessing.StandardScaler()
np_scaled = min_max_scaler.fit_transform(datanew)
datanew = pd.DataFrame(np_scaled)
# elbow method to decide on number of clusters
from sklearn.cluster import KMeans
n_cluster = range(1, 11)
kmeans = [KMeans(n_clusters=i).fit(datanew) for i in n_cluster]
scores = [kmeans[i].score(datanew) for i in range(len(kmeans))]
fig, ax = plt.subplots()
ax.plot(n_cluster, scores)
plt.show()
ProcessDataC['cluster'] = kmeans[1].predict(datanew)
print(ProcessDataC['cluster'].value_counts())
ProcessDataC['principal_feature1'] = datanew[0]
ProcessDataC['principal_feature2'] = datanew[1]
# plot the clusters
fig, ax = plt.subplots()
colors = {0:'red', 1:'blue'}
ax.scatter(ProcessDataC['principal_feature1'],ProcessDataC['principal_feature2'],c=ProcessDataC["cluster"].apply(lambda x: colors[x]))
plt.show()
x=new_df.loc[ProcessDataC["cluster"] == 0,:]
x['proc_cmdline'].unique()
#in cluster 0
x
#in cluster 1
x=new_df.loc[ProcessDataC["cluster"] == 1,:]
x
###Output
_____no_output_____ |
ICA/ICA4_MachineLearning (1) (1).ipynb | ###Markdown
___Enter Team Member Names here (*double click to edit*):- Name 1:Nancy Le- Name 2:Andrew Whigham- Name 3:Suleiman Hijazeen- Name 4:Thomas Adams___ In Class Assignment FourIn the following assignment you will be asked to fill in python code and derivations for a number of different problems. Please read all instructions carefully and turn in the rendered notebook (or HTML of the rendered notebook) before the end of class (or right after class). The initial portion of this notebook is given before class and the remainder is given during class. Please answer the initial questions before class, to the best of your ability. Once class has started you may rework your answers as a team for the initial part of the assignment. Contents* Loading KDDCup Data* KDDCup Evaluation and Cross Validation* More Cross Validation* Statistical Comparison**Before coming to class**, please make sure you have the latest version of `scikit-learn`. This notebook was created for version 0.18 and higher. ________________________________________________________________________________________________________Back to Top Loading KDDCup DataPlease run the following code to read in the "KDD Cup" dataset from sklearn's data loading module. It consists of examples of different simulated attacks for the 1998 DARPA Intrusion Detection System (IDS). This will load the data into the variable `ds`. `ds` is a `bunch` object with fields like `ds.data` and `ds.target`. The field `ds.data` is a numpy matrix of the continuous features in the dataset. **The object is not a pandas dataframe. It is a numpy matrix.** Each row is a set of observed instances, each column is a different feature. It also has a field called `ds.target` that is an integer value we are trying to predict (i.e., a specific integer represents a specific person). Each entry in `ds.target` is a label for each row of the `ds.data` matrix.
###Code
# fetch the dataset
from sklearn.datasets import fetch_kddcup99
from sklearn import __version__ as sklearn_version
print('Sklearn Version:',sklearn_version)
ds = fetch_kddcup99(subset='http')
import numpy as np
# get some of the specifics of the dataset
X = ds.data
y = ds.target != b'normal.'
n_samples, n_features = X.shape
n_classes = len(np.unique(y))
print("n_samples: {}".format(n_samples))
print("n_features: {}".format(n_features))
print("n_classes: {}".format(n_classes))
unique, counts = np.unique(y, return_counts=True)
np.asarray((unique, counts)).T
###Output
_____no_output_____
###Markdown
___**Question 1:** How many instances are in the binary classification problem loaded above? How many instances are in each class? **Plot a pie chart or bar chart of the number of instances in each of the classes.**
###Code
from matplotlib import pyplot as plt
%matplotlib inline
plt.style.use('ggplot')
plt.pie(counts, labels=np.unique(y), startangle=90, autopct='%.1f%%')  # counts comes from np.unique(y, return_counts=True) above
plt.show()
#=== Fill in code below========
print('Number of instances in each class:',np.asarray((unique, counts)).T)
###Output
_____no_output_____
###Markdown
Back to Top KDDCup Evaluation and Cross Validation
###Code
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold, ShuffleSplit
from sklearn.model_selection import StratifiedKFold, StratifiedShuffleSplit
from sklearn.metrics import make_scorer, accuracy_score
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.linear_model import LogisticRegression
# select model
clf = LogisticRegression()
#select cross validation
cv = KFold(n_splits=10)
# select evaluation criteria
my_scorer = make_scorer(accuracy_score)
# run model training and cross validation
per_fold_eval_criteria = cross_val_score(estimator=clf,
X=X,
y=y,
cv=cv,
scoring=my_scorer
)
plt.bar(range(len(per_fold_eval_criteria)),per_fold_eval_criteria)
plt.ylim([min(per_fold_eval_criteria)-0.01,max(per_fold_eval_criteria)])
###Output
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
###Markdown
____**Question 2** Is the code above a proper separation of training and testing sets for the given dataset (i.e., using KFold)? Why or why not? *Enter your answer here (double click)**Yes or No and why* No, there are two reasons: 1- the data is split into 10 folds without making sure that each fold has the same percentage of the two classes as the original data; 2- the classifier should be trained on any 9 folds and tested on the remaining fold. ___ **Question 3:** Is the evaluation metric chosen in the above code appropriate for the dataset (i.e., using accuracy)? Why or Why not? *Enter your answer here (double click)**Yes or No and why* No, because the prediction might have a high accuracy with a high variance, so we need to look at the accuracy variance and the bias. ___ **Exercise 1:** If the code above is not a proper separation of the train or does not use the proper evaluation criteria, fix the code in the block below to use appropriate train/test separation and appropriate evaluation criterion (criteria). **Mark changes in the code with comments.**
###Code
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold, ShuffleSplit
from sklearn.model_selection import StratifiedKFold, StratifiedShuffleSplit
from sklearn.metrics import make_scorer, accuracy_score
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.linear_model import LogisticRegression
# these imports above might help you
#=====Write your code below here=================
# select model
clf = LogisticRegression()
#select cross validation
#cv = KFold(n_splits=10)
rs=StratifiedShuffleSplit(n_splits=10, random_state=1)
# select evaluation criteria
my_scorer = make_scorer(accuracy_score)
# run model training and cross validation
per_fold_eval_criteria = cross_val_score(estimator=clf,
X=X,
y=y,
cv=rs,
scoring=my_scorer
)
plt.bar(range(len(per_fold_eval_criteria)),per_fold_eval_criteria)
plt.ylim([min(per_fold_eval_criteria)-0.01,max(per_fold_eval_criteria)])
###Output
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
###Markdown
____**Question 4**: Does the learning algorithm perform well based on the evaluation criteria? Why or why not? *Enter your answer here (double click to edit)* Yes it does, because we preserved the percentage of samples for each class in each fold. Back to Top More Cross Validation**Exercise 2:** Does the code below contain any errors in the implementation of the cross validation? If so, fix the code below so that there are no longer any errors in the cross validation. There are two errors in the code: 1- the best practice is to first scale the data, then do dimensionality reduction, and after that start our learning algorithm; 2- use StratifiedShuffleSplit to preserve the percentage of samples for each class
###Code
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
#======If there are errors, fix them below======
n_components = 1
pca = PCA(n_components=n_components)
Xpca = pca.fit_transform(X)
clf = Pipeline([('scl',StandardScaler()),
('clf',LogisticRegression())])
per_fold_eval_criteria = cross_val_score(estimator=clf,
X=Xpca,
y=y,
cv=cv,
scoring=my_scorer
)
plt.bar(range(len(per_fold_eval_criteria)),per_fold_eval_criteria)
plt.ylim([min(per_fold_eval_criteria)-0.01,max(per_fold_eval_criteria)])
# =====fixed code======
# write the fixed code (if needed) below
clf = Pipeline([('scl',StandardScaler()),
('pca',PCA(n_components=n_components)),
('clf',LogisticRegression())])
per_fold_eval_criteria = cross_val_score(estimator=clf,
X=X,
y=y,
cv=rs,
scoring=my_scorer
)
plt.bar(range(len(per_fold_eval_criteria)),per_fold_eval_criteria)
plt.ylim([min(per_fold_eval_criteria)-0.01,max(per_fold_eval_criteria)])
###Output
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\utils\validation.py:595: DataConversionWarning: Data with input dtype object was converted to float64 by StandardScaler.
warnings.warn(msg, DataConversionWarning)
###Markdown
___ Circumstances Change For this question, the circumstances for the DARPA KDD99 cup are changed in the following way:- When the model for detecting attacks is deployed, we now think that it will often need to be retrained because new attacks will emerge. - DARPA anticipates that there will be a handful of different style attacks on their systems that have never been seen before. To detect these new attacks, they are employing programmers and analysts to find them manually every day. - DARPA believes the perpetrators of these new attacks are more sophisticated, so finding the new attacks will take priority over detecting the older, known attacks. - DARPA wants to use your learning algorithm for detecting only these new attacks. However, they tell you that the amount of training data for the new attacks will be extremely small. That is, the analysts can only identify a handful of new style attacks each day (so you will only have about 3-5 examples of the attacks for training).- **DARPA asks you: Do you think it's a good idea to retrain your model each day to find these new attacks?** They need an answer in the next 20 minutes. **Question 5**: How would you change the method of cross validation to answer this question from DARPA? That is, how can you change your cross validation method to better mirror how your system will be used and deployed by DARPA when there are only 3-5 attack examples available for training? *Note: you do not have access to these new training examples. You need to change your cross validation method with the existing data to answer this question.* *Enter your answer here (double click)**We could... and why*___
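One possible sketch of such a validation scheme (an assumption about how it could be set up, not the assignment's official answer): repeatedly train on only a handful of attack rows plus some normal rows, mirroring the 3-5 labeled attacks the analysts could provide, and evaluate on everything held out.

```python
# Hypothetical sketch: each split trains on ~5 attack examples, mirroring deployment
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score

rng = np.random.RandomState(0)
pos_idx = np.where(y)[0]      # attack rows
neg_idx = np.where(~y)[0]     # normal rows

scores = []
for _ in range(20):           # repeat to estimate the variance
    train_pos = rng.choice(pos_idx, size=5, replace=False)
    train_neg = rng.choice(neg_idx, size=500, replace=False)
    train = np.concatenate([train_pos, train_neg])
    test = np.setdiff1d(np.arange(len(y)), train)
    model = LogisticRegression(solver='lbfgs').fit(X[train], y[train])
    scores.append(f1_score(y[test], model.predict(X[test])))
print('mean F1:', np.mean(scores), 'std:', np.std(scores))
```

The spread of the scores across the repeats then indicates how reliable a model trained on so few attacks would be.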
###Code
#plotting function for use in next question
# takes input 'test_scores', axis labels, and an x-axis label
def plot_filled(test_scores,train_x_axis, xlabel=''):
test_mean = np.percentile(test_scores,50, axis=1)
test_max = np.percentile(test_scores,95, axis=1)
test_min = np.percentile(test_scores,5, axis=1)
plt.plot(train_x_axis, test_mean,
color='blue', linestyle='--',
marker='s', markersize=5,
label='validation set')
plt.fill_between(train_x_axis,
test_min,
test_max,
alpha=0.15, color='blue')
plt.grid(True)
plt.xlabel(xlabel)
plt.ylabel('Evaluation Criterion')
plt.legend(loc='lower right')
plt.tight_layout()
###Output
_____no_output_____
###Markdown
___DARPA is also concerned about how much training data they will need from the analysts in order to have a high performing model. They would like to use the current dataset to help answer that question. The code below is written for you to help answer DARPA's question about how many examples will be needed for training. Examine the code and the output that is already run for you, then answer the following question:**Question 6**: Based on the analysis graphed below, how many positive examples are required to have a good tradeoff between bias and variance for the given evaluation criteria? Why? *Note: the x-axis really is a percentage, so the value 0.1 is actually 0.1%.*
###Code
clf = LogisticRegression()
test_scores = []
train_sizes=np.linspace(5e-4,5e-3,10)
for size in train_sizes:
cv = StratifiedShuffleSplit(n_splits=100,
train_size = size,
test_size = 1-size,
)
test_scores.append(cross_val_score(estimator=clf,X=X,y=y,cv=cv,scoring=my_scorer))
plot_filled(np.array(test_scores), train_sizes*100, 'Percentage training data (%)')
###Output
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
###Markdown
___*Enter your answer here (double click)*It seems that .3% (177 samples) is the best point, because the accuracy is at its maximum and the variance is at its lowest there. However, as shown above, after .2% the variance did not change at all, so if we are interested in minimizing the data size we can say that .2% is a good size in terms of accuracy, since we only gained a .002% increase in accuracy when going from a .2% to a .3% training size.___ ___Back to Top Statistical ComparisonNow let's create a few different models and see if any of them have statistically better performance. We create three different classifiers below to compare to one another. For creating different training and testing splits, we use stratified shuffle splits on the dataset.
###Code
clf1 = LogisticRegression(C=100)
clf2 = LogisticRegression(C=1)
clf3 = LogisticRegression(C=0.1)
train_size = 0.003 # small training size
cv = StratifiedShuffleSplit(n_splits=10,train_size=train_size,test_size=1-train_size)
evals1 = cross_val_score(estimator=clf1,X=X,y=y,scoring=my_scorer,cv=cv)
evals2 = cross_val_score(estimator=clf2,X=X,y=y,scoring=my_scorer,cv=cv)
evals3 = cross_val_score(estimator=clf3,X=X,y=y,scoring=my_scorer,cv=cv)
###Output
C:\Users\sulem\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\linear_model\logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
FutureWarning)
###Markdown
**Question 7**: Given the code above, what statistical test is more appropriate for selecting confidence intervals, and **why**? Your options are:- **A**: approximating the evaluation criterion as a binomial distribution and bounding by the variance (the first option we used in the flipped lecture video)- **B**: approximating the bounds using the folds of the cross validation to get mean and variance (the second option we used in the flipped lecture video)- **C**: Either are acceptable statistical tests for obtaining confidence intervals The reason I think (B) is correct is that the three classifiers are not independent but are correlated with each other: since they all use the same data, if classifier 1 did slightly better on a given split then classifier 2 would also do slightly better, and the same goes for classifier 3. ___**Final Exercise:** With 95% confidence, perform the statistical test that you selected above. Is any model or set of models statistically the best performer(s)? Or can we not say if the models are different with greater than 95% confidence?If you chose option A, use a multiplier of Z=1.96. The number of instances used in testing can be calculated from the variable `train_size`.If you chose option B, use a multiplier of t=2.26 and k=10.
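As a quick reference for option B (my own notation, not part of the assignment text): with the per-fold error differences $d_i = e^{(1)}_i - e^{(2)}_i$, the 95% interval used below is

```latex
\bar{d} \;\pm\; t\,\frac{s_d}{\sqrt{k}}, \qquad t = 2.26,\quad k = 10,
```

where $s_d$ is the standard deviation of the $d_i$; if this interval does not contain zero, the two classifiers differ at roughly 95% confidence.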
###Code
#===================================================
# Enter your code below
clf1 = LogisticRegression(C=100, solver='lbfgs')
clf2 = LogisticRegression(C=1, solver='lbfgs')
clf3 = LogisticRegression(C=0.1, solver='lbfgs')
train_size = 0.003 # small training size
cv = StratifiedShuffleSplit(n_splits=10,train_size=train_size,test_size=1-train_size)
evals1 = cross_val_score(estimator=clf1,X=X,y=y,scoring=my_scorer,cv=cv)
evals2 = cross_val_score(estimator=clf2,X=X,y=y,scoring=my_scorer,cv=cv)
evals3 = cross_val_score(estimator=clf3,X=X,y=y,scoring=my_scorer,cv=cv)
test_scores1 = []
test_scores1.append(evals1)
test_scores2 = []
test_scores2.append(evals2)
test_scores3 = []
test_scores3.append(evals3)
e1=1-np.array(test_scores1)
e2=1-np.array(test_scores2)
e3=1-np.array(test_scores3)
t=2.26/np.sqrt(10)
# comparing E1 with E2----------------------
e1e2=e1[0]-e2[0]
e1e2_m=np.mean(e1e2)
std12=np.std(e1e2)
conf12=t*std12
print(e1e2_m,conf12)
print('since the mean of the difference is positive and there is no zero crossing, this means that classifier 2 is better than classifier 1')
print('')
# comparing E2 with E3-------------------------
e2e3=e2[0]-e3[0]
e2e3_m=np.mean(e2e3)
std23=np.std(e2e3)
conf23=t*std23
print(e2e3_m,conf23)
print('since the mean of the difference is negative and there is no zero crossing, this means that classifier 2 is better than classifier 3')
print(' ')
print('Model 2 has statistically the best performance with 95% confidence')
#===================================================
###Output
2.5619566516932313e-05 1.910609793227356e-05
since the mean of the difference is positive and there is no zero crossing, this means that classifier 2 is better than classifier 1
-0.00015713334130380564 1.910609793227356e-05
since the mean of the difference is negative and there is no zero crossing, this means that classifier 2 is better than classifier 3
Model 2 has statistically the best performance with 95% confidence
|
ensemble/bagging_random_forest.ipynb | ###Markdown
Bagging Classifier with Titanic Dataset
###Code
# load the already pre-processed Titanic training dataset
import numpy as np

X = np.load("tatanic_X_train.npy")
y = np.load("tatanic_y_train.npy")
X[0]
y[:10]
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier

clf2 = DecisionTreeClassifier(random_state=1)
eclf = BaggingClassifier(clf2, oob_score=True)
from sklearn.model_selection import cross_val_score
cross_val_score(eclf, X, y, cv=5).mean()
params = {
"n_estimators": [10, 20, 30, 40, 50, 55, 100],
"max_samples": [0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
}
from sklearn.model_selection import GridSearchCV
grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
grid = grid.fit(X, y)
grid.best_score_
grid.best_params_
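# Sketch added for illustration (not in the original notebook): since the
# BaggingClassifier above was built with oob_score=True, fitting it on the full
# data also gives an out-of-bag accuracy estimate via scikit-learn's
# oob_score_ attribute.
eclf.fit(X, y)
print(eclf.oob_score_)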
###Output
_____no_output_____
###Markdown
Random Forest
###Code
from sklearn.ensemble import RandomForestClassifier
eclf = RandomForestClassifier(n_estimators=100, max_features=int(len(X[0])/3), n_jobs=7, oob_score=True)
cross_val_score(eclf, X, y, cv=5).mean()
params = {
"n_estimators": [10, 20, 30, 40, 50, 55, 100],
"max_features": [1,2,3,4,5,6,7, 10, 15, 20, 25]
}
grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
grid = grid.fit(X, y)
grid.best_score_
grid.best_params_
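# Sketch added for illustration (not in the original notebook): GridSearchCV
# refits the best model on the full data by default, so best_estimator_ is a
# fitted RandomForestClassifier and its per-feature importances can be inspected.
print(grid.best_estimator_.feature_importances_)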
###Output
_____no_output_____ |
docs/build/doctrees/nbsphinx/sample/sample3.ipynb | ###Markdown
You can also write in Jupyter
###Code
print(list(range(10)))
###Output
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
|
lessons/05-Functions/WMC3-Function-Scope.ipynb | ###Markdown
Watch Me Code 3: Understanding Function Variable Scope
###Code
def area_incorrect():
area = length * width # these are in the global scope!!!!
return area
## This is a bad idea
length = 10
width = 5
area = area_incorrect()
print ("length=",length, "width=",width, "area=",area)
## Always pass in arguments from the global scope!
def area_correct(length, width):
area = length * width # these are local copies from the global scope...
length = 0
width = 0 # what happens here, stays here!
return area
# in the global scope
length = 5
width = 10
area = area_correct(length,width)
print ("length=",length, "width=",width, "area=",area)
###Output
length= 5 width= 10 area= 50
|
notebook/03 query SQL su db sqlite/sqlite.ipynb | ###Markdown
How to conveniently write SQL commands in a notebookThis uses the component that provides a dedicated `magic` command.
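If the magic is not available yet, it typically comes from the `ipython-sql` package (an assumption about the setup used here), which can be installed directly from the notebook:

```python
# assuming the %sql / %%sql magics come from the ipython-sql package
!pip install ipython-sql
```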
###Code
import pandas as pd
%load_ext sql
###Output
_____no_output_____
###Markdown
Connection
###Code
%%sql
sqlite:///db.sqlite
###Output
_____no_output_____
###Markdown
Query
###Code
%%sql
select * from tabella
%%sql
SELECT load_extension('mod_spatialite');
###Output
* sqlite:///db.sqlite
(sqlite3.OperationalError) not authorized [SQL: "SELECT load_extension('mod_spatialite');"] (Background on this error at: http://sqlalche.me/e/e3q8)
###Markdown
Conversion to a dataframe
###Code
# convert the output to a dataframe
result = %sql select * from tabella
dataframe = result.DataFrame()
dataframe
###Output
_____no_output_____
###Markdown
Connecting to a sqlite db and converting to a dataframe
###Code
import sqlite3
with sqlite3.connect('db.sqlite') as conn:
dataframe = pd.io.sql.read_sql("""
SELECT *
FROM tabella;""", conn)
###Output
_____no_output_____
###Markdown
Enabling the spatialite module in sqlite
###Code
conna=sqlite3.connect('db.sqlite')
conna.enable_load_extension(True)
conna.load_extension('mod_spatialite')
conna.execute('select InitSpatialMetadata(1)')
conna.execute("SELECT AddGeometryColumn('tabella', 'geom', 4326, 'POINT', 2);")
conna.execute('''
UPDATE tabella SET
geom = GeomFromText(('POINT(13 38)'),4326);
''')
conna.commit()
conna.close()
%%sql
select * from tabella
###Output
* sqlite:///db.sqlite
Done.
|
notebooks/Analyzing Firmware with Centrifuge Example 2.ipynb | ###Markdown
AutomationDirect P3-530 PLCThis device is a programmable logic controller. The firmware can be found here:https://ftp.automationdirect.com/firmware/FirmwareOnly/P3-530_1.2.7.39.adfwWhen unpacked with Binwalk, there are several binary files that can be analyzed. The file analyzed here is `P3_530.bin`. When a signature scan of this file is run, the output is as follows:```$ binwalk P3_530.bin DECIMAL HEXADECIMAL DESCRIPTION--------------------------------------------------------------------------------1140488 0x116708 Copyright string: "Copyright MGC 2003 - Nucleus PLUS - MPC824x Metrowerks v. 1.14"1141840 0x116C50 Base64 standard index table1143036 0x1170FC HTML document header1157304 0x11A8B8 DES SP1, big endian1157560 0x11A9B8 DES SP2, big endian1160064 0x11B380 SHA256 hash constants, big endian1225624 0x12B398 CRC32 polynomial table, big endian1300760 0x13D918 HTML document header1301521 0x13DC11 HTML document footer1301644 0x13DC8C HTML document header1301793 0x13DD21 HTML document footer1301804 0x13DD2C HTML document header1301892 0x13DD84 HTML document footer1313328 0x140A30 HTML document header1314231 0x140DB7 HTML document footer1314532 0x140EE4 HTML document header1315095 0x141117 HTML document footer1315104 0x141120 HTML document header1315182 0x14116E HTML document footer1315192 0x141178 HTML document header1315279 0x1411CF HTML document footer1319268 0x142164 PEM certificate1319324 0x14219C PEM certificate request1319512 0x142258 PEM RSA private key1319708 0x14231C PEM EC private key1319776 0x142360 PEM DSA private key```Time to use Centrifuge.
###Code
import sys
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = [16, 9]
sys.path[0:0] = ['.', '..']
from centrifuge.binfile import BinFile
file_handle = open("_P3-530_1.2.7.39.adfw.extracted/P3_530.bin", "rb")
plc_bin = BinFile(file_handle)
plc_bin.slice_file()
###Output
_____no_output_____
###Markdown
We can look at a couple of plots to get our bearings:
###Code
plc_bin.plot_file_entropy()
plc_bin.plot_file_feature("zeroes", "black")
###Output
_____no_output_____
###Markdown
There seem to be 2 large discrete areas with a few smaller ones. Let's see what can be found with clustering:
###Code
plc_bin.cluster_DBSCAN(epsilon=0.7,
minimum_samples=10,
find_optimal_epsilon=True)
plc_bin.plot_DBSCAN_results()
results = plc_bin.identify_cluster_data_types()
###Output
Searching for machine code
--------------------------------------------------------------------
[+] Checking Cluster 1 for possible match
[+] Closely matching CPU architecture reference(s) found for Cluster 1
[+] Sending sample to https://isadetect.com/
[+] response:
{
"prediction": {
"architecture": "powerpc",
"endianness": "big",
"wordsize": 32
},
"prediction_probability": 0.58
}
Searching for utf8-english data
-------------------------------------------------------------------
[+] UTF-8 (english) detected in Cluster 2
Wasserstein distance to reference: 9.522864483173079
Searching for high entropy data
-------------------------------------------------------------------
[X] No high entropy data cluster detected.
###Markdown
Turns out that there is PowerPC machine code present. For fun, we can take a look at the UTF-8 data in cluster 2:
###Code
_, cluster_byte_values = plc_bin.extract_clusters()
bytes(cluster_byte_values[2])[:1000]
###Output
_____no_output_____
###Markdown
Full results:
###Code
results
###Output
_____no_output_____ |
EN/p8_matplotlib.ipynb | ###Markdown
Plotting Using Package ``matplotlib``In Python, one can use package ``matplotlib`` to handle most plotting tasks. Its interface is very similar to plotting in Matlab. For an instance, this is the code for plotting a simple sine function:
###Code
import numpy as np
import matplotlib.pyplot as plt

# We assign values from a certain range to x.
x = np.arange(0, 8, 0.1)
# We compute sin(x) for each x.
y = np.sin(x)
# We plot the resulting relationship.
plt.plot(x, y)
###Output
_____no_output_____
###Markdown
Axes can be labelled using ``plt.xlabel`` and ``plt.ylabel``.
###Code
x = np.arange(0, 8, 0.1)
y = np.sin(x)
plt.plot(x, y)
# new code:
plt.xlabel("x")
plt.ylabel("y")
plt.legend(['sin(x)'])
###Output
_____no_output_____
###Markdown
We can also do other things, such as adding a grid using function ``plt.grid``:
###Code
x = np.arange(0, 8, 0.1)
y = np.sin(x)
plt.plot(x, y)
plt.xlabel("x")
plt.ylabel("y")
plt.legend(['sin(x)'])
# new code:
plt.grid(ls='--')
###Output
_____no_output_____
###Markdown
We can also specify other properties of the plot when calling ``plt.plot``, e.g. the line color (such as 'r' for red) or line style (such as '--' for a dashed line):
###Code
x = np.arange(0, 8, 0.1)
y = np.sin(x)
plt.plot(x, y)
plt.xlabel("x")
plt.ylabel("y")
plt.legend(['sin(x)'])
plt.grid(ls='--')
# new code:
z = np.exp(0.1*x)
plt.plot(x, z, 'r--')
###Output
_____no_output_____
###Markdown
We update the legend – expression ``np.exp`` can be written using LaTeX notation, in which case it displays in the proper mathematical format:
###Code
x = np.arange(0, 8, 0.1)
y = np.sin(x)
plt.plot(x, y)
plt.xlabel("x")
plt.ylabel("y")
plt.legend(['sin(x)'])
plt.grid(ls='--')
z = np.exp(0.1*x)
plt.plot(x, z, 'r--')
# new code:
plt.legend(['$\sin(x)$', '$e^{0.1x}$'])
###Output
_____no_output_____
###Markdown
Further Functionality

Package ``matplotlib`` also contains a huge number of other functions and features – it really is very flexible. More information on these topics can be found in its [documentation](http://matplotlib.org/index.html). It is also possible to study the following [simple tutorial](http://matplotlib.org/users/pyplot_tutorial.html). There is also a [gallery of plots](http://matplotlib.org/gallery.html) for inspiration.
###Code
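# A minimal illustration (not from the original text) of one of the many extra features:
# drawing two plots side by side in a single figure with plt.subplots.
# It only reuses the x, y and z arrays defined in the cells above.
fig, axes = plt.subplots(1, 2, figsize=(8, 3))
axes[0].plot(x, y)
axes[0].set_title('$\sin(x)$')
axes[0].grid(ls='--')
axes[1].plot(x, z, 'r--')
axes[1].set_title('$e^{0.1x}$')
fig.tight_layout()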
###Output
_____no_output_____ |
notebooks/01m_Phase_2_ML_FullFeatureSet-BestInitialScaling_ak.ipynb | ###Markdown
**This script takes the best models and features from Phase 1 of the model selection process and undertakes a deeper dive in reviewing/selecting the optimal models. It also adapts wrangling steps to a Pipeline.**

To-Do:

- Use balanced sample of 50K observations DONE
- Align features across scripts DONE for outcome variable - PENDING for features (scale, reformat, normalize)
- Substitute dummies for label encoding, create a pipeline - PENDING
- Add CV; randomize splits - PENDING
- Review validation curve, precision versus recall
- Robustness checks for county_type versus MSA binary
- Holdout sets
- Review learning curve
- Grid search
- Analyses on all years
###Code
%matplotlib inline
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
###Output
_____no_output_____
###Markdown
**Import the 2017 sample of 50,000 observations balanced for 1/0 Action Taken, removing two action_taken_name values.** Note the import warning: "Columns (29,30,39,40) have mixed types. Specify dtype option on import or set low_memory=False."
###Code
filepath = os.path.abspath(os.path.join( "..", "fixtures", "hmda2017sample_balanced.csv"))
DATA = pd.read_csv(filepath, low_memory=False)
###Output
_____no_output_____
###Markdown
**Drop features with missing data, locality information, and features resulting in model leakage**
###Code
DATA = DATA.drop(DATA.columns[0], axis=1)
DATA = DATA.drop(['purchaser_type_name',
'preapproval_name',
'rate_spread',
'sequence_number',
'respondent_id',
'state_name',
'state_abbr',
'county_name',
'edit_status_name',
'denial_reason_name_3',
'denial_reason_name_2',
'denial_reason_name_1',
'co_applicant_race_name_5',
'co_applicant_race_name_4',
'co_applicant_race_name_3',
'co_applicant_race_name_2',
'census_tract_number',
'application_date_indicator',
'applicant_race_name_5',
'applicant_race_name_4',
'applicant_race_name_3',
'applicant_race_name_2',
'agency_name'],
axis=1)
DATA['locality_type'] = None
cloc = DATA.columns.get_loc('locality_type')
i = 0
for x in DATA['msamd_name'].isna():
if x == True:
DATA.iat[i, cloc]= 0
else:
DATA.iat[i, cloc]= 1
i+=1
pd.crosstab(DATA['msamd_name'],DATA['locality_type'], margins=True, dropna=False) #dropna=False
DATA.describe(include='all')
print("DATA dimensions: {}".format(DATA.shape))
###Output
DATA dimensions: (50000, 25)
###Markdown
**Write the initial script using a subset of features which are already int or float, plus the target** **NOTE: discard 'file closed', call 'application approved but not accepted' a 1 or discard, discard 'application withdrawn by applicant'. Concern about overfitting if we leave too much stuff in.**
###Code
DATA['action_taken'] = DATA.action_taken_name.apply(lambda x: 1 if x in ['Loan purchased by the institution', 'Loan originated'] else 0)
pd.crosstab(DATA['action_taken_name'],DATA['action_taken'], margins=True)
###Output
_____no_output_____
###Markdown
**ACTION: look at imputing income using hud household median income rather than mean**
###Code
DATA_targ_numeric = DATA[['action_taken',
'tract_to_msamd_income',
'population',
'minority_population',
'number_of_owner_occupied_units',
'number_of_1_to_4_family_units',
'loan_amount_000s',
'hud_median_family_income',
'applicant_income_000s', 'locality_type'
]]
#resolve missing values in applicant_income_000s
DATA_targ_numeric.fillna(DATA_targ_numeric.mean(), inplace=True)
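# (Per the ACTION note above, a possible alternative, not implemented here, would be
# median-based imputation, e.g. filling applicant_income_000s with its median or with
# a value derived from hud_median_family_income, rather than the column mean.)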
DATA_targ_numeric.info()
DATA_basefile = DATA_targ_numeric
###Output
C:\Users\akx00\Anaconda3\lib\site-packages\pandas\core\generic.py:6130: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
self._update_inplace(new_data)
###Markdown
**Use one-hot encoding via Pandas, concatenate to the rest of the data frame.**

Reference link: https://stackoverflow.com/questions/37292872/how-can-i-one-hot-encode-in-python
###Code
DATA = DATA.drop(['action_taken_name', 'msamd_name'], axis=1)
DATA.columns
non_categorical_features = ['action_taken',
'tract_to_msamd_income',
'population',
'minority_population',
'number_of_owner_occupied_units',
'number_of_1_to_4_family_units',
'loan_amount_000s',
'hud_median_family_income',
'applicant_income_000s',
'locality_type'
]
for categorical_feature in list(DATA.columns):
if categorical_feature not in non_categorical_features:
DATA[categorical_feature] = DATA[categorical_feature].astype('category')
dummies = pd.get_dummies(DATA[categorical_feature], prefix=categorical_feature)
DATA_basefile = pd.concat([DATA_basefile, dummies], axis=1)
DATA_basefile.info(verbose=True)
tofilepath = os.path.abspath(os.path.join( "..", "fixtures", "hmda2017sample_alltest_state_localitytest.csv"))
DATA_basefile.to_csv(tofilepath, index=False)
# Determine the shape of the data
print("{} instances with {} features\n".format(*DATA_basefile.shape))
# Determine the frequency of each class
print(pd.crosstab(index=DATA['action_taken'], columns="count"))
###Output
50000 instances with 69 features
col_0 count
action_taken
0 25000
1 25000
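###Markdown
The pipeline step flagged as PENDING in the to-do list above is not implemented yet. A minimal sketch of what it could look like, assuming scikit-learn's ColumnTransformer and OneHotEncoder, is defined (but not fitted) below.
###Code
# Sketch only: a possible pipeline for the pending to-do item; it is defined here
# for illustration and is not fitted or used in the analysis below.
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.linear_model import LogisticRegression

numeric_features = [c for c in non_categorical_features if c != 'action_taken']
categorical_features = [c for c in DATA.columns if c not in non_categorical_features]

candidate_pipeline = Pipeline([
    ('prep', ColumnTransformer([
        ('num', StandardScaler(), numeric_features),
        ('cat', OneHotEncoder(handle_unknown='ignore'), categorical_features),
    ])),
    ('clf', LogisticRegression(max_iter=6000)),
])
###Output
_____no_output_____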
###Markdown
Classification
###Code
from sklearn import metrics
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
from sklearn.linear_model import LogisticRegressionCV, LogisticRegression, SGDClassifier
from sklearn.svm import LinearSVC, NuSVC, SVC
from sklearn import tree
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import BaggingClassifier, ExtraTreesClassifier, RandomForestClassifier
from yellowbrick.classifier import ClassificationReport
X = DATA_basefile[DATA_basefile.columns[1:]]
y = DATA_basefile['action_taken']
def score_model(X, y, model, **kwargs):
"""
Test various estimators.
"""
    #NOTE: for the capstone, add proper X_train/X_test, y_train/y_test splits here.
    #Check whether the model does cross validation internally; if not, add CV explicitly.
scores = {'precision':[], 'recall':[], 'accuracy':[], 'f1':[]}
# Instantiate the classification model and visualizer
model.fit(X, y, **kwargs)
expected = y
predicted = model.predict(X)
# Append our scores to the tracker
scores['precision'].append(metrics.precision_score(expected, predicted, average="binary"))
scores['recall'].append(metrics.recall_score(expected, predicted, average="binary"))
scores['accuracy'].append(metrics.accuracy_score(expected, predicted))
scores['f1'].append(metrics.f1_score(expected, predicted, average="binary"))
# Compute and return F1 (harmonic mean of precision and recall), Precision, Recall, Accuracy
print("{}".format(model.__class__.__name__))
print("Validation scores are as follows:\n")
print(pd.DataFrame(scores).mean())
# Try them all!
models = [
GaussianNB(),
MultinomialNB(),
BernoulliNB(),
tree.DecisionTreeClassifier(),
LinearDiscriminantAnalysis(),
LogisticRegression(solver='lbfgs', max_iter=6000),
LogisticRegressionCV(cv=3, max_iter=6000),
BaggingClassifier(),
ExtraTreesClassifier(n_estimators=100),
RandomForestClassifier(n_estimators=100),
LinearSVC(max_iter=6000)
]
for model in models:
score_model(X, y, model)
def visualize_model(X, y, estimator):
"""
Test various estimators.
"""
# Instantiate the classification model and visualizer
visualizer = ClassificationReport(
        estimator, classes=[1,0],
cmap="Blues", size=(600, 360)
)
visualizer.fit(X, y)
visualizer.score(X, y)
visualizer.poof()
for model in models:
visualize_model(X, y, model)
from yellowbrick.features import FeatureImportances
model = RandomForestClassifier(n_estimators=10)
viz = FeatureImportances(model, size=(1080, 720))
viz.fit(X, y)
# Note: the FeatureImportances visualizer is a model visualizer,
# not a feature visualizer, so it doesn't have a transform method!
viz.poof()
from yellowbrick.features import Rank2D
# Instantiate the visualizer with the Pearson ranking algorithm
visualizer = Rank2D(algorithm='pearson', size=(1080, 720))
visualizer.fit(X, y)
visualizer.transform(X)
visualizer.poof()
###Output
_____no_output_____ |
Interview Preparation Kit/13. Recursion and Backtracking/Crossword Puzzle.ipynb | ###Markdown
Crossword Puzzle

![image](https://user-images.githubusercontent.com/50367487/86590512-b5a4d900-bfca-11ea-9e30-322cd559a148.png)
###Code
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the crosswordPuzzle function below.
from copy import deepcopy
# Try to place successive characters of `words` starting at index k into cell (i, j),
# moving in direction `direct` (1 = rightwards, 2 = downwards). A fully filled grid
# is appended to the global `answer` list.
def checkWords(crossword, k, i, j, direct):
    if k == len(words):
        return answer.append(crossword)
    # ';' separates words: the current word is finished, find a slot for the next one
    if words[k] == ';':
        return findEmpty(crossword, k + 1)
    # stop if we leave the 10x10 grid or hit a blocked cell
    if not (0 <= i < 10 > j >= 0) or crossword[i][j] == "+":
        return
    # the cell must be empty ('-') or already contain the matching letter
    if crossword[i][j] != '-' and crossword[i][j] != words[k]:
        return
    copy_crossword = deepcopy(crossword)
    copy_crossword[i][j] = words[k]
    if direct == 1:
        checkWords(copy_crossword, k + 1, i, j + 1, 1)
    elif direct == 2:
        checkWords(copy_crossword, k + 1, i + 1, j, 2)
def findEmpty(crossword, k):
for i in range(10):
for j in range(10):
if crossword[i][j] != '+':
checkWords(crossword, k, i, j, 1)
checkWords(crossword, k, i, j, 2)
def crosswordPuzzle(crossword, words):
global answer
crossword = [list(s) for s in crossword]
answer = []
findEmpty(crossword, 0)
return ["".join(s) for s in answer[0]]
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
crossword = []
for _ in range(10):
crossword_item = input()
crossword.append(crossword_item)
words = input()
result = crosswordPuzzle(crossword, words)
fptr.write('\n'.join(result))
fptr.write('\n')
fptr.close()
###Output
_____no_output_____ |
data/dev/NQ_dataset_sample_local.ipynb | ###Markdown
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/stmnk/qa/blob/master/data/dev/NQ_dataset_sample-local.ipynb)

Import utilities.
###Code
import gzip
import shutil
###Output
_____no_output_____
###Markdown
Extract a `.gz` file.
###Code
with gzip.open(f'nq-dev-00.jsonl.gz', 'rb') as f_in:
with open(f'nq-dev-00.jsonl', 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
###Output
_____no_output_____
###Markdown
Read data from a `.jsonl` file.
###Code
import json
from pandas.io.json import json_normalize
import pandas as pd
def read_jsonl(input_path) -> list:
"""
Read list of objects from a JSON lines file.
"""
data = []
with open(input_path, 'r', encoding='utf-8') as f:
for line in f:
data.append(json.loads(line.rstrip('\n|\r')))
    print(f'Loaded {len(data)} records from {input_path}')
return data
###Output
_____no_output_____
###Markdown
Inspect the data frame.
###Code
nq_sample_list = read_jsonl(f'nq-dev-00.jsonl')
df = pd.DataFrame(nq_sample_list, columns=[
'example_id',
'question_text', 'question_tokens',
'document_url', 'document_html', # 'document_tokens',
'long_answer_candidates',
'annotations',
])
df
###Output
Loaded 1600 records from nq-dev-00.jsonl
###Markdown
Write data to a `.jsonl` file.
###Code
def write_jsonl(data, output_path, append=False):
"""
Write list of objects to a JSON lines file.
"""
mode = 'a+' if append else 'w'
with open(output_path, mode, encoding='utf-8') as f:
for line in data:
json_record = json.dumps(line, ensure_ascii=False)
f.write(json_record + '\n')
print('Wrote {} records to {}'.format(len(data), output_path))
###Output
_____no_output_____ |
Assignments&Projects/Clustering/Clustering ML.ipynb | ###Markdown
Background & Overview

Online Retail is a transnational data set which contains all the transactions occurring between 1 December 2010 and 9 December 2011 for a UK-based online store. The company mainly sells unique all-occasion gifts. I will be pretending to be a data scientist who was hired by a global retail marketing company to analyze consumer data of this UK store, so that the company can create enriching marketing campaigns to attract more customers into buying their gift items.

EDA (Exploratory Data Analysis) Part 1
###Code
# import required libraries for dataframe and visualization
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import datetime as dt
import altair as alt
from altair import datum
alt.data_transformers.disable_max_rows()
alt.data_transformers.enable('json')
# import required libraries for clustering
import sklearn
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from yellowbrick.cluster import KElbowVisualizer
from sklearn.decomposition import PCA
from sklearn.cluster import SpectralClustering
from sklearn.metrics import silhouette_score
###Output
_____no_output_____
###Markdown
Loading in the dataset into my dataframe variable
###Code
df = pd.read_csv(r'D:\OnlineRetail.csv',encoding= 'unicode_escape')
df.head(5)
###Output
_____no_output_____
###Markdown
Creating a more comprehensive look into the datasets columns, data-types, and more. Instead of listing out the data-types, column names, and other specifices. I decided to create a list-style display to show all information regarding the dataset. You will see that this dataset contains the following below:- 8 columns- 540k rows- 1 int64 datatype- 2 float datatypes- 5 object datatypes- 64k Column distinctive values- 13k NULL/NAN values
###Code
def initial_eda(df):
if isinstance(df, pd.DataFrame):
total_na = df.isna().sum().sum()
print("Dimensions : %d rows, %d columns" % (df.shape[0], df.shape[1]))
print("Total NA Values : %d " % (total_na))
print("%38s %10s %10s %10s" % ("Column Name", "Data Type", "#Distinct", "NA Values"))
col_name = df.columns
dtyp = df.dtypes
uniq = df.nunique()
na_val = df.isna().sum()
for i in range(len(df.columns)):
print("%38s %10s %10s %10s" % (col_name[i], dtyp[i], uniq[i], na_val[i]))
else:
print("Expect a DataFrame but got a %15s" % (type(df)))
initial_eda(df)
###Output
Dimensions : 541909 rows, 8 columns
Total NA Values : 136534
Column Name Data Type #Distinct NA Values
InvoiceNo object 25900 0
StockCode object 4070 0
Description object 4223 1454
Quantity int64 722 0
InvoiceDate object 23260 0
UnitPrice float64 1630 0
CustomerID float64 4372 135080
Country object 38 0
###Markdown
Descriptive Statistics

Since the dataset has a mixture of value types, I wanted to run a quick statistical description of the dataset to gain more insight. From reviewing the output we can see that the columns 'Quantity' and 'UnitPrice' are returned but not 'CustomerID'. This was done because CustomerID is more of an identifier. With Quantity you can see a strange minimum value of -80995.00 and maximum value of 80995.00, which tells me that this column is very likely to contain outliers; the same goes for UnitPrice, which has a minimum value of -11062.06. In the next cell I filter out the rows with non-positive Quantity and UnitPrice values, which removes these extreme entries.
###Code
df.drop(columns='CustomerID').describe().T
df= df[df.Quantity > 0]
df= df[df.UnitPrice > 0]
df.drop(columns='CustomerID').describe().T
###Output
_____no_output_____
###Markdown
Data cleaning

As shown above we have 132,220 null values in total, but when breaking it down to individual columns, the majority of the nulls come from CustomerID. After dropping the rows that contain any NULL values, the total number of entries drops to 397,884, roughly a 27% decrease from the 541,909 rows we originally had at the start.
###Code
df.isnull().sum()
df.dropna(inplace=True)
df.isnull().sum()
initial_eda(df)
###Output
Dimensions : 397884 rows, 8 columns
Total NA Values : 0
Column Name Data Type #Distinct NA Values
InvoiceNo object 18532 0
StockCode object 3665 0
Description object 3877 0
Quantity int64 301 0
InvoiceDate object 17282 0
UnitPrice float64 440 0
CustomerID float64 4338 0
Country object 37 0
###Markdown
Data attribute information

- InvoiceNo: Invoice number, 6-digit integral number, object.
- StockCode: Product code, 5-digit integral number, object.
- Description: Product name, object.
- Quantity: The quantities of each product per transaction, int64.
- InvoiceDate: Invoice date and time, object (converted to datetime below).
- UnitPrice: Unit price, float64.
- CustomerID: Customer number, float64 (cast to int64 below).
- Country: Country name, object.

Altering the columns

You will see below that I alter the existing columns in the data set as well as add new columns to strengthen my model further down in the notebook.
###Code
# change the column names
df.rename(index=str, columns={'InvoiceNo': 'invoice_num',
'StockCode' : 'stock_code',
'Description' : 'description',
'Quantity' : 'quantity',
'InvoiceDate' : 'invoice_date',
'UnitPrice' : 'unit_price',
'CustomerID' : 'cust_id',
'Country' : 'country'}, inplace=True)
# change the invoice_date format - String to Timestamp format
df['invoice_date'] = pd.to_datetime(df.invoice_date, format='%d-%m-%Y %H:%M')
df['cust_id'] = df['cust_id'].astype('int64')
# change description - UPPER case to LOWER case
df['description'] = df.description.str.lower()
# Creating a new column in the dataframe for total price of a unit and rearranging the dataframe/dataset
df['amount_spent'] = df['quantity'] * df['unit_price']
df= df[['invoice_num','invoice_date','stock_code','description','quantity','unit_price','amount_spent','cust_id','country']]
df.head()
df.insert(loc=2, column='year_month', value=df['invoice_date'].map(lambda x: 100*x.year + x.month))
df.insert(loc=3, column='month', value=df.invoice_date.dt.month)
# +1 to make Monday=1.....until Sunday=7
df.insert(loc=4, column='day', value=(df.invoice_date.dt.dayofweek)+1)
df.insert(loc=5, column='hour', value=df.invoice_date.dt.hour)
df.head()
###Output
_____no_output_____
###Markdown
Data Visualization

Number of Orders for Different Customers

As we can see here, the data is fairly balanced in terms of the number of orders made by customers, except for a few outliers. These outliers can be seen in the bars that break out of the normal trend of staying below 2k-3k orders. We can also see that the top 4 out of 5 customers with the most orders are from the United Kingdom.
###Code
orders = df.groupby(by=['cust_id','country'], as_index=False)['invoice_num'].count()
plt.subplots(figsize=(15,6))
plt.plot(orders.cust_id, orders.invoice_num)
plt.xlabel('Customers ID')
plt.ylabel('Number of Orders')
plt.title('Number of Orders for different Customers')
plt.show()
print('The TOP 5 customers with most number of orders...')
orders.sort_values(by='invoice_num', ascending=False).head()
###Output
The TOP 5 customers with most number of orders...
###Markdown
Money Spent for Different Customers

Below you can see the money spent per customer. Here too there are outliers: once the amount spent goes above roughly 50k, the value can be considered an outlier. Lastly, when comparing the top 5 customers by number of orders to the top 5 by money spent, there is only one match:

- 14911, EIRE, with 5675 orders and $143,825.06 spent
###Code
money_spent = df.groupby(by=['cust_id','country'], as_index=False)['amount_spent'].sum()
plt.subplots(figsize=(15,6))
plt.plot(money_spent.cust_id, money_spent.amount_spent)
plt.xlabel('Customers ID')
plt.ylabel('Money spent (Dollar)')
plt.title('Money Spent for different Customers')
plt.show()
print('The TOP 5 customers with highest money spent...')
money_spent.sort_values(by='amount_spent', ascending=False).head()
###Output
The TOP 5 customers with highest money spent...
###Markdown
Number of Orders for Different Months (1st Dec 2010 - 9th Dec 2011)

Below you will see the number of orders that have been placed from 1st Dec 2010 to 9th Dec 2011. In the bar graph you can see a trend occurring every 2 to 3 months where the number of orders rises and falls. If there were more to investigate, it would be to see why there is this consistent rise and fall throughout the year.
###Code
color = sns.color_palette()
ax = df.groupby('invoice_num')['year_month'].unique().value_counts().sort_index().plot(kind='bar',color=color[0],figsize=(15,6))
ax.set_xlabel('Month',fontsize=15)
ax.set_ylabel('Number of Orders',fontsize=15)
ax.set_title('Number of orders for different Months (1st Dec 2010 - 9th Dec 2011)',fontsize=15)
ax.set_xticklabels(('Dec_10','Jan_11','Feb_11','Mar_11','Apr_11','May_11','Jun_11','July_11','Aug_11','Sep_11','Oct_11','Nov_11','Dec_11'), rotation='horizontal', fontsize=13)
plt.show()
###Output
_____no_output_____
###Markdown
Number of Orders for Different Days

We can see here that the most popular day to order an item is Thursday. This can be useful: we could increase customer targeting before and during this day to push orders even further, or find out why this is the case so that we can bring up the weekend orders.
###Code
ax = df.groupby('invoice_num')['day'].unique().value_counts().sort_index().plot(kind='bar',color=color[0],figsize=(15,6))
ax.set_xlabel('Day',fontsize=15)
ax.set_ylabel('Number of Orders',fontsize=15)
ax.set_title('Number of orders for different Days',fontsize=15)
ax.set_xticklabels(('Mon','Tue','Wed','Thur','Fri','Sun'), rotation='horizontal', fontsize=15)
plt.show()
###Output
_____no_output_____
###Markdown
Number of Orders for Different Hours

From reviewing the chart below, we can see that the peak range when customers buy items is between 10am and 3pm. This will be very useful for trying to push ads throughout this timeframe.
###Code
ax = df.groupby('invoice_num')['hour'].unique().value_counts().iloc[:-1].sort_index().plot(kind='bar',color=color[0],figsize=(15,6))
ax.set_xlabel('Hour',fontsize=15)
ax.set_ylabel('Number of Orders',fontsize=15)
ax.set_title('Number of orders for different Hours',fontsize=15)
ax.set_xticklabels(range(6,21), rotation='horizontal', fontsize=15)
plt.show()
###Output
_____no_output_____
###Markdown
Money Spent by Different Countries

We can see that the country that spent the most money is the United Kingdom; however, that does not give the full picture of what the other countries spent. The charts below therefore show the total amount spent by country, with and without the UK, so that we get a full look into the amount spent by every country.
###Code
group_country_amount_spent = df.groupby('country')['amount_spent'].sum().sort_values()
# del group_country_orders['United Kingdom']
# plot total money spent by each country (with UK)
plt.subplots(figsize=(15,8))
group_country_amount_spent.plot(kind='barh', fontsize=12, color=color[0])
plt.xlabel('Money Spent (Dollar)', fontsize=12)
plt.ylabel('Country', fontsize=12)
plt.title('Money Spent by different Countries', fontsize=12)
plt.show()
group_country_amount_spent = df.groupby('country')['amount_spent'].sum().sort_values()
del group_country_amount_spent['United Kingdom']
# plot total money spent by each country (without UK)
plt.subplots(figsize=(15,8))
group_country_amount_spent.plot(kind='barh', fontsize=12, color=color[0])
plt.xlabel('Money Spent (Dollar)', fontsize=12)
plt.ylabel('Country', fontsize=12)
plt.title('Money Spent by different Countries', fontsize=12)
plt.show()
###Output
_____no_output_____
###Markdown
Feature Engineering

Since the vast majority of orders come from the United Kingdom, let us focus on this particular country as it is the biggest customer base. To get a better breakdown of what is being ordered and bought, I am going to create a few fields to capture the information:

- Number of days since last purchase
- Number of transactions
- Total amount of transactions (revenue contributed)
###Code
df.head()
###Output
_____no_output_____
###Markdown
Creating three fields that will be used for the clustering.

I am creating three new fields and assigning them to a new dataframe, which I will use for clustering the dataset:

- Recency: Number of days since last purchase
- Frequency: Number of transactions
- Monetary: Total amount of transactions (revenue contributed)
###Code
# New Attribute : Monetary
df['Amount'] = df['quantity']*df['unit_price']
amount = df.groupby('cust_id')['Amount'].sum()
amount = amount.reset_index()
amount.head()
# New Attribute : Frequency
Frequency = df.groupby('cust_id')['invoice_num'].count()
Frequency = Frequency.reset_index()
Frequency.columns = ['cust_id', 'Frequency']
Frequency.head()
AF = pd.merge(amount, Frequency, on='cust_id', how='inner')
AF.head()
# Compute the maximum date to know the last transaction date
max_date = max(df['invoice_date'])
max_date
df['Diff'] = max_date - df['invoice_date']
df.head()
DaysSince = df.groupby('cust_id')['Diff'].min().dt.days
DaysSince = DaysSince.reset_index()
DaysSince.head()
AFD = pd.merge(AF, DaysSince, on='cust_id', how='inner')
AFD.columns = ['CustomerID', 'Amount', 'Frequency', 'Recency']
AFD.head()
###Output
_____no_output_____
###Markdown
Outlier check

Since we created new fields within the dataframe, I would like to remove any outliers they may contain. To help me see them, I am using a box and whisker plot, and then removing the outlying data I find.
###Code
attributes = ['Amount','Frequency','Recency']
plt.rcParams['figure.figsize'] = [10,8]
sns.boxplot(data = AFD[attributes], orient="v", palette="Set2" ,whis=1.5,saturation=1, width=0.7)
plt.title("Outliers Variable Distribution", fontsize = 14, fontweight = 'bold')
plt.ylabel("Range", fontweight = 'bold')
plt.xlabel("Attributes", fontweight = 'bold')
# Removing (statistical) outliers for Amount
Q1 = AFD.Amount.quantile(0.05)
Q3 = AFD.Amount.quantile(0.95)
IQR = Q3 - Q1
AFD = AFD[(AFD.Amount >= Q1 - 1.5*IQR) & (AFD.Amount <= Q3 + 1.5*IQR)]
# Removing (statistical) outliers for Recency
Q1 = AFD.Recency.quantile(0.05)
Q3 = AFD.Recency.quantile(0.95)
IQR = Q3 - Q1
AFD = AFD[(AFD.Recency >= Q1 - 1.5*IQR) & (AFD.Recency <= Q3 + 1.5*IQR)]
# Removing (statistical) outliers for Frequency
Q1 = AFD.Frequency.quantile(0.05)
Q3 = AFD.Frequency.quantile(0.95)
IQR = Q3 - Q1
AFD = AFD[(AFD.Frequency >= Q1 - 1.5*IQR) & (AFD.Frequency <= Q3 + 1.5*IQR)]
###Output
_____no_output_____
###Markdown
Build the clustering models with comparison

Rescaling the Attributes

It is extremely important to rescale the variables so that they have a comparable scale. There are two common ways of rescaling:

- Min-Max scaling
- Standardisation (mean 0, sigma 1)

Here, we will use Standardisation Scaling.
###Code
AFD = AFD[['Amount', 'Frequency', 'Recency']]
# Instantiate
scaler = StandardScaler()
# fit_transform
AFD_scaled = scaler.fit_transform(AFD)
AFD_scaled.shape
AFD_scaled = pd.DataFrame(AFD_scaled)
AFD_scaled.columns = ['Amount', 'Frequency', 'Recency']
AFD_scaled.head()
###Output
_____no_output_____
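###Markdown
For comparison, the Min-Max alternative mentioned above could be applied in the same way. This cell is purely illustrative, using scikit-learn's MinMaxScaler, and its output is not used in the rest of the analysis.
###Code
# Illustrative only: Min-Max scaling of the same three attributes
from sklearn.preprocessing import MinMaxScaler
minmax_scaler = MinMaxScaler()
AFD_minmax = pd.DataFrame(minmax_scaler.fit_transform(AFD), columns=['Amount', 'Frequency', 'Recency'])
AFD_minmax.head()
###Output
_____no_output_____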
###Markdown
K-Means Clustering

K-means clustering is one of the simplest and most popular unsupervised machine learning algorithms. The algorithm works as follows:

1. First we initialize k points, called means, randomly.
2. We assign each item to its closest mean and update the mean's coordinates, which are the averages of the items assigned to that mean so far.
3. We repeat the process for a given number of iterations and, at the end, we have our clusters.

A minimal from-scratch sketch of this loop is shown after the fitting cell below.
###Code
kmeans = KMeans(n_clusters=4, max_iter=50)
kmeans.fit(AFD_scaled)
kmeans.labels_
###Output
_____no_output_____
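###Markdown
For illustration only, here is a minimal NumPy sketch of the update loop described above; the analysis itself relies on scikit-learn's `KMeans`.
###Code
# Illustrative NumPy-only sketch of the k-means update loop described above.
def simple_kmeans(X, k, n_iter=50, seed=0):
    rng = np.random.RandomState(seed)
    centers = X[rng.choice(len(X), k, replace=False)]  # 1. random initialisation
    for _ in range(n_iter):
        # 2. assign each point to its closest center
        dists = ((X[:, None, :] - centers[None, :, :]) ** 2).sum(axis=-1)
        labels = dists.argmin(axis=1)
        # 3. move each center to the mean of its assigned points
        centers = np.array([X[labels == j].mean(axis=0) for j in range(k)])
    return labels, centers

labels_demo, centers_demo = simple_kmeans(AFD_scaled.values, 3)
###Output
_____no_output_____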
###Markdown
Finding the Optimal Number of Clusters

Elbow curve to get the right number of clusters. A fundamental step for any unsupervised algorithm is to determine the optimal number of clusters into which the data may be clustered. The Elbow Method is one of the most popular methods to determine this optimal value of k.
###Code
# Elbow-curve/SSD
ssd = []
range_n_clusters = [2, 3, 4, 5, 6, 7, 8]
for num_clusters in range_n_clusters:
kmeans = KMeans(n_clusters=num_clusters, max_iter=50)
kmeans.fit(AFD_scaled)
ssd.append(kmeans.inertia_)
# plot the SSDs for each n_clusters
plt.plot(ssd)
k_means = KMeans(n_clusters= 3, init='k-means++', random_state=0).fit(AFD_scaled)
clusters = k_means.predict(AFD_scaled)
AFD['Cluster_Label'] = cl_labels_k = k_means.labels_
AFD.head()
###Output
_____no_output_____
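###Markdown
As a complementary check (illustrative only), the `silhouette_score` imported at the top of the notebook can also be compared across candidate values of k:
###Code
# Illustrative: average silhouette score for each candidate number of clusters
for num_clusters in range_n_clusters:
    km = KMeans(n_clusters=num_clusters, max_iter=50, random_state=0).fit(AFD_scaled)
    print(num_clusters, silhouette_score(AFD_scaled, km.labels_))
###Output
_____no_output_____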
###Markdown
K-Means Clustering with 3 Cluster Ids

Inference:

- Customers with Cluster Id 2 are the customers with a high amount of transactions compared to other customers.
- Customers with Cluster Id 2 are frequent buyers.
- Customers with Cluster Id 2 are not recent buyers and hence of least importance from a business point of view.
- Customers with Cluster Ids 0 and 1 are very low compared to the second cluster and will have to be targeted more.
- Customers with Cluster Ids 0 and 1 are also most likely not frequent buyers. We should investigate the reason why they are low compared to Cluster 2.
###Code
sns.boxplot(x='Cluster_Label', y='Amount', data=AFD)
sns.boxplot(x='Cluster_Label', y='Frequency', data=AFD)
sns.boxplot(x='Cluster_Label', y='Recency', data=AFD)
###Output
_____no_output_____ |
FDA _Tasks2020.ipynb | ###Markdown
Tasks 2020

Task submissions for October - December 2020

***

Task 1

Write a Python function called counts that takes a list as input and returns a dictionary of unique items in the list as keys and the number of times each item appears as values

***

* For this task I write a function `counts` which takes a list as an argument
* There are a number of possible methods for counting list items into dictionaries, such as using list comprehensions, as well as through the importation of modules (the `collections` module) amongst others. This blog post at geeksforgeeks.org details some of these [1]
* I found the simplest and most useful solution on stackoverflow.com, which I demonstrate below [2]. This method is also suggested at kite.com [3]

Function description

* Inside the `counts` function I create an empty dictionary `d`
* Next I use a `for` loop to iterate over the list, adding a list item to the dictionary on each iteration
* Inside the `for` loop I use conditional statements to determine whether a new key-value pair should be added to the dictionary. If the key exists already, its corresponding value increases by 1
* When the `for` loop ends, the dictionary containing the list items is returned
* Finally, I call the function and print the returned dictionary to the console

References

[1] geeksforgeeks.org; Python | Ways to create a dictionary of Lists; https://www.geeksforgeeks.org/python-ways-to-create-a-dictionary-of-lists/
[2] stackoverflow.com; How to add list elements into dictionary; https://stackoverflow.com/questions/30208044/how-to-add-list-elements-into-dictionary
[3] kite.com; How to append an element to a key in a dictionary with Python; https://www.kite.com/python/answers/how-to-append-an-element-to-a-key-in-a-dictionary-with-python

***

**Function code**
###Code
# create a simple list of 5 elements
l = ['A', 'A', 'B', 'C', 'A']
'''
This function takes a list as an argument and adds list items to a dictionary
'''
def counts(lst):
# empy dictionary d initialised
d = {}
# iterate through list items and add to dict d
for item in lst:
if item in d:
d[item] += 1
else:
d[item] = 1
return d
# call function and print to console
print(counts(l))
###Output
{'A': 3, 'B': 1, 'C': 1}
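###Markdown
For reference, the `collections` module mentioned above provides `Counter`, which produces the same result in one line; this is just an illustrative cross-check and the assignment solution remains the `counts` function above.
###Code
from collections import Counter
# Illustrative cross-check of counts() against the standard library Counter
dict(Counter(l))
###Output
_____no_output_____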
###Markdown
End task 1

***

Task 2

Write a Python function called `dicerolls()` that simulates rolling dice. Your function should take two parameters: the number of dice *k* and the number of times to roll the dice *n*. The function should simulate randomly rolling *k* dice *n* times, keeping track of each total face value. It should then return a dictionary with the number of times each possible total face value occurred.

***

* As requested by the task, the function takes two parameters: k (no. of dice) and n (no. of rolls).
* I have used nested for loops to carry out the simulation. I have based the algorithm used on useful information about nested loops at w3schools.com [1].
* The outer loop simulates the number of times the dice are rolled while the inner loop simulates the number of dice.
* To simulate a random roll of a single die, I use the `integers()` function from the `numpy.random` package.
* On every iteration of the outer loop (n dice rolls), the inner loop runs k times (no. of dice).
* The results of each iteration of the inner loop are appended to list l (initialised as an empty list within the function).
* I use the function from Task 1 (above) to add each list item to dictionary d (initialised as an empty dictionary within the function).
* The function returns a sorted dictionary. I found the code for this on a discussion on stackoverflow.com [2].
* I call `dicerolls()` 4 times: first with 2 dice, then 3, 4 and 5. The dice are rolled 100,000 times on each function call.

Plotting the output

* I plot the output of each function call on a separate bar chart. To plot a dictionary on a bar chart, I used code found on a stackoverflow.com discussion [3].
* I used the matplotlib documentation to plot 4 subplots on a single figure [4].
* From observation of the bar charts, it is clear that the results are normally distributed, with the curve becoming increasingly rounded the more dice are thrown.

References

[1] w3schools; Python Nested Loops; https://www.w3schools.com/python/gloss_python_for_nested.asp
[2] stackoverflow.com; How can I sort a dictionary by key; https://stackoverflow.com/questions/9001509/how-can-i-sort-a-dictionary-by-key
[3] stackoverflow.com; Plot a histogram from a dictionary; https://stackoverflow.com/questions/21195179/plot-a-histogram-from-a-dictionary
[4] Pyplot tutorial; intro to pyplot; https://matplotlib.org/tutorials/introductory/pyplot.html

Build function and run simulation
###Code
# import default_rng for random number generation, matplotlib.pyplot for visualisation
from numpy.random import default_rng
import matplotlib.pyplot as plt
# construct a new Generator object
rng = default_rng()
'''
This function simulates the rolling of k=int dice, n=int times
'''
def dicerolls(k, n):
l = [] # initialise empty list
d = {} # initialise empty dict
for roll in range(n): # outer loop simulates no. of times dice are rolled
dice_sum = 0 # dice_sum counter set to 0
for die in range(k): # inner loop simulates each of k dice thrown at random
dice = rng.integers(1, 7)
dice_sum += dice # face value of each dice added together
l.append(dice_sum)
d = counts(l) # function from Task 1 called
return dict(sorted(d.items()))
# 2 dice rolled 100,000 times and dictionary output
two_dice = dicerolls(2, 100000)
two_dice
###Output
_____no_output_____
###Markdown
Plot output: Different values for k (number of dice)
###Code
# plot 4 subplots on a single figure for better visualisation comparison
plt.figure(figsize=(10, 7))
# plot bar chart for 2 dice rolled
plt.subplot(221)
plt.bar(list(two_dice.keys()), two_dice.values())
plt.title('2 dice')
# plot bar chart for 3 dice rolled
three_dice = dicerolls(3, 100000)
plt.subplot(222)
plt.bar(list(three_dice.keys()), three_dice.values())
plt.title('3 dice')
# plot bar chart for 4 dice rolled
four_dice = dicerolls(4, 100000)
plt.subplot(223)
plt.bar(list(four_dice.keys()), four_dice.values())
plt.title('4 dice')
# plot bar chart for 5 dice rolled
five_dice = dicerolls(5, 100000)
plt.subplot(224)
plt.bar(list(five_dice.keys()), five_dice.values())
plt.title('5 dice')
###Output
_____no_output_____
###Markdown
Note the increased roundedness of the curve the more dice are thrown. All 4 have the appearance of a normal distribution. End task 2*** Task 3Write some python code that simulates flipping a coin 100 times. Then run this code 1,000 times, keeping track of the number of heads in each of the 1,000 simulations. Select an appropriate plot to depict the resulting list of 1,000 numbers, showing that it roughly follows a bell-shaped curve. Use the `numpy.random.binomial` function to generate the simulation.* To demonstrate the binomial distribution in practical terms, the coin toss example is often used [1], [2], [3]. * The coin toss is also an example of a Bernoulli trial. This is a single trial from which there are exactly two possible outcomes, usually denoted as 'success' or 'failure'. [4]* When we toss a fair coin, there are only two possible outcomes - heads or tails - and each outcome has an equal probability (p=0.5) of arising. * If we say that 'heads' denotes a 'success', we can perform the coin toss n number of times, counting the number of successes we observe. This number will have a binomial distribution.* Using rng.binomial below, I simulate the coin toss and plot the resulting distribution on a histogram. The bell shaped curve is evident.* There are 100 trials (n=100), with a 50% probability of success (p=0.5). This is performed 1,000 times. References[1] Hamel, Greg; Python for Data 22: Probability Distributions; https://www.kaggle.com/hamelg/python-for-data-22-probability-distributions [2] onlinestatsbook.com; Binomial Distribution; http://onlinestatbook.com/2/probability/binomial.html [3] Wikipedia; Bernoulli Trial; https://en.wikipedia.org/wiki/Bernoulli_trial[4] *Ibid*
###Code
# construct a new Generator object
rng = default_rng()
n, p = 100, .5 # number of trials, probability of each trial
unbiased_coin = rng.binomial(n, p, 10000) # number of heads in 100 tosses of a fair coin, repeated 10,000 times
plt.hist(unbiased_coin, color='green')
plt.title("unbiased coin")
plt.show()
###Output
_____no_output_____
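###Markdown
As a quick, illustrative sanity check, the sample mean and standard deviation of the simulation can be compared with the theoretical binomial values n*p and sqrt(n*p*(1-p)):
###Code
# theoretical mean / sd of Binomial(n=100, p=0.5) vs the simulated values
print("theoretical mean / sd:", n * p, (n * p * (1 - p)) ** 0.5)
print("simulated mean / sd:  ", unbiased_coin.mean(), unbiased_coin.std())
###Output
_____no_output_____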
###Markdown
This has the appearance of a normal distribution (explored below). If the probability of success is changed however (i.e. we add a bias to the coin), we observe a change in the shape of the distribution - it becomes asymmetrical:
###Code
n, p = 100, .9 # number of trials, probability of success
biased_coin = rng.binomial(n, p, 10000) # number of heads in 100 tosses of a biased coin (p=0.9), repeated 10,000 times
plt.hist(biased_coin, color='green')
plt.title("biased coin")
plt.show()
###Output
_____no_output_____
###Markdown
End task 3

***

Task 4

Use numpy to create four data sets, each with an `x` array and a corresponding `y` array, to demonstrate Simpson's paradox. You might create your `x` arrays using `numpy.linspace` and create the `y` array for each `x` using notation like `y = a * x + b` where you choose the `a` and `b` for each `x`, `y` pair to demonstrate the paradox. You might see the Wikipedia page for Simpson's paradox for inspiration.

Simpson's Paradox

* Simpson's Paradox is a phenomenon in statistics whereby seemingly contradictory trends are observed within the same data set, depending on how the data set is analysed.
* For example, the data may be split into four smaller groups and within each of these groups, a positive trend is observed. However when taken in aggregate, the overall trend is seen to be negative [1].
* In order to demonstrate this phenomenon I have generated four data sets below, using `numpy.linspace`. I then plot these data sets on a graph for visualisation.

Data set generation

* First, I import `numpy` and `matplotlib.pyplot` for data set generation and subsequent visualisation respectively:
###Code
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
* Next, I generate four data sets of equal sample size (20).
* The coordinates for the x values and the corresponding y values have been intentionally chosen so that each individual data set will show a positive trend.
* These same data sets, when taken in aggregate however, show an overall negative trend.
* When plotted on a graph (below), Simpson's paradox can be visualised and thus understood intuitively. There are many examples of such visualisations which guided the selection of values for my data sets [2], [3], [4].
* The `linspace` function generates the x array while the corresponding y array is populated using the formula `a * x + b` as suggested in the task brief.
* I have added the function `numpy.random.randn` to the formula in order to more closely simulate the random nature of reality. I use code found on a discussion on stackoverflow.com for this [5].
###Code
# Assign values for a and b
a = 1
b = 20
# Generate an x array using the numpy.linspace function
x = np.linspace(3, 10, 20)
# Calculate corresponding y values
y = a * x + b + np.random.randn(*x.shape)
b = 15
x2 = np.linspace(5, 12, 20)
y2 = a * x2 + b + np.random.randn(*x2.shape)
b = 10
x3 = np.linspace(7, 17, 20)
y3 = a * x3 + b + np.random.randn(*x3.shape)
b = 5
x4 = np.linspace(9, 22, 20)
y4 = a * x4 + b + np.random.randn(*x4.shape)
###Output
_____no_output_____
###Markdown
Plotting and Visualisation

* When we plot and visualise the data on a scatter plot, we observe a positive trend in each of the four data sets individually, while there is a clearly observable negative trend in the overall relationship between the variables if we take the data in aggregate.
* To generate this plot, I used code found in a discussion on stackoverflow.com [6].

When we plot one individual data set, we can clearly see a positive trend in the relationship between the x and y variables.
###Code
# Plot single data set 'x'
plt.scatter (x, y)
plt.xlim(0, 15)
plt.ylim(20, 40)
###Output
_____no_output_____
###Markdown
Plotting each data set on the same plot, however, reveals a different picture. On the plot below, we observe that each of the four individual data sets is associated with a positive trend, while the overall data reveals a negative trend in the relationship between the two variables.
###Code
# Initialise a figure with a single axes
fig = plt.figure()
ax1 = fig.add_subplot(111)
# Plot each data set on a scatter plot
ax1.scatter(x, y, s=10, c='b', marker="s", label='x')
ax1.scatter(x2 ,y2 , s=10, c='r', marker="o", label='x2')
ax1.scatter(x3, y3, s=10, c='g', marker="x", label='x3')
ax1.scatter(x4 ,y4 , s=10, c='y', marker="^", label='x4')
plt.xlim(-5, 30)
plt.ylim(0, 40)
plt.legend(loc='upper right');
plt.xlabel('supply', fontsize=16)
plt.ylabel('price', fontsize=16)
plt.show()
###Output
_____no_output_____ |
nb/demo_bk_survey.ipynb | ###Markdown
calculating the bispectrum for survey geometry
###Code
import os, time
import numpy as np
from simbig import halos as Halos
from simbig import galaxies as Galaxies
from simbig import forwardmodel as FM
from simbig import obs as CosmoObs
# --- plotting ---
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['text.usetex'] = True
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.linewidth'] = 1.5
mpl.rcParams['axes.xmargin'] = 1
mpl.rcParams['xtick.labelsize'] = 'x-large'
mpl.rcParams['xtick.major.size'] = 5
mpl.rcParams['xtick.major.width'] = 1.5
mpl.rcParams['ytick.labelsize'] = 'x-large'
mpl.rcParams['ytick.major.size'] = 5
mpl.rcParams['ytick.major.width'] = 1.5
mpl.rcParams['legend.frameon'] = False
###Output
_____no_output_____
###Markdown
Read in `Quijote` halo catalog and populate with galaxies

I'm using the `i=1118`th cosmology in the LHC because that's the closest to the cosmology used in Manera+(2015)
###Code
# read in halo catalog
halos = Halos.Quijote_LHC_HR(1118, z=0.5)
print('Om, Ob, h, ns, s8:')
print(Halos.Quijote_LHC_cosmo(1118))
# get LOWZ HOD parameters
theta_hod = Galaxies.thetahod_lowz_sgc()
# populate halos
hod = Galaxies.hodGalaxies(halos, theta_hod, seed=0)
###Output
Om, Ob, h, ns, s8:
(0.2671, 0.03993, 0.6845, 0.96030000000000004, 0.78549999999999998)
###Markdown
Forward model survey geometry and generate matching randoms
###Code
gals = FM.BOSS(hod, sample='lowz-south', seed=0, veto=False, fiber_collision=False, silent=False)
rand = FM.BOSS_randoms(gals, sample='lowz-south', veto=False) # random without veto mask
###Output
_____no_output_____
###Markdown
Calculate bispectrum using `simbig.obs.B0k_survey`
###Code
t0 = time.time()
b123 = CosmoObs.B0k_survey(gals, rand,
P0=1e4,
Ngrid=360,
Lbox=1400,
silent=False)
print('B0 take %f sec' % ((time.time() - t0)))
klim = (b123[0] < 0.5) & (b123[1] < 0.5) & (b123[2] < 0.5)
fig = plt.figure(figsize=(20,5))
sub = fig.add_subplot(111)
sub.plot(range(np.sum(klim)), b123[3][klim])
sub.set_xlabel('triangle configurations', fontsize=25)
sub.set_xlim(0, np.sum(klim))
sub.set_ylabel('$B_0(k_1, k_2, k_3)$', fontsize=25)
sub.set_yscale('log')
###Output
_____no_output_____ |
lect08_requests_BS/.ipynb_checkpoints/class_03-checkpoint.ipynb | ###Markdown
Parsing – continued
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import requests
url = 'http://books.toscrape.com/catalogue/page-1.html'
response = requests.get(url)
response
response.content[:1000]
from bs4 import BeautifulSoup
tree = BeautifulSoup(response.content, 'html.parser')
tree.html.head.title.text.strip()
books = tree.find_all('article', {'class' : 'product_pod'})
books[0]
books[0].find('p', {'class': 'price_color'}).text
books[0].p.get('class')[1]
books[0].a.get('href')
books[0].h3.a.get('title')
def get_page(p):
url = 'http://books.toscrape.com/catalogue/page-{}.html'.format(p)
response = requests.get(url)
tree = BeautifulSoup(response.content, 'html.parser')
books = tree.find_all('article', {'class' : 'product_pod'})
info = []
for book in books:
info.append({'price': book.find('p', {'class': 'price_color'}).text,
'href': book.h3.a.get('href'),
'title': book.h3.a.get('title'),
'rating': book.p.get('class')[1]})
return info
import time
infa = []
for p in range(1,51):
try:
infa.extend(get_page(p))
time.sleep(5)
except:
print(p)
import pandas as pd
df = pd.DataFrame(infa)
print(df.shape)
df.head()
df.to_csv('books_parsed.csv', index=False)
df.to_excel('books_parsed.xlsx', index=False)
df.info()
float(df.loc[0, 'price'][1:])
def get_price(price):
return float(price[1:])
df['price'] = df['price'].apply(get_price)
sns.histplot(data=df, x='price', bins=30);
def get_rating(r):
if r == "One":
return 1
elif r == "Two":
return 2
elif r == 'Three':
return 3
elif r == 'Four':
return 4
else:
return 5
df['rating'] = df['rating'].apply(get_rating)
df.rating.value_counts()
###Output
_____no_output_____
###Markdown
Parsing – exercise

Following the same approach as in the seminar, you need to collect data from the site https://quotes.toscrape.com/. The goal is a pandas dataframe with the columns:

* `quote` – the quote
* `author` – the author
* `<tag_name>` – 1 if the quote has this tag, and 0 if not. The number of such columns equals the number of tags on the site.

Print all quotes that have the tag "truth".
###Code
url = 'https://quotes.toscrape.com/page/1/'
response = requests.get(url)
response
tree = BeautifulSoup(response.content, 'html.parser')
quotes = tree.find_all('div', {'class' : 'quote'})
quotes[0]
quotes[0].span.text
quotes[0].find('small', {'class':'author'}).text
quotes[0].find_all('a', {'class': 'tag'})
quotes[0].find_all('a', {'class': 'tag'})[0].text
tags = []
for tag in quotes[0].find_all('a', {'class': 'tag'}):
tags.append(tag.text)
tags
info = []
for q in quotes:
tags = []
for tag in q.find_all('a', {'class': 'tag'}):
tags.append(tag.text)
info.append({'quote': q.span.text,
'author': q.find('small', {'class':'author'}).text,
'tags': tags})
info
response.content[:1000]
def get_page(p):
url = 'https://quotes.toscrape.com/page/{}/'.format(p)
response = requests.get(url)
tree = BeautifulSoup(response.content, 'html.parser')
quotes = tree.find_all('div', {'class' : 'quote'})
info = []
for q in quotes:
tags = []
for tag in q.find_all('a', {'class': 'tag'}):
tags.append(tag.text)
info.append({'quote': q.span.text,
'author': q.find('small', {'class':'author'}).text,
'tags': tags})
return info
info = []
for p in range(1,11):
info.extend(get_page(p))
len(info)
df = pd.DataFrame(info)
df.head()
tags_set = set(df['tags'].explode().values)
tags_set
for tag in tags_set:
df[tag] = [tag in df['tags'].loc[i] for i in df.index]
pd.set_option('display.max_columns', 500)
df.head()
df.columns
for q in df[df['truth']]['quote'].values:
print(q)
###Output
“The reason I talk to myself is because I’m the only one whose answers I accept.”
“A lie can travel half way around the world while the truth is putting on its shoes.”
“The truth." Dumbledore sighed. "It is a beautiful and terrible thing, and should therefore be treated with great caution.”
“Never tell the truth to people who are not worthy of it.”
###Markdown
Working with JSON files

Create a pandas dataframe with the following columns:

* `username`
* `changed_lines` – number of changed lines
* `commits` – number of commits
* `new_files` – number of new files created by this developer

Sort by `username`.

pandas
###Code
from pandas import json_normalize
import json
with open('commits.json', 'r') as f:
data = json.load(f)
data[0]
data[0]['username']
data = json_normalize(data, 'files', ['username', 'commit_time'])
data
data['commit_time'] = pd.to_datetime(data['commit_time'])
data.info()
# commits
res = data.groupby('username')[['commit_time']].nunique().reset_index()
res
# changed_lines
data.groupby('username')['changed_lines'].sum().values
res['changed_lines'] = data.groupby('username')['changed_lines'].sum().values
agg = data.groupby(['name', 'username'])[['commit_time']].min().sort_values(['name', 'commit_time'])
agg
d = {}
for file in agg.reset_index()['name'].unique():
d[file] = agg.loc[file].iloc[0].name
d
pd.DataFrame([d]).T.reset_index().groupby(0).count()['index'].values
res['new_files'] = pd.DataFrame([d]).T.reset_index().groupby(0).count()['index'].values
res.sort_values('username', inplace=True)
res
###Output
_____no_output_____
###Markdown
dictionaries
###Code
from collections import defaultdict
with open('commits.json', 'r') as f:
data = json.load(f)
data = sorted(data, key=lambda x: pd.to_datetime(x['commit_time']))
data[0]
table = defaultdict(lambda: {'commits': 0, 'changed_lines':0, 'new_files':0})
new_files = set()
for commit in data:
user = commit['username']
table[user]['commits'] += 1
for file in commit['files']:
table[user]['changed_lines'] += file['changed_lines']
if file['name'] not in new_files:
new_files.add(file['name'])
table[user]['new_files'] += 1
table
fin = pd.DataFrame(table).T.reset_index().rename(columns={'index': 'username'}).sort_values('username')
fin
###Output
_____no_output_____ |
notebooks/02-Train.ipynb | ###Markdown
Feature engineering
###Code
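# NOTE: the helper functions used below (replace_na, to_categorical, to_tfidf,
# to_sparse_int, stack_sparse, split_train, model_lightgbm, model_ridge, model_xgb,
# score_function, plot_roc) and the objects `train`, `y` and `stopwords` are assumed
# to be defined in this project's own modules or in an earlier notebook; they are not
# standard-library or scikit-learn names.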
train = replace_na(train, ['review_content', 'review_title'])
X_dummies = to_categorical(train, 'review_stars')
X_content = to_tfidf(train, 'review_content', stopwords)
X_title = to_tfidf(train, 'review_title', stopwords)
X_length = to_sparse_int(train, 'review_content')
sparse_merge = stack_sparse([X_dummies, X_content, X_title, X_length])
X_train, X_test, y_train, y_test = split_train(sparse_merge, y, 0.2)
###Output
_____no_output_____
###Markdown
LightGBM
###Code
model_lgb = model_lightgbm(X_train, y_train)
preds = model_lgb.predict_proba(X_test)
preds1 = preds[:,1]
score_function(y_test, preds1)
fpr, tpr, _ = roc_curve(y_test, preds1)
plot_roc(fpr, tpr)
###Output
_____no_output_____
###Markdown
Ridge
###Code
model_rdg = model_ridge(X_train, y_train, )
preds = model_rdg.predict(X=X_test)
score_function(y_test, preds)
fpr, tpr, _ = roc_curve(y_test, preds)
plot_roc(fpr, tpr)
###Output
_____no_output_____
###Markdown
Xgboost
###Code
model_xgboost = model_xgb(X_train, y_train)
preds = model_xgboost.predict_proba(X_test)
preds1 = preds[:,1]
score_function(y_test, preds1)
fpr, tpr, _ = roc_curve(y_test, preds1)
plot_roc(fpr, tpr)
###Output
_____no_output_____ |
module1-statistics-probability-and-inference/Arturo_Obregon_LS_DS_131_Statistics_Probability_Assignment.ipynb | ###Markdown
*Data Science Unit 1 Sprint 3 Assignment 1*

Apply the t-test to real data

Your assignment is to determine which issues have "statistically significant" differences between political parties in this [1980s congressional voting data](https://archive.ics.uci.edu/ml/datasets/Congressional+Voting+Records). The data consists of 435 instances (one for each congressperson), a class (democrat or republican), and 16 binary attributes (yes or no for voting for or against certain issues). Be aware - there are missing values!

Your goals:

1. Load and clean the data (or determine the best method to drop observations when running tests)
2. Using hypothesis testing, find an issue that democrats support more than republicans with p < 0.01
3. Using hypothesis testing, find an issue that republicans support more than democrats with p < 0.01
4. Using hypothesis testing, find an issue where the difference between republicans and democrats has p > 0.1 (i.e. there may not be much of a difference)

Note that this data will involve *2 sample* t-tests, because you're comparing averages across two groups (republicans and democrats) rather than a single group against a null hypothesis.

Stretch goals:

1. Refactor your code into functions so it's easy to rerun with arbitrary variables
2. Apply hypothesis testing to your personal project data (for the purposes of this notebook you can type a summary of the hypothesis you formed and tested)
###Code
### YOUR CODE STARTS HERE
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data
import pandas as pd
import numpy as np
df = pd.read_csv('house-votes-84.data',
header=None,
names=['party','handicapped-infants','water-project',
'budget','physician-fee-freeze', 'el-salvador-aid',
'religious-groups','anti-satellite-ban',
'aid-to-contras','mx-missile','immigration',
'synfuels', 'education', 'right-to-sue','crime','duty-free',
'south-africa'])
print(df.shape)
df.head(1000)
df = df.replace({'?':np.NaN, 'n':0, 'y':1})
df.head()
dem = df[df['party'] == 'democrat']
rep = df[df['party'] == 'republican']
dem.groupby(['party']).sum()
rep.groupby(['party']).sum()
from scipy.stats import ttest_1samp
rep['immigration'].mean()
rep['immigration'].isnull().sum()
len(rep['immigration']) - rep['immigration'].isnull().sum()
###Output
_____no_output_____
###Markdown
1) Null Hypothesis:

In 1-sample t-tests YOU GET TO CHOOSE YOUR NULL HYPOTHESIS

H0 : 0.0 - There is ZERO republican support for this bill

2) Alternative Hypothesis

Ha : x¯≠0 - There is non-zero support for the immigration bill among republicans.

3) Confidence Level: 95% or .95
###Code
ttest_1samp(rep['immigration'], 0, nan_policy = 'omit')
###Output
_____no_output_____
###Markdown
4) t-statistic: 14.3765

5) p-value of .00000000000000000000000000007541

______________________________________________________________________

Conclusion: Due to a p-value of ~0, I reject the null hypothesis that republican support is zero and conclude that republican support is non-zero.

1) Null Hypothesis:

In 1-sample t-tests YOU GET TO CHOOSE YOUR NULL HYPOTHESIS

H0 : 0.0 - There is ZERO democratic support for this bill

2) Alternative Hypothesis

Ha : x¯≠0 - There is non-zero support for the synfuels bill among democrats.

3) Confidence Level: 95% or .95
###Code
ttest_1samp(dem['synfuels'], 0, nan_policy = 'omit')
###Output
_____no_output_____
###Markdown
4) t-statistic: 16.1259

5) p-value of .000000000000000000000000000000000000009159

______________________________________________________________________

Conclusion: Due to a p-value of ~0, I reject the null hypothesis that democratic support is zero and conclude that democratic support is non-zero.

------

2-SAMPLE TEST
###Code
from scipy.stats import ttest_ind
import numpy as np
ttest_ind(rep['education'], dem['education'], nan_policy = 'omit')
rep['education'].mean()
dem['education'].mean()
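# Stretch goal 1 (illustrative sketch): wrap the 2-sample test in a reusable function so
# it can be rerun for any issue column; 'water-project' is commonly reported as an issue
# with little difference between the parties (p > 0.1).
def compare_parties(issue):
    """2-sample t-test comparing republican and democrat votes on one issue."""
    return ttest_ind(rep[issue], dem[issue], nan_policy='omit')
compare_parties('water-project')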
###Output
_____no_output_____ |
EvaluationMachineLearning.ipynb | ###Markdown
Evaluating Machine Learning Algorithms - Extended Examples

Preparations

* Download [Anaconda with Python 3.6](https://www.anaconda.com/download) to install a nearly complete Python environment for data science projects
* Install [Keras: The Python Deep Learning Library](https://keras.io/) and other missing packages with the following command: ```conda install keras```
* Start your local Jupyter instance with ```jupyter notebook```

If you cannot see line numbers press ```Shift+L``` to switch them on or check the ```View``` menu.
###Code
# The %... is an iPython thing, and is not part of the Python language.
# In this case we're just telling the plotting library to draw things on
# the notebook, instead of on a separate window.
%matplotlib inline
# the import statements load differnt Python packages that we need for the tutorial
# See all the "as ..." contructs? They're just aliasing the package names.
# That way we can call methods like plt.plot() instead of matplotlib.pyplot.plot().
# packages for scientif computing and visualization
import numpy as np
import scipy as sp
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import pandas as pd
import time
# configuration of the notebook
pd.set_option('display.width', 500)
pd.set_option('display.max_columns', 100)
pd.set_option('display.notebook_repr_html', True)
import seaborn as sns
sns.set_style("whitegrid")
sns.set_context("notebook")
# machine learning library imports
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.utils import np_utils
###Output
_____no_output_____
###Markdown
Setting Up the Experiment

In this example, we will rely on the [MNIST data set](http://yann.lecun.com/exdb/mnist/), a data set for the recognition of hand-written digits. The MNIST digits are derived from data collected by [NIST](https://www.nist.gov/), the same institute that runs evaluation campaigns such as the discussed [TREC campaign](https://trec.nist.gov/). The following script will display some sample digits to give an example of the contents of the data set.
###Code
# load (download if needed) the MNIST dataset of handwritten numbers
# we will get a training and test set consisting of bitmaps
# in the X_* arrays and the associated labels in the y_* arrays
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# plot 4 images as gray scale images using subplots without axis labels
plt.subplot(221)
plt.axis('off')
# -1 inverts the image because of aesthetical reasons
plt.imshow(X_train[0]*-1, cmap=plt.get_cmap('gray'))
plt.subplot(222)
plt.axis('off')
plt.imshow(X_train[1]*-1, cmap=plt.get_cmap('gray'))
plt.subplot(223)
plt.axis('off')
plt.imshow(X_train[2]*-1, cmap=plt.get_cmap('gray'))
plt.subplot(224)
plt.axis('off')
plt.imshow(X_train[3]*-1, cmap=plt.get_cmap('gray'))
# show the plot
#plt.savefig("test.pdf",format="pdf")
plt.show()
###Output
_____no_output_____
###Markdown
Next, we define our machine learning model with its different layers. Roughly speaking, the function baseline_model() defines what the neural network looks like. For more details, see the [documentation](https://keras.io/getting-started/sequential-model-guide/).
###Code
# define baseline model
def baseline_model():
# create model
model = Sequential()
model.add(Dense(num_pixels, input_dim=num_pixels, kernel_initializer='normal', activation='relu'))
model.add(Dense(num_classes, kernel_initializer='normal', activation='softmax'))
# Compile model, use logarithmic loss for evaluation
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
# fix random seed for reproducibility
seed = 7
np.random.seed(seed)
# flatten 28*28 images from the MNIST data set to a 784 vector for each image
num_pixels = X_train.shape[1] * X_train.shape[2]
X_train = X_train.reshape(X_train.shape[0], num_pixels).astype('float32')
X_test = X_test.reshape(X_test.shape[0], num_pixels).astype('float32')
# normalize inputs from 0-255 to 0-1
X_train = X_train / 255
X_test = X_test / 255
# one hot encode outputs
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
# build the model
model = baseline_model()
# fit the model, i.e., start the actual learning
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=200, verbose=2)
# Final evaluation of the model
scores = model.evaluate(X_test, y_test, verbose=0)
# print the error rate of the algorithm
print("Baseline Error: %.2f%%" % (100-scores[1]*100))
###Output
Train on 60000 samples, validate on 10000 samples
Epoch 1/10
- 36s - loss: 0.2797 - acc: 0.9205 - val_loss: 0.1411 - val_acc: 0.9576
Epoch 2/10
- 37s - loss: 0.1116 - acc: 0.9675 - val_loss: 0.0922 - val_acc: 0.9713
Epoch 3/10
- 37s - loss: 0.0717 - acc: 0.9798 - val_loss: 0.0790 - val_acc: 0.9764
Epoch 4/10
- 44s - loss: 0.0502 - acc: 0.9861 - val_loss: 0.0749 - val_acc: 0.9770
Epoch 5/10
- 43s - loss: 0.0369 - acc: 0.9895 - val_loss: 0.0665 - val_acc: 0.9791
Epoch 6/10
- 45s - loss: 0.0266 - acc: 0.9930 - val_loss: 0.0627 - val_acc: 0.9802
Epoch 7/10
- 37s - loss: 0.0207 - acc: 0.9946 - val_loss: 0.0632 - val_acc: 0.9807
Epoch 8/10
- 36s - loss: 0.0139 - acc: 0.9971 - val_loss: 0.0621 - val_acc: 0.9807
Epoch 9/10
- 36s - loss: 0.0105 - acc: 0.9979 - val_loss: 0.0585 - val_acc: 0.9817
Epoch 10/10
- 37s - loss: 0.0078 - acc: 0.9986 - val_loss: 0.0577 - val_acc: 0.9819
Baseline Error: 1.81%
###Markdown
Overfitting

In the next cell, we will train the same model on training sets ranging from very few samples up to the full amount used before, to illustrate the overfitting phenomenon.

__ATTENTION!__ This will take some time.
###Code
# define baseline model
def baseline_model():
# create model
model = Sequential()
model.add(Dense(num_pixels, input_dim=num_pixels, kernel_initializer='normal', activation='relu'))
model.add(Dense(num_classes, kernel_initializer='normal', activation='softmax'))
# Compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
# the steps indicate the size of the training sample
steps=[18,100,1000,5000,10000,20000,30000,40000,50000]
# this dict (basically a hashmap) holds the error rate for each iteration
errorPerStep=dict()
# fix random seed for reproducibility
seed = 7
np.random.seed(seed)
for step in steps:
# load data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# limit the training data size to the current step, the : means "from 0 to step"
X_train=X_train[0:step]
y_train=y_train[0:step]
# flatten 28*28 images to a 784 vector for each image
num_pixels = X_train.shape[1] * X_train.shape[2]
X_train = X_train.reshape(X_train.shape[0], num_pixels).astype('float32')
X_test = X_test.reshape(X_test.shape[0], num_pixels).astype('float32')
# normalize inputs from 0-255 to 0-1
X_train = X_train / 255
X_test = X_test / 255
# one hot encode outputs
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
# build the model
model = baseline_model()
# Fit the model
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=200, verbose=2)
# Final evaluation of the model
scores = model.evaluate(X_test, y_test, verbose=0)
print("Baseline Error: %.2f%%" % (100-scores[1]*100))
errorPerStep[step]=(100-scores[1]*100)
###Output
Train on 18 samples, validate on 10000 samples
Epoch 1/10
- 3s - loss: 2.2611 - acc: 0.1111 - val_loss: 2.1296 - val_acc: 0.2652
Epoch 2/10
- 2s - loss: 1.5880 - acc: 0.7778 - val_loss: 1.9998 - val_acc: 0.3323
Epoch 3/10
- 3s - loss: 1.0800 - acc: 0.8889 - val_loss: 1.8929 - val_acc: 0.3801
Epoch 4/10
- 3s - loss: 0.7361 - acc: 1.0000 - val_loss: 1.7894 - val_acc: 0.4209
Epoch 5/10
- 3s - loss: 0.4807 - acc: 1.0000 - val_loss: 1.6933 - val_acc: 0.4593
Epoch 6/10
- 3s - loss: 0.2989 - acc: 1.0000 - val_loss: 1.6176 - val_acc: 0.4888
Epoch 7/10
- 3s - loss: 0.1881 - acc: 1.0000 - val_loss: 1.5588 - val_acc: 0.5063
Epoch 8/10
- 3s - loss: 0.1196 - acc: 1.0000 - val_loss: 1.5155 - val_acc: 0.5208
Epoch 9/10
- 3s - loss: 0.0769 - acc: 1.0000 - val_loss: 1.4869 - val_acc: 0.5298
Epoch 10/10
- 3s - loss: 0.0506 - acc: 1.0000 - val_loss: 1.4705 - val_acc: 0.5345
Baseline Error: 46.55%
Train on 100 samples, validate on 10000 samples
Epoch 1/10
- 3s - loss: 2.4231 - acc: 0.0500 - val_loss: 2.1669 - val_acc: 0.2375
Epoch 2/10
- 3s - loss: 1.9784 - acc: 0.4000 - val_loss: 1.9493 - val_acc: 0.4205
Epoch 3/10
- 3s - loss: 1.5395 - acc: 0.7100 - val_loss: 1.7734 - val_acc: 0.4999
Epoch 4/10
- 3s - loss: 1.2084 - acc: 0.8400 - val_loss: 1.6050 - val_acc: 0.5663
Epoch 5/10
- 3s - loss: 0.9355 - acc: 0.9100 - val_loss: 1.4626 - val_acc: 0.6132
Epoch 6/10
- 3s - loss: 0.7277 - acc: 0.9500 - val_loss: 1.3462 - val_acc: 0.6429
Epoch 7/10
- 3s - loss: 0.5651 - acc: 0.9600 - val_loss: 1.2561 - val_acc: 0.6548
Epoch 8/10
- 3s - loss: 0.4370 - acc: 0.9600 - val_loss: 1.1915 - val_acc: 0.6623
Epoch 9/10
- 3s - loss: 0.3384 - acc: 0.9800 - val_loss: 1.1457 - val_acc: 0.6654
Epoch 10/10
- 3s - loss: 0.2618 - acc: 0.9900 - val_loss: 1.1130 - val_acc: 0.6679
Baseline Error: 33.21%
Train on 1000 samples, validate on 10000 samples
Epoch 1/10
- 4s - loss: 1.8976 - acc: 0.4350 - val_loss: 1.2778 - val_acc: 0.7272
Epoch 2/10
- 3s - loss: 0.9248 - acc: 0.8150 - val_loss: 0.7777 - val_acc: 0.8027
Epoch 3/10
- 4s - loss: 0.5600 - acc: 0.8730 - val_loss: 0.5807 - val_acc: 0.8297
Epoch 4/10
- 3s - loss: 0.3998 - acc: 0.9060 - val_loss: 0.4973 - val_acc: 0.8469
Epoch 5/10
- 3s - loss: 0.3153 - acc: 0.9220 - val_loss: 0.4438 - val_acc: 0.8621
Epoch 6/10
- 3s - loss: 0.2552 - acc: 0.9390 - val_loss: 0.4281 - val_acc: 0.8669
Epoch 7/10
- 3s - loss: 0.2114 - acc: 0.9500 - val_loss: 0.4007 - val_acc: 0.8760
Epoch 8/10
- 3s - loss: 0.1744 - acc: 0.9600 - val_loss: 0.3925 - val_acc: 0.8815
Epoch 9/10
- 4s - loss: 0.1472 - acc: 0.9730 - val_loss: 0.3849 - val_acc: 0.8824
Epoch 10/10
- 3s - loss: 0.1260 - acc: 0.9830 - val_loss: 0.3771 - val_acc: 0.8842
Baseline Error: 11.58%
Train on 5000 samples, validate on 10000 samples
Epoch 1/10
- 6s - loss: 0.8799 - acc: 0.7566 - val_loss: 0.3909 - val_acc: 0.8914
Epoch 2/10
- 6s - loss: 0.3014 - acc: 0.9148 - val_loss: 0.3117 - val_acc: 0.9059
Epoch 3/10
- 6s - loss: 0.2247 - acc: 0.9354 - val_loss: 0.2750 - val_acc: 0.9203
Epoch 4/10
- 6s - loss: 0.1762 - acc: 0.9556 - val_loss: 0.2511 - val_acc: 0.9236
Epoch 5/10
- 6s - loss: 0.1356 - acc: 0.9680 - val_loss: 0.2368 - val_acc: 0.9298
Epoch 6/10
- 6s - loss: 0.1118 - acc: 0.9748 - val_loss: 0.2260 - val_acc: 0.9321
Epoch 7/10
- 6s - loss: 0.0897 - acc: 0.9792 - val_loss: 0.2264 - val_acc: 0.9323
Epoch 8/10
- 6s - loss: 0.0728 - acc: 0.9848 - val_loss: 0.2086 - val_acc: 0.9375
Epoch 9/10
- 6s - loss: 0.0572 - acc: 0.9896 - val_loss: 0.2060 - val_acc: 0.9388
Epoch 10/10
- 6s - loss: 0.0487 - acc: 0.9922 - val_loss: 0.2068 - val_acc: 0.9389
Baseline Error: 6.11%
Train on 10000 samples, validate on 10000 samples
Epoch 1/10
- 9s - loss: 0.6345 - acc: 0.8230 - val_loss: 0.3117 - val_acc: 0.9112
Epoch 2/10
- 8s - loss: 0.2510 - acc: 0.9295 - val_loss: 0.2454 - val_acc: 0.9295
Epoch 3/10
- 9s - loss: 0.1873 - acc: 0.9489 - val_loss: 0.2203 - val_acc: 0.9364
Epoch 4/10
- 9s - loss: 0.1426 - acc: 0.9598 - val_loss: 0.1948 - val_acc: 0.9430
Epoch 5/10
- 8s - loss: 0.1104 - acc: 0.9704 - val_loss: 0.1818 - val_acc: 0.9457
Epoch 6/10
- 9s - loss: 0.0852 - acc: 0.9797 - val_loss: 0.1699 - val_acc: 0.9472
Epoch 7/10
- 9s - loss: 0.0687 - acc: 0.9835 - val_loss: 0.1622 - val_acc: 0.9522
Epoch 8/10
- 9s - loss: 0.0531 - acc: 0.9893 - val_loss: 0.1535 - val_acc: 0.9544
Epoch 9/10
- 9s - loss: 0.0412 - acc: 0.9928 - val_loss: 0.1503 - val_acc: 0.9547
Epoch 10/10
- 9s - loss: 0.0330 - acc: 0.9951 - val_loss: 0.1445 - val_acc: 0.9573
Baseline Error: 4.27%
Train on 20000 samples, validate on 10000 samples
Epoch 1/10
- 15s - loss: 0.4717 - acc: 0.8660 - val_loss: 0.2475 - val_acc: 0.9294
Epoch 2/10
- 14s - loss: 0.1925 - acc: 0.9472 - val_loss: 0.1768 - val_acc: 0.9482
Epoch 3/10
- 14s - loss: 0.1326 - acc: 0.9635 - val_loss: 0.1536 - val_acc: 0.9539
Epoch 4/10
- 14s - loss: 0.0947 - acc: 0.9753 - val_loss: 0.1333 - val_acc: 0.9597
Epoch 5/10
- 14s - loss: 0.0709 - acc: 0.9806 - val_loss: 0.1125 - val_acc: 0.9672
Epoch 6/10
- 14s - loss: 0.0512 - acc: 0.9876 - val_loss: 0.1087 - val_acc: 0.9660
Epoch 7/10
- 14s - loss: 0.0395 - acc: 0.9905 - val_loss: 0.1110 - val_acc: 0.9671
Epoch 8/10
- 14s - loss: 0.0295 - acc: 0.9939 - val_loss: 0.0976 - val_acc: 0.9713
Epoch 9/10
- 15s - loss: 0.0214 - acc: 0.9966 - val_loss: 0.0975 - val_acc: 0.9720
Epoch 10/10
- 14s - loss: 0.0164 - acc: 0.9982 - val_loss: 0.0939 - val_acc: 0.9726
Baseline Error: 2.74%
Train on 30000 samples, validate on 10000 samples
Epoch 1/10
- 20s - loss: 0.3967 - acc: 0.8879 - val_loss: 0.1988 - val_acc: 0.9433
Epoch 2/10
- 20s - loss: 0.1608 - acc: 0.9531 - val_loss: 0.1400 - val_acc: 0.9596
Epoch 3/10
- 20s - loss: 0.1064 - acc: 0.9700 - val_loss: 0.1288 - val_acc: 0.9591
Epoch 4/10
- 19s - loss: 0.0777 - acc: 0.9784 - val_loss: 0.0983 - val_acc: 0.9703
Epoch 5/10
- 19s - loss: 0.0537 - acc: 0.9854 - val_loss: 0.0877 - val_acc: 0.9740
Epoch 6/10
- 19s - loss: 0.0401 - acc: 0.9906 - val_loss: 0.0836 - val_acc: 0.9739
Epoch 7/10
- 19s - loss: 0.0292 - acc: 0.9933 - val_loss: 0.0810 - val_acc: 0.9752
Epoch 8/10
- 19s - loss: 0.0218 - acc: 0.9958 - val_loss: 0.0834 - val_acc: 0.9741
Epoch 9/10
- 19s - loss: 0.0161 - acc: 0.9974 - val_loss: 0.0747 - val_acc: 0.9778
Epoch 10/10
- 19s - loss: 0.0117 - acc: 0.9984 - val_loss: 0.0748 - val_acc: 0.9787
Baseline Error: 2.13%
Train on 40000 samples, validate on 10000 samples
Epoch 1/10
- 284s - loss: 0.3397 - acc: 0.9037 - val_loss: 0.1739 - val_acc: 0.9498
Epoch 2/10
- 26s - loss: 0.1373 - acc: 0.9612 - val_loss: 0.1170 - val_acc: 0.9666
Epoch 3/10
- 25s - loss: 0.0893 - acc: 0.9745 - val_loss: 0.1034 - val_acc: 0.9700
Epoch 4/10
- 25s - loss: 0.0637 - acc: 0.9821 - val_loss: 0.0881 - val_acc: 0.9729
Epoch 5/10
- 25s - loss: 0.0453 - acc: 0.9879 - val_loss: 0.0734 - val_acc: 0.9793
Epoch 6/10
- 25s - loss: 0.0331 - acc: 0.9916 - val_loss: 0.0758 - val_acc: 0.9772
Epoch 7/10
- 24s - loss: 0.0239 - acc: 0.9945 - val_loss: 0.0716 - val_acc: 0.9778
Epoch 8/10
- 24s - loss: 0.0178 - acc: 0.9963 - val_loss: 0.0737 - val_acc: 0.9794
Epoch 9/10
- 24s - loss: 0.0128 - acc: 0.9980 - val_loss: 0.0687 - val_acc: 0.9790
Epoch 10/10
- 25s - loss: 0.0092 - acc: 0.9990 - val_loss: 0.0736 - val_acc: 0.9778
Baseline Error: 2.22%
Train on 50000 samples, validate on 10000 samples
Epoch 1/10
- 30s - loss: 0.3109 - acc: 0.9123 - val_loss: 0.1631 - val_acc: 0.9518
Epoch 2/10
- 30s - loss: 0.1253 - acc: 0.9639 - val_loss: 0.1020 - val_acc: 0.9696
Epoch 3/10
- 30s - loss: 0.0804 - acc: 0.9773 - val_loss: 0.0841 - val_acc: 0.9736
Epoch 4/10
- 30s - loss: 0.0567 - acc: 0.9838 - val_loss: 0.0792 - val_acc: 0.9744
Epoch 5/10
- 30s - loss: 0.0414 - acc: 0.9881 - val_loss: 0.0711 - val_acc: 0.9774
Epoch 6/10
- 30s - loss: 0.0306 - acc: 0.9921 - val_loss: 0.0693 - val_acc: 0.9784
Epoch 7/10
- 30s - loss: 0.0213 - acc: 0.9948 - val_loss: 0.0622 - val_acc: 0.9805
Epoch 8/10
- 30s - loss: 0.0167 - acc: 0.9963 - val_loss: 0.0654 - val_acc: 0.9802
Epoch 9/10
- 30s - loss: 0.0125 - acc: 0.9975 - val_loss: 0.0621 - val_acc: 0.9807
Epoch 10/10
- 30s - loss: 0.0081 - acc: 0.9990 - val_loss: 0.0642 - val_acc: 0.9804
Baseline Error: 1.96%
###Markdown
Next, we will illustrate our results.
###Code
print(errorPerStep)
x=[]
y=[]
for e in errorPerStep:
x.append(e)
y.append(errorPerStep[e])
plt.xlabel("Training Samples")
plt.ylabel("Baseline Error (%)")
plt.plot(x,y,'o-')
plt.savefig("test.pdf",format="pdf")
###Output
{18: 46.550000000000004, 100: 33.209999999999994, 1000: 11.579999999999998, 5000: 6.109999999999999, 10000: 4.269999999999996, 20000: 2.739999999999995, 30000: 2.1299999999999955, 40000: 2.219999999999999, 50000: 1.9599999999999937}
###Markdown
The graph clearly indicates that the baseline error decreases as the amount of training data increases. In other words, the overfitting effect diminishes the more data the learning algorithm has seen. To end the example, we will check how well the model can predict new input.
###Code
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# choose a random sample as our test image
test_im = X_train[25]
# display the image
plt.imshow(test_im.reshape(28,28)*-1, cmap=plt.get_cmap('gray'), interpolation='none')
plt.axis('off')
num_pixels = X_train.shape[1] * X_train.shape[2]
# as we are dealing with only one image, we have to restrict the array to a 1D * 784
test_im = test_im.reshape(1, num_pixels).astype('float32')
# let the model predict the image
r=model.predict(test_im)
itemindex = np.argmax(r[0]) # take the class with the highest predicted probability (more robust than an exact comparison to 1)
print("The model predicts: %i for the following image:"%itemindex)
###Output
The model predicts: 2 for the following image:
###Markdown
Accuracy and Error Rate

The next cell illustrates how accuracy changes for different distributions of samples between two classes if the model always predicts that an element belongs to class A. For example, with 90 samples in A and 10 in B, the always-A predictor is correct 90 times out of 100, i.e. its accuracy is 0.9.
$$Accuracy=\frac{|tp|+|tn|}{|tp|+|tn|+|fp|+|fn|}\equiv\frac{|\mbox{correct predictions}|}{|\mbox{predictions}|}$$
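As a quick cross-check of the formula, the same number for the 90/10 split can be obtained with scikit-learn's `accuracy_score` (a small sketch; the array names here are made up and independent of the next cell):

```python
from sklearn.metrics import accuracy_score
import numpy as np

reality = np.concatenate((np.ones(90), np.zeros(10)))  # 90 samples in class A (1), 10 in class B (0)
always_a = np.ones(100)                                 # the predictor always answers "class A"
print(accuracy_score(reality, always_a))                # 0.9, i.e. 90 correct predictions out of 100
```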
###Code
# arrays for plotting
x=[] # samples in A
y=[] # samples in B
accuracies=[] # calculated accuracies for each distribution
# distributions between class A and B, first entry means 90% in A, 10% in B
distributions=[[90,10],[55,45],[70,30],[50,50],[20,80]]
for distribution in distributions:
x.append(distribution[0])
y.append(distribution[1])
samplesA=np.ones((1,distribution[0])) # membership of class A is encoded as 1
samplesB=np.zeros((1,distribution[1])) # membership of class B is encoded as 0
# combine both arrays
reality=np.concatenate((samplesA,samplesB),axis=None)
# as said above, our model always associates the elements with class A (encoded by 1)
prediction=np.ones((1,100))
tpCount=0
# count the true positives
for (i,val) in enumerate(prediction[0]):
if not reality[i]==val:
pass
else:
tpCount+=1
# calculate the accuracy and add it to the accuracies array for later visualization
acc=float(tpCount)/100.0 # the always-A predictor never predicts B, so there are no true negatives to add
accuracies.append(acc*1000) # the multiplication by 1000 is done for visualization purposes only
print("Accuracy: %.2f"%(acc))
# plot the results as a bubble chart
plt.xlim(0,100)
plt.ylim(0,100)
plt.xlabel("Samples in A")
plt.ylabel("Samples in B")
plt.title("Accuracy of a Always-A Predictor")
plt.scatter(x, y, s=np.array(accuracies), alpha=0.5) # accuracies were already scaled above; multiplying the plain Python list would only repeat it
#plt.savefig("test.png",format="png")
plt.show()
###Output
Accuracy: 0.90
Accuracy: 0.55
Accuracy: 0.70
Accuracy: 0.50
Accuracy: 0.20
###Markdown
Logarithmic Loss

The $Logarithmic~Loss=\frac{-1}{N}\sum_{i=1}^N\sum_{j=1}^M y_{ij}\log(p_{ij}) \rightarrow [0,\infty)$ penalizes wrong predictions. For the sake of simplicity, we simply use the function provided by [sklearn](http://scikit-learn.org/stable/), a machine-learning toolkit for Python. The [manual](http://scikit-learn.org/stable/modules/model_evaluation.html#log-loss) will give you more details.
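To connect the formula to the library call, the first value printed by the next cell can also be computed by hand (a short sketch reusing the same toy labels and predictions):

```python
import numpy as np

y_true = [0, 0, 1, 1, 2]
y_pred = [[.9, .1, .0], [.8, .2, .0], [.3, .7, .0], [.01, .99, .0], [.0, .0, 1.0]]

# average the negative log of the probability assigned to the true class of each sample
manual_log_loss = -np.mean([np.log(p[t]) for p, t in zip(y_pred, y_true)])
print(manual_log_loss)  # ~0.139, matching sklearn's log_loss below
# note: sklearn clips probabilities away from exactly 0 and 1, which is why the
# "perfect" prediction below yields a tiny value (~2e-15) instead of exactly 0
```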
###Code
from sklearn.metrics import log_loss
# the correct cluster for each sample, i.e., sample 1 is in class 0
y_true = [0, 0, 1, 1,2]
# the predictions: 1st sample is 90% predicted to be in class 0
y_pred = [[.9, .1,.0], [.8, .2,.0], [.3, .7,.0], [.01, .99,.0],[.0,.0,1.0]]
print(log_loss(y_true, y_pred))
# perfect prediction
y_perfect = [[1.0, .0,.0], [1.0, .0,.0], [.0, 1.0,.0], [0, 1.0,.0],[.0,.0,1.0]]
print(log_loss(y_true, y_perfect))
x=[]
y=[]
# the for loop modifies the first prediction of an element belonging to class 0 from 0 to 1
# in other words, from a wrong to a correct prediction
for i in range(1,11):
r2=y_perfect
r2[0][0]=float(i/10)
x.append(r2[0][0])
y.append(log_loss(y_true,r2))
# plot the result
plt.xlabel("Predicted Probability")
plt.ylabel("Logarithmic Loss")
plt.title("Does an object of class X belong do class X?")
plt.plot(x,y,'o-')
#plt.savefig("test.pdf",format="pdf")
###Output
0.1390458693528553
2.1094237467877998e-15
|
line_detection.ipynb | ###Markdown
Advanced Lane Finding Project

The goals / steps of this project are the following:
* Compute the camera calibration matrix and distortion coefficients given a set of chessboard images.
* Apply a distortion correction to raw images.
* Use color transforms, gradients, etc., to create a thresholded binary image.
* Apply a perspective transform to rectify binary image ("birds-eye view").
* Detect lane pixels and fit to find the lane boundary.
* Determine the curvature of the lane and vehicle position with respect to center.
* Warp the detected lane boundaries back onto the original image.
* Output visual display of the lane boundaries and numerical estimation of lane curvature and vehicle position.

---

First, I'll compute the camera calibration using chessboard images
###Code
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import glob
import cv2
import math
import os
%matplotlib inline
###Output
_____no_output_____
###Markdown
Camera Calibration Helper Functions
###Code
def find_corners(gray_img, nx, ny):
"""Find corners on a chessboard for camera calibration"""
return cv2.findChessboardCorners(gray_img, (nx, ny), None)
def draw_corners(img, nx, ny, corners, ret):
"""Draws chessboard corners"""
return cv2.drawChessboardCorners(img, (nx, ny), corners, ret)
def calibrate(objpoints, imgpoints, img_shape):
"""Calibrates camera"""
return cv2.calibrateCamera(objpoints, imgpoints, img_shape, None, None) # use the img_shape argument instead of relying on a global img_size
def get_calibration_points():
"""
Gets object points and image points for camera calibration
"""
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((6*9,3), np.float32)
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
nx = 9 # the number of inside corners in x
ny = 6 # the number of inside corners in y
# plot counter
counter = 0
# Make a list of calibration images
images = glob.glob('./camera_cal/calibration*.jpg')
for fname in images:
#read in each img
# note!!! cv2.imread reads in image as BGR
img = cv2.imread(fname)
# image shape
img_size = img.shape[1::-1]
#convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # cv2.imread loads BGR (see note above), so convert from BGR
# find corners
ret, corners = find_corners(gray, nx, ny)
if ret:
objpoints.append(objp)
imgpoints.append(corners)
# draw and display the corners
img = draw_corners(img, nx, ny, corners, ret)
# source points
src = np.float32([corners[0], corners[nx-1], corners[-1], corners[-nx]])
offset = 100
# destination points
dst = np.float32([[offset, offset], [img_size[0]-offset, offset],
[img_size[0]-offset, img_size[1]-offset],
[offset, img_size[1]-offset]])
# Compute the perspective transform, M, given
# source and destination points
M = cv2.getPerspectiveTransform(src, dst)
# Warp an image using the perspective transform, M:
warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)
if counter < 1:
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
ax1.set_title('Original', size=30)
ax1.imshow(img)
ax2.set_title('Undistorted & Transformed', size=30)
ax2.imshow(warped)
counter += 1
return objpoints, imgpoints, img_size, corners
# gets calibration points for camera calibration
objpoints, imgpoints, img_size, corners = get_calibration_points()
# only do this once
# calibrate camera
ret, mtx, dist, rvecs, tvecs = calibrate(objpoints, imgpoints, img_size)
###Output
_____no_output_____
###Markdown
Lane Detection Helper Functions
###Code
def rgb_grayscale(img):
"""
Applies the Grayscale transform
Return: Grayscale img
"""
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
def undistort(img, mtx, dist):
"""
Undistorts an image
Return: Undistored img
"""
return cv2.undistort(img, mtx, dist, None, mtx)
def cvt_to_hls(img):
"""
Applies a RGB to HLS color transform
Return: HLS img representation
"""
return cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
def sobel_x(channel):
"""
Takes derivative of x values
"""
return cv2.Sobel(channel, cv2.CV_64F, 1, 0)
def scale_sobel(abs_sobel):
"""
Absolute x derivative to accentuate lines away from horizontal.
"""
return np.uint8(255*abs_sobel/np.max(abs_sobel))
def get_src(img_size):
"""Returns source points"""
src = np.float32(
[[(img_size[0] / 2) - 55, img_size[1] / 2 + 100],
[((img_size[0] / 6) - 10), img_size[1]],
[(img_size[0] * 5 / 6) + 60, img_size[1]],
[(img_size[0] / 2 + 55), img_size[1] / 2 + 100]])
return src
def get_dst(img_size):
"""Returns destination points"""
# img width
width = img_size[0]
# img height
height = img_size[1]
# destination point for transformation
dst = np.float32(
[[(width / 4), 0],
[(width / 4), height],
[(width * 3 / 4), height],
[(width * 3 / 4), 0]])
return dst
###Output
_____no_output_____
###Markdown
Combined Binary Image
###Code
def get_color_thresh(img):
# Converts to HLS color space and separate the S channel
# Note: img is the undistorted image
hls = cvt_to_hls(img)
s_channel = hls[:,:,2]
# Grayscale image
gray = rgb_grayscale(img)
# Sobel x
sobelx = sobel_x(gray) # Take the derivative in x
abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal
scaled_sobel = scale_sobel(abs_sobelx)
# Threshold x gradient
thresh_min = 20
thresh_max = 100
sxbinary = np.zeros_like(scaled_sobel)
sxbinary[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
# Threshold color channel
s_thresh_min = 170
s_thresh_max = 255
s_binary = np.zeros_like(s_channel)
s_binary[(s_channel >= s_thresh_min) & (s_channel <= s_thresh_max)] = 1
# Stack each channel to view their individual contributions in green and blue respectively
# Returns a stack of the two binary images, whose components you can see as different colors
color_binary = np.dstack(( np.zeros_like(sxbinary), sxbinary, s_binary)) * 255
# Combine the two binary thresholds
combined_binary = np.zeros_like(sxbinary)
combined_binary[(s_binary == 1) | (sxbinary == 1)] = 1
return combined_binary, color_binary
###Output
_____no_output_____
###Markdown
Transform to Top View
###Code
def transform(binary_img):
"""
Applies an image mask and transforms the image to a birds eye view.
Return: Top/birds view of lane.
"""
# image size (width, height)
img_size = binary_img.shape[1::-1]
# gets the bounding vertices for the mask (source points)
src = get_src(img_size)
# gets destination points
dst = get_dst(img_size)
# Compute the perspective transform, M, given
# source and destination points
M = cv2.getPerspectiveTransform(src, dst)
# Warp an image using the perspective transform, M
warped = cv2.warpPerspective(binary_img, M, img_size, flags=cv2.INTER_NEAREST)
# color = [255, 0, 0]
# thickness = 4
# cv2.line(warped, (dst[0][0], dst[0][1]), (dst[1][0], dst[1][1]), color, thickness)
# cv2.line(warped, (dst[2][0], dst[2][1]), (dst[3][0], dst[3][1]), color, thickness)
# cv2.line(warped, (dst[3][0], dst[3][1]), (dst[0][0], dst[0][1]), color, thickness)
return warped
###Output
_____no_output_____
###Markdown
Locate Lines Shifting Window
###Code
def locate_lines(binary_warped):
"""
Locates the left and right lane line in a binary_warped image.
"""
# Take a histogram of the bottom half of binary_warped image
histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)
# Creates an output image to draw on and visualize the result
out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
# Finds the peak of the left and right halves of the histogram
# These are the starting points for the left and right lines
midpoint = np.int(histogram.shape[0]//2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# Number of sliding windows
nwindows = 9
# Sets height of windows
window_height = np.int(binary_warped.shape[0]//nwindows)
# Identifies the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated for each window
leftx_current = leftx_base
rightx_current = rightx_base
# Set the width of the windows +/- margin
margin = 100
# Set minimum number of pixels found to recenter window
minpix = 50
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = binary_warped.shape[0] - (window+1)*window_height
win_y_high = binary_warped.shape[0] - window*window_height
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
# Draw the windows on the visualization image
cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0), 2)
cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0), 2)
# Identify the nonzero pixels in x and y within the window
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > minpix pixels, recenter next window on their mean position
if len(good_left_inds) > minpix:
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
# Concatenate the arrays of indices
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Fit a second order polynomial to each
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
# set/update left_fit, right_fit class attributes
left_line.set_recent_poly_coef(left_fit)
right_line.set_recent_poly_coef(right_fit)
# Generate x and y values for plotting
ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
# set/update left_fitx, right_fitx class attributes
left_line.set_recent_xfitted(left_fitx)
right_line.set_recent_xfitted(right_fitx)
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
return out_img, ploty
###Output
_____no_output_____
###Markdown
Line Search Within Region
###Code
def fit_poly(img_shape, leftx, lefty, rightx, righty):
# Fits a second order polynomial to each with np.polyfit()
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
# Generate x and y values for plotting
ploty = np.linspace(0, img_shape[0]-1, img_shape[0])
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
# set/update left_fit, right_fit class attributes
left_line.set_recent_poly_coef(left_fit)
right_line.set_recent_poly_coef(right_fit)
# set/update left_fitx, right_fitx class attributes
left_line.set_recent_xfitted(left_fitx)
right_line.set_recent_xfitted(right_fitx)
return left_fitx, right_fitx, ploty, left_fit, right_fit
def search_around_poly(binary_warped):
"""
Searches for lane pixels within a margin of the previously
detected lane lines.
"""
# Width of the margin around the previous polynomial to search
margin =100
# Grab activated pixels
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# get average of n iterations of left_fit and right_fit
left_fit = left_line.get_avg_poly_coef()
right_fit = right_line.get_avg_poly_coef()
# Sets the area of search based on activated x-values
# within the +/- margin of our polynomial function.
left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy +
left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) +
left_fit[1]*nonzeroy + left_fit[2] + margin)))
right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy +
right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) +
right_fit[1]*nonzeroy + right_fit[2] + margin)))
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Fit new polynomials
left_fitx, right_fitx, ploty, left_fit, right_fit = fit_poly(binary_warped.shape, leftx, lefty, rightx, righty)
# Create an image to draw on and an image to show the selection window
out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
window_img = np.zeros_like(out_img)
# Color in left and right line pixels
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
# Generate a polygon to illustrate the search window area
# And recast the x and y points into usable format for cv2.fillPoly()
left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin,
ploty])))])
left_line_pts = np.hstack((left_line_window1, left_line_window2))
right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin,
ploty])))])
right_line_pts = np.hstack((right_line_window1, right_line_window2))
# Draw the lane onto the warped blank image
cv2.fillPoly(window_img, np.int_([left_line_pts]), (255, 242, 0))
cv2.fillPoly(window_img, np.int_([right_line_pts]), (255, 242, 0))
result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
# f, (ax1) = plt.subplots(1, 1, figsize=(24, 9))
# ax1.imshow(out_img)
# Plot the polynomial lines onto the image
# plt.plot(left_fitx, ploty, color='yellow')
# plt.plot(right_fitx, ploty, color='yellow')
return result, ploty
###Output
_____no_output_____
###Markdown
Calculate Line Curvature
###Code
def get_curvature(ploty, img_size):
"""Calculates the curvature of polynomial functions in meters."""
left_fitx = left_line.recent_xfitted[-1]
right_fitx = right_line.recent_xfitted[-1]
# Conversions in x and y from pixels space to meters
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/640 # meters per pixel in x dimension
# Fit a second order polynomial to pixel positions in each lane line
# Fit new polynomials to x,y in world space
left_fit_cr = np.polyfit(ploty*ym_per_pix, left_fitx*xm_per_pix, 2)
right_fit_cr = np.polyfit(ploty*ym_per_pix, right_fitx*xm_per_pix, 2)
# y-value where radius of curvature is calculated
# Maximum y-value, corresponding to the bottom of the image
y_eval = np.max(ploty)
# Calculation of R_curve (radius of curvature)
left_curve_radius = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
right_curve_radius = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
# update left and right curve radius
left_line.set_radius_of_curvature(left_curve_radius)
right_line.set_radius_of_curvature(right_curve_radius)
# set lane center
lines.set_lane_center()
# get img midpoint/car position
img_midpoint = img_size[0] / 2
# set deviation in pixels and meters
lines.set_deviation(img_midpoint, xm_per_pix)
###Output
_____no_output_____
###Markdown
Draw Predicted Lines
###Code
def draw_lines(img, warped, ploty):
# get the avg left_fitx, right_fitx from Line class
left_fitx = left_line.get_avg_xfitted()
right_fitx = right_line.get_avg_xfitted()
# Create an image to draw the lines on
warp_zero = np.zeros_like(warped).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (110, 0, 211))
# image size
img_size = img.shape[1::-1]
# get the bounding vertices for the mask (source points)
src = get_src(img_size)
# get destination points
dst = get_dst(img_size)
# inverse perspective transform
M = cv2.getPerspectiveTransform(dst, src)
# Warp the blank back to original image space using inverse perspective matrix (Minv)
newwarp = cv2.warpPerspective(color_warp, M, img_size)
# Combine the result with the original image
return cv2.addWeighted(img, 1, newwarp, 0.5, 0)
###Output
_____no_output_____
###Markdown
Combine Visual Results
###Code
def merge_outputs(binary_img, color_img, output_img, result):
"""Merges multiple visuals into one output image"""
# img width
width = result.shape[1]
# img height
height = result.shape[0]
# img midpoint
midpoint = int(width / 2)
img = np.zeros_like(result)
# resize images
color_img = cv2.resize(color_img, (0,0), fx=0.5, fy=0.5)
output_img = cv2.resize(output_img, (0,0), fx=0.5, fy=0.5)
# concat resized images vertically
vert = np.concatenate((output_img, color_img), axis=0)
result = np.concatenate((result, vert), axis=1)
# draw and fill rectangle for lane line info
cv2.rectangle(result, (0, 0), (470, 100),(0, 0, 0), -1)
# Display text info of left lane radius
cv2.putText(result, "Corner Radius: {} km".format(round(lines.get_radius_of_curvature()/1000, 1)),
(10, 40), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 228, 0), 3)
# get car deviation from center in meters
m_from_center = lines.dist_from_center_m
# get car deviation from center in pixels
px_from_center = lines.dist_from_center_px
# position of lane center
lane_center = lines.lane_center
# draw lane center
# cv2.line(result, (int(lane_center), height - 120), (int(lane_center), height - 70), (0, 228, 0), 4)
# draw current car position
# cv2.line(result, (int(midpoint), height - 110), (int(midpoint), height - 80), (0, 228, 0), 5)
if m_from_center > 0:
direction = 'right'
else:
direction = 'left'
# display text for car position
cv2.putText(result, "Deviation: {:.2f}m {}".format(np.absolute(lines.dist_from_center_m), direction),
(10, 80), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 228, 0), 3)
return result
###Output
_____no_output_____
###Markdown
Pipeline
###Code
def pipeline(img):
# undistort image
undist = undistort(img, mtx, dist)
# create copy of undistorted image
img = np.copy(undist)
# img size
img_size = img.shape[1::-1]
# get gradient thresholds
binary_image, color_img = get_color_thresh(undist)
# transform img to arial view
warped = transform(binary_image)
if not lines.detected:
# Locate lane lines
output_img, ploty = locate_lines(warped)
# draw lines on located lines
result = draw_lines(img, warped, ploty)
# Window method has found lane lines, set detected to true
lines.set_detected(True)
else:
# Locate lane lines within small region
output_img, ploty = search_around_poly(warped)
# draw lines on located lines
result = draw_lines(img, warped, ploty)
# Calculate lane curvature
get_curvature(ploty, img_size)
# combine visual results
final = merge_outputs(binary_image, color_img, output_img, result)
# To visualize output for test images, uncomment
# f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
# ax1.imshow(final)
# ax2.imshow(output_img)
return final
###Output
_____no_output_____
###Markdown
Test Images
###Code
# Class to receive the characteristics of each line detection
class Line():
def __init__(self):
# was the line detected in the last iteration?
self.detected = False
# current xfitted
self.current_xfitted = None
# x values of the last n fits of the line
self.recent_xfitted = []
#polynomial coefficients for the most recent fit
self.poly_coef = []
#radius of curvature of the line in some units
self.radius_of_curvature = None
# center of lane
self.lane_center = None
#distance in meters of vehicle center from the line
self.dist_from_center_m = None
#distance in pixels of vehicle center from the line
self.dist_from_center_px = None
def set_detected(self, boolean):
"""Were lane lines located using window method."""
self.detected = boolean
def set_recent_xfitted(self, xfitted):
"""
Stores x values of the last 5 fits of the line
param: left_fitx, right_fitx
"""
self.current_xfitted = xfitted
self.recent_xfitted.append(xfitted)
if len(self.recent_xfitted) > 5:
self.recent_xfitted.pop(0)
def get_avg_xfitted(self):
"""
Returns the average x values of the fitted line over the
last 5 iterations.
Return: avg of self.recent_xfitted
"""
return np.average(self.recent_xfitted, axis=0)
def set_recent_poly_coef(self, fit):
"""
Stores polynomial coefficients over the last
5 iterations.
Params: left_fit or right_fit"""
self.poly_coef.append(fit)
if len(self.poly_coef) > 5:
self.poly_coef.pop(0)
def get_avg_poly_coef(self):
"""
Returns the polynomial coefficients averaged over
the last 5 iterations
Return: avg of self.poly_coef
"""
return np.average(self.poly_coef, axis=0)
def set_radius_of_curvature(self, radius):
"""Sets curvature radius for new line"""
self.radius_of_curvature = radius
def get_radius_of_curvature(self):
"""Get curvature radius"""
return (left_line.radius_of_curvature + right_line.radius_of_curvature) / 2
def set_lane_center(self):
"""Calculates center of lane from base of left and right lane lines."""
self.lane_center = (left_line.current_xfitted[-1] + right_line.current_xfitted[-1])/2.
def set_deviation(self, img_midpoint, xm_per_pix):
"""Set Car Deviation"""
self.dist_from_center_m = (img_midpoint - self.lane_center)*xm_per_pix #Convert to meters
self.dist_from_center_px = (img_midpoint - self.lane_center)
lines = Line()
left_line = Line()
right_line = Line()
###Output
_____no_output_____
###Markdown
Test Images
###Code
# Note: Lane identification will be obscured as lane positioning is averaged over 5 frames.
images = ['straight_lines1.jpg',
'straight_lines2.jpg',
'test1.jpg',
'test2.jpg',
'test3.jpg',
'test4.jpg',
'test5.jpg',
'test6.jpg']
for image in images:
img = mpimg.imread('test_images/' + image)
pipeline(img)
###Output
_____no_output_____
###Markdown
Create Video Output
###Code
# Import everything needed to edit/save/watch video clips
# from moviepy.editor import VideoFileClip
from moviepy.editor import *
from IPython.display import HTML
def process_image(image):
# NOTE: The output you return should be a color image (3 channel) for processing video below
# you should return the final output (image where lines are drawn on lanes)
# read in colored image
return pipeline(image)
white_output = 'test_videos_output/project_video.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
# clip1 = VideoFileClip("./project_video.mp4").subclip(0,5)
clip1 = VideoFileClip("./project_video.mp4")
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
# %time white_clip.write_videofile(white_output, audio=False)
white_clip.write_videofile(white_output, audio=False)
###Output
[MoviePy] >>>> Building video test_videos_output/project_video.mp4
[MoviePy] Writing video test_videos_output/project_video.mp4
|
old_versions/1main-v4-MCMC-symmetry-equilibrium-ln4.ipynb | ###Markdown
Network inference of categorical variables: non-sequential data
###Code
import sys
import numpy as np
from scipy import linalg
from sklearn.preprocessing import OneHotEncoder
import matplotlib.pyplot as plt
%matplotlib inline
import inference
import fem
# setting parameter:
np.random.seed(1)
n = 20 # number of positions
m = 5 # number of values at each position
l = int(4*((n*m)**2)) # number of samples
g = 2.
nm = n*m
def itab(n,m):
i1 = np.zeros(n)
i2 = np.zeros(n)
for i in range(n):
i1[i] = i*m
i2[i] = (i+1)*m
return i1.astype(int),i2.astype(int)
# generate coupling matrix w0:
def generate_interactions(n,m,g):
nm = n*m
w = np.random.normal(0.0,g/np.sqrt(nm),size=(nm,nm))
i1tab,i2tab = itab(n,m)
for i in range(n):
i1,i2 = i1tab[i],i2tab[i]
w[i1:i2,:] -= w[i1:i2,:].mean(axis=0)
for i in range(n):
i1,i2 = i1tab[i],i2tab[i]
w[i1:i2,i1:i2] = 0. # no self-interactions
for i in range(nm):
for j in range(nm):
if j > i: w[i,j] = w[j,i]
return w
i1tab,i2tab = itab(n,m)
w0 = inference.generate_interactions(n,m,g)
#plt.imshow(w0,cmap='rainbow',origin='lower')
#plt.clim(-0.5,0.5)
#plt.colorbar(fraction=0.045, pad=0.05,ticks=[-0.5,0,0.5])
#plt.show()
#print(w0)
def generate_sequences2(w,n,m,l):
i1tab,i2tab = itab(n,m)
# initial s (categorical variables)
s_ini = np.random.randint(0,m,size=(l,n)) # integer values
#print(s_ini)
# onehot encoder
enc = OneHotEncoder(n_values=m)
s = enc.fit_transform(s_ini).toarray()
print(s)
nrepeat = 500
for irepeat in range(nrepeat):
for i in range(n):
i1,i2 = i1tab[i],i2tab[i]
h = s.dot(w[i1:i2,:].T) # h[t,i1:i2]
h_old = (s[:,i1:i2]*h).sum(axis=1) # h[t,i0]
k = np.random.randint(0,m,size=l)
for t in range(l):
if np.exp(h[t,k[t]] - h_old[t]) > np.random.rand():
s[t,i1:i2] = 0.
s[t,i1+k[t]] = 1.
return s
# 2018.11.07: Tai
def nrgy_tai(s,w):
l = s.shape[0]
n,m = 20,3
i1tab,i2tab = itab(n,m)
p = np.zeros((l,n))
for i in range(n):
i1,i2 = i1tab[i],i2tab[i]
h = s.dot(w[i1:i2,:].T)
#e = (s[:,i1:i2]*h).sum(axis=1)
#p[:,i] = np.exp(e)
#p_sum = np.sum(np.exp(h),axis=1)
#p[:,i] /= p_sum
p[:,i] = np.exp((s[:,i1:i2]*h).sum(axis=1))/(np.exp(h).sum(axis=1))
#like = p.sum(axis=1)
return np.sum(np.log(p),axis=1)
# Vipul:
def nrgy_vp(onehot,w):
nrgy = onehot*(onehot.dot(w.T))
# print(nrgy - np.log(2*np.cosh(nrgy)))
return np.sum(nrgy - np.log(2*np.cosh(nrgy)),axis=1) #ln prob
# equilibrium
def nrgy(onehot,w):
nrgy = onehot*(onehot.dot(w.T))
# print(nrgy - np.log(2*np.cosh(nrgy)))
return np.sum(nrgy,axis=1) # - np.log(2*np.cosh(nrgy)),axis=1) #ln prob
def generate_sequences_vp(w,n_positions,n_residues,n_seq):
n_size = n_residues*n_positions
n_trial = 100*(n_size) #monte carlo steps to find the right sequences
b = np.zeros((n_size))
trial_seq = np.tile(np.random.randint(0,n_residues,size=(n_positions)),(n_seq,1))
print(trial_seq[0])
enc = OneHotEncoder(n_values=n_residues)
onehot = enc.fit_transform(trial_seq).toarray()
old_nrgy = nrgy(onehot,w) #+ n_positions*(n_residues-1)*np.log(2)
for trial in range(n_trial):
# print('before',np.mean(old_nrgy))
index_array = np.random.choice(range(n_positions),size=2,replace=False)
index,index1 = index_array[0],index_array[1]
r_trial = np.random.randint(0,n_residues,size=(n_seq))
r_trial1 = np.random.randint(0,n_residues,size=(n_seq))
mod_seq = np.copy(trial_seq)
mod_seq[:,index] = r_trial
mod_seq[:,index1] = r_trial1
mod_nrgy = nrgy(enc.fit_transform(mod_seq).toarray(),w) #+ n_positions*(n_residues-1)*np.log(2)
seq_change = mod_nrgy-old_nrgy > np.log(np.random.rand(n_seq))
#seq_change = mod_nrgy/(old_nrgy+mod_nrgy) > np.random.rand(n_seq)
if trial>n_size:
trial_seq[seq_change,index] = r_trial[seq_change]
trial_seq[seq_change,index1] = r_trial1[seq_change]
old_nrgy[seq_change] = mod_nrgy[seq_change]
else:
best_seq = np.argmax(mod_nrgy-old_nrgy)
trial_seq = np.tile(mod_seq[best_seq],(n_seq,1))
old_nrgy = np.tile(mod_nrgy[best_seq],(n_seq))
if trial%(10*n_size) == 0: print('after',np.mean(old_nrgy))#,trial_seq[0:5])
print(trial_seq[:10,:10])
#return trial_seq
return enc.fit_transform(trial_seq).toarray()
s = generate_sequences_vp(w0,n,m,l)
def generate_sequences_time_series(s_ini,w,n,m):
i1tab,i2tab = itab(n,m)
l = s_ini.shape[0]
# initial s (categorical variables)
#s_ini = np.random.randint(0,m,size=(l,n)) # integer values
#print(s_ini)
# onehot encoder
enc = OneHotEncoder(n_values=m)
s = enc.fit_transform(s_ini).toarray()
#print(s)
ntrial = 20*m
for t in range(l-1):
h = np.sum(s[t,:]*w[:,:],axis=1)
for i in range(n):
i1,i2 = i1tab[i],i2tab[i]
k = np.random.randint(0,m)
for itrial in range(ntrial):
k2 = np.random.randint(0,m)
while k2 == k:
k2 = np.random.randint(0,m)
if np.exp(h[i1+k2]- h[i1+k]) > np.random.rand():
k = k2
s[t+1,i1:i2] = 0.
s[t+1,i1+k] = 1.
return s
# generate non-sequences from time series
#l1 = 100
#s_ini = np.random.randint(0,m,size=(l1,n)) # integer values
#s = np.zeros((l,nm))
#for t in range(l):
# np.random.seed(t+10)
# s[t,:] = generate_sequences_time_series(s_ini,w0,n,m)[-1,:]
print(s.shape)
print(s[:10,:10])
## 2018.11.07: for non sequencial data
def fit_additive(s,n,m):
nloop = 10
i1tab,i2tab = itab(n,m)
nm = n*m
nm1 = nm - m
w_infer = np.zeros((nm,nm))
for i in range(n):
i1,i2 = i1tab[i],i2tab[i]
# remove column i
x = np.hstack([s[:,:i1],s[:,i2:]])
x_av = np.mean(x,axis=0)
dx = x - x_av
c = np.cov(dx,rowvar=False,bias=True)
c_inv = linalg.pinv(c,rcond=1e-15)
#print(c_inv.shape)
h = s[:,i1:i2].copy()
for iloop in range(nloop):
h_av = h.mean(axis=0)
dh = h - h_av
dhdx = dh[:,:,np.newaxis]*dx[:,np.newaxis,:]
dhdx_av = dhdx.mean(axis=0)
w = np.dot(dhdx_av,c_inv)
#w = w - w.mean(axis=0)
h = np.dot(x,w.T)
p = np.exp(h)
p_sum = p.sum(axis=1)
#p /= p_sum[:,np.newaxis]
for k in range(m):
p[:,k] = p[:,k]/p_sum[:]
h += s[:,i1:i2] - p
w_infer[i1:i2,:i1] = w[:,:i1]
w_infer[i1:i2,i2:] = w[:,i1:]
return w_infer
w2 = fit_additive(s,n,m)
plt.plot([-1,1],[-1,1],'r--')
plt.scatter(w0,w2)
i1tab,i2tab = itab(n,m)
nloop = 5
nm1 = nm - m
w_infer = np.zeros((nm,nm))
wini = np.random.normal(0.0,1./np.sqrt(nm),size=(nm,nm1))
for i in range(n):
i1,i2 = i1tab[i],i2tab[i]
x = np.hstack([s[:,:i1],s[:,i2:]])
y = s.copy()
# covariance[ia,ib]
cab_inv = np.empty((m,m,nm1,nm1))
eps = np.empty((m,m,l))
for ia in range(m):
for ib in range(m):
if ib != ia:
eps[ia,ib,:] = y[:,i1+ia] - y[:,i1+ib]
which_ab = eps[ia,ib,:] !=0.
xab = x[which_ab]
# ----------------------------
xab_av = np.mean(xab,axis=0)
dxab = xab - xab_av
cab = np.cov(dxab,rowvar=False,bias=True)
cab_inv[ia,ib,:,:] = linalg.pinv(cab,rcond=1e-15)
w = wini[i1:i2,:].copy()
for iloop in range(nloop):
h = np.dot(x,w.T)
for ia in range(m):
wa = np.zeros(nm1)
for ib in range(m):
if ib != ia:
which_ab = eps[ia,ib,:] !=0.
eps_ab = eps[ia,ib,which_ab]
xab = x[which_ab]
# ----------------------------
xab_av = np.mean(xab,axis=0)
dxab = xab - xab_av
h_ab = h[which_ab,ia] - h[which_ab,ib]
ha = np.divide(eps_ab*h_ab,np.tanh(h_ab/2.), out=np.zeros_like(h_ab), where=h_ab!=0)
dhdx = (ha - ha.mean())[:,np.newaxis]*dxab
dhdx_av = dhdx.mean(axis=0)
wab = cab_inv[ia,ib,:,:].dot(dhdx_av) # wa - wb
wa += wab
w[ia,:] = wa/m
w_infer[i1:i2,:i1] = w[:,:i1]
w_infer[i1:i2,i2:] = w[:,i1:]
#return w_infer
plt.plot([-1,1],[-1,1],'r--')
plt.scatter(w0,w_infer)
#plt.scatter(w0[0:3,3:],w[0:3,:])
###Output
_____no_output_____ |
Random Forest/Random Forest - Credit Default Prediction.ipynb | ###Markdown
**Random Forest - Credit Default Prediction**

In this lab, we will build a random forest model to predict whether a given customer defaults or not. Credit default is one of the most important problems in the banking and risk analytics industry. There are various attributes which can be used to predict default, such as demographic data (age, income, employment status, etc.) and (credit) behavioural data (past loans, payments, number of times a credit payment has been delayed by the customer, etc.). We'll start the process with data cleaning and preparation and then tune the model to find optimal hyperparameters.

**Data Understanding and Cleaning**
###Code
# Importing the required libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
# To ignore warnings
import warnings
warnings.filterwarnings("ignore")
from google.colab import files
uploaded = files.upload()
# Reading the csv file and putting it into 'df' object.
df = pd.read_csv('credit-card-default.csv')
df.head()
# Let's understand the type of columns
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 30000 entries, 0 to 29999
Data columns (total 25 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 ID 30000 non-null int64
1 LIMIT_BAL 30000 non-null int64
2 SEX 30000 non-null int64
3 EDUCATION 30000 non-null int64
4 MARRIAGE 30000 non-null int64
5 AGE 30000 non-null int64
6 PAY_0 30000 non-null int64
7 PAY_2 30000 non-null int64
8 PAY_3 30000 non-null int64
9 PAY_4 30000 non-null int64
10 PAY_5 30000 non-null int64
11 PAY_6 30000 non-null int64
12 BILL_AMT1 30000 non-null int64
13 BILL_AMT2 30000 non-null int64
14 BILL_AMT3 30000 non-null int64
15 BILL_AMT4 30000 non-null int64
16 BILL_AMT5 30000 non-null int64
17 BILL_AMT6 30000 non-null int64
18 PAY_AMT1 30000 non-null int64
19 PAY_AMT2 30000 non-null int64
20 PAY_AMT3 30000 non-null int64
21 PAY_AMT4 30000 non-null int64
22 PAY_AMT5 30000 non-null int64
23 PAY_AMT6 30000 non-null int64
24 defaulted 30000 non-null int64
dtypes: int64(25)
memory usage: 5.7 MB
###Markdown
In this case, we know that there are no major data quality issues, so we'll go ahead and build the model.

**Data Preparation and Model Building**
###Code
# Importing test_train_split from sklearn library
from sklearn.model_selection import train_test_split
# Putting feature variable to X
X = df.drop('defaulted',axis=1)
# Putting response variable to y
y = df['defaulted']
# Splitting the data into train and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=101)
###Output
_____no_output_____
###Markdown
**Default Hyperparameters**

Let's first fit a random forest model with default hyperparameters.
###Code
# Importing random forest classifier from sklearn library
from sklearn.ensemble import RandomForestClassifier
# Running the random forest with default parameters.
rfc = RandomForestClassifier()
# fit
rfc.fit(X_train,y_train)
# Making predictions
predictions = rfc.predict(X_test)
# Importing classification report and confusion matrix from sklearn metrics
from sklearn.metrics import classification_report,confusion_matrix, accuracy_score
# Let's check the report of our default model
print(classification_report(y_test,predictions))
# Printing confusion matrix
print(confusion_matrix(y_test,predictions))
print(accuracy_score(y_test,predictions))
###Output
0.8182222222222222
###Markdown
So far so good, let's now look at the list of hyperparameters which we can tune to improve model performance.

**Hyperparameter Tuning**

The following hyperparameters are present in a random forest classifier. Note that most of these hyperparameters are actually hyperparameters of the decision trees that make up the forest.
- **n_estimators**: integer, optional (default=10): The number of trees in the forest.
- **criterion**: string, optional (default="gini"): The function to measure the quality of a split. Supported criteria are "gini" for the Gini impurity and "entropy" for the information gain. Note: this parameter is tree-specific.
- **max_features**: int, float, string or None, optional (default="auto"): The number of features to consider when looking for the best split:
  - If int, then consider max_features features at each split.
  - If float, then max_features is a percentage and int(max_features * n_features) features are considered at each split.
  - If "auto", then max_features=sqrt(n_features).
  - If "sqrt", then max_features=sqrt(n_features) (same as "auto").
  - If "log2", then max_features=log2(n_features).
  - If None, then max_features=n_features.
  - Note: the search for a split does not stop until at least one valid partition of the node samples is found, even if it requires to effectively inspect more than max_features features.
- **max_depth**: integer or None, optional (default=None): The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples.
- **min_samples_split**: int, float, optional (default=2): The minimum number of samples required to split an internal node:
  - If int, then consider min_samples_split as the minimum number.
  - If float, then min_samples_split is a percentage and ceil(min_samples_split * n_samples) is the minimum number of samples for each split.
- **min_samples_leaf**: int, float, optional (default=1): The minimum number of samples required to be at a leaf node:
  - If int, then consider min_samples_leaf as the minimum number.
  - If float, then min_samples_leaf is a percentage and ceil(min_samples_leaf * n_samples) is the minimum number of samples for each node.
- **min_weight_fraction_leaf**: float, optional (default=0.): The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node. Samples have equal weight when sample_weight is not provided.
- **max_leaf_nodes**: int or None, optional (default=None): Grow trees with max_leaf_nodes in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes.
- **min_impurity_split**: float: Threshold for early stopping in tree growth. A node will split if its impurity is above the threshold, otherwise it is a leaf.

**Tuning max_depth**

Let's try to find the optimum values for ```max_depth``` and understand how the value of max_depth impacts the overall accuracy of the ensemble.
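For orientation, the sketch below shows how these hyperparameters map onto the `RandomForestClassifier` constructor; the values are arbitrary illustrations rather than tuned choices, and the grid searches that follow tune them properly, starting with `max_depth` in the next cell.

```python
from sklearn.ensemble import RandomForestClassifier

# illustrative values only -- the grid searches below find better ones
rf_example = RandomForestClassifier(n_estimators=100,    # number of trees in the forest
                                    criterion='gini',    # split quality measure
                                    max_depth=4,         # maximum depth of each tree
                                    max_features='sqrt', # features considered at each split
                                    min_samples_leaf=50, # minimum samples required in a leaf
                                    random_state=42)
rf_example.fit(X_train, y_train)  # reuses the train/test split created earlier
```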
###Code
# GridSearchCV to find optimal max_depth
from sklearn.model_selection import KFold
from sklearn.model_selection import GridSearchCV
# specify number of folds for k-fold CV
n_folds = 5
# parameters to build the model on
parameters = {'max_depth': range(2, 20, 5)}
# instantiate the model
rf = RandomForestClassifier()
# fit tree on training data
rf = GridSearchCV(rf, parameters,
cv=n_folds,
scoring="accuracy",return_train_score=True)
rf.fit(X_train, y_train)
# scores of GridSearch CV
scores = rf.cv_results_
pd.DataFrame(scores).head()
# plotting accuracies with max_depth
plt.figure()
plt.plot(scores["param_max_depth"],
scores["mean_train_score"],
label="training accuracy")
plt.plot(scores["param_max_depth"],
scores["mean_test_score"],
label="test accuracy")
plt.xlabel("max_depth")
plt.ylabel("Accuracy")
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
You can see that as we increase the value of max_depth, both train and test scores increase up to a point, after which the test score starts to decrease. The ensemble starts to overfit as we increase max_depth. Thus, controlling the depth of the constituent trees will help reduce overfitting in the forest. **Tuning n_estimators** Let's try to find the optimum value for n_estimators and understand how the value of n_estimators impacts the overall accuracy. Notice that we'll specify an appropriately low value of max_depth, so that the trees do not overfit.
###Code
# GridSearchCV to find optimal n_estimators
from sklearn.model_selection import KFold
from sklearn.model_selection import GridSearchCV
# specify number of folds for k-fold CV
n_folds = 5
# parameters to build the model on
parameters = {'n_estimators': range(100, 1500, 400)}
# instantiate the model (note we are specifying a max_depth)
rf = RandomForestClassifier(max_depth=4)
# fit tree on training data
rf = GridSearchCV(rf, parameters,
cv=n_folds,
scoring="accuracy",return_train_score=True)
rf.fit(X_train, y_train)
# scores of GridSearch CV
scores = rf.cv_results_
pd.DataFrame(scores).head()
# plotting accuracies with n_estimators
plt.figure()
plt.plot(scores["param_n_estimators"],
scores["mean_train_score"],
label="training accuracy")
plt.plot(scores["param_n_estimators"],
scores["mean_test_score"],
label="test accuracy")
plt.xlabel("n_estimators")
plt.ylabel("Accuracy")
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
**Tuning max_features** Let's see how the model performance varies with ```max_features```, which is the maximum number of features considered for splitting at a node.
###Code
# GridSearchCV to find optimal max_features
from sklearn.model_selection import KFold
from sklearn.model_selection import GridSearchCV
# specify number of folds for k-fold CV
n_folds = 5
# parameters to build the model on
parameters = {'max_features': [4, 8, 14, 20, 24]}
# instantiate the model
rf = RandomForestClassifier(max_depth=4)
# fit tree on training data
rf = GridSearchCV(rf, parameters,
cv=n_folds,
scoring="accuracy",return_train_score=True)
rf.fit(X_train, y_train)
# scores of GridSearch CV
scores = rf.cv_results_
pd.DataFrame(scores).head()
# plotting accuracies with max_features
plt.figure()
plt.plot(scores["param_max_features"],
scores["mean_train_score"],
label="training accuracy")
plt.plot(scores["param_max_features"],
scores["mean_test_score"],
label="test accuracy")
plt.xlabel("max_features")
plt.ylabel("Accuracy")
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Apparently, the training and test scores *both* seem to increase as we increase max_features, and the model doesn't seem to overfit more with increasing max_features. Think about why that might be the case. **Tuning min_samples_leaf** The hyperparameter **min_samples_leaf** is the minimum number of samples required to be at a leaf node: - If int, then consider min_samples_leaf as the minimum number. - If float, then min_samples_leaf is a percentage and ceil(min_samples_leaf * n_samples) are the minimum number of samples for each node. Let's now check the optimum value for min_samples_leaf in our case.
###Code
# GridSearchCV to find optimal min_samples_leaf
from sklearn.model_selection import KFold
from sklearn.model_selection import GridSearchCV
# specify number of folds for k-fold CV
n_folds = 5
# parameters to build the model on
parameters = {'min_samples_leaf': range(100, 400, 50)}
# instantiate the model
rf = RandomForestClassifier()
# fit tree on training data
rf = GridSearchCV(rf, parameters,
cv=n_folds,
scoring="accuracy",return_train_score=True)
rf.fit(X_train, y_train)
# scores of GridSearch CV
scores = rf.cv_results_
pd.DataFrame(scores).head()
# plotting accuracies with min_samples_leaf
plt.figure()
plt.plot(scores["param_min_samples_leaf"],
scores["mean_train_score"],
label="training accuracy")
plt.plot(scores["param_min_samples_leaf"],
scores["mean_test_score"],
label="test accuracy")
plt.xlabel("min_samples_leaf")
plt.ylabel("Accuracy")
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
You can see that the model starts to overfit as you decrease the value of min_samples_leaf. **Tuning min_samples_split** Let's now look at the performance of the ensemble as we vary min_samples_split.
###Code
# GridSearchCV to find optimal min_samples_split
from sklearn.model_selection import KFold
from sklearn.model_selection import GridSearchCV
# specify number of folds for k-fold CV
n_folds = 5
# parameters to build the model on
parameters = {'min_samples_split': range(200, 500, 50)}
# instantiate the model
rf = RandomForestClassifier()
# fit tree on training data
rf = GridSearchCV(rf, parameters,
cv=n_folds,
scoring="accuracy",return_train_score=True)
rf.fit(X_train, y_train)
# scores of GridSearch CV
scores = rf.cv_results_
pd.DataFrame(scores).head()
# plotting accuracies with min_samples_split
plt.figure()
plt.plot(scores["param_min_samples_split"],
scores["mean_train_score"],
label="training accuracy")
plt.plot(scores["param_min_samples_split"],
scores["mean_test_score"],
label="test accuracy")
plt.xlabel("min_samples_split")
plt.ylabel("Accuracy")
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
**Grid Search to Find Optimal Hyperparameters** We can now find the optimal hyperparameters using GridSearchCV.
###Code
# Create the parameter grid based on the results of random search
param_grid = {
'max_depth': [4,8,10],
'min_samples_leaf': range(100, 400, 200),
'min_samples_split': range(200, 500, 200),
'n_estimators': [100,200, 300],
'max_features': [5, 10]
}
# Create a based model
rf = RandomForestClassifier()
# Instantiate the grid search model
grid_search = GridSearchCV(estimator = rf, param_grid = param_grid,
cv = 3, n_jobs = -1,verbose = 1)
# Fit the grid search to the data
grid_search.fit(X_train, y_train)
# printing the optimal accuracy score and hyperparameters
print('We can get accuracy of',grid_search.best_score_,'using',grid_search.best_params_)
###Output
We can get accuracy of 0.8184761904761905 using {'max_depth': 4, 'max_features': 10, 'min_samples_leaf': 100, 'min_samples_split': 200, 'n_estimators': 100}
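###Markdown
Before re-fitting explicitly, note that the refit estimator is also available directly on the grid search object; the sketch below assumes `grid_search`, `X_train`/`X_test` and `y_test` from the cells above are still in scope.
###Code
# GridSearchCV refits the best configuration on the full training set by default,
# so the tuned estimator can be used directly instead of re-typing the parameters.
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
best_rfc = grid_search.best_estimator_
best_predictions = best_rfc.predict(X_test)
print(classification_report(y_test, best_predictions))
print(confusion_matrix(y_test, best_predictions))
print(accuracy_score(y_test, best_predictions))
###Output
_____no_output_____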
###Markdown
**Fitting the final model with the best parameters obtained from grid search.**
###Code
# model with the best hyperparameters
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier(bootstrap=True,
max_depth=10,
min_samples_leaf=100,
min_samples_split=200,
max_features=10,
n_estimators=100)
# fit
rfc.fit(X_train,y_train)
# predict
predictions = rfc.predict(X_test)
# evaluation metrics
from sklearn.metrics import classification_report,confusion_matrix
print(classification_report(y_test,predictions))
print(confusion_matrix(y_test,predictions))
# accuracy computed by hand from the confusion matrix: correct predictions / total predictions
(6753+692)/(6753+692+305+1250)
###Output
_____no_output_____ |
simon_fix.ipynb | ###Markdown
Scrape the data from the web using pandas
###Code
#Assign 2016-2020 URL's to variables
url_2016 = 'https://en.wikipedia.org/wiki/2016_NFL_Draft'
url_2017 = 'https://en.wikipedia.org/wiki/2017_NFL_Draft'
url_2018 = 'https://en.wikipedia.org/wiki/2018_NFL_Draft'
url_2019 = 'https://en.wikipedia.org/wiki/2019_NFL_Draft'
url_2020 = 'https://en.wikipedia.org/wiki/2020_NFL_Draft'
#Read in 2016-2020 URL's into table's using Pandas
table_2016 = pd.read_html(url_2016)
table_2017 = pd.read_html(url_2017)
table_2018 = pd.read_html(url_2018)
table_2019 = pd.read_html(url_2019)
table_2020 = pd.read_html(url_2020)
#Check the variable type
type(table_2016)
#Check the Length of the table
len(table_2016)
###Output
_____no_output_____
###Markdown
Clean the Data Using Pandas
###Code
#Convert the tables to a dataframe
df_2016_combine = table_2016[4]
df_2017_combine = table_2017[4]
df_2018_combine = table_2018[4]
df_2019_combine = table_2019[4]
df_2020_combine = table_2020[4]
#Clean the 2016 Dataframe
df_2016_combine = df_2016_combine.drop(columns = ['Unnamed: 0', 'Notes'])
df_2016_combine = df_2016_combine.rename(columns = {"Pick #": "Pick_no"})
df_2016_combine
#Clean the 2017 Dataframe
df_2017_combine = df_2017_combine.drop(columns = ['Unnamed: 0', 'Notes'])
df_2017_combine = df_2017_combine.rename(columns = {"Pick #": "Pick_no"})
df_2017_combine
#Clean the 2018 Dataframe
df_2018_combine = df_2018_combine.drop(columns = ['Unnamed: 0', 'Notes'])
df_2018_combine = df_2018_combine.rename(columns = {"Pick #": "Pick_no"})
df_2018_combine
#Clean the 2019 Dataframe
df_2019_combine = df_2019_combine.drop(columns = ['Unnamed: 0', 'Notes'])
df_2019_combine = df_2019_combine.rename(columns = {"Pick #": "Pick_no"})
df_2019_combine
#Clean the 2020 Dataframe
df_2020_combine = df_2020_combine.drop(columns = ['Unnamed: 0', 'Notes'])
df_2020_combine = df_2020_combine.rename(columns = {"Pick #": "Pick_no"})
df_2020_combine
###Output
_____no_output_____
###Markdown
Append the data into a single DataFrame to post it to the database
###Code
# Add a year column for data storage Purposes
df_2016_combine['year']='2016'
df_2017_combine['year']='2017'
df_2018_combine['year']='2018'
df_2019_combine['year']='2019'
df_2020_combine['year']='2020'
df_2016_combine
# Append the 5 years DataFrames into one dataframe for storage purposes
draft_df = df_2020_combine.append(df_2019_combine,ignore_index=True,verify_integrity=True)
draft_df = draft_df.append(df_2018_combine,ignore_index=True,verify_integrity=True)
draft_df = draft_df.append(df_2017_combine,ignore_index=True,verify_integrity=True)
draft_df = draft_df.append(df_2016_combine,ignore_index=True,verify_integrity=True)
draft_df
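# To actually post the combined table to a database, one option is pandas' to_sql via SQLAlchemy.
# The SQLite URL and table name below are illustrative assumptions; swap in the real connection
# string and table name for the target database.
from sqlalchemy import create_engine
engine = create_engine("sqlite:///nfl_draft.db")
draft_df.to_sql("draft_picks", engine, if_exists="replace", index=False)
# Quick sanity check: read a few rows back from the new table.
pd.read_sql("SELECT * FROM draft_picks LIMIT 5", engine)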
###Output
_____no_output_____ |
Misc/.ipynb_checkpoints/Cholesky+and+SVD+Correlated+Random-checkpoint.ipynb | ###Markdown
Using Cholesky and Singular Value Decomposition to generate correlated random numbers The problem: The ability to simulate correlated risk factors is key to many risk models. Historical Simulation achieves this implicitly, by using actual timeseries data for risk factors and applying changes for all risk factors for a given day, for a large number of days (250 or 500 typically). The empirically observed correlations, as well as the means and standard deviations, are implicitly embedded across the historical timeseries data sets. If we are doing *Monte Carlo* simulation, however, we need to do something different, since random drawings from a Normal (Gaussian) distribution will be uncorrelated - whereas real data will exhibit correlations. Therefore a technique must be developed to transform uncorrelated random variables to variables which exhibit the empirically observed correlations. In this Jupyter notebook we explore some techniques for producing correlated random variables and variations on these techniques.- Cholesky Factorisation : $LL^T=\Sigma$, using both covariance and correlation matrix variations to generate trials - Singular Value Decomposition : $UDV^T=\Sigma$ [TODO - help appreciated!] Theory - Cholesky Factorisation approach: Consider a random vector, X, consisting of uncorrelated random variables, each random variable, $X_i$, having zero mean and unit variance ($X\sim N(0,1)$). What we want is a technique for converting these standard normal variables to correlated variables which exhibit the observed empirical means and variances of the problem we are modelling.- Useful identities and results: - $\mathbb E[XX^T] = I$, where $X\sim N(0,1)$, since $\mathrm{Var}[X]=\mathbb E[XX^T] - \mathbb E[X]\,\mathbb E[X]^T$, and here $\mathbb E[X]=0$ and $\mathrm{Var}[X]=I$. - To show that we can create new, correlated, random variables $Y$, where $Y=LX$ and - $L$ is the Cholesky factorisation matrix (see above "Cholesky"), - X is a vector of independent uncorrelated variables from a Normal distribution with mean of zero and variance of one : $\boxed {X\sim N(0,1)}$ - $Cov[Y,Y] = \mathbb E[YY^T] = \mathbb E[LXX^TL^T] = L\,\mathbb E[XX^T]\,L^T = LL^T = \Sigma$, so $Y$ has exactly the target covariance (or correlation) matrix.
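A minimal self-contained sketch of this construction before we turn to market data (the 3x3 correlation matrix below is made up purely for illustration): draw uncorrelated standard normals $X$, form $Y=LX$ with the Cholesky factor $L$ of the target correlation matrix, and check the sample correlation of $Y$.
###Code
import numpy as np
# Target correlation matrix (illustrative values only, chosen to be positive definite).
target_corr = np.array([[1.0, 0.6, 0.3],
                        [0.6, 1.0, 0.5],
                        [0.3, 0.5, 1.0]])
L = np.linalg.cholesky(target_corr)       # L @ L.T reproduces target_corr
X = np.random.normal(size=(3, 100000))    # uncorrelated N(0,1) draws, one row per factor
Y = L @ X                                 # correlated draws
print(np.corrcoef(Y))                     # sample correlation should be close to target_corr
###Output
_____no_output_____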
###Code
import pandas as pd
from IPython.display import display, Math, Latex, IFrame
import pandas as pd
#import pandas.io.data as pd_io
from pandas_datareader import data, wb
import numpy as np
import scipy as sci
G=pd.DataFrame(np.random.normal(size=(10000000,5)))
m=pd.DataFrame(np.matmul(G.transpose(), G))
display(Math(r'Demonstration~of~~ \mathbb E[XX^T] = I, ~~where~X\sim N(0,1)'))
print(m/10000000)
import pandas as pd
from pandas_datareader import data, wb
import numpy as np
import scipy as sci
stocks=['WDC', 'AAPL', 'IBM', 'MSFT', 'ORCL']
p=data.DataReader(stocks,data_source='google')#[['Adj Close']]
print(type(p))
from pivottablejs import pivot_ui
pivot_ui(m)
df=p.ix[0]
#df.pop('ATML') get rid of duff entry with NaNs!! - handy as you can just remove (and optionally save) a chunk!!
df=np.log(df/df.shift(1) )
df=df.dropna()
print("Days:{}".format(len(df)))
corr=df.corr()
print(corr)
chol=np.linalg.cholesky(corr)
#chol=sci.linalg.cholesky(corr, lower=True)
print(chol)
sigma=df.std()
mu=df.mean()
print("sigma=\n{}\n mu=\n{}".format(sigma,mu))
#No generate random normal samples with observed means ("mu"s) and st_devs ("sigma"s)
#G_rands=np.random.normal(loc=mu,scale=sigma,size=(1000,len(sigma)))
G_rands=pd.DataFrame(np.random.normal(size=(1000000,len(sigma))))
#G_Corr_rand=G_rands.dot(chol)
G_Corr_rand=(chol.dot(G_rands.transpose())).transpose()
# Now apply the std dev and mean by multiplation and addition, respectively - return as pandas df
G_=pd.DataFrame(G_Corr_rand * np.broadcast_to(sigma,(1000000,len(sigma))) + np.broadcast_to(mu,(1000000,len(mu))))
print(G_.head())
print(corr)
print(G_.corr())
df.describe().T
import pandas as pd
from pandas_datareader import data, wb
import numpy as np
import scipy as sci
stocks=['WDC', 'AAPL', 'IBM', 'MSFT', 'ORCL']
p=data.DataReader(stocks,data_source='yahoo')[['Adj Close']]
df=p.ix[0] #convert pandas "panel" to pandas "data frame"
df=np.log(df/df.shift(1) )
df=df.dropna()
cov=df.cov()
chol=np.linalg.cholesky(cov) # default is left/lower; use chol=sci.linalg.cholesky(cov, lower=False) otherwise
print ('Cholesky L=\n{}, \nL^T=\n{},\nLL^T=\n{}'.format(chol, chol.transpose(), chol.dot(chol.T)))
G_rands=pd.DataFrame(np.random.normal(size=(1000000,len(sigma))))
G_=pd.DataFrame((chol.dot(G_rands.transpose())).transpose())
print(G_.head())
print(cov)
print(G_.cov())
#Check for tiny size - LL^T should be equal to cov, so diff should be negligible
chol.dot(chol.T) - cov
print((chol.dot(chol.T) - cov).max())
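# Sketch of the SVD route flagged as TODO above (illustration only; assumes `cov` from the
# cells above is still in scope). For a symmetric positive semi-definite covariance matrix,
# cov = U D U^T, so B = U * sqrt(D) satisfies B @ B.T == cov and plays the same role as the
# Cholesky factor L.
Sigma = np.asarray(cov)
U, D, Vt = np.linalg.svd(Sigma)
B = U * np.sqrt(D)                       # columns of U scaled by the square roots of the singular values
print(np.abs(B @ B.T - Sigma).max())     # should be negligibly small
Z = np.random.normal(size=(1000000, Sigma.shape[0]))
print(pd.DataFrame(Z @ B.T).cov())       # sample covariance should be close to cov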
###Output
_____no_output_____ |
nst/ExploringMNISTNeuralNet.ipynb | ###Markdown
MNIST Dataset
###Code
use_cuda = torch.cuda.is_available()
device = torch.device('cuda' if use_cuda else 'cpu')
default_batch_size = 32
loader_args = {'batch_size' : default_batch_size, 'shuffle' : True}
if use_cuda:
loader_args.update({'pin_memory' : True, 'num_workers' : 1})
testset = datasets.MNIST(root='../data', train=False, download=True, transform=transforms.ToTensor())
test_loader = utils.data.DataLoader(testset, **loader_args)
label_size = 10
###Output
_____no_output_____
###Markdown
MNIST CNN Model
###Code
class MNISTClassifier(nn.Module):
def __init__(self, isize, osize):
super(MNISTClassifier, self).__init__()
fc1_isize = int((((isize - 2 - 2) / 2) ** 2) * 32)
self.conv1 = nn.Conv2d(1, 64, 3)
self.conv2 = nn.Conv2d(64, 32, 3)
self.pool = nn.MaxPool2d(2)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(fc1_isize, 128)
self.fc2 = nn.Linear(128, osize)
def forward(self, x):
return self.f_fc2(x)
# extended to access intermediate layer outputs
def f_conv1(self, x):
x = self.conv1(x)
x = F.relu(x)
return x
def f_conv2(self, x):
x = self.f_conv1(x)
x = self.conv2(x)
x = F.relu(x)
return x
def f_pool1(self, x):
x = self.f_conv2(x)
x = self.pool(x)
return x
def f_fc1(self, x):
x = self.f_pool1(x)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
return x
def f_fc2(self, x):
x = self.f_fc1(x)
x = self.dropout2(x)
x = self.fc2(x)
x = F.log_softmax(x, dim=1)
return x
model_file = '../models/mnist_classifier.pt'
model = torch.load(model_file)
model = model.to(device)
###Output
_____no_output_____
###Markdown
Report Model Performance with Confusion Matrix
###Code
def predict(model, device, loader):
model.eval()
inputs = np.empty((0,1,28,28), dtype=float)
predictions = np.empty(0)
targets = np.empty(0)
with torch.no_grad():
for data, target in loader:
inputs = np.concatenate((inputs, data), axis=0)
data = data.to(device)
output = model(data)
prediction = output.argmax(dim=1)
prediction = prediction.cpu()
targets = np.concatenate((targets, target), axis=0)
predictions = np.concatenate((predictions, prediction), axis=0)
return (predictions, targets, inputs)
def predictions_to_matrix(predictions, targets, n_classes):
mtx = [[0 for i in range(n_classes)] for i in range(n_classes)]
for i in range(len(predictions)):
mtx[int(predictions[i])][int(targets[i])] += 1
return mtx
predictions, targets, inputs = predict(model, device, test_loader)
confusion_matrix = predictions_to_matrix(predictions, targets, label_size)
df = pd.DataFrame(confusion_matrix, index=[i for i in range(label_size)], columns=[i for i in range(label_size)])
plt.figure(figsize=(10, 10))
sn.heatmap(df, annot=True)
###Output
_____no_output_____
###Markdown
Sample of Incorrect Predictions
###Code
tensor2image = transforms.ToPILImage()
def incorrect(predictions, targets, inputs):
ret = []
for i, (pred, targ) in enumerate(zip(predictions, targets)):
if pred != targ:
ret.append((i, targ, pred, inputs[i]))
return ret
incorrects = incorrect(predictions, targets, inputs)
sample_idxes = [random.randint(0, len(incorrects) - 1) for _ in range(25)]
incorrect_images = np.empty((0,1,28,28))
for i in sample_idxes:
incorrect_images = np.concatenate((incorrect_images, np.expand_dims(incorrects[i][3], axis=0)), axis=0)
incorrect_images = torch.from_numpy(incorrect_images)
incorrect_image_grid = torchvision.utils.make_grid(incorrect_images, nrow=5)
tensor2image(incorrect_image_grid)
sample_idx = random.randint(0, len(incorrects) - 1)
incorrect_image = torch.from_numpy(incorrects[sample_idx][3]).type(torch.FloatTensor)
tensor2image(incorrect_image)
print("Correct Label: {} Prediction Label: {}".format(incorrects[sample_idx][1], incorrects[sample_idx][2]))
###Output
Correct Label: 1.0 Prediction Label: 3.0
###Markdown
Visualizing Model Internals with sample image
###Code
incorrect_input = incorrect_image
incorrect_input = incorrect_input.unsqueeze_(0)
incorrect_input = incorrect_input.to(device)
conv1_output = model.f_conv1(incorrect_input)
conv1_output.size()
def create_output_grid(output, rowsize, layer_size, imgsize):
output = output.squeeze().cpu().detach()
output_images = torch.reshape(output, (layer_size, 1, imgsize, imgsize))
grid = torchvision.utils.make_grid(output_images, nrow=rowsize)
return grid
###Output
_____no_output_____
###Markdown
Visualizing First Convolutional Layer output
###Code
conv1_output_image_grid = create_output_grid(conv1_output, 8, 64, 26)
tensor2image(conv1_output_image_grid)
# Visualizing Second Convolutional Layer output
conv2_output = model.f_conv2(incorrect_input)
conv2_output.size()
conv2_output_image_grid = create_output_grid(conv2_output, 6, 32, 24)
tensor2image(conv2_output_image_grid)
###Output
_____no_output_____
###Markdown
Visualizing Max Pooling Layer
###Code
pool1_output = model.f_pool1(incorrect_input)
pool1_output.size()
pool1_output_image_grid = create_output_grid(pool1_output, 6, 32, 12)
tensor2image(pool1_output_image_grid)
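# Illustrative aside: the learned 3x3 kernels of the first convolutional layer can also be
# viewed directly, complementing the activation maps above. Assumes `model`, `torchvision`
# and `tensor2image` from the earlier cells are still in scope.
conv1_weights = model.conv1.weight.data.cpu()                  # shape (64, 1, 3, 3)
weight_grid = torchvision.utils.make_grid(conv1_weights, nrow=8, normalize=True, padding=1)
tensor2image(weight_grid)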
###Output
_____no_output_____ |
src/bead_count.ipynb | ###Markdown
Bead countThis module will demonstrate how to count the beads in the cluster images: - Load cluster images. - Convert image to binary. - Scale image up to increase resolution. - Dilate image to reduce the chance of getting adjacent local maxima during watershedding. - Convert image to set. - Dilate image by factor x. - For all foreground pixels find connected pixels as a new set with the flood fill algorithm. - Get bounding boxes. - Extract subimages. - Write subimages to disk.
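Before the project code, here is a rough self-contained sketch of the watershed counting step on a single binary cluster image; `cluster_binary` is an assumed illustrative input (a 2D boolean array, True for bead pixels), and the real pipeline below uses `oiplib.labelRegionWatershed` instead.
###Code
import numpy as np
from scipy import ndimage as ndi
from skimage.feature import peak_local_max
from skimage.segmentation import watershed
def count_beads_sketch(cluster_binary):
    # Distance to background; bead centres show up as local maxima.
    distance = ndi.distance_transform_edt(cluster_binary)
    peak_coords = peak_local_max(distance, min_distance=5, labels=cluster_binary.astype(int))
    # One watershed marker per local maximum.
    markers = np.zeros(distance.shape, dtype=int)
    markers[tuple(peak_coords.T)] = np.arange(1, len(peak_coords) + 1)
    labels_ws = watershed(-distance, markers, mask=cluster_binary)
    return labels_ws.max()  # number of segmented beads in this cluster
###Output
_____no_output_____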
###Code
import math
import numpy as np
import matplotlib.pyplot as plt
import scipy as sp
import skimage as ski
from skimage.morphology import watershed
from skimage.feature import peak_local_max
from skimage.morphology import binary_erosion
from skimage import data, color
from skimage.transform import rescale, hough_circle, hough_circle_peaks
from skimage.filters import scharr
from skimage.feature import canny
from skimage.draw import circle_perimeter
from skimage.util import img_as_ubyte
import modules.oiplib as oiplib
gray2Binary = oiplib.gray2Binary
# Load all clusters.
clusters = oiplib.loadImages("../images/clusters")
# Determine bead count for all clusters.
beadCounts = {}
for cluster in clusters:
labelImg = oiplib.labelRegionWatershed(cluster)
labels = np.unique(labelImg)
beadCount = len(labels) - 1
if beadCounts.get(beadCount) is None:
beadCounts[beadCount] = 1
else:
beadCounts[beadCount] += 1
# General histogram variables.
maxBeadCount = max(beadCounts.keys())
maxOccurrenceCount = max(beadCounts.values())
xAxis = np.arange(1, maxBeadCount + 1)
yAxis = np.arange(0, math.ceil(maxOccurrenceCount / 5) + 1) * 5
yHist = np.zeros(maxBeadCount)
yHistCum = np.zeros(maxBeadCount)
# Create histogram.
for key, value in beadCounts.items():
yHist[key - 1] = value
fig, ax = plt.subplots(figsize=(10, 10))
plot = ax.bar(xAxis, yHist)
ax.grid()
ax.set_axisbelow(True)
ax.set_title("Histogram of clusters per bead count")
ax.set_xlabel("Bead count")
ax.set_ylabel("Clusters with bead count")
ax.set_xticks(xAxis);
ax.set_yticks(yAxis);
###Output
_____no_output_____ |