blob_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | path (stringlengths 5-283) | content_id (stringlengths 40-40) | detected_licenses (sequencelengths 0-41) | license_type (stringclasses, 2 values) | repo_name (stringlengths 7-96) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | branch_name (stringclasses, 58 values) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 12.7k-662M, nullable ⌀) | star_events_count (int64, 0-35.5k) | fork_events_count (int64, 0-20.6k) | gha_license_id (stringclasses, 11 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses, 43 values) | src_encoding (stringclasses, 9 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 7-5.88M) | extension (stringclasses, 30 values) | content (stringlengths 7-5.88M) | authors (sequencelengths 1-1) | author (stringlengths 0-73) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
45e08d9b88f2b7b3c5a10edeb9df093dca0a64b5 | b87afd6e709349dc1ee75179061c00faa115a6d3 | /Warmup 2.py | a243f29c80282ea9252af30cec45cb7d980807b1 | [] | no_license | DrQuestion/PoCS2-Assignement-2 | 0403ccb912a0ee2c93b5077606e651822ee1b42c | 2a4660e5b22f555d9811caff190c515a8d5620b1 | refs/heads/master | 2021-08-18T20:09:24.862356 | 2017-11-23T19:36:22 | 2017-11-23T19:36:22 | 108,999,682 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | #!/bin/python3
import sys
def simpleArraySum(n, ar):
return sum(ar)
n = int(input().strip())
ar = list(map(int, input().strip().split(' ')))
result = simpleArraySum(n, ar)
print(result)
| [
"[email protected]"
] | |
e81cabf420f4de9517a2d9dfb950464584a62284 | c77af74f854d1cff9527b84d0b27a6a9ffd63f90 | /1-basics/5-functions/3-parameter/bot.py | 12bbf070979433ab6db2a95c71bb6e6d87ecb1ed | [] | no_license | 4whitp37/COM404 | 75a573e5ac622120637b589d2190139d746a0964 | 492445748b513d81e63d9afe4ac2756e49721c04 | refs/heads/master | 2020-07-31T09:17:12.713718 | 2019-12-10T06:12:52 | 2019-12-10T06:12:52 | 210,557,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | #function with parameter
def escape_by(plan):
if plan == "jumping over":
print("We cannot escape that way! The boulder is too big!")
if plan == "running around":
print("We cannot escape that way! The boulder is moving too fast!")
if plan == "going deeper":
print("That might just work! Let's go deeper!")
escape_by("jumping over")
escape_by("running around")
escape_by("going deeper") | [
"[email protected]"
] | |
c05c093706617910400ebe1532819cde8c97efeb | f4578e55ea7cf0bc3a7d69381ff64bd31b0c3a5a | /src/Models/ReferenceLog.py | fe46e61d1f9e3044aa5bd784d4aec4d5754dfc0d | [] | no_license | shareil23/JULO_Code_Exercise | c3202bf40293a5b478fc9c0845c74d995587834c | eb6d1a200fe82fcec04200b20dda29f6c3fe53e2 | refs/heads/master | 2023-06-16T02:22:07.702021 | 2021-06-26T18:44:08 | 2021-06-26T18:44:08 | 380,564,657 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | from sqlalchemy.dialects.postgresql import UUID
import uuid
from ..Config import db
class ReferenceLog(db.Model):
__tablename__ = 'reference_log'
id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
reference_id = db.Column(UUID(as_uuid=True))
category = db.Column(db.Text())
created_at = db.Column(db.DateTime(timezone=True))
def __init__(self, **data):
self.reference_id = data['reference_id']
self.category = data['category']
self.created_at = data['created_at'] | [
"[email protected]"
] | |
3fb37e4e8de34e4048105e131d3210363d9df6c5 | 911f43f3e5154a4eb18f9f312eacc7e6a3155ced | /gail/main.py | 6e84b71110ae03bfeaecbf17f18595a3681b8afa | [] | no_license | BinaryKR/GAIL_Sketch | c7a0a4f5f0234f0b7cf30a5650ecc9ea42b07115 | ae2ed87ec014b3835d1caa77d54474c5ddf55669 | refs/heads/master | 2022-04-13T17:08:21.342269 | 2020-04-06T03:49:52 | 2020-04-06T03:49:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,165 | py | import os
import gym
import pickle
import argparse
import numpy as np
from collections import deque
from environment import Env
import torch
import torch.optim as optim
from tensorboardX import SummaryWriter
from utils.utils import *
from utils.zfilter import ZFilter
from model import Actor, Critic, Discriminator
from train_model import train_actor_critic, train_discrim
import matplotlib.pyplot as plt
temp_learner = []
temp_expert = []
parser = argparse.ArgumentParser(description='PyTorch GAIL')
parser.add_argument('--env_name', type=str, default="Hopper-v2",
help='name of the environment to run')
parser.add_argument('--load_model', type=str, default=None,
help='path to load the saved model')
parser.add_argument('--render', action="store_true", default=False,
help='if you dont want to render, set this to False')
parser.add_argument('--gamma', type=float, default=0.99,
help='discounted factor (default: 0.99)')
parser.add_argument('--lamda', type=float, default=0.98,
help='GAE hyper-parameter (default: 0.98)')
parser.add_argument('--hidden_size', type=int, default=100,
help='hidden unit size of actor, critic and discrim networks (default: 100)')
parser.add_argument('--learning_rate', type=float, default=3e-4,
help='learning rate of models (default: 3e-4)')
parser.add_argument('--l2_rate', type=float, default=1e-3,
help='l2 regularizer coefficient (default: 1e-3)')
parser.add_argument('--clip_param', type=float, default=0.2,
help='clipping parameter for PPO (default: 0.2)')
parser.add_argument('--discrim_update_num', type=int, default=2,
help='update number of discriminator (default: 2)')
parser.add_argument('--actor_critic_update_num', type=int, default=10,
help='update number of actor-critic (default: 10)')
parser.add_argument('--total_sample_size', type=int, default=2048,
help='total sample size to collect before PPO update (default: 2048)')
parser.add_argument('--batch_size', type=int, default=64,
help='batch size to update (default: 64)')
parser.add_argument('--suspend_accu_exp', type=float, default=0.8,
help='accuracy for suspending discriminator about expert data (default: 0.8)')
parser.add_argument('--suspend_accu_gen', type=float, default=0.8,
help='accuracy for suspending discriminator about generated data (default: 0.8)')
parser.add_argument('--max_iter_num', type=int, default=25000,
help='maximal number of main iterations (default: 4000)')
# parser.add_argument('--max_iter_num', type=int, default=4000,
# help='maximal number of main iterations (default: 4000)')
parser.add_argument('--seed', type=int, default=500,
help='random seed (default: 500)')
parser.add_argument('--logdir', type=str, default='logs',
help='tensorboardx logs directory')
args = parser.parse_args()
# env.build_canvas ####f
def main():
expert_demo= pickle.load(open('./Ree1_expert.p', "rb"))
# Ree1 : action 1
# Ree2 : action 100
# Ree3 : action 50
# Ree4 : action 10
# Ree5 : action 4
# Ree6 : action 0.5
# print('expert_demo_shape : ', np.array(expert_demo).shape)
expert_x = int(expert_demo[1][0])
expert_y = int(expert_demo[1][1])
env = Env(expert_x, expert_y)
# env = Env(0,0)
# env.seed(args.seed)
torch.manual_seed(args.seed)
num_inputs = 2
num_actions = 8
running_state = ZFilter((num_inputs,), clip=5)
print('state size:', num_inputs)
print('action size:', num_actions)
actor = Actor(num_inputs, num_actions, args)
critic = Critic(num_inputs, args)
discrim = Discriminator(num_inputs + num_actions, args)
actor_optim = optim.Adam(actor.parameters(), lr=args.learning_rate)
critic_optim = optim.Adam(critic.parameters(), lr=args.learning_rate,
weight_decay=args.l2_rate)
discrim_optim = optim.Adam(discrim.parameters(), lr=args.learning_rate)
# load demonstrations
# expert_demo, _ = pickle.load(open('./expert_demo/expert_demo.p', "rb"))
demonstrations = np.array(expert_demo[0])
# print("demonstrations.shape", demonstrations.shape)
writer = SummaryWriter(args.logdir)
if args.load_model is not None:
saved_ckpt_path = os.path.join(os.getcwd(), 'save_model', str(args.load_model))
ckpt = torch.load(saved_ckpt_path)
actor.load_state_dict(ckpt['actor'])
critic.load_state_dict(ckpt['critic'])
discrim.load_state_dict(ckpt['discrim'])
running_state.rs.n = ckpt['z_filter_n']
running_state.rs.mean = ckpt['z_filter_m']
running_state.rs.sum_square = ckpt['z_filter_s']
print("Loaded OK ex. Zfilter N {}".format(running_state.rs.n))
episodes = 0
train_discrim_flag = True
for iter in range(args.max_iter_num):
actor.eval(), critic.eval()
memory = deque()
steps = 0
scores = []
while steps < args.total_sample_size:
state = env.reset()
score = 0
state = running_state(state)
for _ in range(1000):
if args.render:
env.render()
steps += 1
mu, std = actor(torch.Tensor(state).unsqueeze(0))
action2 = np.argmax(get_action(mu, std)[0])
action = get_action(mu, std)[0]
next_state, reward, done, _ = env.step(action2)
# next_state, reward, done, _ = env.step(action)
irl_reward = get_reward(discrim, state, action)
if done:
mask = 0
else:
mask = 1
memory.append([state, action, irl_reward, mask])
next_state = running_state(next_state)
state = next_state
score += reward
if done:
break
episodes += 1
scores.append(score)
score_avg = np.mean(scores)
print('{}:: {} episode score is {:.2f}'.format(iter, episodes, score_avg))
writer.add_scalar('log/score', float(score_avg), iter)
actor.train(), critic.train(), discrim.train()
if train_discrim_flag:
expert_acc, learner_acc = train_discrim(discrim, memory, discrim_optim, demonstrations, args)
print("Expert: %.2f%% | Learner: %.2f%%" % (expert_acc * 100, learner_acc * 100))
temp_learner.append(learner_acc * 100)
temp_expert.append(expert_acc * 100)
if ((expert_acc > args.suspend_accu_exp and learner_acc > args.suspend_accu_gen and iter % 55==0) or iter % 50 == 0):
# train_discrim_flag = False
plt.plot(temp_learner, label = 'learner')
plt.plot(temp_expert, label = 'expert')
plt.xlabel('Episode')
plt.ylabel('Accuracy')
plt.xticks([])
plt.legend()
plt.savefig('accuracy{}.png'.format(iter))
# plt.show()
model_path = 'C:/Users/USER/9 GAIL/lets-do-irl/mujoco/gail'
ckpt_path = os.path.join(model_path, 'ckpt_' + str(score_avg) + '.pth.tar')
print("check path",ckpt_path)
save_checkpoint({
'actor': actor.state_dict(),
'critic': critic.state_dict(),
'discrim': discrim.state_dict(),
'z_filter_n': running_state.rs.n,
'z_filter_m': running_state.rs.mean,
'z_filter_s': running_state.rs.sum_square,
'args': args,
'score': score_avg
}, filename=ckpt_path)
train_actor_critic(actor, critic, memory, actor_optim, critic_optim, args)
if iter % 100:
score_avg = int(score_avg)
model_path = os.path.join(os.getcwd(),'save_model')
if not os.path.isdir(model_path):
os.makedirs(model_path)
model_path = 'C:/Users/USER/9 GAIL/lets-do-irl/mujoco/gail'
ckpt_path = os.path.join(model_path, 'ckpt_'+ str(score_avg)+'.pth.tar')
save_checkpoint({
'actor': actor.state_dict(),
'critic': critic.state_dict(),
'discrim': discrim.state_dict(),
'z_filter_n':running_state.rs.n,
'z_filter_m': running_state.rs.mean,
'z_filter_s': running_state.rs.sum_square,
'args': args,
'score': score_avg
}, filename=ckpt_path)
plt.plot(temp_learner)
plt.plot(temp_expert)
plt.xlabel('Episode')
plt.ylabel('Accuracy')
plt.xticks([])
plt.savefig('accuracy.png')
# plt.show()
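# --- Hedged usage note (editor's addition) ---
# Typical invocations, based on the argparse flags defined above; the expert-demo
# pickle ('./Ree1_expert.p') must exist next to this script:
#   python main.py                       # train with the defaults above
#   python main.py --render              # additionally render the environment
#   python main.py --max_iter_num 4000 --total_sample_size 2048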
if __name__=="__main__":
main() | [
"[email protected]"
] | |
352b804747cc226f09e7a42316e6262c0a63a77b | 15581a76b36eab6062e71d4e5641cdfaf768b697 | /LeetCode_30days_challenge/2020/August/Design HashSet.py | 8ac9ddd111bec7159b93c31f2ee92c9ef039863d | [] | no_license | MarianDanaila/Competitive-Programming | dd61298cc02ca3556ebc3394e8d635b57f58b4d2 | 3c5a662e931a5aa1934fba74b249bce65a5d75e2 | refs/heads/master | 2023-05-25T20:03:18.468713 | 2023-05-16T21:45:08 | 2023-05-16T21:45:08 | 254,296,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py | class MyHashSet:
def __init__(self):
self.numBuckets = 15000
self.buckets = [[] for _ in range(self.numBuckets)]
def hash_function(self, key):
return key % self.numBuckets
def add(self, key):
i = self.hash_function(key)
if not key in self.buckets[i]:
self.buckets[i].append(key)
def remove(self, key):
i = self.hash_function(key)
if key in self.buckets[i]:
self.buckets[i].remove(key)
def contains(self, key):
i = self.hash_function(key)
if key in self.buckets[i]:
return True
else:
return False
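# --- Hedged usage sketch (editor's addition) ---
# Mirrors the LeetCode-style driver for this class; the key values are arbitrary.
if __name__ == "__main__":
    hash_set = MyHashSet()
    hash_set.add(1)
    hash_set.add(2)
    print(hash_set.contains(1))   # True
    print(hash_set.contains(3))   # False
    hash_set.remove(2)
    print(hash_set.contains(2))   # False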
| [
"[email protected]"
] | |
94675a9a4c3a35afe8b4a98198dd51c0877c1b33 | 0ad3f130560a342cceb47889ab7ceac6d8442834 | /ltdmason/s3upload.py | 0d89112f890c34645e75833dfcb596932d59229c | [
"MIT"
] | permissive | AvdN/ltd-mason | dfd4cf4df19170e9422282b986ef95a06d566e51 | b14940037010b1399bd66888c4caef3dd350d76c | refs/heads/master | 2021-01-24T06:44:00.270568 | 2017-03-13T17:44:27 | 2017-03-13T17:44:27 | 93,316,933 | 0 | 0 | null | 2017-06-04T13:58:02 | 2017-06-04T13:58:02 | null | UTF-8 | Python | false | false | 13,865 | py | """S3 upload/sync utilities."""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from builtins import * # NOQA
from future.standard_library import install_aliases
install_aliases() # NOQA
import os
import logging
import mimetypes
import boto3
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
def upload(bucket_name, path_prefix, source_dir,
upload_dir_redirect_objects=True,
surrogate_key=None, acl=None,
cache_control_max_age=31536000,
aws_access_key_id=None, aws_secret_access_key=None,
aws_profile=None):
"""Upload built documentation to S3.
This function places the contents of the Sphinx HTML build directory
into the ``/path_prefix/`` directory of an *existing* S3 bucket.
Existing files on S3 are overwritten; files that no longer exist in the
``source_dir`` are deleted from S3.
S3 credentials are assumed to be stored in a place where boto3 can read
    them, such as :file:`~/.aws/credentials`. `aws_profile` allows you
    to select which AWS credential profile you wish to use from the
:file:`~/.aws/credentials`.
See http://boto3.readthedocs.org/en/latest/guide/quickstart.html.
Parameters
----------
bucket_name : str
Name of the S3 bucket where documentation is uploaded.
path_prefix : str
The root directory in the bucket where documentation is stored.
source_dir : str
Path of the Sphinx HTML build directory on the local file system.
The contents of this directory are uploaded into the ``/path_prefix/``
directory of the S3 bucket.
upload_dir_redirect_objects : bool, optional
A feature flag to enable uploading objects to S3 for every directory.
These objects contain headers ``x-amz-meta-dir-redirect=true`` HTTP
headers that tell Fastly to issue a 301 redirect from the directory
object to the '/index.html' in that directory.
surrogate_key : str, optional
The surrogate key to insert in the header of all objects
in the ``x-amz-meta-surrogate-key`` field. This key is used to purge
builds from the Fastly CDN when Editions change.
If `None` then no header will be set.
acl : str, optional
The pre-canned AWS access control list to apply to this upload.
        Defaults to `None` (no explicit ACL is set). Use ``'public-read'`` to allow files to be downloaded
over HTTP by the public. See
https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
for an overview of S3's pre-canned ACL lists. Note that ACL settings
are not validated locally.
cache_control_max_age : int, optional
Defaults to 31536000 seconds = 1 year.
aws_access_key_id : str, optional
The access key for your AWS account. Also set `aws_secret_access_key`.
aws_secret_access_key : str, optional
The secret key for your AWS account.
aws_profile : str, optional
Name of AWS profile in :file:`~/.aws/credentials`. Use this instead
of `aws_access_key_id` and `aws_secret_access_key` for file-based
credentials.
"""
log.debug('s3upload.upload({0}, {1}, {2})'.format(
bucket_name, path_prefix, source_dir))
session = boto3.session.Session(
profile_name=aws_profile,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key)
s3 = session.resource('s3')
bucket = s3.Bucket(bucket_name)
metadata = None
if surrogate_key is not None:
if metadata is None:
metadata = {}
metadata['surrogate-key'] = surrogate_key
if cache_control_max_age is not None:
cache_control = 'max-age={0:d}'.format(cache_control_max_age)
else:
cache_control = None
manager = ObjectManager(session, bucket_name, path_prefix)
for (rootdir, dirnames, filenames) in os.walk(source_dir):
# name of root directory on S3 bucket
bucket_root = os.path.relpath(rootdir, start=source_dir)
if bucket_root in ('.', '/'):
bucket_root = ''
# Delete bucket directories that no longer exist in source
bucket_dirnames = manager.list_dirnames_in_directory(bucket_root)
for bucket_dirname in bucket_dirnames:
if bucket_dirname not in dirnames:
log.debug(('Deleting bucket directory {0}'.format(
bucket_dirname)))
manager.delete_directory(bucket_dirname)
# Delete files that no longer exist in source
bucket_filenames = manager.list_filenames_in_directory(bucket_root)
for bucket_filename in bucket_filenames:
if bucket_filename not in filenames:
bucket_filename = os.path.join(bucket_root, bucket_filename)
log.debug('Deleting bucket file {0}'.format(bucket_filename))
manager.delete_file(bucket_filename)
# Upload files in directory
for filename in filenames:
local_path = os.path.join(rootdir, filename)
bucket_path = os.path.join(path_prefix, bucket_root, filename)
log.debug('Uploading to {0}'.format(bucket_path))
_upload_file(local_path, bucket_path, bucket,
metadata=metadata, acl=acl,
cache_control=cache_control)
# Upload a directory redirect object
if upload_dir_redirect_objects is True:
bucket_dir_path = os.path.join(path_prefix, bucket_root)
bucket_dir_path = bucket_dir_path.rstrip('/')
if metadata:
redirect_metadata = dict(metadata)
else:
redirect_metadata = {}
redirect_metadata['dir-redirect'] = 'true'
_upload_object(bucket_dir_path,
content='',
bucket=bucket,
metadata=redirect_metadata,
acl=acl,
cache_control=cache_control)
def _upload_file(local_path, bucket_path, bucket,
metadata=None, acl=None, cache_control=None):
"""Upload a file to the S3 bucket.
This function uses the mimetypes module to guess and then set the
Content-Type and Encoding-Type headers.
Parameters
----------
local_path : str
Full path to a file on the local file system.
bucket_path : str
Destination path (also known as the key name) of the file in the
S3 bucket.
bucket : `boto3` Bucket instance
S3 bucket.
metadata : dict, optional
Header metadata values. These keys will appear in headers as
``x-amz-meta-*``.
acl : str, optional
A pre-canned access control list. See
https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
cache_control : str, optional
The cache-control header value. For example, 'max-age=31536000'.
"""
extra_args = {}
if acl is not None:
extra_args['ACL'] = acl
if metadata is not None:
extra_args['Metadata'] = metadata
if cache_control is not None:
extra_args['CacheControl'] = cache_control
# guess_type returns None if it cannot detect a type
content_type, content_encoding = mimetypes.guess_type(local_path,
strict=False)
if content_type is not None:
extra_args['ContentType'] = content_type
log.debug(str(extra_args))
obj = bucket.Object(bucket_path)
# no return status from the upload_file api
obj.upload_file(local_path, ExtraArgs=extra_args)
def _upload_object(bucket_path, bucket, content='',
metadata=None, acl=None, cache_control=None):
"""Upload an arbitrary object to an S3 bucket.
Parameters
----------
bucket_path : str
Destination path (also known as the key name) of the file in the
S3 bucket.
content : str or bytes
Object content, optional
bucket : `boto3` Bucket instance
S3 bucket.
metadata : dict, optional
Header metadata values. These keys will appear in headers as
``x-amz-meta-*``.
acl : str, optional
A pre-canned access control list. See
https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
cache_control : str, optional
The cache-control header value. For example, 'max-age=31536000'.
"""
args = {}
if metadata is not None:
args['Metadata'] = metadata
if acl is not None:
args['ACL'] = acl
if cache_control is not None:
args['CacheControl'] = cache_control
obj = bucket.Object(bucket_path)
obj.put(Body=content, **args)
class ObjectManager(object):
"""Manage objects existing in a bucket under a specific ``bucket_root``.
The ObjectManager maintains information about objects that exist in the
bucket, and can delete objects that no longer exist in the source.
Parameters
----------
session : :class:`boto3.session.Session`
A boto3 session instance provisioned with the correct identities.
bucket_name : str
Name of the S3 bucket.
bucket_root : str
The version slug is the name root directory in the bucket where
documentation is stored.
"""
def __init__(self, session, bucket_name, bucket_root):
super().__init__()
s3 = session.resource('s3')
bucket = s3.Bucket(bucket_name)
self._session = session
self._bucket = bucket
self._bucket_root = bucket_root
# Strip trailing '/' from bucket_root for comparisons
if self._bucket_root.endswith('/'):
self._bucket_root = self._bucket_root.rstrip('/')
def list_filenames_in_directory(self, dirname):
"""List all file-type object names that exist at the root of this
bucket directory.
Parameters
----------
dirname : str
Directory name in the bucket relative to ``bucket_root/``.
Returns
-------
filenames : list
List of file names (`str`), relative to ``bucket_root/``, that
exist at the root of ``dirname``.
"""
prefix = self._create_prefix(dirname)
filenames = []
for obj in self._bucket.objects.filter(Prefix=prefix):
if obj.key.endswith('/'):
continue
obj_dirname = os.path.dirname(obj.key)
if obj_dirname == prefix:
# object is at root of directory
filenames.append(os.path.relpath(obj.key,
start=prefix))
return filenames
def list_dirnames_in_directory(self, dirname):
"""List all names of directories that exist at the root of this
bucket directory.
Note that *directories* don't exist in S3; rather directories are
inferred from path names.
Parameters
----------
dirname : str
Directory name in the bucket relative to ``bucket_root``.
Returns
-------
dirnames : list
List of directory names (`str`), relative to ``bucket_root/``,
that exist at the root of ``dirname``.
"""
prefix = self._create_prefix(dirname)
dirnames = []
for obj in self._bucket.objects.filter(Prefix=prefix):
dirname = os.path.dirname(obj.key)
# if the object is a directory redirect, make it look like a dir
if dirname == '':
dirname = obj.key + '/'
rel_dirname = os.path.relpath(dirname, start=prefix)
dir_parts = rel_dirname.split('/')
if len(dir_parts) == 1:
dirnames.append(dir_parts[0])
dirnames = list(set(dirnames))
if '.' in dirnames:
dirnames.remove('.')
return dirnames
def _create_prefix(self, dirname):
if dirname in ('.', '/'):
dirname = ''
# Strips trailing slash from dir prefix for comparisons
# os.dirname() returns directory names without a trailing /
prefix = os.path.join(self._bucket_root, dirname)
if prefix.endswith('/'):
prefix = prefix.rstrip('/')
return prefix
def delete_file(self, filename):
"""Delete a file from the bucket.
Parameters
----------
filename : str
Name of the file, relative to ``bucket_root/``.
"""
key = os.path.join(self._bucket_root, filename)
objects = list(self._bucket.objects.filter(Prefix=key))
for obj in objects:
obj.delete()
def delete_directory(self, dirname):
"""Delete a directory (and contents) from the bucket.
Parameters
----------
dirname : str
Name of the directory, relative to ``bucket_root/``.
"""
key = os.path.join(self._bucket_root, dirname)
if not key.endswith('/'):
key += '/'
delete_keys = {'Objects': []}
key_objects = [{'Key': obj.key}
for obj in self._bucket.objects.filter(Prefix=key)]
assert len(key_objects) > 0
delete_keys['Objects'] = key_objects
# based on http://stackoverflow.com/a/34888103
s3 = self._session.resource('s3')
r = s3.meta.client.delete_objects(Bucket=self._bucket.name,
Delete=delete_keys)
log.debug(r)
if 'Errors' in r:
raise S3Error('S3 could not delete {0}'.format(key))
class S3Error(Exception):
"""General errors in S3 API usage."""
pass
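# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Shows one way the `upload` function above might be invoked. The bucket name,
# path prefix, source directory and AWS profile below are illustrative assumptions.
if __name__ == '__main__':  # pragma: no cover
    upload(
        bucket_name='example-doc-bucket',     # hypothetical bucket
        path_prefix='my-project/v/main',      # hypothetical prefix inside the bucket
        source_dir='_build/html',             # local Sphinx HTML build output
        surrogate_key='abcdef123456',         # optional Fastly surrogate key
        acl=None,                             # keep the bucket's default ACL
        aws_profile='default',                # profile from ~/.aws/credentials
    )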
| [
"[email protected]"
] | |
10cb00f7943485eae5beb38a5ae8ece16a0487f0 | a5679e425c39296503008c5c86e327dde01cd3e6 | /Tkinter/hello_world.py | d9544876eb91e0f264eaffe79e8c9b5687055ae5 | [] | no_license | dinhky0204/SoundHandlingPython | 93cf0a1c64b41f4cf52b3cd87fbf45fc05a1c0ab | f2647fc8a21d13cd2ffbcffba7ad08101dbf7990 | refs/heads/master | 2021-05-07T08:41:09.373425 | 2018-01-17T08:50:53 | 2018-01-17T08:50:53 | 109,397,072 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,150 | py | from Tkinter import *
class Application(Frame):
def say_hi(self):
print "hi there, everyone!"
def say_goodbye(self):
print "Good bye"
def confirmMessage(self):
print "Confirmed"
def createWidgets(self):
self.QUIT = Button(self)
self.QUIT["text"] = "QUIT"
self.QUIT["fg"] = "red"
self.QUIT["command"] = self.quit
self.QUIT.pack({"side": "left"})
self.hi_there = Button(self)
self.hi_there["text"] = "HELLO"
self.hi_there['fg'] = "blue"
self.hi_there["command"] = self.say_hi
self.hi_there.pack({"side": "left"})
self.confirm = Button(self)
self.confirm["text"] = "CONFIRM"
self.confirm["fg"] = "yellow"
self.confirm["command"] = self.confirmMessage
self.confirm.pack({"side": "left"})
fred = Button(self, text="Test fred", fg="red", bg="blue")
fred.pack(side= "left")
def __init__(self, master=None):
Frame.__init__(self, master)
self.pack()
self.createWidgets()
root = Tk()
app = Application(master=root)
app.mainloop()
root.destroy()
| [
"[email protected]"
] | |
ed8b7dbd7403201e805b8dbacb2d97cfdf5005eb | 0f59e486ea9d7c96b8c3f7f92bf063fc8389f1e8 | /vivisect/analysis/amd64/__init__.py | 2b48787dc8ec2faa3d32624e5f1ca487682d33a5 | [
"Apache-2.0"
] | permissive | vivisect/vivisect | ac259918b6281d9431c32a0b2307c61f9cab0dec | b07e161cc28b19fdda0d047eefafed22c5b00f15 | refs/heads/master | 2023-08-25T09:02:00.526532 | 2023-07-26T03:07:07 | 2023-07-26T03:07:07 | 26,651,759 | 833 | 181 | Apache-2.0 | 2023-09-07T03:43:53 | 2014-11-14T18:28:47 | Python | UTF-8 | Python | false | false | 34 | py | '''
Amd64 Analysis Modules
'''
| [
"[email protected]"
] | |
7e2257eb51b068c89208b78c292a100a5a7d2186 | b65d1e76b8ad204e0e3cc5a67fb4de8982e7b2f5 | /lib/FunctorManagement.py | 76b1e85c8e1f4723f3b428c826f88c4e67dfdebf | [
"BSD-3-Clause"
] | permissive | rmmilewi/pira | 251f8ff1be1ac21a66778243bbda585b39d521f9 | 58d6510af3d3dfc0581921d9e00bf24d026a8c44 | refs/heads/master | 2021-01-06T19:53:07.295855 | 2020-02-19T21:59:39 | 2020-02-19T21:59:39 | 241,468,154 | 0 | 0 | BSD-3-Clause | 2020-02-18T21:10:11 | 2020-02-18T21:10:09 | null | UTF-8 | Python | false | false | 6,419 | py | """
File: FunctorManagement.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/jplehr/pira/LICENSE.txt
Description: Module to load and manage the user-supplied functors.
"""
import typing
import lib.Utility as u
from lib.Configuration import PiraConfiguration, PiraConfigurationErrorException
import lib.Logging as log
from lib.Exception import PiraException
class FunctorManagementException(PiraException):
"""The exception indicates a problem with the funtor management"""
def __init__(self, msg):
super().__init__(msg)
class FunctorManager:
""" Entity to query for functors. Needs to be initialized with a PiraConfiguration once per PIRA configuration file. """
class __FunctorManagerImpl:
def __init__(self, cfg: PiraConfiguration) -> None:
self.config = cfg
self.functor_cache = {}
def get_or_load_functor(self, build: str, item: str, flavor: str, func: str):
'''
We use the wholename, i.e. fully qualified path to the functor, as the key
in our functor cache.
'''
      # Compare strings by value (==), not identity (is); 'is' only worked here by
      # accident of CPython string interning.
      if func == 'basebuild':
        path, name, wnm = self.get_builder(build, item, flavor, True)
      elif func == 'build':
        path, name, wnm = self.get_builder(build, item, flavor)
      elif func == 'clean':
        path, name, wnm = self.get_cleaner(build, item, flavor)
      elif func == 'analyze':
        path, name, wnm = self.get_analyzer(build, item, flavor)
      elif func == 'run':
        path, name, wnm = self.get_runner(build, item, flavor)
else:
raise Exception('No such option available to load functor for. Value = ' + func)
try:
_ = self.functor_cache[name]
except KeyError:
self.functor_cache[name] = u.load_functor(path, name)
log.get_logger().log(
'FunctorManager::get_or_load: The retrieved ' + func + ' functor: ' + str(self.functor_cache[name]),
level='debug')
return self.functor_cache[name]
def get_builder(self, build: str, item: str, flavor: str, base: bool = False) -> typing.Tuple[str, str, str]:
p = self.config.get_builder_path(build, item)
n = self.get_builder_name(build, item, flavor)
if base:
n = u.concat_a_b_with_sep('no_instr', n, '_')
wnm = self.get_builder_file(build, item, flavor)
return p, n, wnm
def get_cleaner(self, build: str, item: str, flavor: str) -> typing.Tuple[str, str, str]:
p = self.config.get_cleaner_path(build, item)
n = self.get_cleaner_name(build, item, flavor)
wnm = self.get_cleaner_file(build, item, flavor)
return p, n, wnm
def get_analyzer(self, build: str, item: str, flavor: str) -> typing.Tuple[str, str, str]:
p = self.config.get_analyzer_path(build, item)
n = self.get_analyzer_name(build, item, flavor)
wnm = self.get_analyzer_file(build, item, flavor)
return p, n, wnm
def get_runner(self, build: str, item: str, flavor: str) -> typing.Tuple[str, str, str]:
p = self.config.get_runner_path(build, item)
n = self.get_runner_name(build, item, flavor)
wnm = self.get_runner_file(build, item, flavor)
return p, n, wnm
def get_raw_name(self, build: str, item: str, flavor: str) -> str:
b_nm = self.config.get_benchmark_name(item)
raw_nm = u.concat_a_b_with_sep(b_nm, flavor, '_')
return raw_nm
def get_cleaner_name(self, build: str, item: str, flavor: str) -> str:
raw_nm = self.get_raw_name(build, item, flavor)
cl_nm = u.concat_a_b_with_sep('clean', raw_nm, '_')
return cl_nm
def get_cleaner_file(self, build: str, item: str, flavor: str) -> str:
path = self.config.get_cleaner_path(build, item)
nm = self.get_cleaner_name(build, item, flavor)
file_path = u.concat_a_b_with_sep(path, nm, '/')
full_path = u.concat_a_b_with_sep(file_path, 'py', '.')
return full_path
def get_builder_name(self, build: str, item: str, flavor: str) -> str:
raw_nm = self.get_raw_name(build, item, flavor)
# FIXME: remove as soon as the new uniform naming is in place
return raw_nm
cl_nm = u.concat_a_b_with_sep('build', raw_nm, '_')
return cl_nm
def get_builder_file(self, build: str, item: str, flavor: str) -> str:
path = self.config.get_builder_path(build, item)
nm = self.get_builder_name(build, item, flavor)
file_path = u.concat_a_b_with_sep(path, nm, '/')
full_path = u.concat_a_b_with_sep(file_path, 'py', '.')
return full_path
def get_analyzer_name(self, build: str, item: str, flavor: str) -> str:
raw_nm = self.get_raw_name(build, item, flavor)
cl_nm = u.concat_a_b_with_sep('analyse', raw_nm, '_')
return cl_nm
def get_analyzer_file(self, build: str, item: str, flavor: str) -> str:
path = self.config.get_analyzer_path(build, item)
nm = self.get_analyzer_name(build, item, flavor)
file_path = u.concat_a_b_with_sep(path, nm, '/')
full_path = u.concat_a_b_with_sep(file_path, 'py', '.')
return full_path
def get_runner_name(self, build: str, item: str, flavor: str) -> str:
raw_nm = self.get_raw_name(build, item, flavor)
cl_nm = u.concat_a_b_with_sep('runner', raw_nm, '_')
return cl_nm
def get_runner_file(self, build: str, item: str, flavor: str) -> str:
path = self.config.get_runner_path(build, item)
nm = self.get_runner_name(build, item, flavor)
file_path = u.concat_a_b_with_sep(path, nm, '/')
full_path = u.concat_a_b_with_sep(file_path, 'py', '.')
return full_path
instance = None
def __init__(self, cfg=None):
if not FunctorManager.instance:
if cfg is None:
raise FunctorManagementException('Cannot create from None')
FunctorManager.instance = FunctorManager.__FunctorManagerImpl(cfg)
else:
if cfg is not None:
if not cfg.is_valid():
raise PiraConfigurationErrorException('Invalid configuration passed to FunctorManager')
FunctorManager.instance.cfg = cfg
FunctorManager.instance.functor_cache.clear()
@classmethod
def from_config(cls, p_config: PiraConfiguration):
""" Needs to be called once per configuration. """
return cls(p_config)
def __getattr__(self, name):
return getattr(self.instance, name)
def reset(self):
FunctorManager.instance = None
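# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Illustrates the intended call sequence for the singleton above; the configuration
# object and the functor coordinates ('build_dir', 'item_name', 'flavor') are assumptions.
#
#   fm = FunctorManager.from_config(pira_configuration)   # once per PIRA configuration
#   build_functor = fm.get_or_load_functor('build_dir', 'item_name', 'flavor', 'build')
#   clean_functor = fm.get_or_load_functor('build_dir', 'item_name', 'flavor', 'clean')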
| [
"[email protected]"
] | |
a986b3acd7d80ca4f18e2f26b9117e0a6a58b09d | 1ef7fe1655dc84acaa3a04c16c4cf313e113ae49 | /social/logic.py | c770e0ae7fb773eb259b8b02b6702eb0073f20bf | [] | no_license | szpython1812/swiper | 80d43542392b94785fe9a9a0d6f65125c5a47d50 | b2e85f0224f943ce6c070ca875ff29b58f4340f6 | refs/heads/develop | 2020-05-22T21:12:23.345910 | 2019-05-17T08:44:01 | 2019-05-17T08:44:01 | 186,523,229 | 1 | 1 | null | 2019-05-16T02:54:52 | 2019-05-14T01:40:22 | Python | UTF-8 | Python | false | false | 4,460 | py | import datetime
from django.core.cache import cache
from django.db.models import Q
from common import keys
from lib.cache import rds
from social.models import Swipe, Friend
from swiper import config
from user.models import User
from common import errors
def like(user, sid):
    Swipe.like(user.id, sid)
    # Check whether the other user has already liked us
    if Swipe.has_like_someone(sid):
        # They liked us back, so create the friendship
        Friend.make_friends(user.id, sid)
        return {'match': True}
    else:
        return {'match': False}
def superlike(user, sid):
    Swipe.superlike(user.id, sid)
    # Check whether the other user has already liked us
    if Swipe.has_like_someone(sid):
        # They liked us back, so create the friendship
        Friend.make_friends(user.id, sid)
        return {'match': True}
    else:
        return {'match': False}
def get_recmd_list(user):
    # Collect the ids of users this user has already swiped on
    swiped = Swipe.objects.filter(uid=user.id).only('sid')
    # Use the swiped-on user's id (sid); sw.id would be the Swipe record's own pk
    swiped_list = [sw.sid for sw in swiped]
    # Exclude the user themselves as well
    swiped_list.append(user.id)
curr_year = datetime.datetime.now().year
max_birth_year = curr_year - user.profile.min_dating_age
min_birth_year = curr_year - user.profile.max_dating_age
users = User.objects.filter(
location=user.profile.location,
birth_year__range=(min_birth_year, max_birth_year),
sex=user.profile.dating_sex
).exclude(id__in=swiped_list)[:20]
# [:20] = select xxx, xxx from User limit 20
return users
def rewind(user):
now = datetime.datetime.now()
key = keys.REWIND_KEY % (user.id, now.date())
rewind_times = cache.get(key, 0)
    # Compare today's rewind count against the maximum allowed
if rewind_times < config.REWIND_TIMES:
        # Perform the rewind:
        # delete the latest Swipe record and, if a friendship was formed, remove it too.
record = Swipe.objects.filter(uid=user.id).latest(
field_name='swipe_time')
uid1, uid2 = (user.id, record.sid) if user.id < record.sid else (
record.sid, user.id)
Friend.objects.filter(uid1=uid1, uid2=uid2).delete()
        # Update the cached rewind count for today
rewind_times += 1
timeout = 86400 - (now.hour * 3600 + now.minute * 60 + now.second)
cache.set(key, rewind_times, timeout)
        # Roll back this swipe's contribution to the hot ranking (top N)
# if record.mark == 'like':
# rds.zincrby(keys.HOT_RANK, -config.LIKE_SCORE, record.sid)
# elif record.makr == 'dislike':
# rds.zincrby(keys.HOT_RANK, -config.DISLIKE_SCORE, record.sid)
# else:
# rds.zincrby(keys.HOT_RANK, -config.SUPERLIKE_SCORE, record.sid)
        # The commented-out branches above are replaced by this score mapping
score_mapping = {
'like': config.LIKE_SCORE,
'dislike': config.DISLIKE_SCORE,
'superlike': config.SUPERLIKE_SCORE
}
rds.zincrby(keys.HOT_RANK, -score_mapping[record.mark], record.sid)
record.delete()
return True
else:
raise errors.EXCEED_REWIND_TIMES()
def get_liked_me(user):
swipe = Swipe.objects.filter(sid=user.id, mark__in=['like', 'superlike']).only('uid')
liked_me_list = [sw.uid for sw in swipe]
users = User.objects.filter(id__in=liked_me_list)
return users
def get_friends_list(user):
friends = Friend.objects.filter(Q(uid1=user.id) | Q(uid2=user.id))
    # Collect the id of the other user in each friendship
sid_list = []
for friend in friends:
if friend.uid1 == user.id:
sid_list.append(friend.uid2)
else:
sid_list.append(friend.uid1)
friends_list = User.objects.filter(id__in=sid_list)
data = [friend.to_dict() for friend in friends_list]
return data
def get_top_n(num):
# [[b'6', 7.0], [b'5', 5.0], [b'4', 5.0], [b'3', 5.0], [b'9', -5.0]]
data = rds.zrevrange(keys.HOT_RANK, 0, num, withscores=True)
cleaned = [(int(id), int(score)) for (id, score) in data]
    # Look up the corresponding users by id
uid_list = [id for (id, _) in cleaned]
# users = []
    # The loop above would hit the database once per user; a single filter() is used instead.
# for uid in uid_list:
# user = User.objects.get(id=uid)
# user.append(user)
users = User.objects.filter(id__in=uid_list)
users = sorted(users, key=lambda user: uid_list.index(user.id))
top_n = []
for rank, (_, score), user in zip(range(1, num + 1), cleaned, users):
user_dict = user.to_dict()
user_dict['rank'] = rank
user_dict['score'] = score
top_n.append(user_dict)
return top_n
| [
"[email protected]"
] | |
2adf288db73ef957c82ad3b82c56653c52cf1dfb | 53e58c213232e02250e64f48b97403ca86cd02f9 | /16/mc/ExoDiBosonResonances/EDBRTreeMaker/test/crab3_analysisQCD_HT200to300.py | ca9031e1be70ac3888d31407f97e120bf09f4de2 | [] | no_license | xdlyu/fullRunII_ntuple_102X | 32e79c3bbc704cfaa00c67ab5124d40627fdacaf | d420b83eb9626a8ff1c79af5d34779cb805d57d8 | refs/heads/master | 2020-12-23T15:39:35.938678 | 2020-05-01T14:41:38 | 2020-05-01T14:41:38 | 237,192,426 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,331 | py | from WMCore.Configuration import Configuration
config = Configuration()
config.section_("General")
config.General.requestName = 'QCD_HT200to300'
config.General.transferLogs = True
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
config.JobType.inputFiles = ['Summer16_07Aug2017_V11_MC_L1FastJet_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK8PFchs.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK8PFchs.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK8PFchs.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK8PFPuppi.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK8PFPuppi.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK8PFPuppi.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK4PFPuppi.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK4PFPuppi.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK4PFPuppi.txt']
#config.JobType.inputFiles = ['PHYS14_25_V2_All_L1FastJet_AK4PFchs.txt','PHYS14_25_V2_All_L2Relative_AK4PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK4PFchs.txt','PHYS14_25_V2_All_L1FastJet_AK8PFchs.txt','PHYS14_25_V2_All_L2Relative_AK8PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK8PFchs.txt']
# Name of the CMSSW configuration file
#config.JobType.psetName = 'bkg_ana.py'
config.JobType.psetName = 'analysis.py'
#config.JobType.allowUndistributedCMSSW = True
config.JobType.sendExternalFolder = True
config.JobType.allowUndistributedCMSSW = True
config.section_("Data")
#config.Data.inputDataset = '/WJetsToLNu_13TeV-madgraph-pythia8-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM'
config.Data.inputDataset = '/QCD_HT200to300_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3_ext1-v2/MINIAODSIM'
config.Data.inputDBS = 'global'
#config.Data.inputDBS = 'phys03'
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob =5
config.Data.totalUnits = -1
config.Data.publication = False
name = 'WWW'
steam_dir = 'xulyu'
#config.Data.outLFNDirBase = '/store/group/dpg_trigger/comm_trigger/TriggerStudiesGroup/STEAM/' + steam_dir + '/' + name + '/'
# This string is used to construct the output dataset name
config.Data.outputDatasetTag = 'QCD_HT200to300'
config.section_("Site")
# Where the output files will be transmitted to
config.Site.storageSite = 'T2_CH_CERN'
| [
"[email protected]"
] | |
9ce38efa925e54d65942de595186c26e3cb97597 | e7a74d4385dcaa0f998340fe944805eb2798e81e | /flatfield/nonLinCorr.py | c1492110b29b2592f762f4be20246c1919a0b7bb | [] | no_license | shenglan0407/run16_notebooks_dump | 3c197977eeb225de97d7165d955107e11a5a09ee | 3513595809e21ac639560056088f1f2472188348 | refs/heads/master | 2020-03-28T15:07:21.737099 | 2018-09-13T00:24:24 | 2018-09-13T00:24:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,065 | py | """
Python example to correct for image spatial dependent non-linearity.
Use function "getCorrectionFunc" in order to generate a correction
function from a intensity dependence measurement of an identical
signal.
"""
import numpy as np
import copy
from scipy import linalg
def iterfy(iterable):
if isinstance(iterable, basestring):
iterable = [iterable]
try:
iter(iterable)
except TypeError:
iterable = [iterable]
return iterable
def polyVal(comps,i0):
"""Multidimensional version of numpy.polyval"""
i0 = np.asarray(iterfy(i0))
pol = np.vander(i0,len(comps))
return np.asarray(np.matrix(pol)*np.matrix(comps.reshape((len(comps),-1)))).reshape((len(i0),)+np.shape(comps)[1:])
def polyDer(comps,m=1):
"""Multidimensional version of numpy.polyder"""
compsf = comps.reshape((len(comps),-1))
n = len(compsf) - 1
y = compsf.reshape((n+1,-1))[:-1] * np.expand_dims(np.arange(n, 0, -1),1)
if m == 0:
val = comps
return val
else:
val = polyDer(y, m - 1)
return val.reshape((n,)+np.shape(comps)[1:])
def polyFit(i0,Imat,order=3, removeOrders=[]):
"""Multidimensional version of numpy.polyfit"""
Imatf = Imat.reshape((len(Imat),-1))
pol = np.vander(i0,order+1)
removeOrders = iterfy(removeOrders)
removeOrders = np.sort(removeOrders)[-1::-1]
for remo in removeOrders:
pol = np.delete(pol,-(remo+1),axis=1)
lhs = copy.copy(pol)
scale = np.sqrt((lhs*lhs).sum(axis=0))
lhs /= scale
comps,resid,rnk,singv = linalg.lstsq(lhs,Imatf)
comps = (comps.T/scale).T
for remo in removeOrders:
comps = np.insert(comps,order-remo,0,axis=0)
return comps.reshape((order+1,)+np.shape(Imat)[1:])
def getCorrectionFunc(dmat=None,i=None,order=5,search_dc_limits=None):
"""
Create nonlinear correction function from a calibration dataset consiting of:
i array of intensity values (floats) of the calibration
dmat ND array of the reference patterns corresponding to values of i,
The first dimension corresponds to the calibration intensity
values and has the same length as i.
order the polynomial order up to which the correction will be
deveoped.
search_dc_limits absolute limits around ic which are used to determine the
calibration value of ic as linear approximation of a short interval.
optional, can sometimes help to avoid strong deviations of the
polynomial approximatiuon from the real measured points.
Returns corrFunc(D,i), a function that takes an ND array input for correction
(1st dimension corresponds to the different intensity values)
as well as the intensity array i.
"""
if search_dc_limits is not None:
search_dc_limits = iterfy(search_dc_limits)
if len(search_dc_limits)==1:
msk = (i>i-np.abs(search_dc_limits)) & (i<i+np.abs(search_dc_limits))
elif len(search_dc_limits)==2:
msk = (i>i-np.min(search_dc_limits)) & (i<i+np.max(search_dc_limits))
        # NOTE: the original lines referenced 'tools.polyFit' / 'tools.polyVal' and an
        # undefined 'Imat'; the local helpers and the 'dmat' argument are used instead.
        # 'i0_wp' (a working-point intensity) is still undefined in this excerpt.
        p0 = polyFit(i[msk], dmat[msk, ...], 2)
        dc = polyVal(p0, i0_wp)
        pc = polyFit(i, dmat - dc, order, removeOrders=[0])
c = lambda(i): polyVal(pc,i) + dc
else:
pc = polyFit(i,dmat,order,removeOrders=[])
c = lambda(i): polyVal(pc,i)
return c
def getCorrectionFunc2(dmat=None,i=None,order=5,search_dc_limits=None):
"""
Create nonlinear correction function from a calibration dataset consiting of:
i array of intensity values (floats) of the calibration
dmat ND array of the reference patterns corresponding to values of i,
The first dimension corresponds to the calibration intensity
values and has the same length as i.
order the polynomial order up to which the correction will be
deveoped.
search_dc_limits absolute limits around ic which are used to determine the
calibration value of ic as linear approximation of a short interval.
optional, can sometimes help to avoid strong deviations of the
polynomial approximatiuon from the real measured points.
Returns corrFunc(D,i), a function that takes an ND array input for correction
(1st dimension corresponds to the different intensity values)
as well as the intensity array i.
"""
if search_dc_limits is not None:
search_dc_limits = iterfy(search_dc_limits)
if len(search_dc_limits)==1:
msk = (i>i-np.abs(search_dc_limits)) & (i<i+np.abs(search_dc_limits))
elif len(search_dc_limits)==2:
msk = (i>i-np.min(search_dc_limits)) & (i<i+np.max(search_dc_limits))
        # NOTE: as above, the 'tools.' prefix and 'Imat' are replaced by the local
        # helpers and 'dmat'; 'i0_wp' remains undefined in this excerpt.
        p0 = polyFit(i[msk], dmat[msk, ...], 2)
        dc = polyVal(p0, i0_wp)
        pc = polyFit(i, dmat - dc, order, removeOrders=[0])
else:
pc = polyFit(i,dmat,order,removeOrders=[])
return pc
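# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Shows how a correction function might be built from a calibration scan and then
# applied at new intensity values. The array shapes and values are assumptions.
if __name__ == '__main__':
    # Synthetic calibration: 20 intensity values, 32x32 reference patterns
    i_cal = np.linspace(1.0, 10.0, 20)
    dmat_cal = i_cal[:, None, None] * np.ones((20, 32, 32))
    corr = getCorrectionFunc(dmat=dmat_cal, i=i_cal, order=3)
    # Evaluate the polynomial model at new intensity values
    predicted = corr(np.array([2.5, 7.5]))
    print(predicted.shape)  # (2, 32, 32)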
| [
"[email protected]"
] | |
85c3158111d9b047512556ed7c13b3065ba202d2 | 57a20109f134ec979e2a3ef408c78b8375e089d2 | /bidpazari/core/runtime/user.py | f9a774513d7dfb56f95fa34b511a038286759fe0 | [] | no_license | akca/bidpazari | 2e166225b0f6fbbb87f1f4bf879de29453c9525c | f69627ce51a5bd9fd23ea4772b27c037adb48fc3 | refs/heads/master | 2022-11-28T16:17:13.704175 | 2020-01-11T12:06:43 | 2020-01-11T12:06:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,323 | py | from decimal import Decimal
from functools import wraps
from django.utils.functional import cached_property
from bidpazari.core.exceptions import (
InsufficientBalanceError,
NonPersistentObjectError,
)
from bidpazari.core.models import User, UserHasItem
from bidpazari.core.runtime.common import runtime_manager
from bidpazari.core.runtime.exceptions import ItemAlreadyOnSale
from bidpazari.core.runtime.watchers import ItemWatcher
def persistent_user_proxy_method(fn):
@wraps(fn)
def wrapper(self, *args, **kwargs):
if not self.persistent_user:
raise NonPersistentObjectError(
f"{fn.__name__} must be called on a persisted user"
)
return fn(self, *args, **kwargs)
return wrapper
class RuntimeUser:
def __init__(self, username, email, password_raw, first_name, last_name):
self.username = username
self.email = email
self._password = password_raw
self.first_name = first_name
self.last_name = last_name
self.persistent_user = None
self.initial_balance = Decimal(0)
self.reserved_balance = Decimal(0)
"""
Persistence methods
"""
def persist(self):
if self.persistent_user:
return
self.persistent_user = User.objects.create_user(
self.username,
self.email,
self._password,
first_name=self.first_name,
last_name=self.last_name,
)
@classmethod
def from_persistent_user(cls, user: User):
runtime_user = cls(
user.username,
user.email,
password_raw=None,
first_name=user.first_name,
last_name=user.last_name,
)
runtime_user.persistent_user = user
runtime_user.initial_balance = user.balance
return runtime_user
@cached_property
@persistent_user_proxy_method
def id(self):
return self.persistent_user.id
"""
Proxy methods
"""
@persistent_user_proxy_method
def verify(self, verification_number):
self.persistent_user.verify(verification_number)
@persistent_user_proxy_method
def change_password(self, new_password, old_password=None):
return self.persistent_user.change_password(new_password, old_password)
@persistent_user_proxy_method
def list_items(self, item_type=None, on_sale=None):
return set(self.persistent_user.list_items(item_type, on_sale))
@persistent_user_proxy_method
def create_auction(self, item_id: int, bidding_strategy_identifier: str, **kwargs):
uhi = UserHasItem.objects.get(user_id=self.id, item_id=item_id, is_sold=False)
runtime_manager.create_auction(
uhi=uhi, bidding_strategy_identifier=bidding_strategy_identifier, **kwargs
)
return uhi.id
@persistent_user_proxy_method
def add_balance_transaction(self, amount):
self.persistent_user.add_balance(amount)
self.initial_balance = self.persistent_user.balance
@property
@persistent_user_proxy_method
def transaction_history(self):
return self.persistent_user.transaction_history
@staticmethod
def register_item_watcher(callback_method, item_type=None):
item_watcher = ItemWatcher(callback_method, item_type=item_type)
runtime_manager.register_item_watcher(item_watcher)
@property
def reservable_balance(self):
return self.initial_balance - self.reserved_balance
def reserve_balance(self, amount):
if amount > self.reservable_balance:
raise InsufficientBalanceError("Amount is higher than reservable balance.")
self.reserved_balance += amount
def unreserve_balance(self, amount):
if amount > self.reserved_balance:
raise InsufficientBalanceError("Amount is higher than reserved balance.")
self.reserved_balance -= amount
def unreserve_all(self):
self.reserved_balance = Decimal(0)
def connect(self):
runtime_manager.online_users.add(self)
def disconnect(self):
runtime_manager.online_users.remove(self)
def __hash__(self):
return self.id
def __eq__(self, other):
return isinstance(other, RuntimeUser) and self.id == other.id
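# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Demonstrates the in-memory balance reservation logic above; the user details are
# made up and nothing is persisted, so this sketch touches no database.
if __name__ == '__main__':
    demo = RuntimeUser('alice', '[email protected]', 'secret', 'Alice', 'Example')
    demo.initial_balance = Decimal(100)
    demo.reserve_balance(Decimal(30))       # reservable balance is now 70
    demo.unreserve_balance(Decimal(10))     # reserved balance drops to 20
    try:
        demo.reserve_balance(Decimal(200))  # more than reservable -> error
    except InsufficientBalanceError as exc:
        print(exc)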
| [
"[email protected]"
] | |
347f42ae15c798ecf8ca8ac0f5ffdc4317729626 | 1dffbdb1301297b0e7eadc7bfb67cd49601e6322 | /treshold_enhanced_method.py | aa37bb86d54514ef3cf776f2a350737c408342ec | [] | no_license | Thunrada01082548/record. | 7d1879e17b8ca8d23105276da5ea6b0170e9fa3a | cbcf31369e7937e11989f5e21905504de164c0c1 | refs/heads/main | 2023-08-10T09:06:36.311228 | 2021-09-08T14:17:15 | 2021-09-08T14:17:15 | 387,848,188 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,364 | py | import numpy as np #importing libraries
import matplotlib.pyplot as plt #importing libraries
import scipy.io.wavfile as wavfile #importing libraries
import sounddevice as sd
import scipy.fft as fft
import scipy.fftpack as fftpk
plt.rcParams['figure.figsize'] = [10,16]
plt.rcParams.update({'font.size': 18})
plt.style.use('seaborn')
fs = 44100 # Sample rate
seconds = 5 # Duration of recording
#record the input
data = sd.rec(int(seconds * fs), samplerate=fs, channels=1, dtype = 'float32')
data = data.reshape(len(data))
sd.wait()
sd.play(data, fs)
sd.wait()
n = len(data)
t=np.linspace(0,len(data)/fs,n) # sample index to time
fhat = fft.fft(data) #computes the fft
psd = np.real(fhat * np.conj(fhat))/n # power spectral density (real-valued so it can be compared with the threshold below)
freq = fftpk.fftfreq(len(data), (1.0/fs)) #frequency array
idxs_half = np.arange(0, n//2) #first half index
th1=int(len(fhat)/fs*500) #index place for 500Hz
th2=int(len(fhat)/fs*8000) #index place for 8000Hz
k1=int(len(fhat)/fs*1000) #index place for 1000Hz
k2=int(len(fhat)/fs*10000) #index place for 10000Hz
n1=int(len(fhat)/fs*0.1) #index place for 0.1Hz
n2=int(len(fhat)/fs*90) #index place for 90Hz
n3=int(len(fhat)/fs*10001) #index place for 10001Hz
n4=int(len(fhat)/fs*25000) #index place for 25000Hz
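# Editor's note (added): the bin-index formula above maps a frequency f (Hz) to an
# FFT bin as bin = f * N / fs, where N = len(fhat) and fs is the sample rate.
# Worked example with the values used here: N = 5 s * 44100 Hz = 220500 samples,
# so 500 Hz lands at bin int(220500/44100*500) = 2500.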
threshold = 0.25
psd_idxs = np.ones(n)
psd_idxs[:th1] = psd[:th1] > threshold #array of 0 and 1
psd_idxs[th2:] = psd[th2:] > threshold #array of 0 and 1
psd_clean = psd * psd_idxs #zero out all the unnecessary powers
fhat_clean = psd_idxs * fhat #used to retrieve the signal
fhat = fhat_clean
fd=abs(fhat) #Taking absolute values
x=fd[0:int((len(fd)/2)-1)] #Half Range
faxis=np.linspace(0,fs/2,len(x)) #Range frequency axis
fhat[n3:n4]=fhat[n3:n4]/60 # Noise reduction
fhat[int(len(fhat)-n4):int(len(fhat)-n3)]=fhat[int(len(fhat)-n4):int(len(fhat)-n3)]/60
fhat[n1:n2]=fhat[n1:n2]/60
fhat[int(len(fhat)-n2):int(len(fhat)-n1)]=fhat[int(len(fhat)-n2):int(len(fhat)-n1)]/60
fhat[k1:k2]=fhat[k1:k2]*10 # harmonics amplification
fhat[int(len(fhat)-k2):int(len(fhat)-k1)]=fhat[int(len(fhat)-k2):int(len(fhat)-k1)]*10
enhanced=np.fft.ifft(fhat) #IFFT function transform to time domain
clr=np.real(enhanced) # Real part extraction
audio = clr.astype(np.float32) #Convert to 32 bit data
sd.play(audio, fs)
sd.wait()
wavfile.write('improved.wav',fs,audio) #writing enhanced audio file | [
"[email protected]"
] | |
1ca9421c30d507ffd0c20ef335be2cd7e57b5697 | 347a6aac6fc40edab03d75a53e89053aeeb8fd72 | /quizzes/Quiz7.py | f9e8bdc18bfc69f7deba22dcd76922245d4bc853 | [] | no_license | jwilke/cs373 | 0d5de4676c13e83c8b9dbcab66140be53cebeaf9 | 7923f3710eaa76d38d8261d6dc596f2bfaf12a8e | refs/heads/master | 2021-01-16T00:23:35.227487 | 2012-07-05T22:53:14 | 2012-07-05T22:53:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,043 | py | #!/usr/bin/env python
"""
CS373: Quiz #7 (9 pts)
"""
""" ----------------------------------------------------------------------
1. In the paper, "A Bug and a Crash" about the Ariane 5, what was the
software bug?
(2 pts)
the conversion of a 64-bit number to a 16-bit number
"""
""" ----------------------------------------------------------------------
2. In the paper, "Mariner 1", what was the software bug?
(1 pt)
the omission of a hyphen
"""
""" ----------------------------------------------------------------------
3. What is the output of the following program?
(2 pts)
True
False
"""
a = [2, 3, 4]
b = a
b += [5]
print a is b
a = (2, 3, 4)
b = a
b += (5,)
print a is b
""" ----------------------------------------------------------------------
4. What semantic difference is there between Java's conditional expression
and Python's? Why?
(4 pts)
Java's then and else clause must be of the same type
Java is typed and the compiler must be able to determine the type of the
entire conditional expression
"""
| [
"[email protected]"
] | |
af8a2ebd0fe7f5f06e5aeccc636bf8704d77ef8a | ccf6945c819e25a3eae92103116b7d1063e21871 | /chess_piece_detection/app/other_models/alexnet.py | 6da26d5ea58921656765650c08f6ac477852e5d1 | [] | no_license | ace-racer/Extending-Board-Games-using-deep-learning | 67355eabec91ed8afb1150e4c2d6bb0068e74910 | 7171b14c80be35b0ee882322c318307779379e9a | refs/heads/master | 2020-04-10T05:32:18.705335 | 2019-04-22T11:42:47 | 2019-04-22T11:42:47 | 160,830,611 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,261 | py | # Import necessary components to build LeNet
# Reference: https://github.com/eweill/keras-deepcv/blob/master/models/classification/alexnet.py
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D, ZeroPadding2D
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l2
from keras.optimizers import SGD, RMSprop, Adam
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
# Other imports
import numpy as np
import os
# custom imports
import appconfigs
import modelconfigs
import constants
import utils
def train_alexnet_model(model_configs, train_model=True, num_samples=None):
print("Alexnet model...")
X_train, y_train = utils.get_required_data_with_labels_for_CNN(appconfigs.location_of_train_data, num_samples)
X_test, y_test = utils.get_required_data_with_labels_for_CNN(appconfigs.location_of_test_data, num_samples)
# Initialize model
alexnet = Sequential()
# Layer 1
alexnet.add(Conv2D(96, (11, 11), input_shape=(200, 200, 3),
padding='same', kernel_regularizer=l2(0.)))
alexnet.add(BatchNormalization())
alexnet.add(Activation('relu'))
alexnet.add(MaxPooling2D(pool_size=(2, 2)))
# Layer 2
alexnet.add(Conv2D(256, (5, 5), padding='same'))
alexnet.add(BatchNormalization())
alexnet.add(Activation('relu'))
alexnet.add(MaxPooling2D(pool_size=(2, 2)))
# Layer 3
alexnet.add(ZeroPadding2D((1, 1)))
alexnet.add(Conv2D(512, (3, 3), padding='same'))
alexnet.add(BatchNormalization())
alexnet.add(Activation('relu'))
alexnet.add(MaxPooling2D(pool_size=(2, 2)))
# Layer 4
alexnet.add(ZeroPadding2D((1, 1)))
alexnet.add(Conv2D(1024, (3, 3), padding='same'))
alexnet.add(BatchNormalization())
alexnet.add(Activation('relu'))
# Layer 5
alexnet.add(ZeroPadding2D((1, 1)))
alexnet.add(Conv2D(1024, (3, 3), padding='same'))
alexnet.add(BatchNormalization())
alexnet.add(Activation('relu'))
alexnet.add(MaxPooling2D(pool_size=(2, 2)))
# Layer 6
alexnet.add(Flatten())
alexnet.add(Dense(3072))
alexnet.add(BatchNormalization())
alexnet.add(Activation('relu'))
alexnet.add(Dropout(0.5))
# Layer 7
alexnet.add(Dense(4096))
alexnet.add(BatchNormalization())
alexnet.add(Activation('relu'))
alexnet.add(Dropout(0.5))
# Layer 8
alexnet.add(Dense(constants.num_output_classes))
alexnet.add(BatchNormalization())
alexnet.add(Activation('softmax'))
batch_size = model_configs["batch_size"][0]
# number of training epochs
nb_epoch = model_configs["epochs"][0]
if train_model:
filepath = os.path.join(appconfigs.model_folder_location, model_configs["model_weights_file_name"][0])
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1,
save_best_only=True,
mode='max')
earlystop = EarlyStopping(monitor='val_acc', min_delta=0.001, patience=10,
verbose=1, mode='max')
tensorboard = TensorBoard(log_dir=appconfigs.tensorboard_logs_folder_location, histogram_freq=0, write_graph=True, write_images=True)
callbacks_list = [checkpoint, earlystop, tensorboard]
adam = Adam(lr=model_configs["lr"][0], beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
alexnet.compile(loss='sparse_categorical_crossentropy',
optimizer=adam,
metrics=['accuracy'])
hist = alexnet.fit(X_train, y_train, shuffle=True, batch_size=batch_size,
epochs=nb_epoch, verbose=1,
validation_data=(X_test, y_test), callbacks=callbacks_list)
return hist, alexnet, X_test, y_test
else:
adam = Adam(lr=model_configs["lr"][0], beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
alexnet.compile(loss='sparse_categorical_crossentropy',
optimizer=adam,
metrics=['accuracy'])
return None, alexnet, X_test, y_test
| [
"[email protected]"
] | |
97358e87fe7c34d33d52c336aed1c0e46d87e94c | fc8d217261d47198b71fdd611b62d08522bd0a39 | /OOP/oop2.py | 39bef5385ec4d6a2e6df4a72df7b2275531122b2 | [] | no_license | PatrickBruso/Code_in_Place | dbd1e57f3dabea33a8fc7f24af703699d9d1930a | 99fdd278c638b29c90032f3b737b518a55a62f3f | refs/heads/master | 2023-07-13T08:06:11.167278 | 2021-08-26T23:04:29 | 2021-08-26T23:04:29 | 369,669,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 873 | py | """ Extra lesson in object-oriented programming """
import csv
from participant import Participant # importing our Participant class from participant.py
STUDENT_DATA = "students.csv"
def read_student_data():
with open("students.csv") as f:
reader = csv.DictReader(f)
for line in reader:
student_name = line['name'] # The CSV file has this as the name for some reason
student_email = line['email']
section_number = int(line['section_number'])
new_student = Participant(student_name, student_email, "student")
print(f"The participant's name is {new_student.name}")
print(f"The participant's email is {new_student.email}")
print(f"The participant's role is {new_student.role}")
def main():
read_student_data()
if __name__ == "__main__":
main()
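# For reference, a minimal Participant class compatible with read_student_data() could look
# like the sketch below; the real definition lives in participant.py and may carry more
# fields than the three attributes accessed above.
#
#     class Participant:
#         def __init__(self, name, email, role):
#             self.name = name
#             self.email = email
#             self.role = role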
| [
"[email protected]"
] | |
3415d6c90827899f16903882ab9e54d437fd5b09 | 34eb0a65adb4290d6224075a2700d432d5649de6 | /tachyon/common/restclient.py | 1d6c499cbfc98be41954791218186029b21669a2 | [
"BSD-3-Clause"
] | permissive | Vuader/tachyon_common | ae950e13ac14df590cc76b3c7f98c26434104217 | a675d13a251aeda16dba7a416354872ee41509e6 | refs/heads/master | 2021-01-19T09:51:55.904274 | 2017-02-15T11:50:00 | 2017-02-15T11:50:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,684 | py | # Tachyon OSS Framework
#
# Copyright (c) 2016-2017, see Authors.txt
# All rights reserved.
#
# LICENSE: (BSD3-Clause)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENTSHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
try:
    # python 3
    import _thread as thread
except ImportError:
    # python 2
    import thread
import json
import tachyon.ui
try:
# python 3
from io import BytesIO
except ImportError:
# python 2
from StringIO import StringIO as BytesIO
try:
# python 3
from urllib.parse import urlencode
except ImportError:
# python 2
from urllib import urlencode
import nfw
log = logging.getLogger(__name__)
sessions = {}
class RestClient(nfw.RestClient):
def __init__(self, url, username=None, password=None, domain=None):
global sessions
self.thread_id = thread.get_ident()
if self.thread_id not in sessions:
sessions[self.thread_id] = {}
self.session = sessions[self.thread_id]
self.url = url
if url in self.session:
self.username = self.session[url]['username']
self.password = self.session[url]['password']
self.domain = self.session[url]['domain']
self.tachyon_headers = self.session[url]['headers']
super(RestClient, self).__init__()
else:
self.session[url] = {}
self.session[url]['username'] = username
self.session[url]['password'] = password
self.session[url]['domain'] = domain
self.session[url]['headers'] = {}
self.username = username
self.password = password
self.domain = domain
super(RestClient, self).__init__()
self.tachyon_headers = self.session[url]['headers']
if username is not None:
            self.authenticate(username, password, domain)
def authenticate(self, username, password, domain):
url = self.url
auth_url = "%s/login" % (url,)
if 'token' in self.tachyon_headers:
del self.tachyon_headers['token']
self.tachyon_headers['X-Domain'] = domain
data = {}
data['username'] = username
data['password'] = password
data['expire'] = 1
server_headers, result = self.execute("POST", auth_url,
data, self.tachyon_headers)
if 'token' in result:
self.token = result['token']
self.tachyon_headers['X-Auth-Token'] = self.token
else:
raise tachyon.ui.exceptions.Authentication("Could not connect/authenticate")
self.session[url]['headers'] = self.tachyon_headers
return result
def token(self, token, domain, tenant):
log.error("TOKEN %s" % (token,))
url = self.url
auth_url = "%s/login" % (url,)
self.tachyon_headers['X-Tenant'] = tenant
self.tachyon_headers['X-Domain'] = domain
self.tachyon_headers['X-Auth-Token'] = token
server_headers, result = self.execute("GET", auth_url,
None, self.tachyon_headers)
if 'token' in result:
self.token = token
else:
raise tachyon.ui.exceptions.Authentication("Could not connect/authenticate")
self.session[url]['headers'] = self.tachyon_headers
return result
    def domain(self, domain):
        self.tachyon_headers['X-Domain'] = domain
        self.session[self.url]['headers'] = self.tachyon_headers
    def tenant(self, tenant):
        self.tachyon_headers['X-Tenant'] = tenant
        self.session[self.url]['headers'] = self.tachyon_headers
def execute(self, request, url, obj=None, headers=None):
if obj is not None:
data = json.dumps(obj)
else:
data = None
if self.url not in url:
url = "%s/%s" % (self.url, url)
if headers is None:
headers = self.tachyon_headers
else:
headers.update(self.tachyon_headers)
server_headers, response = super(RestClient, self).execute(request, url, data, headers)
if response is not None:
response = json.loads(response)
return [server_headers, response]
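# Example usage (a sketch): the endpoint URL, credentials, domain and resource path below are
# placeholders -- the real values depend on the Tachyon deployment being targeted.
if __name__ == '__main__':
    client = RestClient('https://tachyon.example.com/api',
                        username='admin', password='secret', domain='default')
    server_headers, users = client.execute('GET', 'users')
    print(users)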
| [
"[email protected]"
] | |
b26f76a2669cc4d09dca8971a42e76453803ec6c | 154b94193e7a0997921508bbbaee52816c844ed6 | /six_channel_model_whole_datasets.py | 1e7602cb024b3bc0bb6b08543561d56ea13800cf | [] | no_license | Chris-XY/git_for_win | 803121e68362a3f2647fd6a90fa9832687d44bae | bb1ebd378508d4eb0b5fc0cf87e72e6f7cda334f | refs/heads/master | 2022-06-16T03:02:18.242943 | 2020-05-07T01:19:01 | 2020-05-07T01:19:01 | 261,312,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,153 | py | import torch
import pandas as pd
import math
import numpy as np
import torch.nn as nn
import modules
import utils
if __name__ == "__main__":
if torch.cuda.is_available():
device = torch.device("cuda:0") # you can continue going on here, like cuda:1 cuda:2....etc.
print("Running on the GPU")
else:
device = torch.device("cpu")
print("Running on the CPU")
# External Dimension
max_temps = pd.read_csv('data/maximum_temp.csv')
min_temps = pd.read_csv('data/minimum_temp.csv')
solar_exps = pd.read_csv('data/solar_exposure.csv')
rainfalls = pd.read_csv('data/rainfall.csv')
jul_normal_tensor = utils.external_dimension_creator(2018, 7, 31, "20180701", max_temps, min_temps, solar_exps,
rainfalls)
aug_normal_tensor = utils.external_dimension_creator(2018, 8, 31, "20180801", max_temps, min_temps, solar_exps,
rainfalls)
sep_normal_tensor = utils.external_dimension_creator(2018, 9, 30, "20180901", max_temps, min_temps, solar_exps,
rainfalls)
oct_normal_tensor = utils.external_dimension_creator(2018, 10, 31, "20181001", max_temps, min_temps, solar_exps,
rainfalls)
nov_normal_tensor = utils.external_dimension_creator(2018, 11, 30, "20181101", max_temps, min_temps, solar_exps,
rainfalls)
dec_normal_tensor = utils.external_dimension_creator(2018, 12, 31, "20181201", max_temps, min_temps, solar_exps,
rainfalls)
jan_normal_tensor = utils.external_dimension_creator(2019, 1, 31, "20190101", max_temps, min_temps, solar_exps,
rainfalls)
feb_normal_tensor = utils.external_dimension_creator(2019, 2, 28, "20190201", max_temps, min_temps, solar_exps,
rainfalls)
mar_normal_tensor = utils.external_dimension_creator(2019, 3, 31, "20190301", max_temps, min_temps, solar_exps,
rainfalls)
apr_normal_tensor = utils.external_dimension_creator(2019, 4, 30, "20190401", max_temps, min_temps, solar_exps,
rainfalls)
may_normal_tensor = utils.external_dimension_creator(2019, 5, 31, "20190501", max_temps, min_temps, solar_exps,
rainfalls)
jun_normal_tensor = utils.external_dimension_creator(2019, 6, 30, "20190601", max_temps, min_temps, solar_exps,
rainfalls)
external_dimension_for_day = torch.cat((jul_normal_tensor[504:], aug_normal_tensor, sep_normal_tensor,
oct_normal_tensor, nov_normal_tensor, dec_normal_tensor, jan_normal_tensor,
feb_normal_tensor, mar_normal_tensor, apr_normal_tensor, may_normal_tensor,
jun_normal_tensor), 0)
# Create Data for ST CNN part
matrix_data_7 = utils.data_deal_CNN('data/result_jul.csv', 31, '-1')
matrix_data_8 = utils.data_deal_CNN('data/result_aug.csv', 31, '-1')
matrix_data_9 = utils.data_deal_CNN('data/result_sep.csv', 30, '-1')
matrix_data_10 = utils.data_deal_CNN('data/result_oct.csv', 31, '-1')
matrix_data_11 = utils.data_deal_CNN('data/result_nov.csv', 30, '-1')
matrix_data_12 = utils.data_deal_CNN('data/result_dec.csv', 31, '0')
matrix_data_1 = utils.data_deal_CNN('data/result_jan.csv', 31, '-1')
matrix_data_2 = utils.data_deal_CNN('data/result_feb.csv', 28, '-1')
matrix_data_3 = utils.data_deal_CNN('data/result_mar.csv', 31, '-1')
matrix_data_4 = utils.data_deal_CNN('data/result_apr.csv', 30, '-1')
matrix_data_5 = utils.data_deal_CNN('data/result_may.csv', 31, '-1')
matrix_data_6 = utils.data_deal_CNN('data/result_jun.csv', 30, '-1')
matrix_data_CNN = torch.cat((matrix_data_7, matrix_data_8, matrix_data_9, matrix_data_10, matrix_data_11,
matrix_data_12, matrix_data_1, matrix_data_2, matrix_data_3, matrix_data_4,
matrix_data_5, matrix_data_6), 0)
# Create three channels
# different hours
timeline_data_C_ST = torch.zeros((365 * 24 - 504), 3, 2, 100, 100)
# different day same time
timeline_data_P_ST = torch.zeros((365 * 24 - 504), 3, 2, 100, 100)
# different week same time
timeline_data_T_ST = torch.zeros((365 * 24 - 504), 3, 2, 100, 100)
# result_ST = torch.zeros((365 * 24 - 504), 2, 100, 100)
i = 0
for T in range(504, 365 * 24):
if T % 10000 == 0:
print(T)
timeline_data_C_ST[i][0][0] = matrix_data_CNN[T - 1][0]
timeline_data_C_ST[i][1][0] = matrix_data_CNN[T - 2][0]
timeline_data_C_ST[i][2][0] = matrix_data_CNN[T - 3][0]
timeline_data_C_ST[i][0][1] = matrix_data_CNN[T - 1][1]
timeline_data_C_ST[i][1][1] = matrix_data_CNN[T - 2][1]
timeline_data_C_ST[i][2][1] = matrix_data_CNN[T - 3][1]
timeline_data_P_ST[i][0][0] = matrix_data_CNN[T - 24][0]
timeline_data_P_ST[i][1][0] = matrix_data_CNN[T - 24 - 24][0]
timeline_data_P_ST[i][2][0] = matrix_data_CNN[T - 24 - 24 - 24][0]
timeline_data_P_ST[i][0][1] = matrix_data_CNN[T - 24][1]
timeline_data_P_ST[i][1][1] = matrix_data_CNN[T - 24 - 24][1]
timeline_data_P_ST[i][2][1] = matrix_data_CNN[T - 24 - 24 - 24][1]
timeline_data_T_ST[i][0][0] = matrix_data_CNN[T - 7 * 24 * 1][0]
timeline_data_T_ST[i][1][0] = matrix_data_CNN[T - 7 * 24 * 2][0]
timeline_data_T_ST[i][2][0] = matrix_data_CNN[T - 7 * 24 * 3][0]
timeline_data_T_ST[i][0][1] = matrix_data_CNN[T - 7 * 24 * 1][1]
timeline_data_T_ST[i][1][1] = matrix_data_CNN[T - 7 * 24 * 2][1]
timeline_data_T_ST[i][2][1] = matrix_data_CNN[T - 7 * 24 * 3][1]
i += 1
board = math.ceil((365 * 24 - 504) * 0.8)
train_X_C_ST, test_X_C_ST = timeline_data_C_ST[0:board], timeline_data_C_ST[board:-1]
train_X_P_ST, test_X_P_ST = timeline_data_P_ST[0:board], timeline_data_P_ST[board:-1]
train_X_T_ST, test_X_T_ST = timeline_data_T_ST[0:board], timeline_data_T_ST[board:-1]
train_X_Ext_ST, test_X_Ext_ST = external_dimension_for_day[0:board], external_dimension_for_day[board:-1]
# build a mask
data_jul = pd.read_csv('data/result_jul.csv')
data_aug = pd.read_csv('data/result_aug.csv')
data_sep = pd.read_csv('data/result_sep.csv')
data_oct = pd.read_csv('data/result_oct.csv')
data_nov = pd.read_csv('data/result_nov.csv')
data_dec = pd.read_csv('data/result_dec.csv')
data_jan = pd.read_csv('data/result_jan.csv')
data_feb = pd.read_csv('data/result_feb.csv')
data_mar = pd.read_csv('data/result_mar.csv')
data_apr = pd.read_csv('data/result_apr.csv')
data_may = pd.read_csv('data/result_may.csv')
data_jun = pd.read_csv('data/result_jun.csv')
intersection_matrix_in = np.zeros((100, 100))
intersection_matrix_out = np.zeros((100, 100))
plot_data_lists = [data_jul, data_aug, data_sep, data_oct, data_nov,
data_dec, data_jan, data_feb, data_mar, data_apr,
data_may, data_jun]
for plot_data in plot_data_lists:
for i in range(len(plot_data)):
intersection_matrix_in[plot_data['bslat_new'][i] - 1][plot_data['bslon_new'][i] - 1] += 1
intersection_matrix_out[plot_data['aslat_new'][i] - 1][plot_data['aslon_new'][i] - 1] += 1
x_in = intersection_matrix_in
mask_in = x_in < 1
x_out = intersection_matrix_out
mask_out = x_out < 1
# Create Data for GNN part
stop_distribution_BSID = utils.stop_distribution_reader('BSID_top_list_10.txt')
stop_distribution_ASID = utils.stop_distribution_reader('ASID_top_list_10.txt')
stop_distribution = [stop[0] for stop in stop_distribution_ASID]
cols = 10 + 1 # 10 + 1
rows = len(stop_distribution)
matrix_data_7, ground_truth_7 = utils.data_deal_GNN('data/result_jul.csv', 31,
stop_distribution, stop_distribution_BSID,
stop_distribution_ASID, '-1'
)
matrix_data_8, ground_truth_8 = utils.data_deal_GNN('data/result_aug.csv', 31,
stop_distribution, stop_distribution_BSID,
stop_distribution_ASID, '-1'
)
matrix_data_9, ground_truth_9 = utils.data_deal_GNN('data/result_sep.csv', 30,
stop_distribution, stop_distribution_BSID,
stop_distribution_ASID, '-1'
)
matrix_data_10, ground_truth_10 = utils.data_deal_GNN('data/result_oct.csv', 31,
stop_distribution,
stop_distribution_BSID,
stop_distribution_ASID, '-1'
)
matrix_data_11, ground_truth_11 = utils.data_deal_GNN('data/result_nov.csv', 30,
stop_distribution,
stop_distribution_BSID,
stop_distribution_ASID, '-1'
)
matrix_data_12, ground_truth_12 = utils.data_deal_GNN('data/result_dec.csv', 31,
stop_distribution,
stop_distribution_BSID,
stop_distribution_ASID, '0'
)
matrix_data_1, ground_truth_1 = utils.data_deal_GNN('data/result_jan.csv', 31,
stop_distribution, stop_distribution_BSID,
stop_distribution_ASID, '-1'
)
matrix_data_2, ground_truth_2 = utils.data_deal_GNN('data/result_feb.csv', 28,
stop_distribution, stop_distribution_BSID,
stop_distribution_ASID, '-1'
)
matrix_data_3, ground_truth_3 = utils.data_deal_GNN('data/result_mar.csv', 31,
stop_distribution, stop_distribution_BSID,
stop_distribution_ASID, '-1'
)
matrix_data_4, ground_truth_4 = utils.data_deal_GNN('data/result_apr.csv', 30,
stop_distribution, stop_distribution_BSID,
stop_distribution_ASID, '-1'
)
matrix_data_5, ground_truth_5 = utils.data_deal_GNN('data/result_may.csv', 31,
stop_distribution, stop_distribution_BSID,
stop_distribution_ASID, '-1'
)
matrix_data_6, ground_truth_6 = utils.data_deal_GNN('data/result_jun.csv', 30,
stop_distribution, stop_distribution_BSID,
stop_distribution_ASID, '-1'
)
# Create three channels
# different hours
timeline_data_C_GNN = torch.zeros((365 * 24 - 504), 3, 2, rows, cols - 1)
# different day same time
timeline_data_P_GNN = torch.zeros((365 * 24 - 504), 3, 2, rows, cols - 1)
# different week same time
timeline_data_T_GNN = torch.zeros((365 * 24 - 504), 3, 2, rows, cols - 1)
result_GNN = torch.zeros((365 * 24 - 504), 2, rows)
i = 0
matrix_data_GNN = torch.cat((matrix_data_7, matrix_data_8, matrix_data_9, matrix_data_10, matrix_data_11,
matrix_data_12, matrix_data_1, matrix_data_2, matrix_data_3, matrix_data_4,
matrix_data_5, matrix_data_6), 0)
ground_truth_GNN = torch.cat((ground_truth_7, ground_truth_8, ground_truth_9, ground_truth_10,
ground_truth_11, ground_truth_12, ground_truth_1, ground_truth_2,
ground_truth_3, ground_truth_4, ground_truth_5, ground_truth_6))
for T in range(504, 365*24):
if T % 10000 == 0:
print(T)
timeline_data_C_GNN[i][0][0] = matrix_data_GNN[T - 1][0]
timeline_data_C_GNN[i][1][0] = matrix_data_GNN[T - 2][0]
timeline_data_C_GNN[i][2][0] = matrix_data_GNN[T - 3][0]
timeline_data_C_GNN[i][0][1] = matrix_data_GNN[T - 1][1]
timeline_data_C_GNN[i][1][1] = matrix_data_GNN[T - 2][1]
timeline_data_C_GNN[i][2][1] = matrix_data_GNN[T - 3][1]
timeline_data_P_GNN[i][0][0] = matrix_data_GNN[T - 24][0]
timeline_data_P_GNN[i][1][0] = matrix_data_GNN[T - 24 - 24][0]
timeline_data_P_GNN[i][2][0] = matrix_data_GNN[T - 24 - 24 - 24][0]
timeline_data_P_GNN[i][0][1] = matrix_data_GNN[T - 24][1]
timeline_data_P_GNN[i][1][1] = matrix_data_GNN[T - 24 - 24][1]
timeline_data_P_GNN[i][2][1] = matrix_data_GNN[T - 24 - 24 - 24][1]
timeline_data_T_GNN[i][0][0] = matrix_data_GNN[T - 7 * 24][0]
timeline_data_T_GNN[i][1][0] = matrix_data_GNN[T - 7 * 24 * 2][0]
timeline_data_T_GNN[i][2][0] = matrix_data_GNN[T - 7 * 24 * 3][0]
timeline_data_T_GNN[i][0][1] = matrix_data_GNN[T - 7 * 24][1]
timeline_data_T_GNN[i][1][1] = matrix_data_GNN[T - 7 * 24 * 2][1]
timeline_data_T_GNN[i][2][1] = matrix_data_GNN[T - 7 * 24 * 3][1]
result_GNN[i][0] = ground_truth_GNN[T][0]
result_GNN[i][1] = ground_truth_GNN[T][1]
i += 1
board = math.ceil((365 * 24 - 504) * 0.8)
external_dimension_for_day_GNN = external_dimension_for_day
train_X_C_GNN, test_X_C_GNN = timeline_data_C_GNN[0:board], timeline_data_C_GNN[board:-1]
train_X_P_GNN, test_X_P_GNN = timeline_data_P_GNN[0:board], timeline_data_P_GNN[board:-1]
train_X_T_GNN, test_X_T_GNN = timeline_data_T_GNN[0:board], timeline_data_T_GNN[board:-1]
train_X_Ext_GNN, test_X_Ext_GNN = external_dimension_for_day_GNN[0:board], external_dimension_for_day_GNN[board:-1]
train_Y_GNN, test_Y_GNN = result_GNN[0:board], result_GNN[board:-1]
stop_number_location = utils.stop_location_reader('stops_locations.txt')
# ================================================================================
gCNN = modules.GCModel()
optimizer_GNN = torch.optim.Adam(gCNN.parameters(), lr=0.001)
    criterion_GNN = nn.MSELoss(reduction='mean')
loss_list_GNN = []
for epoch in range(2):
for i in range(len(train_X_C_GNN)):
output = gCNN(train_X_C_GNN[i], train_X_P_GNN[i], train_X_T_GNN[i], train_X_Ext_GNN[i], train_X_C_ST[i],
train_X_P_ST[i], train_X_T_ST[i], mask_in, mask_out, stop_distribution, stop_number_location)
output_2, min_in, max_in, min_out, max_out = utils.minmaxscalar_for_torch_min_max_GNN(train_Y_GNN[i])
loss = torch.sqrt(criterion_GNN(output, output_2))
print(loss)
loss_list_GNN.append(loss)
loss.backward()
optimizer_GNN.step()
optimizer_GNN.zero_grad()
sum_loss = torch.zeros(1)
for i in range(len(test_Y_GNN)):
output = gCNN(test_X_C_GNN[i], test_X_P_GNN[i], test_X_T_GNN[i], test_X_Ext_GNN[i], test_X_C_ST[i],
test_X_P_ST[i], test_X_T_ST[i], mask_in, mask_out, stop_distribution, stop_number_location)
_, min_in, max_in, min_out, max_out = utils.minmaxscalar_for_torch_min_max_GNN(test_Y_GNN[i])
loss = torch.sqrt(criterion_GNN(output[0][0] * (max_in - min_in) + min_in, test_Y_GNN[i][0]))
sum_loss += loss
loss = torch.sqrt(criterion_GNN(output[0][1] * (max_out - min_out) + min_out, test_Y_GNN[i][1]))
sum_loss += loss
print(sum_loss)
    with open('result.txt', 'wt') as f:
        f.write(str(sum_loss))
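    # Persist the trained weights for later reuse (a sketch): 'gcnn_model.pt' is a placeholder path.
    torch.save(gCNN.state_dict(), 'gcnn_model.pt')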
| [
"[email protected]"
] | |
ea3dafcec5cd664291f7873db6aee0a381378a84 | 11523d908822a9949bd40f17c42971b40d99af6b | /EduDLProj/exercise/dnn.py | 3a7881fb79a4abd02a4265f512d100d045aea3d7 | [] | no_license | cocoriel/myWiki | bd2496222d7f9b69fd3c88abbafde46256b86306 | bec52c473194f7d9421e1e5a5cce124f73774a89 | refs/heads/master | 2021-01-12T10:56:01.274801 | 2018-06-02T09:17:45 | 2018-06-02T09:17:45 | 72,761,793 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,494 | py | import tensorflow as tf
import numpy as np
xy = np.loadtxt("../data/07train.txt")
x_data = xy[:, :-1]
y_data = xy[:, [-1]]
X = tf.placeholder(tf.float32, [None, 2])
Y = tf.placeholder(tf.float32, [None, 1])
W1 = tf.Variable(tf.random_normal([2, 5]))
W2 = tf.Variable(tf.random_normal([5, 4]))
W3 = tf.Variable(tf.random_normal([4, 1]))
b1 = tf.Variable(tf.random_normal([5]))
b2 = tf.Variable(tf.random_normal([4]))
b3 = tf.Variable(tf.random_normal([1]))
L1 = tf.sigmoid(tf.matmul(X, W1) + b1)
L2 = tf.sigmoid(tf.matmul(L1, W2) + b2)
hypothesis = tf.sigmoid(tf.matmul(L2, W3) + b3)
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) * tf.log(1 - hypothesis))
train = tf.train.GradientDescentOptimizer(0.1).minimize(cost)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for step in range(5001):
sess.run(train, feed_dict={X:x_data, Y:y_data})
if step % 100 == 0:
print('step : ', step, 'cost : ', sess.run(cost, feed_dict={X:x_data, Y:y_data}),
'\nWeight : \n', sess.run(W1), sess.run(W2), sess.run(W3),
'\nbias : \n', sess.run(b1), sess.run(b2), sess.run(b3))
predicted = tf.cast(hypothesis > 0.5, tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), tf.float32))
h, p, a = sess.run([hypothesis, predicted, accuracy], feed_dict={X:x_data, Y:y_data})
print('\nhypothesis\n', h, '\npredicted\n', p, '\nrealvalue\n', y_data, '\naccuracy\n', a) | [
"[email protected]"
] | |
4783edc0803cf779c56a934137bef41d5775c607 | b45e134f1ba6aebb4cf05df81acc7c95d909f9c5 | /declarations_json_by_year.py | 2c19b86b0e3914f6c290d651ab8b32166e392a90 | [] | no_license | yonadaaa/hive_declarator | 001df1bae2cb25d22e1cd6f21892429f90318247 | 40cc19b78cf855e3453078cb3e3fd83f8fef8bbe | refs/heads/master | 2023-02-17T10:38:31.144695 | 2020-02-07T15:52:23 | 2020-02-07T15:52:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 902 | py | import json
import requests
def get_declarations(year):
declarations = []
# api-endpoint
url = "https://declarator.org/api/v1/search/sections"
max_pages = 403
page_number = 0
while url and page_number < max_pages:
print(page_number)
# defining a params dict for the parameters to be sent to the API
params = {'year': year}
# sending get request and saving the response as response object
r = requests.get(url=url, params=params)
# extracting data in json format
data = r.json()
declarations += data["results"]
url = data["next"]
page_number += 1
return declarations
def main():
year = 2018
declarations_2018 = get_declarations(year)
with open('declarations_2018.json', 'w') as outfile:
json.dump(declarations_2018, outfile)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
772c732aadfc888fd8118998d74695a359e85d78 | c953e8a7d251e4aba08910156a1bcf6997940c80 | /2016/24/24_1.py | a285a6e8dd42b8750bb76e86651fb037a0b4c552 | [] | no_license | simonbrahan/adventofcode | 848ca9ab2fdd7b22e1eb13d43bb5cf93972e2e5f | 8e0053399defb2a8a83cd4bb4062f7e213b10174 | refs/heads/master | 2021-06-04T00:52:30.269561 | 2018-12-12T22:55:04 | 2018-12-12T22:55:04 | 56,775,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,749 | py | import networkx, itertools, datetime
start_time = datetime.datetime.now()
rows = []
for line in open('input'):
rows.append(line.strip())
num_rows, num_columns = len(rows), len(rows[0])
hit_locations = {}
#
# Build the maze
# The generator builds a graph of num_rows by num_columns, with each node connected horizintally and vertically to its neighbours
# Any nodes containing '#' (walls in the input) are removed, and any hit locations are recorded
#
maze = networkx.generators.classic.grid_2d_graph(num_rows, num_columns)
for y in xrange(num_rows):
for x in xrange(num_columns):
if rows[y][x] == '#':
maze.remove_node((y, x))
if rows[y][x].isdigit():
hit_locations[int(rows[y][x])] = (y, x)
num_locations = len(hit_locations)
#
# Calculate distances between each hit location
# Once this is known, the puzzle is a travelling salesman problem
#
hit_location_distances = {}
for i in xrange(num_locations):
for j in xrange(num_locations):
hit_location_distances[i, j] = networkx.shortest_path_length(maze, hit_locations[i], hit_locations[j])
# Store distance both ways to allow permutations below to try either direction
hit_location_distances[j, i] = hit_location_distances[i, j]
#
# Iterate through all possible paths, finding length and comparing with previous best
#
best = None
for p in itertools.permutations(range(1, num_locations)):
hit_location_plan = [0] + list(p)
time = sum(
hit_location_distances[ hit_location_plan[location + 1], hit_location_plan[location] ] for location in xrange(len(hit_location_plan) - 1))
if best is None or time < best:
best = time
print best
print 'Script took', datetime.datetime.now() - start_time, 'seconds'
| [
"[email protected]"
] | |
12f85dfe3b746c02305c2dd7cc147b806129fb82 | 391d648132c1a05e7da575205eef89a7208a892a | /scripts/playground/phase.py | 0fe2427793d433ebdd4a2e65cbfcf3a84a814b9a | [] | no_license | michaelbateman/DemographicInference | c3ceaf69f8b554f3973473607d6b5201cca423f9 | b1e2529b1ce0710f82d2867d08588ae4a6c72bb2 | refs/heads/master | 2021-01-10T06:28:43.698581 | 2015-10-06T18:25:45 | 2015-10-06T18:25:45 | 43,770,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,500 | py | import subprocess
in_file = 'ag1000g.phase1.AR2.3L.PASS.vcf.gz'
region_size = 5000000
num_regions = 50e6 / region_size
print num_regions
print int(num_regions)
for k in range(0,int(num_regions)):
i = int(num_regions) -1 - k
left = i * region_size
right = (i+1) * region_size
window = str(left) + '-' + str(right)
out_file = 'ag1000g.phase1.AR2.3L.PASS.' + window + '.vcf'
call_string = 'time ../bcftools/bcftools view -o ' + out_file + ' ' + in_file + ' -r 3L:' + window
print call_string
print 'Now creating the file: ', out_file
print '.....'
subprocess.call(call_string, shell=True)
call_string = '../pbwtCurrent/pbwt/pbwt -readVcfGT ' + out_file + ' -writeAll temp_file_name'
print call_string
print 'Now preparing site file...'
subprocess.call(call_string, shell=True)
# The 1530 just below is the number of haplotypes in 765 samples
# Should change in different situation
phased_name = 'ag1000g.phase1.AR2.3L.PASS.' + window + '.phased.vcf'
call_string = '../pbwtCurrent/pbwt/pbwt -readVcfGT ' + out_file + ' -phase 1530 -readAll temp_file_name -writeVcf ' + phased_name
print call_string
print 'Now phasing...'
subprocess.call(call_string, shell=True)
call_string = 'time gzip ' + phased_name
print call_string
subprocess.call(call_string, shell=True)
call_string = 'rm ' + out_file
print call_string
subprocess.call(call_string, shell=True)
print 'Progress: %d out of %d regions complete.' %(k+1, num_regions)
print call_string | [
"[email protected]"
] | |
2a8058588ffdcb2d86ce6799fbbe81d880108b51 | d41c59d9a55dfa07663c863dd4d47f2f3a443bad | /exercicios-extras/exercicio-extra-01-break.py | 1a10bd6e8da5fce031080fdbca62cf192ce58628 | [] | no_license | yrto/python-coding-tank | b9ae29cd8b85a274693e848de7293e315ef50ae7 | d75670d43b23c8e77ebc1af0807d46de947f0066 | refs/heads/master | 2022-12-17T13:47:45.245531 | 2020-09-25T00:35:37 | 2020-09-25T00:35:37 | 293,183,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | while True:
genero = input("Informe seu gênero: ")
if genero == "M" or genero == "F" or genero == "outro":
break
else:
print("Genero inválido.")
| [
"[email protected]"
] | |
79c5b6261dc41cb10d40f2d6f68e42876b62adbf | 5d987bfce3dc6862339aa0139d9370d1447ee3e5 | /models/plm_approval.py | 16d0e596e4740228c2762eee5a0140d1b179ee8e | [] | no_license | jcambert/weOdooErpPlm | f299cc21d260fd1a00c14480e147cf27523bc67a | a54e9a316c8ce265f8c855da540de70db9c877dc | refs/heads/master | 2023-05-31T13:42:03.205013 | 2021-07-04T17:56:26 | 2021-07-04T17:56:26 | 326,800,722 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,951 | py | from odoo import models, fields, api
from .models import Model,INNER_MODELS
class PlmApproval(Model):
_name=INNER_MODELS['approval']
_description="Mrp Plm Validation "
approval_date=fields.Datetime(string="Date de validation")
approval_template_id=fields.Many2one(INNER_MODELS['approval_tmpl'] ,ondelete='cascade',required=True,string="Modèle")
# display_name=fields.Char(readonly=True,help="Nom affiché")
eco_id=fields.Many2one(INNER_MODELS['plm'] ,'Technical Change',ondelete='cascade',required=True,help="Modification technique")
eco_stage_id=fields.Many2one(INNER_MODELS['stage'],string="Étape OMT")
is_approved=fields.Boolean("Is approved",compute='_compute_is_approved',readonly=True)
is_closed=fields.Boolean("Is closed",compute='_compute_is_closed',store=True,readonly=True)
is_rejected=fields.Boolean("Is rejected",compute='_compute_is_rejected',store=True,readonly=True)
# name=fields.Char(required=True,string="Rôle")
roles=fields.Many2one('res.users.role',string="Roles")
roles_names=fields.Char('Roles', compute='_compute_roles_name')
# required_user_ids=fields.Many2many('res.users',string="Utilisateurs requis")
status=fields.Selection([
('none','not yet'),
('comment','Comment'),
('approved','Approved'),
('rejected','Rejected')],'Statut',required=True)
template_stage_id=fields.Many2one(INNER_MODELS['stage'] ,string="Étape de validation")
user_id=fields.Many2one('res.users','Approuvé par',ondelete='set null')
@api.model
def default_get(self, fields):
defaults = super(PlmApproval, self).default_get(fields)
defaults['status']='none'
return defaults
@api.model
def approve(self):
if self.is_treated():
return
self.status='approved'
self.approval_date=fields.Datetime.now()
self.user_id=self.env.user.id
@api.model
def reject(self):
if self.is_treated():
return
self.status='rejected'
self.approval_date=fields.Datetime.now()
self.user_id=self.env.user.id
@api.model
def is_treated(self):
return self.status=='approved' or self.status=='rejected'
@api.model
def need_approval(self):
return self.status!='approved' and self.status!='rejected'
@api.depends('status')
def _compute_is_approved(self):
for record in self:
record.is_approved=record.status=='approved'
@api.depends('status')
def _compute_is_rejected(self):
for record in self:
record.is_rejected= record.status=='rejected'
def _compute_is_closed(self):
for record in self:
record.is_closed= False
def _compute_roles_name(self):
for record in self:
roles=[]
for role in record.roles:
roles.append(role.name)
record.roles_names=','.join(roles) | [
"[email protected]"
] | |
a95882637047320b85c2d0c76750fc0b25ee61ce | 192d920524840f30c2392af7959afe2d0a3ef6f3 | /simulation/vision_simulation_agent.py | b4f3a766c47db1495fca29595eedfef11f8e78e2 | [] | no_license | camman3d/Differential-Drive-Robot | 8f2f8ddb833837c9f927529a357f8041f0ea8115 | d805e20cf701907777aca8e8797e8c7d7fd31d40 | refs/heads/master | 2020-05-02T10:31:29.830693 | 2014-04-27T00:37:42 | 2014-04-27T00:37:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,964 | py | import math
import cv2
from simulation.blender_source import BlenderSource
import image_processor
__author__ = 'josh'
rotation_granularity = 0.25
reading_threshold = 0
robot_translation_speed = 10
robot_rotation_speed = 0.4
cap = 20000
sigma = 0.5
image_width = 1024
destination_constant = 1
obstacle_constant = -1
class VisionSimulationAgent:
def __init__(self, robot, show_img, env):
self.robot = robot
self.last_img = None
self.show_img = show_img
self.blender = BlenderSource(env)
self.obst_hue = "red"
self.dest_hue = "green"
self.image_width = image_width
pass
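    # Example usage (a sketch): the agent expects the robot pose as a mutable [x, y, theta]
    # list and an environment name understood by BlenderSource; the values below are placeholders.
    #
    #     agent = VisionSimulationAgent([0.0, 0.0, 0.0], show_img=False, env="default")
    #     agent.search_full()
    #     for _ in range(10):
    #         agent.move_forward(1.0)
    #         agent.search_local()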
def get_image(self):
self.blender.set(self.robot[0], self.robot[1], self.robot[2])
img = self.blender.get()
self.last_img = img
if self.show_img:
self.show()
return img
def show(self, render=False):
if render:
self.get_image()
cv2.imshow('frame', self.last_img)
cv2.waitKey(5)
def search_full(self):
"""This will rotate the robot until the best angle is discovered"""
print("Determining best path....")
reading = self.get_reading()
last = -9999.9
while reading >= last or reading <= reading_threshold:
last = reading
self.rotate_left(rotation_granularity)
reading = self.get_reading()
self.show()
self.rotate_right(rotation_granularity)
self.show(True)
# graph.plot_configuration_attraction(self.config)
def search_local(self):
"""This will recalibrate the robot to turn to the optimal direction"""
print("Adjusting path...")
reading = self.get_reading()
last = -9999.9
while reading >= last:
last = reading
self.rotate_left(rotation_granularity)
reading = self.get_reading()
self.show()
last = -9999.9
while reading >= last:
last = reading
self.rotate_right(rotation_granularity)
reading = self.get_reading()
self.show()
self.rotate_left(rotation_granularity)
self.show(True)
# graph.plot_configuration_attraction(self.config)
@staticmethod
def calculate_value(img, color, constant, profile=False):
# Do image analysis
data = image_processor.threshold(img, color)
# Normalize
if profile:
value = data[0]
else:
value = min(cap, data[0])
if data[1] is not None:
# Offset contains a number from -1 to 1
w = image_width / 2
offset = float(data[1][0] - w) / w
weight = (2 / (sigma * math.sqrt(2 * math.pi))) * math.e ** (-(offset ** 2) / (2 * sigma ** 2))
value *= weight
return value * constant
def get_reading(self, profile=False):
img = self.get_image()
attract = self.calculate_value(img, "green", destination_constant, profile)
repel = self.calculate_value(img, "red", obstacle_constant, profile)
return attract + repel
def move_forward(self, duration):
dx = math.cos(self.robot[2]) * robot_translation_speed * duration
dy = math.sin(self.robot[2]) * robot_translation_speed * duration
self.robot[0] += dx
self.robot[1] += dy
# if self.show_img:
# self.show(True)
def move_backward(self, duration):
dx = math.cos(self.robot[2]) * robot_translation_speed * duration
dy = math.sin(self.robot[2]) * robot_translation_speed * duration
self.robot[0] -= dx
self.robot[1] -= dy
def rotate_left(self, theta):
self.robot[2] += theta
self.robot[2] %= math.pi * 2
def rotate_right(self, theta):
self.robot[2] -= theta
self.robot[2] %= math.pi * 2
def get_angle(self):
return self.robot[2] | [
"[email protected]"
] | |
454aae705cffe315959a21168b5c3bab031884ba | e1a737819e95fe535abec061018f2b64789f85c3 | /unit_tests.py | d809ed90254e5fd0285c80ff2f3586b03f89ebdc | [] | no_license | debbiemaborekhe/myVAATool | b3fab5b2404b2c2c239b0ffa08ff29a8c623017b | acf4e7843c9f09c9c10e78341d6a428198774e8c | refs/heads/main | 2023-08-14T21:46:17.053352 | 2021-10-08T12:03:36 | 2021-10-08T12:03:36 | 414,957,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,727 | py | import unittest
from csvReader import *
from timeAnalysis import *
from hourlyAnalysis import *
from searchAccidentByDcaCode import *
from alcoholImpact import *
from geographicImpact import *
class UnitTests(unittest.TestCase):
existingFileName = "Crash Statistics Victoria.csv"
nonExistingFileName = "Crash Statistics Victoria Not Exist.csv"
def testLoadingExistingData(self):
db = loadData(self.existingFileName)
self.assertFalse(db.empty)
def testLoadingNonExistingData(self):
db = loadData(self.nonExistingFileName)
self.assertTrue(db.empty)
def testValidationData(self):
db = loadData(self.existingFileName)
result = validateData(db)
        self.assertIsNotNone(result)
        self.assertFalse(db.empty)
def testFrameDisplayedForSelectedPeriod(self):
db = loadData(self.existingFileName)
frame = getAccidentsFrame(db)
self.assertTrue(frame)
def testFrameDisplayedForSelectedPeriodInHourOfDay(self):
db = loadData(self.existingFileName)
frame = getAccidentsInHourFrame(db)
self.assertTrue(frame)
def testFrameDisplayedForSearchByDCACode(self):
db = loadData(self.existingFileName)
frame = getSearchAccidentsFrame(db)
self.assertTrue(frame)
def testFrameDisplayedForImpactOfAlcohol(self):
db = loadData(self.existingFileName)
frame = getImpactOfAlcoholChartFrame(db)
self.assertTrue(frame)
    def testFrameDisplayedForAccidentsInRegion(self):
db = loadData(self.existingFileName)
frame = listAccidentsInRegionFrame(db)
self.assertTrue(frame)
if __name__ == "__main__":
unittest.main() | [
"[email protected]"
] | |
2ec97f5c31d31374dfce457d80211216f5865676 | 7bc75d5cc46da19d38b86b60e5cbb5b45f71f91e | /custom_components/gazpar/gazpar.py | 07af27197dddc01b31e91f3cc67effdf7b9ff679 | [] | no_license | Meroje/gazpar-home-assistant | 444629892143e212ed24a66c2d92f76b0f15e22f | 2816fefa5f7635903c22d7b82902f0eb23ce2d37 | refs/heads/main | 2023-04-14T11:00:24.424900 | 2020-10-26T11:36:24 | 2020-10-26T11:36:24 | 307,351,615 | 0 | 1 | null | 2023-04-04T00:24:58 | 2020-10-26T11:34:32 | Python | UTF-8 | Python | false | false | 17,138 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Retrieves energy consumption data from your GrDf account.
"""
# Linkindle - Linky energy consumption curves on a Kindle display.
# Copyright (C) 2016 Baptiste Candellier
# Adapted to gaspar (C) 2018 epierre
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# connexion -> accueil -> consommation/tableau-de-bord -> consommation/consommations
import requests
import sys
import os
import re
import logging
from lxml import etree
import io
import json
import datetime
LOGIN_BASE_URI = 'https://monespace.grdf.fr/web/guest/monespace'
API_BASE_URI = 'https://monespace.grdf.fr/monespace/particulier'
global JAVAVXS
API_ENDPOINT_LOGIN = '?p_p_id=EspacePerso_WAR_EPportlet&p_p_lifecycle=2&p_p_state=normal&p_p_mode=view&p_p_cacheability=cacheLevelPage&p_p_col_id=column-2&p_p_col_count=1&_EspacePerso_WAR_EPportlet__jsfBridgeAjax=true&_EspacePerso_WAR_EPportlet__facesViewIdResource=%2Fviews%2FespacePerso%2FseconnecterEspaceViewMode.xhtml'
API_ENDPOINT_HOME = '/accueil'
API_ENDPOINT_DATA = '/consommation/tableau-de-bord'
DATA_NOT_REQUESTED = -1
DATA_NOT_AVAILABLE = -2
class LinkyLoginException(Exception):
"""Thrown if an error was encountered while retrieving energy consumption data."""
pass
class GazparServiceException(Exception):
"""Thrown when the webservice threw an exception."""
pass
def parse_lxml(c):
root = etree.fromstring(c)
log = root.xpath("//update[@id = 'javax.faces.ViewState']")
return (log[0].text)
def login(username, password):
"""Logs the user into the Linky API.
"""
session = requests.Session()
global JAVAVXS
payload = {
'javax.faces.partial.ajax': 'true',
'javax.faces.source': '_EspacePerso_WAR_EPportlet_:seConnecterForm:meConnecter',
'javax.faces.partial.execute': '_EspacePerso_WAR_EPportlet_:seConnecterForm',
'javax.faces.partial.render': 'EspacePerso_WAR_EPportlet_:global _EspacePerso_WAR_EPportlet_:groupTitre',
'javax.faces.behavior.event': 'click',
'javax.faces.partial.event': 'click',
'_EspacePerso_WAR_EPportlet_:seConnecterForm': '_EspacePerso_WAR_EPportlet_:seConnecterForm',
'javax.faces.encodedURL': 'https://monespace.grdf.fr/web/guest/monespace?p_p_id=EspacePerso_WAR_EPportlet&p_p_lifecycle=2&p_p_state=normal&p_p_mode=view&p_p_cacheability=cacheLevelPage&p_p_col_id=column-2&p_p_col_count=1&_EspacePerso_WAR_EPportlet__jsfBridgeAjax=true&_EspacePerso_WAR_EPportlet__facesViewIdResource=%2Fviews%2FespacePerso%2FseconnecterEspaceViewMode.xhtml',
'_EspacePerso_WAR_EPportlet_:seConnecterForm:email': username,
'_EspacePerso_WAR_EPportlet_:seConnecterForm:passwordSecretSeConnecter': password
}
session.headers = {
'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Mobile Safari/537.36',
'Accept-Language': 'fr,fr-FR;q=0.8,en;q=0.6',
'Accept-Encoding': 'gzip, deflate, br',
'Accept': 'application/xml, application/json, text/javascript, */*; q=0.01',
'Faces-Request': 'partial/ajax',
'Sec-Fetch-Mode': 'no-cors',
'Sec-Fetch-Site': 'same-origin',
'Origin': 'https://monespace.grdf.fr',
'Referer': 'https://monespace.grdf.fr/monespace/connexion'}
session.cookies['KPISavedRef'] = 'https://monespace.grdf.fr/monespace/connexion'
session.get(LOGIN_BASE_URI + API_ENDPOINT_LOGIN, verify=False, timeout=None, data=payload, allow_redirects=False)
req = session.post(LOGIN_BASE_URI + API_ENDPOINT_LOGIN, data=payload, allow_redirects=False)
javaxvs2 = parse_lxml(req.text)
JAVAVXS = javaxvs2
# print(session.cookies.get_dict())
# 2nd request
payload = {
'javax.faces.partial.ajax': 'true',
'javax.faces.source': '_EspacePerso_WAR_EPportlet_:seConnecterForm:meConnecter',
'javax.faces.partial.execute': '_EspacePerso_WAR_EPportlet_:seConnecterForm',
'javax.faces.partial.render': 'EspacePerso_WAR_EPportlet_:global _EspacePerso_WAR_EPportlet_:groupTitre',
'javax.faces.behavior.event': 'click',
'javax.faces.partial.event': 'click',
'javax.faces.ViewState': javaxvs2,
'_EspacePerso_WAR_EPportlet_:seConnecterForm': '_EspacePerso_WAR_EPportlet_:seConnecterForm',
'javax.faces.encodedURL': 'https://monespace.grdf.fr/web/guest/monespace?p_p_id=EspacePerso_WAR_EPportlet&p_p_lifecycle=2&p_p_state=normal&p_p_mode=view&p_p_cacheability=cacheLevelPage&p_p_col_id=column-2&p_p_col_count=1&_EspacePerso_WAR_EPportlet__jsfBridgeAjax=true&_EspacePerso_WAR_EPportlet__facesViewIdResource=%2Fviews%2FespacePerso%2FseconnecterEspaceViewMode.xhtml',
'_EspacePerso_WAR_EPportlet_:seConnecterForm:email': username,
'_EspacePerso_WAR_EPportlet_:seConnecterForm:passwordSecretSeConnecter': password
}
req = session.post(LOGIN_BASE_URI + API_ENDPOINT_LOGIN, data=payload, allow_redirects=False)
# print(payload)
session_cookie = req.cookies.get('GRDF_EP')
# print(session_cookie)
# print('\n0- monespace req.text\n')
# print(req.text)
    if 'GRDF_EP' not in session.cookies:
raise LinkyLoginException("Login unsuccessful. Check your credentials.")
return session
def get_data_per_hour(session, start_date, end_date):
"""Retreives hourly energy consumption data."""
return _get_data(session, 'Heure', start_date, end_date)
def get_data_per_day(session, start_date, end_date):
"""Retreives daily energy consumption data."""
return _get_data(session, 'Jour', start_date, end_date)
def get_data_per_week(session, start_date, end_date):
"""Retreives weekly energy consumption data."""
return _get_data(session, 'Semaine', start_date, end_date)
def get_data_per_month(session, start_date, end_date):
"""Retreives monthly energy consumption data."""
return _get_data(session, 'Mois', start_date, end_date)
def get_data_per_year(session):
"""Retreives yearly energy consumption data."""
return _get_data(session, 'Mois')
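# Example usage (a sketch): the credentials are placeholders and the date strings assume the
# DD/MM/YYYY format used by the GrDF web forms -- verify both before running.
#
#     session = login('[email protected]', 'password')
#     consumption = get_data_per_month(session, '01/01/2019', '31/01/2019')
#     print(json.dumps(consumption, indent=2))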
def _get_data(session, resource_id, start_date=None, end_date=None):
global JAVAVXS
session.headers = {
'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Mobile Safari/537.36',
'Accept-Language': 'fr,fr-FR;q=0.8,en;q=0.6',
'Accept-Encoding': 'gzip, deflate, br',
'Accept': 'application/xml, application/json, text/javascript, */*; q=0.01',
'Faces-Request': 'partial/ajax',
'Host': 'monespace.grdf.fr',
'Origin': 'https://monespace.grdf.fr',
'Referer': 'https://monespace.grdf.fr/monespace/particulier/consommation/consommation',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-origin',
'X-Requested-With': 'XMLHttpRequest'}
payload = {
'javax.faces.partial.ajax': 'true',
'javax.faces.source': '_eConsoconsoDetaille_WAR_eConsoportlet_:idFormConsoDetaille:j_idt139',
'javax.faces.partial.execute': '_eConsoconsoDetaille_WAR_eConsoportlet_:idFormConsoDetaille:j_idt139',
'javax.faces.partial.render': '_eConsoconsoDetaille_WAR_eConsoportlet_:idFormConsoDetaille',
'javax.faces.behavior.event': 'click',
'javax.faces.partial.event': 'click',
'_eConsoconsoDetaille_WAR_eConsoportlet_:idFormConsoDetaille': ' _eConsoconsoDetaille_WAR_eConsoportlet_:idFormConsoDetaille',
'javax.faces.encodedURL': 'https://monespace.grdf.fr/web/guest/monespace/particulier/consommation/consommations?p_p_id=eConsoconsoDetaille_WAR_eConsoportlet&p_p_lifecycle=2&p_p_state=normal&p_p_mode=view&p_p_cacheability=cacheLevelPage&p_p_col_id=column-3&p_p_col_count=5&p_p_col_pos=3&_eConsoconsoDetaille_WAR_eConsoportlet__jsfBridgeAjax=true&_eConsoconsoDetaille_WAR_eConsoportlet__facesViewIdResource=%2Fviews%2Fconso%2Fdetaille%2FconsoDetailleViewMode.xhtml',
'javax.faces.ViewState': JAVAVXS}
params = {
'p_p_id': 'eConsosynthese_WAR_eConsoportlet',
'p_p_lifecycle': '2',
'p_p_state': 'normal',
'p_p_mode': 'view',
'p_p_cacheability': 'cacheLevelPage',
'p_p_col_id': 'column-3',
'p_p_col_count': '5',
'p_p_col_pos': '3',
'_eConsosynthese_WAR_eConsoportlet__jsfBridgeAjax': 'true',
'_eConsosynthese_WAR_eConsoportlet__facesViewIdResource': '/views/compteur/synthese/syntheseViewMode.xhtml'}
r = session.get('https://monespace.grdf.fr/monespace/particulier/consommation/consommations', allow_redirects=False,
verify=False, timeout=None)
if r.status_code != requests.codes.ok:
print("status 1e appel:" + r.status_code + '\n');
# print(session.headers)
# print(session.cookies)
# print(payload)
# print('\n1- consommations 1er appel r.text\n')
# print(r.text)
# m = re.search("ViewState\" +value=\"(.*?)\"", r.text)
# value = m.group(1)
parser = etree.HTMLParser()
tree = etree.parse(io.StringIO(r.text), parser)
value = tree.xpath(
"//div[@id='_eConsoconsoDetaille_WAR_eConsoportlet_']/form[@id='_eConsoconsoDetaille_WAR_eConsoportlet_:idFormConsoDetaille']/input[@id='javax.faces.ViewState']/@value")
# print('\n2- consommations id xpath value\n')
# print(value)
JAVAVXS = value
# Step 1
payload = {
'javax.faces.partial.ajax': 'true',
'javax.faces.source': '_eConsoconsoDetaille_WAR_eConsoportlet_:idFormConsoDetaille:j_idt139',
'javax.faces.partial.execute': '_eConsoconsoDetaille_WAR_eConsoportlet_:idFormConsoDetaille:j_idt139',
'javax.faces.partial.render': '_eConsoconsoDetaille_WAR_eConsoportlet_:idFormConsoDetaille',
'javax.faces.behavior.event': 'click',
'javax.faces.partial.event': 'click',
'_eConsoconsoDetaille_WAR_eConsoportlet_:idFormConsoDetaille': '_eConsoconsoDetaille_WAR_eConsoportlet_:idFormConsoDetaille',
'javax.faces.encodedURL': 'https://monespace.grdf.fr/web/guest/monespace/particulier/consommation/consommations?p_p_id=eConsoconsoDetaille_WAR_eConsoportlet&p_p_lifecycle=2&p_p_state=normal&p_p_mode=view&p_p_cacheability=cacheLevelPage&p_p_col_id=column-3&p_p_col_count=5&p_p_col_pos=3&_eConsoconsoDetaille_WAR_eConsoportlet__jsfBridgeAjax=true&_eConsoconsoDetaille_WAR_eConsoportlet__facesViewIdResource=%2Fviews%2Fconso%2Fdetaille%2FconsoDetailleViewMode.xhtml',
'javax.faces.ViewState': JAVAVXS
}
params = {
'p_p_id': 'eConsoconsoDetaille_WAR_eConsoportlet',
'p_p_lifecycle': '2',
'p_p_state': 'normal',
'p_p_mode': 'view',
'p_p_cacheability': 'cacheLevelPage',
'p_p_col_id': 'column-3',
'p_p_col_count': '5',
'p_p_col_pos': '3',
'_eConsoconsoDetaille_WAR_eConsoportlet__jsfBridgeAjax': 'true',
'_eConsoconsoDetaille_WAR_eConsoportlet__facesViewIdResource': '/views/conso/detaille/consoDetailleViewMode.xhtml'
}
session.cookies['KPISavedRef'] = 'https://monespace.grdf.fr/monespace/particulier/consommation/consommations'
req = session.post('https://monespace.grdf.fr/monespace/particulier/consommation/consommations',
allow_redirects=False, data=payload, params=params)
if req.status_code != requests.codes.ok:
print("status 2e appel:" + r.status_code + '\n');
# print(session.headers)
# print(session.cookies)
# print(payload)
# print('\n3- consommations 2e appel req.text\n')
# print(req.text)
# We send the session token so that the server knows who we are
payload = {
'javax.faces.partial.ajax': 'true',
'javax.faces.source': '_eConsoconsoDetaille_WAR_eConsoportlet_:idFormConsoDetaille:panelTypeGranularite1:2',
'javax.faces.partial.execute': '_eConsoconsoDetaille_WAR_eConsoportlet_:idFormConsoDetaille:panelTypeGranularite1',
'javax.faces.partial.render': '_eConsoconsoDetaille_WAR_eConsoportlet_:idFormConsoDetaille:refreshHighchart _eConsoconsoDetaille_WAR_eConsoportlet_:idFormConsoDetaille:updateDatesBean _eConsoconsoDetaille_WAR_eConsoportlet_:idFormConsoDetaille:boutonTelechargerDonnees _eConsoconsoDetaille_WAR_eConsoportlet_:idFormConsoDetaille:panelTypeGranularite _eConsoconsoDetaille_WAR_eConsoportlet_:idFormConsoDetaille:idBlocSeuilParametrage',
'javax.faces.behavior.event': 'valueChange',
'javax.faces.partial.event': 'change',
'eConsoconsoDetaille_WAR_eConsoportlet_:idFormConsoDetaille': '_eConsoconsoDetaille_WAR_eConsoportlet_:idFormConsoDetaille',
'javax.faces.encodedURL': 'https://monespace.grdf.fr/web/guest/monespace/particulier/consommation/consommations?p_p_id=eConsoconsoDetaille_WAR_eConsoportlet&p_p_lifecycle=2&p_p_state=normal&p_p_mode=view&p_p_cacheability=cacheLevelPage&p_p_col_id=column-3&p_p_col_count=5&p_p_col_pos=3&_eConsoconsoDetaille_WAR_eConsoportlet__jsfBridgeAjax=true&_eConsoconsoDetaille_WAR_eConsoportlet__facesViewIdResource=%2Fviews%2Fconso%2Fdetaille%2FconsoDetailleViewMode.xhtml',
'_eConsoconsoDetaille_WAR_eConsoportlet_:idFormConsoDetaille:idDateDebutConsoDetaille': start_date,
'_eConsoconsoDetaille_WAR_eConsoportlet_:idFormConsoDetaille:idDateFinConsoDetaille': end_date,
'_eConsoconsoDetaille_WAR_eConsoportlet_:idFormConsoDetaille:panelTypeGranularite1': resource_id.lower(),
'_eConsoconsoDetaille_WAR_eConsoportlet_:idFormConsoDetaille:panelTypeGranularite3': 'mois',
'_eConsoconsoDetaille_WAR_eConsoportlet_:idFormConsoDetaille:selecteurVolumeType2': 'kwh',
'_eConsoconsoDetaille_WAR_eConsoportlet_:idFormConsoDetaille:selecteurVolumeType4': 'kwh',
'javax.faces.ViewState': JAVAVXS
}
params = {
'p_p_id': 'eConsoconsoDetaille_WAR_eConsoportlet',
'p_p_lifecycle': '2',
'p_p_state': 'normal',
'p_p_mode': 'view',
'p_p_cacheability': 'cacheLevelPage',
'p_p_col_id': 'column-3',
'p_p_col_count': '5',
'p_p_col_pos': '3',
'_eConsoconsoDetaille_WAR_eConsoportlet__jsfBridgeAjax': 'true',
'_eConsoconsoDetaille_WAR_eConsoportlet__facesViewIdResource': '/views/conso/detaille/consoDetailleViewMode.xhtml'
}
session.cookies['KPISavedRef'] = 'https://monespace.grdf.fr/monespace/particulier/consommation/consommations'
req = session.post('https://monespace.grdf.fr/monespace/particulier/consommation/consommations',
allow_redirects=False, data=payload, params=params)
if req.status_code != requests.codes.ok:
print("status recup data: " + r.status_code + '\n');
# print('\n4.1- header\n')
# print(session.headers)
# print('\n4.2- cookies\n')
# print(session.cookies)
# print('\n4- req.text\n')
# print(req.text)
# print('\n5- payload\n')
# print(payload)
# print(req.status_code)
# Parse to get the data
md = re.search("donneesCourante = \"(.*?)\"", req.text)
d = md.group(1)
# print('\n6- d\n')
# print(d)
mt = re.search("tooltipDatesInfo = \"(.*?)\"", req.text)
t = mt.group(1)
# print(mt)
# Make json
now = datetime.datetime.now()
ts = t.split(",")
ds = d.split(",")
size = len(ts)
data = []
i = 0
while i < size:
# print(ts[i]+"/"+str(now.year)+" "+ds[i]+ " "+str(i))
# data[ts[i]+"/"+str(now.year)] = ds[i]
if ds[i] != "null":
# data[ts[i].replace('Le ','')] = ds[i]
data.append({'conso': ds[i], 'time': ts[i].replace('Le ', '')})
i += 1
json_data = json.dumps(data)
# if 300 <= req.status_code < 400:
# # So... apparently, we may need to do that once again if we hit a 302
# # ¯\_(ツ)_/¯
# req = session.post(API_BASE_URI + API_ENDPOINT_DATA, allow_redirects=False, data=payload, params=params)
if req.status_code == 200 and req.text is not None and "Conditions d'utilisation" in req.text:
        raise LinkyLoginException("You need to accept the latest Terms of Use. Please manually log into the website, "
                                  "then come back.")
try:
res = data
except:
logging.info("Unable to get data")
sys.exit(os.EX_SOFTWARE)
# if res['etat'] and res['etat']['valeur'] == 'erreur' and res['etat']['erreurText']:
# raise GazparServiceException(html.unescape(res['etat']['erreurText']))
return res
| [
"[email protected]"
] | |
4522d4e49db3146c28c578c30ba8412a3263284e | 449b002b111627dd13bc1dae2273dd14b4cdfaa4 | /phonenumber/views.py | 21166cd713fddd1b08089decee8b17e177237821 | [] | no_license | navinbhaskar/ClassClass_fetch_username | ae20c5eaf93c2125dd25d2886331445599a8e826 | 997fc91d26e31c20c0f01a02a8bd65db51edf325 | refs/heads/master | 2020-03-22T05:41:17.188823 | 2018-07-03T12:58:14 | 2018-07-03T12:58:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 678 | py | from django.shortcuts import render
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
#from .models import Flashcard
from .serializers import UserProfileSerializer
from tablib import Dataset
from student.models import UserProfile
class UserProfileList(APIView):
def get(self, request, phone_number):
        profiles = UserProfile.objects.filter(phone_number=phone_number)
        serializer = UserProfileSerializer(profiles, many=True)
        return Response(serializer.data)
    def post(self, request, phone_number):
        pass
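# A matching route (a sketch): the project's urls.py is not shown here, so the module path and
# URL pattern below are assumptions about how phone_number reaches this view.
#
#     from django.urls import path
#     from .views import UserProfileList
#
#     urlpatterns = [
#         path('userprofiles/<str:phone_number>/', UserProfileList.as_view()),
#     ]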
| [
"[email protected]"
] | |
153500c3dd34e92cadc95e1b1da10b33f0c05b8a | 56cac31cbecee7c5f3206d869d0956b0b4eaf4f5 | /SkilletLoader/utils/panoply.py | 4d17c5b7a446a00383f49b9357a9eb9488cf424a | [
"Apache-2.0"
] | permissive | omshankar1/SkilletLoader | a22fc747e6144b78bb089dc9cb80aca748e5ed67 | f8a6f88b6b3bf085b81f5f741d5a5bbd27497629 | refs/heads/master | 2020-06-28T08:20:28.561835 | 2019-07-31T18:41:56 | 2019-07-31T18:41:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,848 | py | # Copyright (c) 2018, Palo Alto Networks
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Authors: Nathan Embery
import re
import time
from pathlib import Path
from xml.etree import ElementTree
from xml.etree import ElementTree as elementTree
import requests
import requests_toolbelt
import xmltodict
from pan import xapi
from pan.xapi import PanXapiError
from .exceptions import SkilletLoaderException
from .exceptions import LoginException
from .skillet.base import Skillet
class Panoply:
"""
Panoply is a wrapper around pan-python PanXAPI class to provide additional, commonly used functions
"""
def __init__(self, hostname, api_username, api_password, api_port=443, serial_number=None):
"""
Initialize a new panos object
        :param hostname: hostname or ip address of target device
:param api_username: username
:param api_password: password
:param api_port: port to use for target device
:param serial_number: Serial number of target device if proxy through panorama
"""
self.hostname = hostname
self.user = api_username
self.pw = api_password
self.port = api_port
self.serial_number = serial_number
self.key = ''
self.debug = False
self.serial = serial_number
self.connected = False
self.facts = {}
try:
self.xapi = xapi.PanXapi(api_username=self.user, api_password=self.pw, hostname=self.hostname,
port=self.port, serial=self.serial_number)
except PanXapiError:
print('Invalid Connection information')
raise LoginException('Invalid connection parameters')
else:
self.connect(allow_offline=True)
def connect(self, allow_offline=False) -> None:
"""
Attempt to connect to this device instance
:param allow_offline: Do not raise an exception if this device is offline
:return: None
"""
try:
self.key = self.xapi.keygen()
self.facts = self.get_facts()
except PanXapiError as pxe:
err_msg = str(pxe)
if '403' in err_msg:
raise LoginException('Invalid credentials logging into device')
else:
if allow_offline:
print('FYI - Device is not currently available')
self.connected = False
else:
raise SkilletLoaderException('Could not connect to device!')
else:
self.connected = True
def commit(self) -> None:
"""
Perform a commit operation on this device instance
:return: None
"""
try:
self.xapi.commit(cmd='<commit></commit>', sync=True, timeout=600)
results = self.xapi.xml_result()
doc = elementTree.XML(results)
embedded_result = doc.find('result')
if embedded_result is not None:
commit_result = embedded_result.text
print(f'Commit result is {commit_result}')
if commit_result == 'FAIL':
raise SkilletLoaderException(self.xapi.status_detail)
except PanXapiError as pxe:
print(pxe)
raise SkilletLoaderException('Could not commit configuration')
def set_at_path(self, name: str, xpath: str, xml_str: str) -> None:
"""
Insert XML into the configuration tree at the specified xpath
:param name: name of the snippet - used in logging and debugging only
:param xpath: full xpath where the xml element will be inserted
:param xml_str: string representation of the XML element to insert
:return: None
"""
try:
print(f'Loading xpath {xpath}')
self.xapi.set(xpath=xpath, element=self.sanitize_element(xml_str))
if self.xapi.status_code == '7':
raise SkilletLoaderException(f'xpath {xpath} was NOT found for skillet: {name}')
except PanXapiError as pxe:
raise SkilletLoaderException(f'Could not push skillet {name} / snippet {xpath}! {pxe}')
def execute_cmd(self, cmd: str, params: dict) -> bool:
"""
Execute the given cmd using the xapi.
:param cmd: Valid options are 'set', 'edit', 'override', 'move', 'rename', 'clone'
:param params: valid parameters for the given cmd type
:return: bool True on success, raises SkilletLoaderException
"""
if cmd not in ('set', 'edit', 'override', 'move', 'rename', 'clone'):
raise SkilletLoaderException('Invalid cmd type given to execute_cmd')
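        # Example params for a 'set' operation (placeholder xpath/element values):
        #   {'cmd': 'set', 'xpath': '/config/devices/entry/address', 'element': '<entry name="test-host"/>'}
        # Note: the operation actually executed is re-read from params['cmd'] below.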
# this code happily borrowed from ansible-pan module
# https://raw.githubusercontent.com/PaloAltoNetworks/ansible-pan/develop/library/panos_type_cmd.py
cmd = params['cmd']
func = getattr(self.xapi, cmd)
kwargs = {
'xpath': ''.join(params['xpath'].strip().split('\n')),
'extra_qs': params.get('extra_qs', dict())
}
try:
if cmd in ('set', 'edit', 'override'):
kwargs['element'] = params['element'].strip()
if cmd in ('move',):
kwargs['where'] = params['where']
# dst is optional
kwargs['dst'] = params.get('dst', None)
if cmd in ('rename', 'clone'):
if 'new_name' in params:
kwargs['newname'] = params['new_name']
else:
kwargs['newname'] = params['newname']
if cmd in ('clone',):
kwargs['xpath_from'] = params['xpath_from']
except KeyError as ke:
print(f'Invalid parameters passed to execute_cmd')
print(ke)
return False
try:
func(**kwargs)
except PanXapiError as e:
print(e)
print(f'Could not execute {cmd}')
return False
else:
return True
@staticmethod
def sanitize_element(element: str) -> str:
"""
        Eliminate some unneeded characters from the XML snippet if they appear.
:param element: element str
:return: sanitized element str
"""
element = re.sub(r"\n\s+", "", element)
element = re.sub(r"\n", "", element)
return element
def get_facts(self) -> dict:
"""
Gather system info and keep on self.facts
This gets called on every connect
:return: dict containing all system facts
"""
facts = {}
# FIXME - add better error handling here
self.xapi.op(cmd='<show><system><info></info></system></show>')
if self.xapi.status != 'success':
print('We have a problem!')
raise SkilletLoaderException('Could not get facts from device!')
results_xml_str = self.xapi.xml_result()
results = xmltodict.parse(results_xml_str)
if 'system' in results:
facts.update(results['system'])
self.xapi.show(xpath="./devices/entry[@name='localhost.localdomain']/deviceconfig/system")
results_xml_str = self.xapi.xml_result()
results = xmltodict.parse(results_xml_str)
if 'system' in results:
facts['timezone'] = results['system'].get('timezone', 'US/Pacific')
try:
facts['dns-primary'] = results['system']['dns-setting']['servers']['primary']
facts['dns-secondary'] = results['system']['dns-setting']['servers']['secondary']
except KeyError:
# DNS is not configured on the host, but we will need it later for some noob operations
facts['dns-primary'] = '1.1.1.1'
facts['dns-secondary'] = '1.0.0.1'
return facts
def load_baseline(self) -> bool:
"""
Load baseline config that contains ONLY connecting username / password
use device facts to determine which baseline template to load
see template/panos/baseline_80.xml for example
:param self:
:return: bool true on success
"""
if not self.connected:
self.connect()
if 'sw-version' not in self.facts:
raise SkilletLoaderException('Could not determine sw-version to load baseline configuration!')
version = self.facts['sw-version']
context = dict()
context['FW_NAME'] = self.facts['hostname']
context['ADMINISTRATOR_USERNAME'] = self.user
context['ADMINISTRATOR_PASSWORD'] = self.pw
if self.facts['is-dhcp'] == 'no':
context['MGMT_TYPE'] = 'static'
context['MGMT_IP'] = self.facts['ip-address']
context['MGMT_MASK'] = self.facts['netmask']
context['MGMT_DG'] = self.facts['default-gateway']
context['DNS_1'] = self.facts['dns-primary']
context['DNS_2'] = self.facts['dns-secondary']
if '8.0' in version:
# load the 8.0 baseline with
skillet_dir = 'baseline_80'
elif '8.1' in version:
# load the 8.1 baseline with
skillet_dir = 'baseline_81'
elif '9.0' in version:
# load the 9.0 baseline with
skillet_dir = 'baseline_90'
else:
print('Could not determine sw-version for baseline load')
return False
template_path = Path(__file__).parent.joinpath('..', 'skillets', 'panos', skillet_dir)
print(f'{template_path.resolve()}')
baseline_skillet = Skillet(str(template_path.resolve()))
snippets = baseline_skillet.get_snippets()
snippet = snippets[0]
print(f'Loading {snippet.name}')
file_contents = snippet.template(context)
self.import_file(snippet.name, file_contents, 'configuration')
self.load_config(snippet.name)
return True
def import_file(self, filename: str, file_contents: (str, bytes), category: str) -> bool:
"""
Import the given file into this device
:param filename:
:param file_contents:
:param category: 'configuration'
:return: bool True on success
"""
params = {
'type': 'import',
'category': category,
'key': self.key
}
mef = requests_toolbelt.MultipartEncoder(
fields={
'file': (filename, file_contents, 'application/octet-stream')
}
)
r = requests.post(
'https://' + self.hostname + '/api/',
verify=False,
params=params,
headers={'Content-Type': mef.content_type},
data=mef
)
# if something goes wrong just raise an exception
r.raise_for_status()
resp = ElementTree.fromstring(r.content)
if resp.attrib['status'] == 'error':
raise SkilletLoaderException(r.content)
return True
def load_config(self, filename: str) -> bool:
"""
Loads the named configuration file into this device
:param filename: name of the configuration file on the device to load. Note this filename must already exist
on the target device
:return: bool True on success
"""
cmd = f'<load><config><from>{filename}</from></config></load>'
self.xapi.op(cmd=cmd)
if self.xapi.status == 'success':
return True
else:
return False
def wait_for_device_ready(self, interval=30, timeout=600) -> bool:
"""
Loop and wait until device is ready or times out
:param interval: how often to check in seconds
:param timeout: how long to wait until we declare a timeout condition
:return: boolean true on ready, false on timeout
"""
mark = time.time()
timeout_mark = mark + timeout
while True:
print(f'Checking {self.hostname} if ready...')
try:
self.xapi.op(cmd='<show><chassis-ready></chassis-ready></show>')
resp = self.xapi.xml_result()
if self.xapi.status == 'success':
if resp.strip() == 'yes':
return True
except PanXapiError:
print(f'{self.hostname} is not yet ready...')
if time.time() > timeout_mark:
return False
print(f'Waiting for {self.hostname} to become ready...')
time.sleep(interval)
def update_dynamic_content(self, content_type: str) -> bool:
"""
Check for newer dynamic content and install if found
:param content_type: type of content to check. can be either: 'content', 'anti-virus', 'wildfire'
:return: bool True on success
"""
try:
version_to_install = self.check_content_updates(content_type)
if version_to_install is None:
print('Latest content version is already installed')
return True
print('Downloading latest and greatest')
cmd = f'<request>' \
f'<{content_type}><upgrade><download><latest/></download></upgrade></{content_type}>' \
f'</request>'
self.xapi.op(cmd=cmd)
results_element = self.xapi.element_result
job_element = results_element.find('.//job')
if job_element is not None:
job_id = job_element.text
if not self.wait_for_job(job_id):
raise SkilletLoaderException('Could not update dynamic content')
print(f'Installing latest and greatest ')
install_cmd = f'<request><content><upgrade><install>' \
f'<version>latest</version><commit>no</commit></install></upgrade></content></request>'
self.xapi.op(cmd=install_cmd)
results_element = self.xapi.element_result
job_element = results_element.find('.//job')
if job_element is not None:
job_id = job_element.text
if not self.wait_for_job(job_id):
raise SkilletLoaderException('Could not install dynamic content')
else:
print(f'No job returned to track')
return True
except PanXapiError:
print('Could not check for updated dynamic content')
return False
def check_content_updates(self, content_type: str) -> (str, None):
"""
Iterate through all available content of the specified type, locate and return the version with the highest
version number. If that version is already installed, return None as no further action is necessary
:param content_type: type of content to check
:return: version-number to download and install or None if already at the latest
"""
latest_version = ''
latest_version_first = 0
latest_version_second = 0
latest_version_current = 'no'
try:
print('Checking for latest content...')
self.xapi.op(cmd=f'<request><{content_type}><upgrade><check/></upgrade></{content_type}></request>')
er = self.xapi.element_root
for entry in er.findall('.//entry'):
version = entry.find('./version').text
current = entry.find('./current').text
# version will have the format 1234-1234
version_parts = version.split('-')
version_first = int(version_parts[0])
version_second = int(version_parts[1])
if version_first > latest_version_first and version_second > latest_version_second:
latest_version = version
latest_version_first = version_first
latest_version_second = version_second
latest_version_current = current
if latest_version_current == 'yes':
return None
else:
return latest_version
except PanXapiError:
return None
def wait_for_job(self, job_id: str, interval=10, timeout=600) -> bool:
"""
Loops until a given job id is completed. Will timeout after the timeout period if the device is
offline or otherwise unavailable.
:param job_id: id the job to check and wait for
:param interval: how long to wait between checks
:param timeout: how long to wait with no response before we give up
        :return: bool True once the job completes (status FIN), False on failure or timeout
"""
mark = time.time()
timeout_mark = mark + timeout
print(f'Waiting for job id: {job_id} to finish...')
while True:
try:
self.xapi.op(cmd=f'<show><jobs><id>{job_id}</id></jobs></show>')
except PanXapiError:
print(f'Could not locate job with id: {job_id}')
return False
if self.xapi.status == 'success':
job_element = self.xapi.element_result
job_status_element = job_element.find('.//status')
if job_status_element is not None:
job_status = job_status_element.text
if job_status == 'FIN':
print('Job is now complete')
return True
elif job_status == 'ACT':
job_progress_element = job_element.find('.//progress')
if job_progress_element is not None:
job_progress = job_progress_element.text
print(f'Progress is currently: {job_progress}')
else:
print('No job status element to be found!')
return False
else:
print(f'{self.xapi.xml_result()}')
if time.time() > timeout_mark:
return False
print('Waiting a bit longer')
time.sleep(interval)
| [
"[email protected]"
] | |
0ce6316d272b83c711221cef8a622474c6efa751 | a201ab654fdfca6faeb805c2713c9dc10251b6dc | /Python3 (old)/ex2.py | e65e5a6cce3346f3859e8c2f744afed3f73daf57 | [] | no_license | somnitrix11/automate | 6991dcb0198c305a125be19aaf2fc6d908f1c1b6 | e6b8bc504328349864b3b1e626bb67fa311e065f | refs/heads/master | 2022-11-30T04:43:54.120529 | 2020-07-26T15:12:50 | 2020-07-26T15:12:50 | 282,665,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | #This program calculates speed and distance problems
print("Input a speed and a distance:")
speed= float(input("Speed(in m/s):"))
distance= float(input("Distance(in m):"))
Time = distance / speed
print("The time taken by the object would be",Time,"seconds.")
#To convert C to F
c=float(input("Input temp. in Celsius:"))
f=(9*c/5)+32
print("Temperature is",f,"F.")
#Exercise
str1=input("Write:")
str2=input("Write again:")
a=str1+str2
print(a)
num1=int(input("Enter a no."))
num2=int(input("Enter another no."))
b=num1+num2
print(b)
| [
"[email protected]"
] | |
051901ba60282e7bc6afd98f010b913ac85977d3 | 9c56573bd524149afbf4175f271ade7b91a4c6b0 | /pre.py | 01c02e8a485c9afa56ecd8d28f05cb728d2c0157 | [] | no_license | oikiou/CNN_for_HVAC_FDD | fc379b2b04bf40aca3487b57781f4589b1edeb70 | 17ad773a9eef49ce48b41a3aecc4f97e47422626 | refs/heads/master | 2021-06-26T19:15:49.336056 | 2017-09-11T10:58:11 | 2017-09-11T10:58:11 | 103,123,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,061 | py | import numpy as np
import pandas as pd
import tensorflow as tf
#input
row_data = np.loadtxt("7.csv", delimiter=",", skiprows=1)
# Min-max normalization of each column (NaNs from constant columns are replaced with 0)
row_data=pd.DataFrame(row_data)
standed_data=np.nan_to_num(row_data.apply(lambda x: (x - np.min(x)) / (np.max(x) - np.min(x))))
# Randomly split the data: 70% train, 30% test
def sprite(standed_data, n=0.7):
index = np.random.choice(len(standed_data), (int(len(standed_data) * n)), replace=False)
print(len(index),index)
train_data = [standed_data[i] for i in index]
#test_data = [standed_data[i] for i in range(len(standed_data)) if i not in index]
test_data_list = np.array(list(set(range(len(standed_data))) - set(index)))
print(len(test_data_list),test_data_list)
test_data = [standed_data[i] for i in test_data_list]
#print(test_data)
return [train_data, test_data]
# Alternative: split into train and test by time order (first 70% train, remainder test)
def sprite_by_time(standed_data, n=0.7):
index = [i for i in range(int(len(standed_data) * n))]
train_data = [standed_data[i] for i in index]
test_data = [standed_data[i] for i in range(len(standed_data)) if i not in index]
return [train_data, test_data]
[train_data, test_data] = sprite(standed_data)
#[train_data, test_data] = sprite_by_time(standed_data)
train_data = np.array(train_data)
test_data = np.array(test_data)
#分x,y
train_x = train_data[:,0:-1]
train_y = train_data[:,-1]
test_x = test_data[:,0:-1]
test_y = test_data[:,-1]
print(train_y,test_y)
# One-hot encode y (split into n columns) for a softmax output
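# e.g. with n=7, a scaled label of 0.5 becomes index int(0.5*6)=3 -> [0, 0, 0, 1, 0, 0, 0]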
def sp_softmax(input,n):
result=[]
for i in range(len(input)):
temp=np.zeros(n)
temp[int(input[i]*(n-1))]=1
result.append(temp)
return result
# 7-class classification problem
train_y = sp_softmax(train_y,7)
test_y = sp_softmax(test_y,7)
# Output: write the processed splits to CSV files
np.savetxt('new_train_x.csv', train_x, delimiter = ',', fmt="%.3f")
np.savetxt('new_train_y.csv', train_y, delimiter = ',', fmt="%.3f")
np.savetxt('new_test_x.csv', test_x, delimiter = ',', fmt="%.3f")
np.savetxt('new_test_y.csv', test_y, delimiter = ',', fmt="%.3f")
| [
"[email protected]"
] | |
4d8d26efe3b7019edb01e9d8bda2c443a49b9e38 | 3b504a983f1807ae7c5af51078bfab8c187fc82d | /client/adapters/IStatsAdapter.py | 09fc72b393427f042a93a4eee9e8594f486462b9 | [] | no_license | SEA-group/wowp_scripts | 7d35fd213db95ea6b3dbd1ec6d3e0f13de86ba58 | 2fe54a44df34f2dcaa6860a23b835dcd8dd21402 | refs/heads/master | 2021-09-07T23:10:13.706605 | 2018-03-02T17:23:48 | 2018-03-02T17:23:48 | 117,280,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,718 | py | # Embedded file name: scripts/client/adapters/IStatsAdapter.py
from locale import getpreferredencoding
from time import localtime, strftime
import Settings
import db.DBLogic
from HelperFunctions import wowpRound
from Helpers.i18n import localizeAchievements, getFormattedTime, localizeAirplaneLong, localizeTimeIntervalHM, localizeTimeIntervalMS, localizeTooltips, separateThousandths
from adapters.DefaultAdapter import DefaultAdapter
from consts import PLANE_TYPE, PLANE_TYPE_NAME
NATION_FLAG_TEMPLATE = 'icons/shop/flagAchiev{0}.dds'
class IPlaneStatsAdapter(DefaultAdapter):
def __call__(self, account, ob, **kw):
stats = ob['stats']
lastGameTime = stats['lastGameTime']
ob['lastGameTime'] = getFormattedTime(lastGameTime, Settings.g_instance.scriptConfig.timeFormated['dmYHM']) if lastGameTime > 0 else ''
ob['row'] = stats
ob['stats'] = _convertPlaneStats(stats)
return ob
class ISummaryStatsAdapter(DefaultAdapter):
def __call__(self, account, ob, **kw):
stats = ob['stats']
lastGameTime = stats['lastGameTime']
ob['lastGameTime'] = getFormattedTime(lastGameTime, Settings.g_instance.scriptConfig.timeFormated['dmYHM']) if lastGameTime > 0 else ''
ob['flighttime'] = stats['flighttime']
ob['flighttimeStr'] = localizeTimeIntervalHM(stats['flighttime'])
ob['createdAt'] = strftime(Settings.g_instance.scriptConfig.timeFormated['dmYHM'], localtime(float(ob['stats']['createdAt']))).decode(getpreferredencoding())
ob['stats'] = _convertSummeryStats(stats)
return ob
class IShortPlaneStatsAdapter(DefaultAdapter):
def __call__(self, account, ob, **kw):
ob = super(IShortPlaneStatsAdapter, self).__call__(account, ob, **kw)
ob['flighttimeStr'] = localizeTimeIntervalHM(ob['flighttime'])
return ob
class IShortPlaneDescription(DefaultAdapter):
def __call__(self, account, ob, **kw):
ob = super(IShortPlaneDescription, self).__call__(account, ob, **kw)
planeID = kw['idTypeList'][0][0]
planeData = db.DBLogic.g_instance.getAircraftData(planeID).airplane
ob['planeName'] = localizeAirplaneLong(planeData.name)
ob['level'] = planeData.level
ob['icoPath'] = planeData.iconPath
ob['flagPath'] = NATION_FLAG_TEMPLATE.format(planeData.country)
ob['nationID'] = db.DBLogic.g_instance.getNationIDbyAircraftID(planeID)
ob['planeID'] = planeID
return ob
class IPlayerSummaryStatsAdapter(DefaultAdapter):
def __call__(self, account, ob, **kw):
stats = ob['stats']
lastGameTime = stats['lastGameTime']
ob['lastGameTime'] = getFormattedTime(lastGameTime, Settings.g_instance.scriptConfig.timeFormated['dmYHM']) if lastGameTime > 0 else ''
ob['flighttime'] = stats['flighttime']
ob['flighttimeStr'] = localizeTimeIntervalHM(stats['flighttime'])
ob['createdAt'] = strftime(Settings.g_instance.scriptConfig.timeFormated['dmYHM'], localtime(float(stats['createdAt']))).decode(getpreferredencoding()) if stats['createdAt'] > 0 else ''
ob['stats'] = _convertSummeryStats(stats)
return ob
class IPlayerShortPlaneStatsAdapter(DefaultAdapter):
def __call__(self, account, ob, **kw):
ob = super(IPlayerShortPlaneStatsAdapter, self).__call__(account, ob, **kw)
ob['flighttimeStr'] = localizeTimeIntervalHM(ob['flighttime'])
return ob
class IPlayerShortPlaneDescriptionAdapter(DefaultAdapter):
def __call__(self, account, ob, **kw):
ob = super(IPlayerShortPlaneDescriptionAdapter, self).__call__(account, ob, **kw)
import db.DBLogic
planeData = db.DBLogic.g_instance.getAircraftData(kw['idTypeList'][1][0]).airplane
ob['planeName'] = localizeAirplaneLong(planeData.name)
ob['level'] = planeData.level
ob['icoPath'] = planeData.iconPath
ob['flagPath'] = NATION_FLAG_TEMPLATE.format(planeData.country)
ob['nationID'] = db.DBLogic.g_instance.getNationIDbyAircraftID(kw['idTypeList'][1][0])
ob['planeID'] = kw['idTypeList'][1][0]
return ob
class IPlayerPlaneStatsAdapter(DefaultAdapter):
def __call__(self, account, ob, **kw):
stats = ob['stats']
lastGameTime = stats['lastGameTime']
ob['lastGameTime'] = getFormattedTime(lastGameTime, Settings.g_instance.scriptConfig.timeFormated['dmYHM']) if lastGameTime > 0 else ''
ob['stats'] = _convertPlaneStats(stats)
return ob
def _percent(total, partial, precision = 0):
if total > 0:
return wowpRound(partial * 100.0 / total, precision)
return 0
def _statRecord(name = None, value = None, percentValue = None, title = None, tooltip = None):
return dict(name=localizeAchievements(name), value=separateThousandths(value) if isinstance(value, int) else value, percentValue=percentValue, title=localizeAchievements(title) if title else None, tooltip=localizeTooltips(tooltip) if tooltip else None)
def ftostr(val):
return format(val, '.2f')
def _convertCommonStats(stats):
return [_statRecord(None, None, None, 'ACHIEVEMENTS_BETTLE_EFFICIENCY'),
_statRecord('ACHIEVEMENTS_AIRCRAFTS_DESTROYED_2-0', stats['pKill']),
_statRecord('ACHIEVEMENTS_WINS_IN_GROUP_2-0', stats['pAssist']),
_statRecord('ACHIEVEMENTS_DEFENDER-BOMBER_DESTROYED', stats['dKill'] + stats['bKill']),
_statRecord('ACHIEVEMENTS_DEFENDER-BOMBER_DESTROYED_ASSISTS', stats['dAssist'] + stats['bAssist']),
_statRecord('ACHIEVEMENTS_AIR_TARGETS_DESTROYED_AVERAGE_PER_FLIGHT', int(round((stats['pKill'] + stats['dKill'] + stats['bKill']) / float(stats['flights']) if stats['flights'] else 0))),
_statRecord('ACHIEVEMENTS_GROUND_OBJECTS_DESTROYED', stats['gKill']),
_statRecord('ACHIEVEMENTS_GROUND_OBJECTS_DESTROYED_ASSIST', stats['gAssist']),
_statRecord('ACHIEVEMENTS_GROUND_OBJECTS_DESTROYED_AVERAGE_PER_FLIGHT', int(round(stats['gKill'] / float(stats['flights']) if stats['flights'] else 0))),
_statRecord('ACHIEVEMENTS_PARTICIPATION_IN_CAP_SECTOR', stats['zoneCapture']),
_statRecord('ACHIEVEMENTS_AIRCRAFTS_DESTROED_IN_DEF_SECTOR', stats['pKillDefZone']),
_statRecord(None, None, None, 'ACHIEVEMENTS_HEROIC_DEEDS'),
_statRecord('ACHIEVEMENTS_MAX_AIRCRAFTS_DESTROED_PER_BATTLE', stats['pKillMax']),
_statRecord('ACHIEVEMENTS_MAX_DEFENDER-BOMBER_DESTROED_PER_BATTLE', stats['dbKillMax']),
_statRecord('ACHIEVEMENTS_MAX_GROUND_OBJECT_DESTROED_PER_BATTLE', stats['gKillMax']),
_statRecord('ACHIEVEMENTS_MAX_DAMAGE_AIR_TARGETS_PER_BATTLE', int(round(stats['atDamageMax']))),
_statRecord('ACHIEVEMENTS_MAX_DAMAGE_AIRCRAFT_PER_BATTLE', int(round(stats['pDamageMax']))),
_statRecord('ACHIEVEMENTS_MAX_DAMAGE_DEFENDER-BOMBER_PER_BATTLE', int(round(stats['dbDamageMax']))),
_statRecord('ACHIEVEMENTS_MAX_DAMAGE_GROUND_OBJECT_PER_BATTLE', int(round(stats['gDamageMax']))),
_statRecord('ACHIEVEMENTS_MAX_AIRCRAFT_DESTROED_IN_DEF_SECTOR', stats['pKillDefZoneMax'])]
def _convertSummeryStats(stats):
records = [_statRecord('ACHIEVEMENTS_TOTAL_BATTLES', stats['battles']),
_statRecord('ACHIEVEMENTS_WINS', stats['wins'], _percent(stats['battles'], stats['wins'], 2)),
_statRecord('ACHIEVEMENTS_DEFEATS', stats['losses'], _percent(stats['battles'], stats['losses'], 2)),
_statRecord('ACHIEVEMENTS_DRAWS', stats['draws'], _percent(stats['battles'], stats['draws'], 2)),
_statRecord('ACHIEVEMENTS_STAT_IN_NEW_MODE_CONQUEST', localizeTimeIntervalHM(stats['flighttime']))]
records.extend(_convertCommonStats(stats))
records.append(_statRecord(None, None, None, 'ACHIEVEMENTS_XP_AND_BATTLESCORE'))
records.append(_statRecord('ACHIEVEMENTS_AVG_XP_PER_BATTLE', int(stats['baseXPAvg'])))
records.append(_statRecord('ACHIEVEMENTS_MAX_XP_FOR_BATTLE', stats['baseXPMax']))
records.append(_statRecord('ACHIEVEMENTS_AVERAGE_BATTLE_POINTS_PER_BATTLE', int(stats['bScoreAvg'])))
records.append(_statRecord('ACHIEVEMENTS_MAX_BATTLE_POINTS_PER_BATTLE', stats['bScoreMax']))
idx = 16
coeff = 20
def _addEfficiency(plType):
flights = stats['flightsByPlType'].get(plType, 0)
return _statRecord('ACHIEVEMENTS_CLASS_EFFICIENCY_%s' % PLANE_TYPE.toStr(plType), '{}%'.format(round(stats['ranksByPlType'].get(plType, 0) / float(flights) * coeff, 1) if flights else 0.0))
records.insert(idx, _addEfficiency(PLANE_TYPE.BOMBER))
records.insert(idx, _addEfficiency(PLANE_TYPE.ASSAULT))
records.insert(idx, _addEfficiency(PLANE_TYPE.HFIGHTER))
records.insert(idx, _addEfficiency(PLANE_TYPE.NAVY))
records.insert(idx, _addEfficiency(PLANE_TYPE.FIGHTER))
records.insert(idx, _statRecord(None, None, None, 'ACHIEVEMENTS_CLASS_EFFICIENCY'))
return records
def _convertPlaneStats(stats):
records = [_statRecord('ACHIEVEMENTS_TOTAL_BATTLES', stats['battles']),
_statRecord('ACHIEVEMENTS_TOTAL_FLIGHT', stats['flights']),
_statRecord('ACHIEVEMENTS_AVERAGE_DURATION_FLIGHT', localizeTimeIntervalMS(stats['flighttimeAvg'])),
_statRecord('ACHIEVEMENTS_ALL_DURATION_ON_PLANES', localizeTimeIntervalHM(stats['flighttime']))]
records.extend(_convertCommonStats(stats))
records.append(_statRecord(None, None, None, 'ACHIEVEMENTS_XP_AND_BATTLESCORE'))
records.append(_statRecord('ACHIEVEMENTS_AVERAGE_EXP_PER_FLIGHT', int(stats['baseXPAvg'])))
records.append(_statRecord('ACHIEVEMENTS_MAX_XP_FOR_BATTLE', stats['baseXPMax']))
records.append(_statRecord('ACHIEVEMENTS_AVERAGE_BATTLE_POINTS_PER_FLIGHT', int(stats['bScoreAvg'])))
records.append(_statRecord('ACHIEVEMENTS_MAX_BATTLE_POINTS_PER_BATTLE', stats['bScoreMax']))
return records | [
"[email protected]"
] | |
f6a772815183c864a504e01a1ad49b1f99be9bf8 | cbac194e831c94291627d2a94a5713cb8e0e6ed4 | /chainer-feature-extraction/copy_model.py | aa39c3fed4d4e66079d67f4f355b4311558ddf77 | [
"MIT"
] | permissive | Doarakko/chainer-playground | 4e6f5f41f1adfe0569578c4a3c7b89b0b0e0a76e | 5d5eb0078f7d2f6dc5693fb76369a434cbc7d730 | refs/heads/master | 2021-09-24T08:11:31.726822 | 2018-10-05T13:36:59 | 2018-10-05T13:36:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,123 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import chainer
# Copies the parameters of a trained (source) model into a destination chainer.Chain.
# Links are copied only when their names, types and parameter shapes all match; mismatches are skipped with a warning.
def copy_model(src, dst):
assert isinstance(src, chainer.Chain)
assert isinstance(dst, chainer.Chain)
for child in src.children():
if child.name not in dst.__dict__: continue
dst_child = dst[child.name]
if type(child) != type(dst_child): continue
if isinstance(child, chainer.Chain):
copy_model(child, dst_child)
if isinstance(child, chainer.Link):
match = True
for a, b in zip(child.namedparams(), dst_child.namedparams()):
if a[0] != b[0]:
match = False
break
if a[1].data.shape != b[1].data.shape:
match = False
break
if not match:
print('[Warning] {0}'.format(child.name))
continue
for a, b in zip(child.namedparams(), dst_child.namedparams()):
b[1].data = a[1].data
print('[Copy] {0}'.format(child.name)) | [
"[email protected]"
] | |
5177f2f778ab81b6e955d5b38dd9d053885d4200 | f0ce82edf0b66d70375841242108168ea7cfe925 | /LegoRL/targets/GAE.py | a88f26a4e7b73a3748e7e4eadde520f8ff03bd66 | [] | no_license | FortsAndMills/Lego-Reinforcement-Learning | 45b547ce398a3d2fa34f7b49b139a18983249bc2 | 92e07a085396e993657e3985672c3b3bb205c1c6 | refs/heads/master | 2021-07-11T05:43:35.701048 | 2020-12-04T15:56:23 | 2020-12-04T15:56:23 | 220,832,981 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,260 | py | from LegoRL.core.RLmodule import RLmodule
from LegoRL.buffers.storage import stack
import torch
class GAE(RLmodule):
"""
Generalized Advantage Estimation (GAE) upgrade of A2C.
Based on: https://arxiv.org/abs/1506.02438
Args:
tau - float, from 0 to 1
Provides: returns, advantage
"""
def __init__(self, sys, tau=0.95):
super().__init__(sys)
self.tau = tau
def __call__(self, rewards, values, discounts, last_V):
'''
        Calculates the GAE estimate for a rollout.
        input: rewards (Reward, one per rollout step)
        input: values (V, one per rollout step)
        input: discounts (Discount, one per rollout step)
        input: last_V (V, bootstrap value for the state after the last step)
        output: V (GAE advantage estimates, one per rollout step)
'''
with torch.no_grad():
returns = []
gae = 0
next_values = last_V
for step in reversed(range(rewards.rollout_length)):
advantage = next_values.one_step(rewards[step], discounts[step]).subtract_v(values[step])
next_values = values[step]
gae = advantage + gae * discounts[step] * self.tau
returns.append(gae)
return stack(returns[::-1])
def hyperparameters(self):
return {"GAE tau": self.tau}
def __repr__(self):
return f"Estimates GAE advantages" | [
"[email protected]"
] | |
2bd5217c33d6decf4c58aaa786f5784e0daf3bce | aee0b03365591da00a927c1275f680e6b726a11c | /venv/bin/wheel | 04553c18a8708dbc0abf8b3cf63f288b684bd2f2 | [] | no_license | H-u-g-o/Chatbot_flask | e66c22cf0d9e90985b0e465a114ea5637cde1093 | 8643f9870577366eda65b0f03606ba0a3fb03e39 | refs/heads/master | 2020-05-20T06:11:06.648962 | 2019-06-07T10:23:09 | 2019-06-07T10:23:09 | 185,421,196 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | #!/home/hugo/Desktop/cours/flask/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
ad673c95708181aa0d00a8c6610b7a39a39d5c7b | 1a698cc48f23ddfc0f607366f462466ddc764c7e | /Code/ManualTextClassification.py | 7661d322465bb077dd9adb33e657fb48b74e5fc2 | [] | no_license | dyp6/Technical-Test-Sorcero | 6608fa398d992f5964db27688998b751ae93c1fe | 61c03b35d311d5b16ed1fa2b27a0a25595b253e4 | refs/heads/main | 2023-05-31T07:21:23.599808 | 2021-06-25T18:05:28 | 2021-06-25T18:05:28 | 377,518,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,333 | py | # MODIFIED FROM: https://atheros.ai/blog/text-classification-with-transformers-in-tensorflow-2
import pandas as pd
import tensorflow as tf
import tensorflow_datasets as tfds
from transformers import TFBertForSequenceClassification, BertTokenizer
def convert_example_to_feature(tokenizer,claim,max_length):
# combine step for tokenization, WordPiece vector mapping and will
#add also special tokens and truncate reviews longer than our max length
return tokenizer.encode_plus(claim,
add_special_tokens = True, # add [CLS], [SEP]
max_length = max_length,
                  padding = 'max_length', # pad every sequence to max_length with [PAD] tokens
return_attention_mask = True, # add attention mask to not focus on pad tokens
)
# map inputs into the dict format expected by TFBertForSequenceClassification
def map_example_to_dict(input_ids, attention_masks, token_type_ids, label):
return {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"attention_mask": attention_masks,
}, label
def encode_examples(ds,tkzr,max_len, limit=-1):
# prepare list, so that we can build up final TensorFlow dataset from slices.
input_ids_list = []
token_type_ids_list = []
attention_mask_list = []
label_list = []
if (limit > 0):
ds = ds.take(limit)
for review, label in tfds.as_numpy(ds):
bert_input = convert_example_to_feature(tkzr,review.decode(),max_len)
input_ids_list.append(bert_input['input_ids'])
token_type_ids_list.append(bert_input['token_type_ids'])
attention_mask_list.append(bert_input['attention_mask'])
label_list.append([label])
return tf.data.Dataset.from_tensor_slices((input_ids_list,
attention_mask_list,
token_type_ids_list,
label_list)).map(map_example_to_dict)
def main():
# tf.data Guide: https://www.tensorflow.org/guide/data
# LOOK UP TFRecord Format for tf.data.TFRecordDataset()
# https://www.tensorflow.org/api_docs/python/tf/data/TFRecordDataset
tr = pd.read_csv("../Data/RawDataCsvFormat/train.csv")
val = pd.read_csv("../Data/RawDataCsvFormat/dev.csv")
te = pd.read_csv("../Data/RawDataCsvFormat/test.csv")
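    # Note: tr/val/te above are loaded but not used below; this example fine-tunes on the tfds 'imdb_reviews' split instead.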
(ds_train, ds_test), ds_info = tfds.load('imdb_reviews',
split = (tfds.Split.TRAIN, tfds.Split.TEST),
as_supervised=True,
with_info=True)
print('info',ds_info)
# can be up to 512 for BERT
max_length = 256
# the recommended batches size for BERT are 16,32 ... however on this
# dataset we are overfitting quite fast
# and smaller batches work like a regularization.
# You might play with adding another dropout layer instead.
batch_size = 16
bert_tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
# train dataset
ds_train_encoded = encode_examples(ds_train,
bert_tokenizer,
max_length).shuffle(10000).batch(batch_size)
# test dataset
ds_test_encoded = encode_examples(ds_test,
bert_tokenizer,
max_length).batch(batch_size)
# recommended learning rate for Adam 5e-5, 3e-5, 2e-5
learning_rate = 2e-5
# we will do just 1 epoch for illustration, though multiple epochs might
# be better as long as we will not overfit the model
number_of_epochs = 3
# model initialization
model = TFBertForSequenceClassification.from_pretrained('bert-base-cased')
# optimizer Adam
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate, epsilon=1e-08)
    # we do not have one-hot vectors, so we can use sparse categorical cross-entropy and accuracy
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metric = tf.keras.metrics.SparseCategoricalAccuracy('accuracy')
model.compile(optimizer=optimizer, loss=loss, metrics=[metric])
bert_history = model.fit(ds_train_encoded,
epochs=number_of_epochs,
validation_data=ds_test_encoded) | [
"[email protected]"
] | |
04f4ba77e44ef8c6c0a5d643240ccd75565461aa | 5837619fafb9381a87e3eab9665ef1ed2353a88b | /CodigosEnLosQueNicolasNoCree/obstaculos.py | fbe5593e38ab01e73d12367ba3e253a273dcd9cf | [] | no_license | Switchfools/AutonomousRobotR-Boost | 1a7e11563e976a4880147ae6c46284e956f987e8 | ddbe465100dfbb0f48863a42ac17caf13950e913 | refs/heads/master | 2021-04-09T11:52:02.218373 | 2018-05-02T15:59:54 | 2018-05-02T15:59:54 | 125,576,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,870 | py | import imageio
import numpy as np
from scipy.spatial import Voronoi, voronoi_plot_2d
import matplotlib.pyplot as plt
def Voro():
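    # Overview of this routine:
    #   1) rasterise rectangular obstacles plus the map border into a 500x700 occupancy grid,
    #   2) collect obstacle corner cells and build their Voronoi diagram,
    #   3) keep only Voronoi ridges whose endpoints lie inside the map and in free space,
    #   4) link the hard-coded start/goal points to their nearest Voronoi vertices and
    #      return the adjacency matrix and vertex coordinates of the resulting roadmap.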
M=500
N=700
Obstaculos=np.zeros([M,N])
centros=np.zeros([M,N])
Undos=list()
Nodos=list()
ConNodos=list()
for i in range(int(M/2 - 50),int(M/2 + 50)):
for j in range(int(N/2 - 50),int(N/2 + 50)):
Obstaculos[i,j]=255
for i in range(int(M/2 - 200),int(M/2 - 150)):
for j in range(int(N/2 - 50),int(N/2 + 50)):
Obstaculos[i,j]=255
for i in range(int(M/2 - 200),int(M/2 - 150)):
for j in range(int(N/2 - 200),int(N/2 - 150)):
Obstaculos[i,j]=255
for i in range(int(M - 200),int(M - 150)):
for j in range(int(N - 200),int(N- 150)):
Obstaculos[i,j]=255
for i in range(int(M - 50),int(M-25)):
for j in range(int(N/2 - 100),int(N/2 + 50)):
Obstaculos[i,j]=255
for i in range(M):
Obstaculos[i,0:10]=255
Obstaculos[i,N-10:N]=255
for i in range(N):
Obstaculos[0:10,i]=255
Obstaculos[M-10:M,i]=255
for i in range(1,M-1):
for j in range(1,N-1):
Med=Obstaculos[i+1,j]+Obstaculos[i+1,j+1]+Obstaculos[i+1,j-1]+Obstaculos[i,j+1]+Obstaculos[i,j-1]+Obstaculos[i-1,j]+Obstaculos[i-1,j+1]+Obstaculos[i-1,j-1]
if(Obstaculos[i,j]==255 and (Med==3*(255)or Med==7*(255))):
Undos.append([i,j])
for l in range(i-1,i+1):
for m in range(j-1,j+1):
centros[i,j]=255
vor = Voronoi(Undos)
voronoi_plot_2d(vor)
plt.savefig('voro.png')
fig = plt.figure()
plt.hold(True)
# Mark the Voronoi vertices.
for vpair in vor.ridge_vertices:
uno,dos=vor.vertices[vpair[0]]
tres,cuatro=vor.vertices[vpair[1]]
if uno<M and uno>0 and dos<N and dos>0 and tres<M and tres>0 and cuatro<N and cuatro>0 :
if(Obstaculos[int(uno),int(dos)]!=255 and Obstaculos[int(tres),int(cuatro)]!=255):
ConNodos.append([vpair[0],vpair[1]])
v0 = vor.vertices[vpair[0]]
v1 = vor.vertices[vpair[1]]
Nodos.append(v0)
Nodos.append(v1)
# Draw a line from v0 to v1.
plt.plot([v0[0], v1[0]], [v0[1], v1[1]], 'k', linewidth=1)
NNN=np.array(Nodos)
plt.plot(NNN[:,0], NNN[:, 1], 'ko', ms=4)
plt.savefig('realpath.png')
lena,lenb=np.shape(vor.vertices)
MAdyacencia=np.zeros([lena+2,lena+2],dtype=int)
for i in range(len(ConNodos)):
A,B=ConNodos[i]
MAdyacencia[A,B]=1
MAdyacencia[B,A]=1
inicio=[40,40]
final=[M-100,N-300]
bestlenIn=10000
bestlenEnd=10000
for i in range(len(Nodos)):
x,y=Nodos[i]
reallenIn=np.sqrt((x-inicio[0])**2+(y-inicio[1])**2)
reallenEnd=np.sqrt((x-final[0])**2+(y-final[1])**2)
if(reallenIn<bestlenIn):
bestlenIn=reallenIn
posIn=Nodos[i]
if(reallenEnd<bestlenEnd):
bestlenEnd=reallenEnd
posEnd=Nodos[i]
for i in range(len(vor.vertices)):
x,y=vor.vertices[i]
if(posIn[0]==x and posIn[1]==y):
MAdyacencia[0,i]=1
if(posEnd[0]==x and posEnd[1]==y):
MAdyacencia[i,0]=1
Vertices=np.zeros([lena+2,2])
for i in range(lena+2):
if(i==0):
Vertices[i,0]=inicio[0]
Vertices[i,1]=inicio[1]
elif(i!=0 and i!=lena+2):
Vertices[i,0]=vor.vertices[i-2,0]
Vertices[i,1]=vor.vertices[i-2,1]
else:
Vertices[i,0]=final[0]
Vertices[i,1]=final[1]
return(MAdyacencia,Vertices)
#plt.figure()
#plt.scatter(vor.ridge_vertices[:,0],vor.ridge_vertices[:,1])
#plt.show()
imageio.imwrite('obstaculos.png',Obstaculos)
Voro()
| [
"[email protected]"
] | |
434ddc0a39dad34c4020a3bcc9b6b089cde7d888 | 6b383d1e73be85cde74b1b01f4038236d32dfb24 | /tienda/migrations/0001_initial.py | ff052e554fa2fbbe1857a6e82b32e438411fe468 | [] | no_license | luthors/MiTindaOnLine | 73e63f19b84400c11c18159a1f65eb78260a93f3 | d788ac91d80358ace370926501d8b97c3c5bcced | refs/heads/main | 2023-08-05T18:08:41.136855 | 2021-10-12T22:29:50 | 2021-10-12T22:29:50 | 414,015,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,483 | py | # Generated by Django 3.2.7 on 2021-10-03 04:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='CategoriaProd',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('nombre', models.CharField(max_length=50)),
('created', models.DateTimeField(auto_now_add=True)),
('update', models.DateTimeField(auto_now_add=True)),
],
options={
'verbose_name': 'categoriaProd',
'verbose_name_plural': 'categoriasProd',
},
),
migrations.CreateModel(
name='Producto',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('nombre', models.CharField(max_length=50)),
('imagen', models.ImageField(blank=True, null=True, upload_to='tienda')),
('precio', models.FloatField()),
('disponibilidad', models.BooleanField(default=True)),
('categorias', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tienda.categoriaprod')),
],
options={
'verbose_name': 'producto',
'verbose_name_plural': 'productos',
},
),
]
| [
"[email protected]"
] | |
3336ab7328f75eaee6ce53e5339baef12dbb3f37 | cf639124cc2ea83a459b028817e63446addbc4f6 | /tours/apps/tour/migrations/0002_auto_20150807_1158.py | ac59f92e250bbb2280668f00f81c83838e896a43 | [
"Apache-2.0"
] | permissive | resilientred/OpenTourBuilder-Server | ff605b3d439643a2752b2438e382ca7383248d65 | e0afa39b27839cbb4db0445ade204b28dafd6c1d | refs/heads/master | 2020-04-11T11:32:50.452512 | 2017-06-07T15:22:42 | 2017-06-07T15:22:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,272 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import autoslug.fields
class Migration(migrations.Migration):
dependencies = [
('tour', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='tourinfo',
name='icon',
field=models.CharField(default=b'fa-info-circle', max_length=20, choices=[(b'fa-info-circle', b'Info'), (b'fa-certificate', b'Certificat'), (b'fa-circle-o-notch', b'Circle'), (b'fa-sticky-note', b'Sticky Note'), (b'fa-bookmark', b'Bookmark')]),
),
migrations.AlterField(
model_name='tour',
name='slug',
field=autoslug.fields.AutoSlugField(editable=False, populate_from=b'name', always_update=True, unique=True),
),
migrations.AlterField(
model_name='tourinfo',
name='info_slug',
field=autoslug.fields.AutoSlugField(editable=False, populate_from=b'name', always_update=True, unique=True),
),
migrations.AlterField(
model_name='tourstop',
name='slug',
field=autoslug.fields.AutoSlugField(always_update=True, populate_from=b'name', editable=False),
),
]
| [
"[email protected]"
] | |
19180fe87d05b23f4942f5c69caa88c9fb25a645 | 8e38169331b6fdec8cb61e2a8ed564023e2cba9a | /telluricpy/__init__.py | 5a1309d60edfe354a8ebb966a91bde80ceca426f | [
"MIT"
] | permissive | OpenGeoVis/telluricpy | 20b4dd0f77e9a60c3dd61dbfac2668c3994ffdc3 | b5cd5ac9e373137c33b2ecc98d4dfed4d0784699 | refs/heads/master | 2020-03-19T13:31:31.479146 | 2018-06-29T07:11:24 | 2018-06-29T07:11:24 | 136,583,691 | 0 | 0 | MIT | 2018-06-29T06:50:30 | 2018-06-08T07:32:21 | Python | UTF-8 | Python | false | false | 102 | py | from . import dataFiles
from . import modelOperations
from . import vtkTools
from . import modelTools
| [
"[email protected]"
] | |
867173ba1b83c430168b8a64f31c0ddd394a8acf | a9439676d1df659f1647ade7dd7f827604c3ff73 | /Source/Renamer/builder/BuildConfig.py | 9fbf54f36d96a45e361a2fd61ae3e9cc96974ab2 | [
"BSD-2-Clause"
] | permissive | KSP-RO/KerbalRenamer | b5dcd364119e2ca3f001e41f0ee1b6e5a65e457f | bb2d11d443e51b21f06355aa4e748d7c550074c1 | refs/heads/master | 2023-04-15T02:31:32.034511 | 2023-04-09T11:47:39 | 2023-04-09T11:47:39 | 53,220,293 | 7 | 21 | BSD-2-Clause | 2022-12-28T11:27:20 | 2016-03-05T19:53:00 | C# | UTF-8 | Python | false | false | 5,621 | py | '''
This script builds a Renamer config file from a few CSV files. This can be useful if anyone wants to add cultures, or add
names to existing ones without editing the cfg file itself.
NOTE:
1) the excel workbook is the source used to generate the CSV *LASTNAME.transposed.csv. The data comes from Wikipedia (lists of surnames)
and some scraped manually from forebears.io
2) The firstname dataset comes from https://raw.githubusercontent.com/MatthiasWinkelmann/firstname-database/master/firstnames.csv
It would be nice to add from cultures not represented in the MatthiasWinkelmann dataset. However, rather than spend lots of time
trying to get this right, this folder contains the tool for community contribution.
'''
class KSPculture:
def __init__(self, given_name):
self.name = given_name
self.MFIRSTNAME1 = []
self.MFIRSTNAME2 = []
self.MFIRSTNAME3 = []
self.FFIRSTNAME1 = []
self.FFIRSTNAME2 = []
self.FFIRSTNAME3 = []
self.LASTNAME1 = []
self.LASTNAME2 = []
self.LASTNAME3 = []
self.FLASTNAME1 = []
self.FLASTNAME2 = []
self.FLASTNAME3 = []
def Write(self, filehandle):
space = " "
filehandle.write(space + "Culture\n "+ space + "{\n" + 2 * space + "name = %s\n"%(self.name))
for item in ["MFIRSTNAME", "FFIRSTNAME", "LASTNAME"]:
for i in range(1,4):
fullname = "%s%d"%(item,i)
self._writeCollection(filehandle, fullname, self.__getattribute__(fullname))
if (self.FLASTNAME1):
self._writeCollection(filehandle, "FLASTNAME1", self.FLASTNAME1)
self._writeCollection(filehandle, "FLASTNAME2", self.FLASTNAME2)
self._writeCollection(filehandle, "FLASTNAME3", self.FLASTNAME3)
filehandle.write("%s}\n"%(space))
def _writeCollection(self, filehandle, collectionName, collection):
space = " "
collection.sort()
filehandle.write("%s%s\n%s{\n" % (2 * space, collectionName, 2 * space))
for name in collection:
filehandle.write(" key = %s\n"%(name))
filehandle.write("%s}\n" % (2 * space))
def SanitizeCulture(name):
name = name.replace(" ", "_")
name = name.replace("/", "_or_")
name = name.replace(".", "")
return name
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
## First names #########################################
data = []
fin = open("firstnames.world.csv")
for line in fin:
data.append(line.strip().split(";"))
cultures = {}
for nameline in data[1:]:
for column in range(2, len(nameline)):
if nameline[column] != "":
if (int(nameline[column]) <= -5):
continue
culture = SanitizeCulture(data[0][column])
if not culture in cultures:
cultures[culture] = KSPculture(culture)
if "?" in nameline[1]:
nameline[1] = "MF"
elif "F" in nameline[1]:
nameline[1] = "F"
elif "M" in nameline[1]:
nameline[1] = "M"
nameline[0] = nameline[0].replace("+", " ")
if nameline[1] in ["M", "MF"]:
cultures[culture].MFIRSTNAME1.append(nameline[0])
if nameline[1] in ["F", "MF"]:
cultures[culture].FFIRSTNAME1.append(nameline[0])
for c in cultures:
print(c)
## Last names #############################################################
with open("LASTNAME.transposed.csv", mode='r', encoding='utf-8-sig') as fin:
for line in fin:
line = line.strip().split(",")
if line[0] in cultures:
for name in line[1:]:
if name:
cultures[line[0]].LASTNAME1.append(name)
else:
break
else:
print("Missing culture: %s"%(line[0]))
## FLast names #############################################################
with open("FLASTNAME.transposed.csv", mode='r', encoding='utf-8-sig') as fin:
for line in fin:
line = line.strip().split(",")
if line[0] in cultures:
for name in line[1:]:
if name:
cultures[line[0]].FLASTNAME1.append(name)
else:
break
else:
print("Missing culture: %s" % (line[0]))
## Clean up
del cultures["East_Frisia"]
## Report
for culture in cultures:
print(culture+":")
print(" MFIRSTNAME: %d"%(len(cultures[culture].MFIRSTNAME1)))
print(" FFIRSTNAME: %d" % (len(cultures[culture].FFIRSTNAME1)))
print(" LASTNAME: %d" % (len(cultures[culture].LASTNAME1)))
if (len(cultures[culture].FLASTNAME1)):
print(" FLASTNAME: %d" % (len(cultures[culture].FLASTNAME1)))
## Write outfile ##########################################################
fout = open("KerbalRenamer.cfg", "w")
fout.write("KERBALRENAMER\n {\n cultureDescriptor = Nationality\n\n")
for culture in cultures:
cultures[culture].Write(fout)
# Profiles section
fout.write(" profile\n {\n name = equiprobable\n")
for culture in cultures:
fout.write(" %s = 1\n"%(culture))
fout.write(" }\n")
fout.write("}\n")
fout.close()
| [
"[email protected]"
] | |
8aaa163787dc262da38565496f748908143f20f2 | fe1d428a926f0ae63efa25335545bd0fbdbaf849 | /captain_room.py | 4db345ec1f2e0182837d2f64ff668028d37cd74c | [] | no_license | Dhanshree-Sonar/Interview-Practice | e0b20365f2106b89df17637c89d77ae7acf12c1b | 5b14190bd98d1d7bb7b61cc66835efccc50450c0 | refs/heads/master | 2021-01-24T10:04:51.182818 | 2018-04-17T03:25:05 | 2018-04-17T03:25:05 | 123,035,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 822 | py | ##Reference:
##https://www.hackerrank.com/challenges/py-the-captains-room/problem
##Read the problem on hackerrank page
def find_captain_room(k, rooms):
## room_nums = set(rooms)
## for room in room_nums:
## if rooms.count(room) == 1:
## return room
# Multiply all the unique room by group number including captain's room
# Minus the sum of room so we have (k-1)*captain's room
# Divide result by (k-1) to get captain's room number
    print((sum(set(rooms)) * k - sum(rooms)) // (k - 1))
k = 5
rooms = [1, 2, 3, 6, 5, 4, 4, 2, 5, 3, 6, 1, 6, 5, 3, 2, 4, 1, 2, 5, 1, 4,
3, 6, 8, 4, 3, 1, 5, 6, 2]
find_captain_room(k, rooms)
##Complexity:
##    Space = O(u), for the set of unique room numbers.
##    Time = O(n), a single pass to sum the room list.
| [
"[email protected]"
] | |
c3adcbeba8fc8166b6429a87de5ab17b4187ccfd | aabe7008e0eb77617f1a76cddb98e4b17fd5ce27 | /nni/experiment/rest.py | bdacc7c215ac759fdb551e7d4fa3d6e296e45fd1 | [
"MIT"
] | permissive | penghouwen/nni | a09a374a81be46fe246c425275585d5fe79404af | 2e6a2fd2df0d5700cb028b25156bb535a3fc227a | refs/heads/master | 2021-12-21T14:02:32.228973 | 2021-12-13T16:54:39 | 2021-12-13T16:54:39 | 435,926,123 | 1 | 0 | MIT | 2021-12-07T15:09:36 | 2021-12-07T15:09:35 | null | UTF-8 | Python | false | false | 1,156 | py | import logging
from typing import Any, Optional
import requests
_logger = logging.getLogger(__name__)
url_template = 'http://localhost:{}/api/v1/nni{}'
timeout = 20
def request(method: str, port: Optional[int], api: str, data: Any = None) -> Any:
if port is None:
raise RuntimeError('Experiment is not running')
url = url_template.format(port, api)
if data is None:
resp = requests.request(method, url, timeout=timeout)
else:
resp = requests.request(method, url, json=data, timeout=timeout)
if not resp.ok:
_logger.error('rest request %s %s failed: %s %s', method.upper(), url, resp.status_code, resp.text)
resp.raise_for_status()
if method.lower() in ['get', 'post'] and len(resp.content) > 0:
return resp.json()
def get(port: Optional[int], api: str) -> Any:
return request('get', port, api)
def post(port: Optional[int], api: str, data: Any) -> Any:
return request('post', port, api, data)
def put(port: Optional[int], api: str, data: Any) -> None:
request('put', port, api, data)
def delete(port: Optional[int], api: str) -> None:
request('delete', port, api)
| [
"[email protected]"
] | |
b072589f2271dcbb0fa60ed730ced44bb17c6fdd | 5b4823772981be8a9c6be22ad2974101163b209b | /server/travelitinerary/migrations/0002_itinerary_owner.py | 3d11856f692ea84271f2efc1017520682b4c2aac | [] | no_license | christineyinho/cs50-travel-itinerary | 69bd3828bd509fd29dc88d7f5a29f109d7c57f9b | 581c129e83dc95f92ee3b204eb17a83914d98b5e | refs/heads/master | 2023-02-03T07:02:29.956434 | 2020-12-23T01:57:42 | 2020-12-23T01:57:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | # Generated by Django 3.1 on 2020-12-07 05:41
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('travelitinerary', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='itinerary',
name='owner',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='itineraries', to=settings.AUTH_USER_MODEL),
),
]
| [
"[email protected]"
] | |
8a55b7afe474d1c272fa8bafa22e87a27f6efa37 | a9eede607432904dc80931f5c0cd7226ca3436b4 | /showcase/migrations/0002_auto_20160930_1654.py | 10f0fdc3fbf851aa0170dcde4fdf3c51b7e1e1cc | [] | no_license | DO2016/mysite | c979c0f5f7e7cef26a95465b1026bbf878877fca | ee0b6713a36420e8cab13f3b1d24504beed0c2c0 | refs/heads/master | 2020-09-25T00:07:49.169447 | 2017-01-10T14:32:54 | 2017-01-10T14:32:54 | 67,704,686 | 0 | 0 | null | 2016-12-09T10:22:29 | 2016-09-08T13:17:10 | Python | UTF-8 | Python | false | false | 707 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-30 13:54
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('showcase', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='composition',
old_name='Product',
new_name='product',
),
migrations.RenameField(
model_name='ingredient',
old_name='Products',
new_name='products',
),
migrations.AlterUniqueTogether(
name='composition',
unique_together=set([('ing', 'product')]),
),
]
| [
"[email protected]"
] | |
f48ee2cabb3338d40b3aaae10b3b34cc45096973 | 168d0e69d0d072659084e5d274672d83df2925e7 | /KCEF/sampler/hmc.py | 9637fff07fcc21ba9aa8fa7c19b4cdf1888c8614 | [
"BSD-3-Clause"
] | permissive | MichaelArbel/KCEF | 130d3eb05a087f8031bedcb5af873805ed2afa66 | cda1b6e4be5f172695e00aa3bc3cc58ccc30ecc2 | refs/heads/master | 2020-03-09T03:47:53.703923 | 2019-11-25T18:16:03 | 2019-11-25T18:16:03 | 128,572,868 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,846 | py | from abc import abstractmethod
import numpy as np
def standard_sqrt_schedule(t):
return 1. / np.sqrt(t + 1)
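# Both integrators below implement the standard leapfrog updates (eps = step_size):
#   p_half = p + (eps/2) * dlogq(q)           # half momentum step
#   q_new  = q - eps * dlogp(p_half)          # full position step
#   p_new  = p_half + (eps/2) * dlogq(q_new)  # half momentum step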
def leapfrog(q, dlogq, p, dlogp, step_size=0.3, num_steps=1):
# for storing trajectory
Ps = np.zeros((num_steps + 1, len(p)))
Qs = np.zeros(Ps.shape)
# create copy of state
p = np.array(p.copy())
q = np.array(q.copy())
Ps[0] = p
Qs[0] = q
# half momentum update
p = p - (step_size / 2) * -dlogq(q)
# alternate full variable and momentum updates
for i in range(num_steps):
q = q + step_size * -dlogp(p)
Qs[i + 1] = q
# precompute since used for two half-steps
dlogq_eval = dlogq(q)
# first half momentum update
p = p - (step_size / 2) * -dlogq_eval
# store p as now fully updated
Ps[i + 1] = p
# second half momentum update
if i != num_steps - 1:
p = p - (step_size / 2) * -dlogq_eval
return Qs, Ps
def leapfrog_no_storing(q, dlogq, p, dlogp, step_size=0.3, num_steps=1):
# create copy of state
p = np.array(p.copy())
q = np.array(q.copy())
# half momentum update
p = p + 0.5 * np.multiply(step_size , dlogq(q))
# alternate full variable and momentum updates
for i in range(num_steps):
q = q - np.multiply(step_size ,dlogp(p))
# precompute since used for two half-steps
dlogq_eval = dlogq(q)
# first half momentum update
p = p + 0.5 * np.multiply(step_size ,dlogq_eval)
# second half momentum update
if i != num_steps - 1:
p = p + 0.5 * np.multiply(step_size ,dlogq_eval)
return q, p
class ProposalBase(object):
def __init__(self, target, D, step_size, adaptation_schedule, acc_star):
self.target = target
self.D = D
self.step_size = step_size
self.adaptation_schedule = adaptation_schedule
self.acc_star = acc_star
self.t = 0
# some sanity checks
assert acc_star is None or acc_star > 0 and acc_star < 1
if adaptation_schedule is not None:
lmbdas = np.array([adaptation_schedule(t) for t in np.arange(100)])
assert np.all(lmbdas >= 0)
assert np.allclose(np.sort(lmbdas)[::-1], lmbdas)
def initialise(self):
pass
def proposal(self):
pass
def update(self, samples, acc_probs):
self.t += 1
previous_accpept_prob = acc_probs[-1]
if self.adaptation_schedule is not None and self.acc_star is not None:
# always update with weight
lmbda = self.adaptation_schedule(self.t)
self._update_scaling(lmbda, previous_accpept_prob)
def _update_scaling(self, lmbda, accept_prob):
        # difference between desired and actual acceptance rate
diff = accept_prob - self.acc_star
new_log_step_size = np.log(self.step_size) + lmbda * diff
new_step_size = np.exp(new_log_step_size)
print("Acc. prob. diff. was %.3f-%.3f=%.3f. Updating step-size from %s to %s." % \
(accept_prob, self.acc_star, diff, self.step_size, new_step_size))
self.step_size = new_step_size
class HMCBase(ProposalBase):
def __init__(self, target, momentum, num_steps_min=10, num_steps_max=100, step_size_min=0.05,
step_size_max=0.3, adaptation_schedule=standard_sqrt_schedule, acc_star=0.7):
if not num_steps_min<=num_steps_max:
raise ValueError("Minimum number of leapfrog steps (%d) must be larger than maximum number (%d)." % \
(num_steps_min, num_steps_max))
        if not step_size_min <= step_size_max:
            raise ValueError("Minimum leapfrog step size (%s) must not be larger than maximum step size (%s)." % \
                             (step_size_min, step_size_max))
step_size = np.array([step_size_min, step_size_max])
ProposalBase.__init__(self, target, momentum.D, step_size, adaptation_schedule, acc_star)
self.momentum = momentum
self.num_steps_min = num_steps_min
self.num_steps_max = num_steps_max
def _proposal_trajectory(self, current, current_log_pdf):
# sample momentum and leapfrog parameters
p0 = self.momentum.sample()
p0_log_pdf = self.momentum.log_pdf(p0)
num_steps = np.random.randint(self.num_steps_min, self.num_steps_max + 1)
step_size = np.random.rand() * (self.step_size[1] - self.step_size[0]) + self.step_size[0]
print("Simulating Hamiltonian flow trajectory.")
Qs, Ps = leapfrog(current, self.target.grad, p0, self.momentum.grad, step_size, num_steps)
# compute acceptance probability, extracting log_pdf of q
print("Computing acceptance probabilies.")
acc_probs = np.zeros(len(Qs))
log_pdf_q = np.zeros(len(Qs))
for i in range(len(Qs)):
p = Ps[i]
q = Qs[i]
p_log_pdf = self.momentum.log_pdf(p)
acc_probs[i], log_pdf_q[i] = self.accept_prob_log_pdf(current, q, p0_log_pdf, p_log_pdf, current_log_pdf)
return Qs, acc_probs, log_pdf_q
def proposal(self, current, current_log_pdf):
"""
"""
n, d = current.shape
# sample momentum and leapfrog parameters
p0 = self.momentum.sample(n)
p0_log_pdf = self.momentum.log_pdf(p0)
num_steps = np.random.randint(self.num_steps_min, self.num_steps_max + 1)
step_size = np.random.rand(n) * (self.step_size[1] - self.step_size[0]) + self.step_size[0]
step_size = np.reshape(step_size, [-1, 1])
#print("Simulating Hamiltonian flow.")
q, p = leapfrog_no_storing(current, self.target.grad, p0, self.momentum.grad, step_size, num_steps)
# compute acceptance probability, extracting log_pdf of q
#print("Computing acceptance probability.")
p_log_pdf = self.momentum.log_pdf(p)
acc_prob, log_pdf_q = self.accept_prob_log_pdf(current, q, p0_log_pdf, p_log_pdf, current_log_pdf)
return q, acc_prob, log_pdf_q
@abstractmethod
def accept_prob_log_pdf(self, current, q, p0_log_pdf, p_log_pdf, current_log_pdf=None):
# potentially re-use log_pdf of last accepted state
if current_log_pdf is None:
current_log_pdf = self.target.log_pdf(current)
log_pdf_q = self.target.log_pdf(q)
H0 = -current_log_pdf - p0_log_pdf
H = -log_pdf_q - p_log_pdf
difference = -H + H0
acc_prob = np.exp(np.minimum(0., difference))
return acc_prob, log_pdf_q
| [
"[email protected]"
] | |
decf87e0b30debfc4eb0e9ec4befffa7a1c251d5 | 29c24714cf9af9cbe6bfb3f2fa2c031c85b348f1 | /src/handlers/form.py | 7949e4ae7db2f7171c9f0dc8436f15c23051afa0 | [] | no_license | IP89/octopus | 66e8aa5bb366b75cc096888b53b4b56703f761a7 | b94c9148831ee3e2e7d9ea4ebce5dc319fb89986 | refs/heads/master | 2021-08-22T18:32:25.131364 | 2017-11-30T23:50:07 | 2017-11-30T23:50:07 | 112,670,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | import tornado.ioloop
import tornado.web
import json
from models.scrapper import Scrapper
from helpers.processing import *
from helpers.db import *
class FormHandler(tornado.web.RequestHandler):
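    # GET renders the word-cloud page; POST scrapes the given URL, ranks its words,
    # saves them via set_new_words() and returns the list as JSON.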
def get(self):
self.render("../templates/cloud.html", title="Cloud")
def post(self):
self.set_header("Content-Type", "text/plain")
url = self.get_argument("url")
scrapper = Scrapper(url);
w_list = scrapper.get_works_list();
top_words_ordered = order_top_list(w_list)
set_new_words(top_words_ordered)
self.write(json.dumps(top_words_ordered)) | [
"[email protected]"
] | |
8290bd478a8c2d44f9a3fa85685a885200c59165 | 81cbf9cc47c3efe1eb21723bf044d7f447be140d | /NumPy_Tutorial_7.py | 234e656ebccdd053beba01d9b5e04023b356ca56 | [] | no_license | code-of-the-future/Python-NumPy-Tutorials | 584cfffbdb982de96cdddc5ced071edb7f60ce9b | ea7ff6815b4b3e8ed73ddf6671b30de48054d5aa | refs/heads/main | 2023-06-07T14:55:04.183576 | 2021-06-28T17:01:10 | 2021-06-28T17:01:10 | 326,231,132 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 774 | py | # NumPy Tutorial 7 - Handy Operations for Data Analysis
# Importing relevant modules
import numpy as np
# Operations that come in handy for data analysis!
one_dim = np.array([1, 2, 3, 4, 5]) # [1 2 3 4 5]
two_dim = np.array([[1, 2, 3], [4, 5, 6]]) # [[1 2 3] [4 5 6]]
# Summations
print(sum(one_dim)) # Produces 15
print(sum(two_dim)) # Produces [5 7 9]
print(one_dim.sum()) # Produces 15
print(two_dim.sum()) # Produces 21
# Maximum and minimum
print(two_dim.min()) # Produces 1
print(two_dim.max()) # Produces 6
# We can add elements in each row and column
two_dim2 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(two_dim2)
# Summing elements within each column
print(two_dim2.sum(axis=0))
# Summing elements within each row
print(two_dim2.sum(axis=1))
| [
"[email protected]"
] | |
07e2654cb106940a7a39446d3f174317dd6f4f79 | c5d1e1abeb7f125ed366da99085975157e4cc995 | /example/examples_src/merge.py | d388679e5ec4def303d086d7042e716ae77a60a2 | [
"MIT"
] | permissive | Nouzan/scrapy-example | 63fd832a5ee164be0cf3b5f74e9b17967f1eebbc | 6d38419d5435dee351d5fca44aadc67de67d41ac | refs/heads/master | 2020-04-21T21:00:47.192274 | 2019-05-12T15:44:25 | 2019-05-12T15:44:25 | 169,864,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | import pandas as pd
from sys import argv
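# Usage (inferred from the argv handling below): python merge.py <output.csv> <input1.csv> [<input2.csv> ...]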
output_name = argv[1]
for filename in argv[2:]:
df = pd.read_csv(filename, header=1)
df.to_csv(output_name, mode='a', index=False, header=0)
| [
"[email protected]"
] | |
53d56b5295ef1c0c91985657a505ca7778a240d0 | f1913a497f430feb1d12e494ec1e3ae39e5ed913 | /KMeans/Kmeans.py | f4839236399dded8cbabbce4ea6426f493075647 | [] | no_license | rtapan25/ML-algos-from-scratch | 6dbb25915dca6c2751264a767637ad8956b2bae6 | 524f9158872e738e9abbd0523ac902386cd67ef6 | refs/heads/main | 2023-04-15T01:21:24.777098 | 2021-05-05T10:20:47 | 2021-05-05T10:20:47 | 364,537,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,370 | py | #Implementing KMeans algorithm from scratch
import numpy as np
from sklearn.metrics import pairwise_distances
class KMeans:
def __init__(self, k = 3):
self.k = k
def fit(self, data):
self.centeroids = self.init_centeroids(data)
self.no_of_iter = 1000
for _ in range(self.no_of_iter):
#Assigning Clusters using the Euclidian Distance
self.cluster_labels = self.assign_clusters(data)
#Updating the Centeroids
self.centeroids = self.update_centeroids(data)
return self
def predict(self, data):
return self.assign_clusters(data)
def init_centeroids(self, data):
#Random initialization of centeroids
initial_cent = np.random.permutation(data.shape[0])[:self.k]
self.centeroids = data[initial_cent]
return self.centeroids
def assign_clusters(self, data):
if data.ndim == 1:
data = data.reshape(-1,1)
dis_to_cent = pairwise_distances(data, self.centeroids, metric ='euclidean')
self.cluster_labels = np.argmin(dis_to_cent, axis=1)
return self.cluster_labels
def update_centeroids(self, data):
self.centeroids = np.array([data[self.cluster_labels == i].mean(axis = 0 ) for i in range(self.k)])
return self.centeroids
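# Example usage: a minimal sketch, not part of the original file; the random data below is purely illustrative.
if __name__ == "__main__":
    X = np.random.rand(100, 2)
    km = KMeans(k=3).fit(X)
    print(km.predict(X))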
| [
"[email protected]"
] | |
0307735a5c3a1b93215f0dbb6079311a511130aa | cee6275046e371ed26d10e1288a7c555e34306c1 | /projeto2_2020/venv/bin/tkconch | 5471b3bc00741567cc39e296eb7e2050c2acb9de | [] | no_license | DanielJMPinto/sio_proj_2 | 1060616a91e3f1e754344894aef53f1fd40e82a8 | 518d0e97b0660d255c34273c612a258602d493ad | refs/heads/master | 2023-02-15T05:52:52.777248 | 2021-01-03T22:24:18 | 2021-01-03T22:24:18 | 323,598,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | #!/home/daniel/uni/sio/projeto2_2020/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from twisted.conch.scripts.tkconch import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run())
| [
"[email protected]"
] | ||
fd6336527e32ba23233ed80c3af41fb92d405132 | d669444387b731a452c60a3e144dd33b8af04621 | /vowela.py | f5025a2c9ae62755a97ffac4c0a81845d561c0a7 | [] | no_license | vamsikrishna6668/python | 8116a4d3f5836bea636fd4ba9c035adc1d764034 | dc805a0096b27d5feda3b03ae4118f668777c0cb | refs/heads/master | 2020-04-01T19:48:01.590508 | 2018-10-18T10:05:12 | 2018-10-18T10:05:12 | 153,571,906 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | sk =input('enter a single character:')
if sk in('a','e','i','o','u','A','I','E','O','U'):
print('It is a vowel')
else:
print('''It is a consonant''') | [
"[email protected]"
] | |
fcd4db8356379cec770d77f2f31c758e4e6a01a0 | de84f9d44b59aa39a1612c683cf2de150ef59e9b | /easy_thumbnails/migrations/0013_auto__del_storage__del_field_source_storage__del_field_thumbnail_stora.py | 9b429e51b2880464524de0c7aae7c47d013fe624 | [] | no_license | evrenesat/uygulamatik | 25d7617d4ae6c10623b30d4a57731242efa9a4a7 | c90da279c6571549e35f51e097524752d9cc2936 | refs/heads/master | 2021-01-19T05:39:26.908079 | 2014-06-05T21:57:00 | 2014-06-05T21:57:00 | 65,093,382 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,748 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.core.files.storage import default_storage
import pickle
#from django.utils.hashcompat import md5_constructor
import hashlib
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Storage'
db.delete_table('easy_thumbnails_storage')
# Deleting field 'Source.storage'
db.delete_column('easy_thumbnails_source', 'storage_id')
# Adding index on 'Source', fields ['storage_hash']
db.create_index('easy_thumbnails_source', ['storage_hash'])
# Deleting field 'Thumbnail.storage'
db.delete_column('easy_thumbnails_thumbnail', 'storage_id')
# Adding index on 'Thumbnail', fields ['storage_hash']
db.create_index('easy_thumbnails_thumbnail', ['storage_hash'])
def backwards(self, orm):
# Removing index on 'Thumbnail', fields ['storage_hash']
db.delete_index('easy_thumbnails_thumbnail', ['storage_hash'])
# Removing index on 'Source', fields ['storage_hash']
db.delete_index('easy_thumbnails_source', ['storage_hash'])
# Adding model 'Storage'
db.create_table('easy_thumbnails_storage', (
('pickle', self.gf('django.db.models.fields.TextField')()),
('hash', self.gf('django.db.models.fields.CharField')(max_length=40, db_index=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('easy_thumbnails', ['Storage'])
# Create a storage object. This may obviously not be the storage
# object which the source / thumbnail objects actually belong to but
# at least it lets us reverse migrate.
storage = orm.Storage()
storage.pickle = pickle.dumps(default_storage)
storage.hash = hashlib.md5(storage.pickle).hexdigest()
storage.save()
# Adding field 'Source.storage'
db.add_column('easy_thumbnails_source', 'storage', self.gf('django.db.models.fields.related.ForeignKey')(default=storage.pk, to=orm['easy_thumbnails.Storage']), keep_default=False)
# Adding field 'Thumbnail.storage'
db.add_column('easy_thumbnails_thumbnail', 'storage', self.gf('django.db.models.fields.related.ForeignKey')(default=storage.pk, to=orm['easy_thumbnails.Storage']), keep_default=False)
models = {
'easy_thumbnails.source': {
'Meta': {'object_name': 'Source'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 10, 6, 7, 59, 6, 528762)'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'storage_hash': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'})
},
'easy_thumbnails.thumbnail': {
'Meta': {'object_name': 'Thumbnail'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 10, 6, 7, 59, 6, 528762)'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'thumbnails'", 'to': "orm['easy_thumbnails.Source']"}),
'storage_hash': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'})
}
}
complete_apps = ['easy_thumbnails']
| [
"[email protected]"
] | |
07c2b83ed32a76879df18b6b26849c9c4974513a | 683f0a9fb5617654a5bd9ced0a5572f799e53dbd | /server/IntegrationServers/gui_outline.py | 99acd32eb62adb3cc9cbf601e244e32726a397f8 | [] | no_license | samhmcg5/embedded | 27e35a000c9c6075227cef538745df86b144d66b | 4c7e98b6973786d53d0633b59ebab23987d1121a | refs/heads/master | 2021-03-22T01:30:21.167266 | 2017-12-09T18:04:58 | 2017-12-09T18:04:58 | 102,485,356 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,628 | py | from PyQt5.QtWidgets import QWidget, QApplication
import PyQt5.QtWidgets as qtw
import PyQt5.QtCore as qtc
from PyQt5.QtGui import QFont
import sys
from styles import stylesheet
###############################
### TOP LEVEL GUI COMPONENT ###
###############################
FONT_SIZE = 18
class TopLevelUI(QWidget):
statusSig = qtc.pyqtSignal(str,str)
def __init__(self):
super().__init__()
self.initUI()
self.setStyleSheet(stylesheet)
def initUI(self):
f = QFont()
f.setPointSize(FONT_SIZE)
self.setFont(f)
self.quotaframe = SetQuotaFrame()
self.currentnums = CurrentNumbersFrame()
self.delivstats = DelivNavStatusFrame()
vbox = qtw.QVBoxLayout()
vbox.addWidget(self.quotaframe)
vbox.addWidget(self.currentnums)
vbox.addWidget(self.delivstats)
self.setLayout(vbox)
self.setGeometry(300, 300, 800, 500)
self.setWindowTitle('Server Control')
self.show()
######################
### SET THE QUOTAS ###
######################
class SetQuotaFrame(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
vbox = qtw.QVBoxLayout()
self.zoneA = QuotaRow("Zone A")
self.zoneB = QuotaRow("Zone B")
self.zoneC = QuotaRow("Zone C")
self.sendButton = qtw.QPushButton("Send New Quotas")
vbox.addWidget(qtw.QLabel("SET QUOTAS"))
vbox.addWidget(self.zoneA)
vbox.addWidget(self.zoneB)
vbox.addWidget(self.zoneC)
vbox.addWidget(self.sendButton, alignment=qtc.Qt.AlignRight)
self.setLayout(vbox)
class QuotaRow(QWidget):
def __init__(self, name):
super().__init__()
self.name = name
self.initUI()
def initUI(self):
self.red = SpinBoxQuota("Red",5)
self.green = SpinBoxQuota("Green",5)
self.blue = SpinBoxQuota("Blue",5)
hbox = qtw.QHBoxLayout()
hbox.addWidget(qtw.QLabel(self.name + ":\t"))
hbox.addWidget(self.red)
hbox.addWidget(self.green)
hbox.addWidget(self.blue)
self.setLayout(hbox)
class SpinBoxQuota(qtw.QSpinBox):
def __init__(self, pre, maxim):
super().__init__()
self.setRange(0,maxim)
self.setPrefix(pre + ": ")
###############################
### DISPLAY CURRENT NUMBERS ###
###############################
class CurrentNumbersFrame(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.zoneA = NumsRow("Zone A")
self.zoneB = NumsRow("Zone B")
self.zoneC = NumsRow("Zone C")
vbox = qtw.QVBoxLayout()
vbox.addWidget(qtw.QLabel("CURRENT NUMBERS"))
vbox.addWidget(self.zoneA)
vbox.addWidget(self.zoneB)
vbox.addWidget(self.zoneC)
self.setLayout(vbox)
class NumsRow(QWidget):
def __init__(self, name):
super().__init__()
self.name = name
self.initUI()
def initUI(self):
self.red = qtw.QLabel("Red = 0")
self.green = qtw.QLabel("Green = 0")
self.blue = qtw.QLabel("Blue = 0")
hbox = qtw.QHBoxLayout()
hbox.addWidget(qtw.QLabel(self.name + ":\t"))
hbox.addWidget(self.red)
hbox.addWidget(self.green)
hbox.addWidget(self.blue)
self.setLayout(hbox)
def setNums(self, r, g, b):
self.red.setText("Red = %i" % r)
self.green.setText("Green = %i" % g)
self.blue.setText("Blue = %i" % b)
#############################
### DELIVERY NAVIG STATUS ###
#############################
class DelivNavStatusFrame(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
vbox = qtw.QVBoxLayout()
self.execStatus = ExecStatus()
self.posGrid = PositionGrid()
vbox.addWidget(qtw.QLabel("DELIVERY STATUS"))
vbox.addWidget(self.execStatus)
vbox.addWidget(self.posGrid)
self.setLayout(vbox)
class ExecStatus(QWidget):
def __init__(self):
super().__init__()
self.status = qtw.QLabel("IDLE")
hbox = qtw.QHBoxLayout()
hbox.addWidget(qtw.QLabel("Currently: "))
hbox.addWidget(self.status, alignment=qtc.Qt.AlignLeft, stretch=1)
self.setLayout(hbox)
    def setStatus(self, msg):
        self.status.setText(msg)
class PositionGrid(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.x = qtw.QLabel("X: 0")
self.y = qtw.QLabel("Y: 20")
self.ori = qtw.QLabel("OR: 0")
self.nx = SpinBoxQuota("X",90)
self.ny = SpinBoxQuota("Y",50)
self.nori = SpinBoxQuota("OR",359)
self.sendButton = qtw.QPushButton("Send Position")
gbox = qtw.QGridLayout()
gbox.addWidget(qtw.QLabel("Current:"), 0,0)
gbox.addWidget(self.x,0,1)
gbox.addWidget(self.y,0,2)
gbox.addWidget(self.ori,0,3)
gbox.addWidget(qtw.QLabel("Corrected:"), 1,0)
gbox.addWidget(self.nx,1,1)
gbox.addWidget(self.ny,1,2)
gbox.addWidget(self.nori,1,3)
gbox.addWidget(self.sendButton,1,4)
self.setLayout(gbox)
########################
### EXECUTE THE MAIN ###
########################
if __name__ == "__main__":
app = QApplication(sys.argv)
ex = TopLevelUI()
sys.exit(app.exec_())
| [
"[email protected]"
] | |
7f56a9ddb77ca93b0aba27a74961e627b2c5415e | 444cf9d995e1149dacbcbb2a217d6fd8b394608e | /met_ml/train/models.py | 42146b4806619126114930367bba27a1bce89002 | [
"Apache-2.0"
] | permissive | jhamman/met-ml | ca44f9e45964ded268bbd127efe09553e675fec2 | d1e971f0a472bfe563fa5cbc7eedd8b2183e1130 | refs/heads/main | 2021-06-29T17:16:28.930256 | 2021-03-01T06:28:37 | 2021-03-01T06:28:37 | 222,575,254 | 2 | 2 | Apache-2.0 | 2021-03-17T23:43:00 | 2019-11-19T00:51:34 | Jupyter Notebook | UTF-8 | Python | false | false | 1,515 | py | import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import FunctionTransformer, StandardScaler, MinMaxScaler
def elevation_scaler(x, feature_range=(0, 1), data_range=(-420, 8848)):
'''MinMaxScaler for elevations on Earth'''
fmin, fmax = feature_range
dmin, dmax = data_range
scale = (fmax - fmin) / (dmax - dmin)
x_scaled = scale * x + fmin - dmin * scale
return x_scaled
def latitude_scaler(x):
return np.sin(np.radians(x))
def day_of_year_scaler(x):
return np.cos((x - 1) / 365.25 * 2 * np.pi)
def fit_transformers(dfs):
"""takes a list of dataframes, returns a fit transformer"""
trans = {
"P": FunctionTransformer(np.cbrt, validate=False),
"elev": FunctionTransformer(elevation_scaler, validate=False),
"lat": FunctionTransformer(latitude_scaler, validate=False),
"t": FunctionTransformer(day_of_year_scaler, validate=False),
"t_min": StandardScaler(),
"t_max": StandardScaler(),
"SW_IN_F": MinMaxScaler(),
"LW_IN_F": MinMaxScaler(),
"PA_F": MinMaxScaler(),
"RH": MinMaxScaler(),
}
df = pd.concat(dfs)
transformers = {}
for key in df.columns:
transformers[key] = trans[key].fit(df[[key]])
return transformers
def transform_df(transformers, df):
out = pd.DataFrame(index=df.index)
for key in df:
out[key] = transformers[key].transform(df[[key]])
return out | [
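# Example usage: a minimal sketch, not part of the original module; df_a and df_b are assumed to be
# pandas DataFrames whose columns are a subset of the keys handled in fit_transformers above, e.g.:
#   transformers = fit_transformers([df_a, df_b])
#   df_a_scaled = transform_df(transformers, df_a)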
"[email protected]"
] | |
7ce4eb04206168d652015920e49fcf4d6f28653f | cffd82720d2c819e08751d6fd930f4b5bdc55d81 | /nested.py | f84d22c0b6ac1fe0288965aeb508a1064f4c8d35 | [] | no_license | royangsuman/pythonFiles | 304011fa8c8c000ae801024639457755306c7c31 | e22090cc0b0f3e2d93416b12ee48b33426984bc5 | refs/heads/main | 2022-12-23T11:39:56.767126 | 2020-10-02T23:58:12 | 2020-10-02T23:58:12 | 300,760,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | for x in range(4):
for y in range(4):
print(f'({x},{y})') | [
"[email protected]"
] | |
a1b49240c1dcc7ac8fcb700867ba14e85dfafdbd | 318aeabbb28b90a3fe25d002201027e673a8e4a4 | /scripts/fuzzy_willingness.py | 99fcf8f136d369e836e028a4e420e9fffb684dcd | [] | no_license | gitting-around/gitagent-sar | 5e9125a01f7c97e5e5fa1f7cdf3d5ab877d887ab | b1075ed4649f11bc38a75916e01cf3ecf8faa455 | refs/heads/master | 2021-04-06T01:58:18.319453 | 2018-05-07T08:33:49 | 2018-05-07T08:33:49 | 124,538,513 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,955 | py | #!/usr/bin/env python
#Fuzzy algorithm which considers only one input --> health
import sys
import random
import numpy as np
import skfuzzy as fuzz
from skfuzzy import control as ctrl
#Define some parameters
hmin = 400
hmax = 4000
#hmin = 1
#hmax = 200000000
hdiff = 10
unitmin = 0
unitmax = 1
unitdiff = 0.1
#Define antecedents (inputs): holds variables and membership functions
#Health represents an aggregation of the values for battery, sensor, actuator and motor condition
health = ctrl.Antecedent(np.arange(hmin, hmax, hdiff), 'health')
#Best known agent to ask, in which helpfulness and success rate are combined beforehand, using a dot product
best_agent = ctrl.Antecedent(np.arange(unitmin, unitmax, unitdiff), 'best_agent')
#The environment represents a combined value of the danger associated with physical obstacles, and the general culture of the population
#as in the case of the best known agent, these can be combined using a dot product
environment = ctrl.Antecedent(np.arange(unitmin, unitmax, unitdiff), 'environment')
#Agent abilities and resources needed in the scope of one task could also be combined in order to be represented by one fuzzy input
abil_res = ctrl.Antecedent(np.arange(unitmin, unitmax, unitdiff), 'abil_res')
abil_res['some'] = fuzz.trapmf(abil_res.universe, [0.0, 0.0, 0.4, 0.4])
abil_res['all_&optional'] = fuzz.trapmf(abil_res.universe, [0.6, 0.6, 1.0, 1.0])
abil_res.view()
#The agent's own progress wrt to tasks, or plans in general could also serve as a trigger to interact or not
own_progress = ctrl.Antecedent(np.arange(unitmin, unitmax, unitdiff), 'own_progress')
#Fuzzy output, the willingness to ask for help
willingness = ctrl.Consequent(np.arange(unitmin, unitmax, unitdiff), 'willingness')
#Auto membership function population
health.automf(3)
best_agent.automf(3)
environment.automf(3)
own_progress.automf(3)
willingness.automf(3)
#health.view()
#willingness.view()
#Define rules
rules = []
## either poor health or only some of abilities and resources are enough to have high willingness to ask for help
rules.append(ctrl.Rule(health['poor'] | abil_res['some'] | own_progress['poor'], willingness['good']))
rules.append(ctrl.Rule((health['good'] | health['average']) & abil_res['all_&optional'] & (own_progress['good'] | own_progress['average']), willingness['poor']))
rules.append(ctrl.Rule(best_agent['good'] & health['average'] & abil_res['all_&optional'], willingness['average']))
rules.append(ctrl.Rule(best_agent['poor'] & health['average'] & abil_res['all_&optional'], willingness['poor']))
#
## View rules graphically
#rule1.view()
interact_ctrl = ctrl.ControlSystem(rules)
interact = ctrl.ControlSystemSimulation(interact_ctrl)
interact.input['health'] = 1000
interact.input['best_agent'] = 0.5
interact.input['abil_res'] = 0.7
interact.input['own_progress'] = 0.3
interact.compute()
print interact.output['willingness']
willingness.view(sim=interact)
#while True:
# pass
| [
"[email protected]"
] | |
4ed4b10a56b29656b1368f2736acf86ca60e3ca5 | 03ef5d01281af62beb5ea562ddc3512130334aa8 | /apiFeitaTeste/venv/bin/pip | 251a8be510dd5ac88902ff03111fddaba91d329b | [] | no_license | gregoryls1/apiFeiraTeste | d86bbbda1e8d3a681e1fbc6570496a8e885579c2 | 6cbf139ae1faf069d6a1d0df6258e05aed255289 | refs/heads/master | 2020-03-16T18:34:03.304600 | 2018-05-11T18:09:03 | 2018-05-11T18:09:03 | 132,878,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | #!/home/gregory/apiFeitaTeste/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip')()
)
| [
"[email protected]"
] | ||
7f377a78fed5bceca5225f286b162f4366085d4d | 9ae20f40b0a07352f7d5ec9c81a98a01039d9347 | /hazelcast/protocol/__init__.py | 22744369964eac18b0e4c219392a7c007d3ee2e3 | [
"Apache-2.0"
] | permissive | ihsanmertatalay/hazelcast-python-client | edbb4857dcae039a2dc6bdc3f70908ee4ce3609c | 364ee181d84a270086aff53e2356862e771f9025 | refs/heads/master | 2023-08-26T16:44:42.073356 | 2021-10-18T09:00:13 | 2021-10-18T09:00:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,362 | py | from hazelcast.six.moves import range
class ErrorHolder(object):
__slots__ = ("error_code", "class_name", "message", "stack_trace_elements")
def __init__(self, error_code, class_name, message, stack_trace_elements):
self.error_code = error_code
self.class_name = class_name
self.message = message
self.stack_trace_elements = stack_trace_elements
def __eq__(self, other):
return (
isinstance(other, ErrorHolder)
and self.error_code == other.error_code
and self.class_name == other.class_name
and self.message == other.message
and self.stack_trace_elements == other.stack_trace_elements
)
def __ne__(self, other):
return not self.__eq__(other)
class StackTraceElement(object):
__slots__ = ("class_name", "method_name", "file_name", "line_number")
def __init__(self, class_name, method_name, file_name, line_number):
self.class_name = class_name
self.method_name = method_name
self.file_name = file_name
self.line_number = line_number
def __eq__(self, other):
return (
isinstance(other, StackTraceElement)
and self.class_name == other.class_name
and self.method_name == other.method_name
and self.file_name == other.file_name
and self.line_number == other.line_number
)
def __ne__(self, other):
return not self.__eq__(other)
class RaftGroupId(object):
__slots__ = ("name", "seed", "id")
def __init__(self, name, seed, group_id):
self.name = name
self.seed = seed
self.id = group_id
def __eq__(self, other):
return (
isinstance(other, RaftGroupId)
and self.name == other.name
and self.seed == other.seed
and self.id == other.id
)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self.name, self.seed, self.id))
def __repr__(self):
return "RaftGroupId(name=%s, seed=%s, id=%s)" % (self.name, self.seed, self.id)
class AnchorDataListHolder(object):
__slots__ = ("anchor_page_list", "anchor_data_list")
def __init__(self, page_list, data_list):
self.anchor_page_list = page_list
self.anchor_data_list = data_list
def as_anchor_list(self, to_object):
object_list = []
for i in range(len(self.anchor_data_list)):
page = self.anchor_page_list[i]
key, value = self.anchor_data_list[i]
key = to_object(key)
value = to_object(value)
object_list.append((page, (key, value)))
return object_list
class PagingPredicateHolder(object):
__slots__ = (
"anchor_data_list_holder",
"predicate_data",
"comparator_data",
"page_size",
"page",
"iteration_type_id",
"partition_key_data",
)
def __init__(
self,
anchor_data_list_holder,
predicate_data,
comparator_data,
page_size,
page,
iteration_type_id,
partition_key_data,
):
self.anchor_data_list_holder = anchor_data_list_holder
self.predicate_data = predicate_data
self.comparator_data = comparator_data
self.page_size = page_size
self.page = page
self.iteration_type_id = iteration_type_id
self.partition_key_data = partition_key_data
@staticmethod
def of(predicate, to_data):
anchor_list = predicate.anchor_list
anchor_data_list = []
page_list = []
for page, (key, value) in anchor_list:
page_list.append(page)
key = to_data(key)
value = to_data(value)
anchor_data_list.append((key, value))
anchor_data_list_holder = AnchorDataListHolder(page_list, anchor_data_list)
predicate_data = to_data(predicate)
comparator_data = to_data(predicate.comparator)
iteration_type = predicate.iteration_type
return PagingPredicateHolder(
anchor_data_list_holder,
predicate_data,
comparator_data,
predicate.page_size,
predicate.page,
iteration_type,
None,
)
| [
"[email protected]"
] | |
2be58aa42be5d9593ef75e0651cfefb8cbdd0f51 | 3b66632458e2463db62a800f9a0cf9e13c71a47e | /examples/template_tfe_multiple_optimizers/edflow.py | d0c88a39f566f2597d3f743824684633f16c3834 | [
"MIT"
] | permissive | pesser/edflow | eddb6d9341b861670946c157363933e9add52288 | 317cb1b61bf810a68004788d08418a5352653264 | refs/heads/dev | 2022-12-09T05:19:35.850173 | 2020-07-21T16:29:15 | 2020-07-21T16:29:15 | 146,750,121 | 27 | 15 | MIT | 2022-12-07T20:55:50 | 2018-08-30T12:59:11 | Python | UTF-8 | Python | false | false | 7,162 | py | import functools
import tensorflow as tf
tf.enable_eager_execution()
import tensorflow.keras as tfk
import numpy as np
from edflow import TemplateIterator, get_logger
class FullLatentDistribution(object):
# TODO: write some comment on where this comes from
def __init__(self, parameters, dim, stochastic=True):
self.parameters = parameters
self.dim = dim
self.stochastic = stochastic
ps = self.parameters.shape.as_list()
if len(ps) != 2:
self.expand_dims = True
self.parameters = tf.reshape(self.parameters, (ps[0], ps[3]))
ps = self.parameters.shape.as_list()
else:
self.expand_dims = False
assert len(ps) == 2
self.batch_size = ps[0]
event_dim = self.dim
n_L_parameters = (event_dim * (event_dim + 1)) // 2
size_splits = [event_dim, n_L_parameters]
self.mean, self.L = tf.split(self.parameters, size_splits, axis=1)
# L is Cholesky parameterization
self.L = tf.contrib.distributions.fill_triangular(self.L)
# make sure diagonal entries are positive by parameterizing them
# logarithmically
diag_L = tf.linalg.diag_part(self.L)
self.log_diag_L = diag_L # keep for later computation of logdet
diag_L = tf.exp(diag_L)
# scale down then set diags
row_weights = np.array([np.sqrt(i + 1) for i in range(event_dim)])
row_weights = np.reshape(row_weights, [1, event_dim, 1])
self.L = self.L / row_weights
self.L = tf.linalg.set_diag(self.L, diag_L)
self.Sigma = tf.matmul(self.L, self.L, transpose_b=True) # L times L^t
ms = self.mean.shape.as_list()
self.event_axes = list(range(1, len(ms)))
self.event_shape = ms[1:]
assert len(self.event_shape) == 1, self.event_shape
@staticmethod
def n_parameters(dim):
return dim + (dim * (dim + 1)) // 2
def sample(self, noise_level=1.0):
if not self.stochastic:
out = self.mean
else:
eps = noise_level * tf.random_normal([self.batch_size, self.dim, 1])
eps = tf.matmul(self.L, eps)
eps = tf.squeeze(eps, axis=-1)
out = self.mean + eps
if self.expand_dims:
out = tf.expand_dims(out, axis=1)
out = tf.expand_dims(out, axis=1)
return out
def kl(self, other=None):
if other is not None:
raise NotImplemented("Only KL to standard normal is implemented.")
delta = tf.square(self.mean)
diag_covar = tf.reduce_sum(tf.square(self.L), axis=2)
logdet = 2.0 * self.log_diag_L
kl = 0.5 * tf.reduce_sum(
diag_covar - 1.0 + delta - logdet, axis=self.event_axes
)
kl = tf.reduce_mean(kl)
return kl
class Model(tfk.Model):
def __init__(self, config):
super().__init__()
self.z_dim = config["z_dim"]
self.n_z_params = FullLatentDistribution.n_parameters(self.z_dim)
self.lr = config["lr"]
self.encode = tfk.Sequential(
[
tfk.layers.Dense(
1000,
kernel_initializer="he_uniform",
bias_initializer="random_uniform",
),
tfk.layers.LeakyReLU(0.1),
tfk.layers.Dense(
500,
kernel_initializer="he_uniform",
bias_initializer="random_uniform",
),
tfk.layers.LeakyReLU(0.1),
tfk.layers.Dense(
300,
kernel_initializer="he_uniform",
bias_initializer="random_uniform",
),
tfk.layers.LeakyReLU(0.1),
tfk.layers.Dense(
self.n_z_params,
kernel_initializer="he_uniform",
bias_initializer="random_uniform",
),
]
)
self.decode = tfk.Sequential(
[
tfk.layers.Dense(300, kernel_initializer="he_uniform"),
tfk.layers.LeakyReLU(0.1),
tfk.layers.Dense(500, kernel_initializer="he_uniform"),
tfk.layers.LeakyReLU(0.1),
tfk.layers.Dense(1000, kernel_initializer="he_uniform"),
tfk.layers.LeakyReLU(0.1),
tfk.layers.Dense(784, kernel_initializer="he_uniform"),
tfk.layers.Activation(tf.nn.tanh),
]
)
input_shape = (config["batch_size"], 28 ** 2)
self.build(input_shape)
self.submodels = {"decoder": self.decode, "encoder": self.encode}
def call(self, x):
x = tf.reshape(x, (-1, 28 ** 2))
posterior_params = self.encode(x)
posterior_distr = FullLatentDistribution(posterior_params, self.z_dim)
posterior_sample = posterior_distr.sample()
rec = self.decode(posterior_sample)
rec = tf.reshape(rec, (-1, 28, 28, 1))
output = {"x": x, "posterior_distr": posterior_distr, "rec": rec}
return output
class Iterator(TemplateIterator):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# loss and optimizers
self.optimizers = {
submodel_name: tf.compat.v1.train.AdamOptimizer(learning_rate=self.model.lr)
for submodel_name, submodel in self.model.submodels.items()
}
# to save and restore
self.tfcheckpoint = tf.train.Checkpoint(model=self.model, **self.optimizers)
def save(self, checkpoint_path):
self.tfcheckpoint.write(checkpoint_path)
def restore(self, checkpoint_path):
self.tfcheckpoint.restore(checkpoint_path)
def step_op(self, model, **kwargs):
# get inputs
losses = {}
inputs = kwargs["image"]
# compute loss
with tf.GradientTape(persistent=True) as tape:
outputs = model(inputs)
loss = tf.reduce_mean(
tf.reduce_sum(tf.square(inputs - outputs["rec"]), axis=(1, 2, 3))
)
            loss_kl = outputs["posterior_distr"].kl()
losses["encoder"] = loss + loss_kl
losses["decoder"] = loss
def train_op():
for loss_name, loss in losses.items():
optimizer = self.optimizers[loss_name]
submodel = self.model.submodels[loss_name]
params = submodel.trainable_variables
grads = tape.gradient(loss, params)
optimizer.apply_gradients(zip(grads, params))
image_logs = {"rec": np.array(outputs["rec"]), "x": np.array(inputs)}
scalar_logs = {"loss_rec": loss, "loss_kl": loss_kl}
def log_op():
return {
"images": image_logs,
"scalars": scalar_logs,
}
def eval_op():
eval_outputs = {}
eval_outputs.update(image_logs)
return eval_outputs
return {"train_op": train_op, "log_op": log_op, "eval_op": eval_op}
| [
"[email protected]"
] | |
f5dd8c1bee04adeb3f5e226f1533e97d2e71d4cb | 32083cd96b3d5c784a4ecc6885a463110c4bb090 | /primeNumber.py | bdd2a5be83cf2bf97b75dc44843771377a485a93 | [] | no_license | ManrajSingh1805/Prime-Numbers | 98e463864f6fef615e352b2fddf4ef236ff180dd | 61f5ec3f0473d1ddeaf186d60ac441e4fba644dc | refs/heads/main | 2022-12-22T16:13:11.767902 | 2020-10-03T18:53:12 | 2020-10-03T18:53:12 | 300,952,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | # THIS code will help people tell whether a number is prime or non prime.
fim = input("Enter a number to check whether it's a prime or not:")
p=None
y= None
import sys
try:
f= int(fim)
except:
print("Not a number")
sys.exit()
h = f / 2
i = 2
p = 0
while i <= h:
    if f % i == 0:
        p = 1
        break
    i = i + 1
if p ==0:
print("Yes,",f,"is a prime number")
elif p==1:
print("No,",f," is not a prime number")
| [
"[email protected]"
] | |
42c475f886494994630be73c948f242ac4e2bd17 | 706e57fbb805e37d8b36e3d6bde01fc9981eb9a1 | /Advent2015/01/day1.py | e1f0cbc045444705fb9fc78969e9bbc49049c8e3 | [] | no_license | micaswyers/Advent | f4257d7454b2408c38aa7ab01ce2c71ab28862e1 | c0ed8f1669dd7f1053cd989c7cb8600a5571557e | refs/heads/main | 2022-12-09T06:24:20.949096 | 2022-12-08T21:03:49 | 2022-12-08T21:03:49 | 47,536,546 | 2 | 0 | null | 2022-12-07T18:17:20 | 2015-12-07T07:32:46 | Python | UTF-8 | Python | false | false | 827 | py | # Part 1
def parse_floors(directions):
"""Determine ending floor for Santa based on () directions.
Args:
directions: Some string composed of ( & )
Returns:
start_floor: Int representing Santa's final floor
"""
start_floor = 0
for letter in directions:
if letter == "(":
start_floor += 1
elif letter == ")":
start_floor -= 1
return start_floor
# Part 2
def parse_floors2(directions):
"""Return position of first basement character (-1)"""
start_floor = 0
    for pos, char in enumerate(directions):
        if char == "(":
            start_floor += 1
        elif char == ")":
            start_floor -= 1
        if start_floor < 0:
            return "Santa's in the basement after direction #: %s" % (pos + 1,)
| [
"[email protected]"
] | |
14da982650fc9436cd96801a7cb3dd626f19761d | 31b9451a61773b765ccf1d8aee54b9caa338cef3 | /lesson5_jan31/lesson5_ex_db.py | 33af83777e2353504d169e116b5eb00255c1ad6d | [] | no_license | pninitd/devopscourse | 4f5d71b0e3581bf982acf07ab565451256b7133a | 06472ce6dbc4402859c2d6d8d024eda019fec24c | refs/heads/master | 2023-04-25T16:15:58.966551 | 2021-04-28T06:09:41 | 2021-04-28T06:09:41 | 338,870,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,996 | py | import json
import pymysql
db_con = ''
db_cur = ''
def open_connection():
user = 'QzeMAiiWP6'
password = 'r8Dopb5X8y'
dbname = 'QzeMAiiWP6'
schema = 'QzeMAiiWP6'
host = 'remotemysql.com'
port = 3306
global db_con
# Establishing a connection to DB
db_con = pymysql.connect(host=host, port=port, user=user, passwd=password, db=dbname, cursorclass=pymysql.cursors.DictCursor)
db_con.autocommit(True)
def querydb(sql):
if db_con == '':
# open connection
open_connection()
# Getting a cursor from Database
global db_cur
db_cur = db_con.cursor()
# Getting all data from table “users”
db_cur.execute(sql)
def insert_dogs(name, age, breed):
sql = "INSERT into QzeMAiiWP6.dogs (name, age, breed) VALUES ('%s', %d, '%s');" % (name, age, breed)
querydb(sql)
return True
def update_dog_age(name, age):
sql = "UPDATE QzeMAiiWP6.dogs SET age = %d WHERE name ='%s';" % (age, name)
querydb(sql)
def delete_dog_by_name(name):
sql = "DELETE FROM QzeMAiiWP6.dogs WHERE name ='%s';" % name
querydb(sql)
def print_dogs_name():
sql = "SELECT name FROM QzeMAiiWP6.dogs;"
querydb(sql)
for row in db_cur:
print(row)
# function for part 2
def print_all_as_json():
sql = "SELECT * FROM QzeMAiiWP6.dogs;"
querydb(sql)
data = db_cur.fetchall()
results = json.dumps(data, indent=2)
# print(results)
return results
# print_all_as_json()
def close_connection():
if db_cur != '':
db_cur.close()
if db_con != '':
db_con.close()
# 1
# DataGrip ddl:
# create table dogs
# (
# name varchar(40) not null,
# age int not null,
# breed varchar(30) not null
# );
# 2
# insert_dogs('peki', 2, 'pekingese')
# insert_dogs('goldi', 3, 'golden retriever')
# insert_dogs('arthur', 4, 'shih tzu')
# 3
# update_dog_age('goldi', 1)
# 4
# delete_dog_by_name('arthur')
# 5
# print_dogs_name()
# close all connections
close_connection()
| [
"[email protected]"
] | |
e863dd63cbdebcdde8ab59e51515cb89041ea034 | f1048b499804ecdef80a0b0b5c5d3a50b0da8f3e | /venv/bin/soup | ff723f94b7631133e63d47c471e7404e039d92f9 | [] | no_license | ps0317ix/mansionProject | 1c274d66dc5675d60ce43dba7f31e2c9822f9710 | 98628df47d1304b3da5783c4990b91fc5e1abdca | refs/heads/master | 2023-02-19T01:38:38.708880 | 2020-10-15T11:55:34 | 2020-10-15T11:55:34 | 332,578,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 992 | #!/Users/yukinoguchi/PycharmProjects/mansionProject/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'soup==0.1.0','console_scripts','soup'
import re
import sys
# for compatibility with easy_install; see #2198
__requires__ = 'soup==0.1.0'
try:
from importlib.metadata import distribution
except ImportError:
try:
from importlib_metadata import distribution
except ImportError:
from pkg_resources import load_entry_point
def importlib_load_entry_point(spec, group, name):
dist_name, _, _ = spec.partition('==')
matches = (
entry_point
for entry_point in distribution(dist_name).entry_points
if entry_point.group == group and entry_point.name == name
)
return next(matches).load()
globals().setdefault('load_entry_point', importlib_load_entry_point)
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(load_entry_point('soup==0.1.0', 'console_scripts', 'soup')())
| [
"[email protected]"
] | ||
1ce01c3e5eafef0398c727f2132d92cef69b14ab | 2b93a5f46980e475375e796de139ed46a53809a6 | /Functions/Calculator.py | 0095b099701cd2e5bbf751a81ce9b7acc2e6e00c | [] | no_license | ravi4all/PythonMay_11 | c9996cb0a2741a5a022c74129aa21c1f4b495aba | f0f3fb5e99a67e704df2a109a7af3d8d18010f9d | refs/heads/master | 2020-03-16T21:28:06.549963 | 2018-05-30T05:34:50 | 2018-05-30T05:34:50 | 133,000,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | def add(x,y):
z = x + y
print("Addition is",z)
def sub(x,y):
z = x - y
print("Subtraction is",z)
def div(x,y):
z = x / y
print("Division is",z)
def mul(x,y):
z = x * y
print("Multiplication is",z)
# Menu Driven Programs
print("""
1. Add
2. Sub
3. Div
4. Mul
""")
user_choice = input("Enter your choice : ")
num_1 = int(input("Enter first number : "))
num_2 = int(input("Enter second number " ))
todo = {
"1" : add,
"2" : sub,
"3" : div,
"4" : mul
}
func = todo.get(user_choice)
# print(func)
func(num_1, num_2) | [
"[email protected]"
] | |
6f09ba818f72b8483cc98fb36385efd7f148d4be | 020f3a9bed41fc4463f97f0add254d235ad63c5e | /repository/UserRepo.py | 02fd7d5d5b0804e30256ce370fc76260e9fb91c3 | [] | no_license | Blank199/MobileAppServer | ee0611710d1ad308657b7370048bc6540ec24696 | d9ea80bc4e44e5d38c40450d5d7db360b01ccb21 | refs/heads/master | 2023-02-12T23:52:16.792417 | 2021-01-09T13:48:58 | 2021-01-09T13:48:58 | 308,892,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | from tinydb import Query
from domain.User import *
class UserRepo:
def __init__(self, db):
self.db = db
self.table = db.table("Users")
def addUser(self, user):
self.table.insert(user.toDict())
def returnOne(self, username):
user = Query()
elem = self.table.search(user.username == username)
result = User(**elem[0])
return result
def verifyLogin(self, myUser):
user = Query()
elem = self.table.search(user.username == myUser.username)
result = User(**elem[0])
if result.password != myUser.password:
result = None
return result
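# Example usage: a minimal sketch, not part of the original file; it assumes the User model in domain.User
# can be constructed from keyword fields (as suggested by User(**elem[0]) above), e.g.:
#   from tinydb import TinyDB
#   repo = UserRepo(TinyDB("users.json"))
#   repo.addUser(User(username="ana", password="secret"))
#   logged_in = repo.verifyLogin(User(username="ana", password="secret"))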
| [
"[email protected]"
] | |
5aee269541c1ab7a359961a7d1f60d4495f2c529 | a23436abc25f6526ade03c6c68651bf7ac8c83aa | /env/bin/alembic | 9cf917118f3776bbbea669b6906e55d2d9125ba5 | [] | no_license | annejones817/pettracker-cloud9 | f92f8d93b3870130104e4ab40c6fcf5794b77b41 | fb13eae9ee6240ad6944a177ea7df63897eb1d8e | refs/heads/master | 2021-01-12T10:15:43.145496 | 2016-12-13T21:34:11 | 2016-12-13T21:34:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | #!/home/ubuntu/workspace/thinkful/capstones/pettracker/env/bin/python3
# EASY-INSTALL-ENTRY-SCRIPT: 'alembic==0.8.9','console_scripts','alembic'
__requires__ = 'alembic==0.8.9'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('alembic==0.8.9', 'console_scripts', 'alembic')()
)
| [
"[email protected]"
] | ||
e6bd6f44f4b8d52a1fe03affd4b5296e02733784 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03435/s544670590.py | b7a33f056509c825aa6f270f9dacfc4421f64bb9 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | c1= list(map(int, input().split()))
c2 = list(map(int, input().split()))
c3 = list(map(int, input().split()))
a1 = 0
a2 = c2[0] - c1[0]
a3 = c3[0] - c1[0]
b1 = c1[0]
b2 = c1[1]
b3 = c1[2]
if c1[0] == a1 + b1 and c1[1] == a1 + b2 and c1[2] == a1 + b3 and c2[0] == a2 + b1 and c2[1] == a2 + b2 and c2[2] == a2 + b3 and c3[0] == a3 + b1 and c3[1] == a3 + b2 and c3[2] == a3 + b3:
print('Yes')
else:
print('No') | [
"[email protected]"
] | |
70393418981766bfdf6ee019e49f8ca4810916a3 | 56b8dc29292e32d76c27a6613d693f7a27ab1fa0 | /converters/main_converter.py | e6f5890c9743a6d36dccb730bc86cc1dcb2e4a81 | [] | no_license | benrap/Assembly2Brainfuck | 4cbb121975e8f43db7414d80c89679d69736468c | 4c2ff2f4210bbb29398faff320af86e45fe42f13 | refs/heads/master | 2022-05-29T17:03:31.338498 | 2020-05-03T22:23:17 | 2020-05-03T22:23:17 | 260,976,111 | 10 | 0 | null | null | null | null | UTF-8 | Python | false | false | 878 | py | import sys
from inspect import getmembers, isfunction, getsource
import converters.ASM2HBF
import converters.HBF2BF
from constants.CONSTANTS import CASM, HBF, BF, N, M
def ASM2HBF(code):
return converters.ASM2HBF.ASM2HBF(code)
def HBF2BF(code, n, m):
source = getsource(converters.HBF2BF)
functions_list = [o for o in getmembers(converters.HBF2BF) if isfunction(o[1])]
functions_list.sort(key=lambda x: source.find(x[0]))
for f in functions_list[::-1]:
code = f[1](code, n, m)
return code
def convert(start, end, code, **kwargs):
if start == CASM:
if end == HBF:
return ASM2HBF(code)
elif end == BF:
return HBF2BF(ASM2HBF(code), N, M)
elif start == HBF:
if end == BF:
if 'n' in kwargs and 'm' in kwargs:
                return HBF2BF(code, kwargs['n'], kwargs['m'])
else:
return HBF2BF(code, N, M)
if __name__ == '__main__':
print(convert(HBF, BF, "(dec)", n=4, m=5))
| [
"[email protected]"
] | |
0e13abc5d0f1db296489986d8c87fe051cd753f9 | ecb1825d126a4100a1617637db4d296e5688b091 | /Preprocessing/SignalD.py | ef60adb51991654f655bd91624994f5eeefb8d16 | [] | no_license | abhi1998das/MMHAREnsemNet | 0194a4a19ed3b09cd273746ff373f81060cf1081 | 416ca226450fadf6cd17a92069ca6f5c4e2e0b2e | refs/heads/master | 2022-12-25T09:21:19.599959 | 2020-09-16T08:47:55 | 2020-09-16T08:47:55 | 295,641,161 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,220 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 10 04:07:23 2020
@author: taichi10
"""
import numpy as np
import os
from PIL import Image
import glob
import tqdm
import math
import time
from pathlib import Path
from sys import exit
from scipy.misc import imresize  # imresize() is used below; available in SciPy <= 1.2
data_path = "/home/taichi10/Documents/college/ML/Signal"
# jadlist is assumed to be defined earlier in the full script (e.g. loaded from the .npy files under data_path)
jadlist2 = []
for i in range(len(jadlist) - 1):
    jadlist2.append(jadlist[i + 1] - jadlist[i])
    #maximum = np.max(jadlist)
    #minimum = np.min(jadlist)
jadlist2 = np.array(jadlist2)
for i in range(jadlist2.shape[0]):
maximum = np.max(jadlist2[i])
minimum = np.min(jadlist2[i])
#print(maximum,minimum)
jadlist2[i,:] = np.floor( (jadlist2[i,:] - minimum) / (maximum -minimum) * (255.0-0) )
print(jadlist.shape)
#print(jadlist)
jadlist2 = imresize(jadlist2,(265,4620),interp='bicubic')
im = Image.fromarray(jadlist2)
#filepath = path
trainnames = ['bd' , 'mm' ]
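# NOTE: subject, action and filename below are assumed to be set by an outer loop over the dataset files (not shown in this fragment)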
if subject in trainnames:
filepath = r'C:/Users/AVI/Desktop/HDM05/trainang/'
else:
filepath = r'C:/Users/AVI/Desktop/HDM05/valang/'
filedir = filepath + action
if not os.path.exists(filedir):
os.mkdir(filedir)
filepath = filepath + action + r'/' + filename.replace('.npy','.jpg')
im.save(filepath) | [
"[email protected]"
] | |
465c90222f3efec528b9e202c2192587eac18c06 | 0b893ad7ce787539a7534dfdb9b62908fddafdab | /src/api/welcome_api.py | df5482276aee4eaa6c6389d856a2ac89c21be6c0 | [] | no_license | dingdan539/healer | 7a990bd4769568e1f6aa0c8022e2ff1582c7f5c8 | 386ed5c76235aaa9e6de2665e3eb9b7ed74a98db | refs/heads/master | 2021-01-10T12:04:45.283571 | 2016-04-26T10:38:12 | 2016-04-26T10:38:12 | 45,183,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | # -*- coding:utf-8 -*-
import falcon
import json
import os
from father import Father
class WelcomeApi(Father):
"""master have the api_path"""
api_path = "/hc"
error_msg = "" # 提供给AuthMiddleware做判断,如果不为空,说明有错误,日志记录要记录进去
no_auth = True
def on_get(self, req, resp):
resp.body = 'ok'
resp.status = falcon.HTTP_OK | [
"[email protected]"
] | |
6c5bf8f2813dab2bf186ad429a1552e331903925 | 328b339d8befaa515593876db30dc2a0a1c3eba7 | /python/set1/single_xor_file.py~ | c7c027be93608514b66f708fbeb0c0eec386df6f | [] | no_license | Dhole/matasano | f9731877aedc8a60a1e6a3d621ac36266c4a137b | 4c9fa062f595ac6c853ae5bdfe3227b264269949 | refs/heads/master | 2020-06-02T21:51:45.349825 | 2016-10-31T06:20:11 | 2016-10-31T06:20:11 | 27,048,347 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,181 | #! /usr/bin/env python3
import sys, operator
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ 1234567890\',.?!-"$#=*<>/'
ETAOIN = 'ETAOIN SHRDLCUMWFGYPBVKJXQZ1234567890\',.?!"$#=*<>/'
def xor(a, b):
x = b''
for n in range(len(a)):
x += bytes([a[n] ^ b[n]])
return x
def letterCount(msg):
count = {}
for l in LETTERS:
count[l] = 0
for l in msg.upper():
if l in LETTERS:
count[l] += 1
return count
line = sys.stdin.readline()[:-1]
scores = {}
for n in range(128):
xored = xor(bytes.fromhex(line), (chr(n) * len(line)).encode())
try:
count = letterCount(xored.decode('utf-8'))
sorted_count = sorted(count.items(), key=operator.itemgetter(1), reverse=True)
sorted_vals = [x[0] for x in sorted_count]
s = 0
for p in range(len(ETAOIN)):
s += abs(p - sorted_vals.index(ETAOIN[p]))
#print(p, sorted_vals.index(ETAOIN[p]))
scores[n] = s
#print(sorted_count)
except UnicodeError:
pass
k = chr(min(scores.items(), key=operator.itemgetter(1))[0])
print(xor(bytes.fromhex(line), (k * len(line)).encode()).decode('utf-8'))
| [
"[email protected]"
] | ||
2983a873d801ab0ebf4602895cbc7188cd6c679e | f02bf676e5fea7a94814b5afaf46e762f1781489 | /src/resource.py | 8ca43fe2a5ce19f8cf911be5902db91c21a1af32 | [] | no_license | PASTAplus/pastaplus_adapter | b514a0274eada7543fc742fbd7757b13c0e58940 | 369688ea41bf87dd27e50eb89376ce894dc828b1 | refs/heads/master | 2020-04-02T01:51:52.549760 | 2017-08-12T19:47:07 | 2017-08-12T19:47:07 | 83,488,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,851 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""":Mod: resource
:Synopsis:
:Author:
servilla
:Created:
3/10/17
"""
import logging
import hashlib
import xml.etree.ElementTree as ET
import requests
import d1_common.resource_map
import d1_common.types.exceptions
import d1_common.types.generated.dataoneTypes_v1 as dataoneTypes_v_1
import d1_common.types.generated.dataoneTypes_v2_0 as dataoneTypes_v2_0
import adapter_exceptions
import adapter_utilities
import properties
from sys_meta import SysMeta
logger = logging.getLogger('resource')
class ResourceBase(object):
def __init__(self, url=None, owner=None):
logger.info('Building resource: {r}'.format(r=url))
self._acl = None
self._checksum_value = None
self._checksum_algorithm = None
self._d1_sys_meta = None
self._file_name = None
self._format_identifier = None
self._identifier = url
self._object = None
self._owner = owner
self._predecessor = None
self._replication_policy = None
self._rights_holder = properties.DEFAULT_RIGHTS_HOLDER
self._size = None
self._url = url
self._vendor_specific_header = None
def _get_checksum_value(self, path, replacement):
"""
Set the checksum value and algorithm for the given resource
:param path: PASTA resource path fragment
:param replacement: Modified path fragment for checksum value
:return: None
"""
url = self._url.replace(path, replacement)
r = adapter_utilities.requests_get_url_wrapper(url=url)
if r is not None:
return r.text.strip()
def _get_acl(self, path, replacement):
"""
Return the EML access control list of principals and permissions
:param path: PASTA resource path fragment
:param replacement: Modified path fragment for PASTA EML ACL
:param owner: Data package principal owner
:return: Access control list
"""
auth = (properties.GMN_USER, properties.GMN_PASSWD)
eml_acl = None
url = self._url.replace(path, replacement)
r = adapter_utilities.requests_get_url_wrapper(url=url, auth=auth)
if r is not None:
eml_acl = r.text.strip()
acl = []
if eml_acl is not None:
tree = ET.ElementTree(ET.fromstring(eml_acl))
for allow_rule in tree.iter('allow'):
principal = allow_rule.find('./principal')
permission = allow_rule.find('./permission')
acl.append(
{'principal': principal.text,
'permission': permission.text})
if self._owner is not None:
acl.append({'principal': self._owner,
'permission': 'changePermission'})
return acl
@property
def acl(self):
return self._acl
@acl.setter
def acl(self, a):
self._acl = a
def get_d1_sys_meta(self):
"""
Return a D1 system metadata object for the given resource as pyxb
object.
:return: D1 system metadata as a pyxb object
"""
sm = SysMeta()
sm.access_policy = self._acl
sm.checksum_algorithm = self._checksum_algorithm
sm.checksum_value = self._checksum_value
sm.format_identifier = self._format_identifier
sm.identifier = self._identifier
sm.replication_policy = self._replication_policy
sm.rights_holder = self._rights_holder
sm.size = self._size
return sm.d1_sys_meta()
@property
def identifier(self):
return self._identifier
@property
def object(self):
return self._object
@property
def owner(self):
return self._owner
@property
def url(self):
return self._url
@property
def vendor_specific_header(self):
return self._vendor_specific_header
class ResourceMetadata(ResourceBase):
def __init__(self, url=None, owner=None):
super(ResourceMetadata,self).__init__(url, owner)
self._acl = self._get_acl('/metadata/eml/', '/metadata/acl/eml/')
self._checksum_value = \
self._get_checksum_value('/metadata/eml/', '/metadata/checksum/eml/')
self._checksum_algorithm = properties.CHECKSUM_ALGORITHM
self._format_identifier = self._get_format_id()
self._size = self._get_size()
self._vendor_specific_header = {'VENDOR-GMN-REMOTE-URL': url}
def _get_format_id(self):
d1_formats = adapter_utilities.get_d1_formats()
format_id = None
url = self._url.replace('/metadata/eml/', '/metadata/format/eml/')
r = adapter_utilities.requests_get_url_wrapper(url=url)
if r is not None:
eml_version = r.text.strip()
if eml_version in d1_formats:
format_id = d1_formats[eml_version].formatId
return format_id
def _get_size(self):
size = None
r = adapter_utilities.requests_get_url_wrapper(url=self._url)
if r is not None:
size = int(r.headers['Content-Length'])
return size
@property
def predecessor(self):
return self._predecessor
@predecessor.setter
def predecessor(self, pred):
identifier = properties.PASTA_BASE_URL + 'metadata/eml/' + \
pred.replace('.', '/')
self._predecessor = identifier
class ResourceReport(ResourceBase):
def __init__(self, url=None, owner=None):
super(ResourceReport,self).__init__(url, owner)
self._acl = self._get_acl('/report/eml/', '/report/acl/eml/')
self._checksum_value = \
self._get_checksum_value('/report/eml/', '/report/checksum/eml/')
self._checksum_algorithm = properties.CHECKSUM_ALGORITHM
self._format_identifier = 'text/xml'
self._size = self._get_size()
self._vendor_specific_header = {'VENDOR-GMN-REMOTE-URL': url}
def _get_size(self):
size = None
r = adapter_utilities.requests_get_url_wrapper(url=self._url)
if r is not None:
size = int(r.headers['Content-Length'])
return size
class ResourceData(ResourceBase):
def __init__(self, url=None, owner=None):
super(ResourceData,self).__init__(url, owner)
self._acl = self._get_acl('/data/eml/', '/data/acl/eml/')
self._checksum_value = \
self._get_checksum_value('/data/eml/', '/data/checksum/eml/')
self._checksum_algorithm = properties.CHECKSUM_ALGORITHM
self._format_identifier = self._get_format_id()
self._size = self._get_size()
self._vendor_specific_header = {'VENDOR-GMN-REMOTE-URL': url}
def _get_format_id(self):
d1_formats = adapter_utilities.get_d1_formats()
format_id = None
try:
r = requests.head(self._url, allow_redirects=True)
if r.status_code == requests.codes.ok:
content_type = r.headers['Content-Type']
if content_type in d1_formats:
format_id = d1_formats[content_type].formatId
else:
format_id = 'application/octet-stream'
except Exception as e:
logger.error(e)
return format_id
def _get_size(self):
size = None
url = self._url.replace('/data/eml/', '/data/size/eml/')
r = adapter_utilities.requests_get_url_wrapper(url=url)
if r is not None:
size = int(r.text.strip())
return size
class ResourceOre(ResourceBase):
def __init__(self, doi=None, owner=None, resources=None):
super(ResourceOre,self).__init__(doi, owner)
ore_xml = _build_ore(pid=doi, resources=resources)
self._checksum_algorithm = 'SHA-1'
self._checksum_value = hashlib.sha1(ore_xml).hexdigest()
self._format_identifier = 'http://www.openarchives.org/ore/terms'
self._object = ore_xml
self._resources = None
self._size = len(ore_xml)
@property
def predecessor(self):
return self._predecessor
@predecessor.setter
def predecessor(self, doi):
self._predecessor = doi
def _build_ore(pid=None, resources=None):
data = []
data.append(resources[properties.METADATA].identifier)
data.append(resources[properties.REPORT].identifier)
for data_resource in resources[properties.DATA]:
data.append(data_resource.identifier)
ore = d1_common.resource_map.ResourceMap(base_url=properties.D1_BASE_URL)
ore.oreInitialize(pid=pid)
ore.addMetadataDocument(pid=resources[properties.METADATA].identifier)
ore.addDataDocuments(scidata_pid_list=data, scimeta_pid=resources[properties.METADATA].identifier)
return ore.serialize()
| [
"[email protected]"
] | |
1b168c8660752d0007441aec85f837fc3f33b6f2 | ca831a9dc9d6dc0b2cedc4d998b26600439b5f10 | /python/numpy/q4_np_concatenate.py | d47d611fb6d9b69f1a21103ef45d97b89b76e8e9 | [
"MIT"
] | permissive | mxdzi/hackerrank | c2579f4351fba5af1dec21a49485e043421c2dd8 | c8da62ac39a0c24f535eded74c102a9c0ccd7708 | refs/heads/master | 2022-12-26T20:10:36.948961 | 2022-12-08T18:27:51 | 2022-12-08T18:27:51 | 225,469,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | import numpy
def main():
N, M, P = map(int, input().split())
array1 = numpy.array([input().split() for _ in range(N)], int)
array2 = numpy.array([input().split() for _ in range(M)], int)
print(numpy.concatenate((array1, array2)))
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
febe416c583126b6493c589aee13a27726c07b5f | 876cd0697e019009bd513a43e6d82c997111ad81 | /watches/urls.py | 5804402826df94732ac23a34136ee982077514b5 | [] | no_license | l0nelyhermit/p4-timeless | 2d9a141f8f767228dfd069bcb534fc114c778bcc | e1c88b8a629be5886a511b5dcedfc7b0e68ce13e | refs/heads/master | 2023-04-03T12:56:17.702090 | 2021-03-18T03:22:05 | 2021-03-18T03:22:05 | 348,276,123 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | from django.urls import path
from .views import index, create_post, show_post
urlpatterns = [
path('', index, name='index'),
path('create_post/', create_post, name='create_post'),
path('show_post/', show_post, name='show_post'),
]
| [
"[email protected]"
] | |
4b11dd4cd2213194d38521b0f83f8f3b572200c8 | d79c152d072edd6631e22f886c8beaafe45aab04 | /nicolock/users/migrations/0004_user_user_type.py | 92f9060fd70d6a9a282d30ca0b5d03f2722b99e2 | [] | no_license | kabroncelli/Nicolock | 764364de8aa146721b2678c14be808a452d7a363 | 4c4343a9117b7eba8cf1daf7241de549b9a1be3b | refs/heads/master | 2020-03-11T11:02:43.074373 | 2018-04-18T17:38:33 | 2018-04-18T17:38:33 | 129,959,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-06 23:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0003_companyprofile'),
]
operations = [
migrations.AddField(
model_name='user',
name='user_type',
field=models.CharField(choices=[('homeowner', 'Homeowner'), ('contractor', 'Contractor')], default='contractor', max_length=12, verbose_name='user type'),
preserve_default=False,
),
]
| [
"[email protected]"
] | |
9e62d7926cfd8cb4e1ee9e4ddcbb8f46125fd006 | 46eb1dc4aaa30c5af38eb0c8464cd198dd671b58 | /applications/migrations/0020_new_applications_20200402_1036.py | 0e08d1676ed8c671d42694bc78619c54cc25f47d | [
"MIT"
] | permissive | oxford-hack/registration | e94250aa0fb709d2491394dee8b7ad7c07a31dad | d729f9a990d6f63dc29e14c1657dba87c3233971 | refs/heads/master | 2023-01-09T12:43:28.910506 | 2020-11-12T15:54:37 | 2020-11-12T15:54:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,814 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2020-04-02 17:36
from __future__ import unicode_literals
import applications.validators
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.utils.timezone
import uuid
def convert_forwards(apps, schema_editor):
convert(apps, schema_editor, False)
def convert_backwards(apps, schema_editor):
convert(apps, schema_editor, True)
def convert(apps, schema_editor, reverse):
Application = apps.get_model("applications", "Application")
HackerApplication = apps.get_model("applications", "HackerApplication")
USE = {True: HackerApplication, False: Application}
for application in USE.get(reverse).objects.all():
USE[not reverse](
uuid=application.uuid,
user=application.user,
submission_date=application.submission_date,
status_update_date=application.status_update_date,
status=application.status,
gender=application.gender,
other_gender=application.other_gender,
under_age=application.under_age,
phone_number=application.phone_number,
diet=application.diet,
other_diet=application.other_diet,
tshirt_size=application.tshirt_size,
origin=application.origin,
first_timer=application.first_timer,
lennyface=application.lennyface,
graduation_year=application.graduation_year,
university=application.university,
degree=application.degree,
contacted=application.contacted,
description=application.description,
reimb=application.reimb,
contacted_by=application.contacted_by,
invited_by=application.invited_by,
projects=application.projects,
github=application.github,
            devpost=application.devpost,
linkedin=application.linkedin,
site=application.site,
resume=application.resume,
).save()
class Migration(migrations.Migration):
dependencies = [
('user', '0009_user_types_20200321_0441'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('applications', '0019_auto_20200321_1814'),
]
operations = [
migrations.CreateModel(
name='HackerApplication',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, editable=False)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='hackerapplication_application', serialize=False, to=settings.AUTH_USER_MODEL)),
('submission_date', models.DateTimeField(default=django.utils.timezone.now)),
('status_update_date', models.DateTimeField(blank=True, null=True)),
('status', models.CharField(choices=[('P', 'Under review'), ('R', 'Wait listed'), ('I', 'Invited'), ('LR', 'Last reminder'), ('C', 'Confirmed'), ('X', 'Cancelled'), ('A', 'Attended'), ('E', 'Expired'), ('D', 'Dubious'), ('IV', 'Invalid')], default='P', max_length=2)),
('gender', models.CharField(choices=[('NA', 'Prefer not to answer'), ('M', 'Male'), ('F', 'Female'), ('NB', 'Non-binary'), ('X', 'Prefer to self-describe')], default='NA', max_length=23)),
('other_gender', models.CharField(blank=True, max_length=50, null=True)),
('under_age', models.BooleanField()),
('phone_number', models.CharField(blank=True, max_length=16, null=True, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format: '+#########'. Up to 15 digits allowed.", regex='^\\+?1?\\d{9,15}$')])),
('diet', models.CharField(choices=[('None', 'No requirements'), ('Vegeterian', 'Vegeterian'), ('Vegan', 'Vegan'), ('No pork', 'No pork'), ('Gluten-free', 'Gluten-free'), ('Others', 'Others')], default='None', max_length=300)),
('other_diet', models.CharField(blank=True, max_length=600, null=True)),
('tshirt_size', models.CharField(choices=[('W-XSS', "Women's - XXS"), ('W-XS', "Women's - XS"), ('W-S', "Women's - S"), ('W-M', "Women's - M"), ('W-L', "Women's - L"), ('W-XL', "Women's - XL"), ('W-XXL', "Women's - XXL"), ('XXS', 'Unisex - XXS'), ('XS', 'Unisex - XS'), ('S', 'Unisex - S'), ('M', 'Unisex - M'), ('L', 'Unisex - L'), ('XL', 'Unisex - XL'), ('XXL', 'Unisex - XXL')], default='M', max_length=5)),
('origin', models.CharField(max_length=300)),
('first_timer', models.BooleanField(default=False)),
('lennyface', models.CharField(default='-.-', max_length=300)),
('graduation_year', models.IntegerField(choices=[(2018, '2018'), (2019, '2019'), (2020, '2020'), (2021, '2021'), (2022, '2022'), (2023, '2023'), (2024, '2024')], default=2018)),
('university', models.CharField(max_length=300)),
('degree', models.CharField(max_length=300)),
('projects', models.TextField(blank=True, max_length=500, null=True)),
('github', models.URLField(blank=True, null=True)),
('devpost', models.URLField(blank=True, null=True)),
('linkedin', models.URLField(blank=True, null=True)),
('site', models.URLField(blank=True, null=True)),
('resume', models.FileField(blank=True, null=True, upload_to='resumes', validators=[applications.validators.validate_file_extension])),
('contacted', models.BooleanField(default=False)),
('description', models.TextField(max_length=500)),
('reimb', models.BooleanField(default=False)),
('reimb_amount', models.FloatField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(0, 'Negative? Really? Please put a positive value')])),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='MentorApplication',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, editable=False)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='mentorapplication_application', serialize=False, to=settings.AUTH_USER_MODEL)),
('submission_date', models.DateTimeField(default=django.utils.timezone.now)),
('status_update_date', models.DateTimeField(blank=True, null=True)),
('status', models.CharField(choices=[('P', 'Under review'), ('R', 'Wait listed'), ('I', 'Invited'), ('LR', 'Last reminder'), ('C', 'Confirmed'), ('X', 'Cancelled'), ('A', 'Attended'), ('E', 'Expired'), ('D', 'Dubious'), ('IV', 'Invalid')], default='P', max_length=2)),
('gender', models.CharField(choices=[('NA', 'Prefer not to answer'), ('M', 'Male'), ('F', 'Female'), ('NB', 'Non-binary'), ('X', 'Prefer to self-describe')], default='NA', max_length=23)),
('other_gender', models.CharField(blank=True, max_length=50, null=True)),
('under_age', models.BooleanField()),
('phone_number', models.CharField(blank=True, max_length=16, null=True, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format: '+#########'. Up to 15 digits allowed.", regex='^\\+?1?\\d{9,15}$')])),
('diet', models.CharField(choices=[('None', 'No requirements'), ('Vegeterian', 'Vegeterian'), ('Vegan', 'Vegan'), ('No pork', 'No pork'), ('Gluten-free', 'Gluten-free'), ('Others', 'Others')], default='None', max_length=300)),
('other_diet', models.CharField(blank=True, max_length=600, null=True)),
('tshirt_size', models.CharField(choices=[('W-XSS', "Women's - XXS"), ('W-XS', "Women's - XS"), ('W-S', "Women's - S"), ('W-M', "Women's - M"), ('W-L', "Women's - L"), ('W-XL', "Women's - XL"), ('W-XXL', "Women's - XXL"), ('XXS', 'Unisex - XXS'), ('XS', 'Unisex - XS'), ('S', 'Unisex - S'), ('M', 'Unisex - M'), ('L', 'Unisex - L'), ('XL', 'Unisex - XL'), ('XXL', 'Unisex - XXL')], default='M', max_length=5)),
('origin', models.CharField(max_length=300)),
('first_timer', models.BooleanField(default=False)),
('lennyface', models.CharField(default='-.-', max_length=300)),
('graduation_year', models.IntegerField(choices=[(2018, '2018'), (2019, '2019'), (2020, '2020'), (2021, '2021'), (2022, '2022'), (2023, '2023'), (2024, '2024')], default=2018)),
('university', models.CharField(max_length=300)),
('degree', models.CharField(max_length=300)),
('projects', models.TextField(blank=True, max_length=500, null=True)),
('github', models.URLField(blank=True, null=True)),
('devpost', models.URLField(blank=True, null=True)),
('linkedin', models.URLField(blank=True, null=True)),
('site', models.URLField(blank=True, null=True)),
('resume', models.FileField(blank=True, null=True, upload_to='resumes', validators=[applications.validators.validate_file_extension])),
('english_level', models.IntegerField(default=0)),
('attendance', models.CharField(max_length=200)),
('company', models.CharField(max_length=100)),
('why_mentor', models.CharField(max_length=500)),
('first_time_mentor', models.BooleanField()),
('fluent', models.CharField(max_length=150)),
('experience', models.CharField(max_length=300)),
('study_work', models.CharField(max_length=300)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='SponsorApplication',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, editable=False)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='sponsorapplication_application', serialize=False, to=settings.AUTH_USER_MODEL)),
('submission_date', models.DateTimeField(default=django.utils.timezone.now)),
('status_update_date', models.DateTimeField(blank=True, null=True)),
('status', models.CharField(choices=[('P', 'Under review'), ('R', 'Wait listed'), ('I', 'Invited'), ('LR', 'Last reminder'), ('C', 'Confirmed'), ('X', 'Cancelled'), ('A', 'Attended'), ('E', 'Expired'), ('D', 'Dubious'), ('IV', 'Invalid')], default='P', max_length=2)),
('gender', models.CharField(choices=[('NA', 'Prefer not to answer'), ('M', 'Male'), ('F', 'Female'), ('NB', 'Non-binary'), ('X', 'Prefer to self-describe')], default='NA', max_length=23)),
('other_gender', models.CharField(blank=True, max_length=50, null=True)),
('under_age', models.BooleanField()),
('phone_number', models.CharField(blank=True, max_length=16, null=True, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format: '+#########'. Up to 15 digits allowed.", regex='^\\+?1?\\d{9,15}$')])),
('diet', models.CharField(choices=[('None', 'No requirements'), ('Vegeterian', 'Vegeterian'), ('Vegan', 'Vegan'), ('No pork', 'No pork'), ('Gluten-free', 'Gluten-free'), ('Others', 'Others')], default='None', max_length=300)),
('other_diet', models.CharField(blank=True, max_length=600, null=True)),
('tshirt_size', models.CharField(choices=[('W-XSS', "Women's - XXS"), ('W-XS', "Women's - XS"), ('W-S', "Women's - S"), ('W-M', "Women's - M"), ('W-L', "Women's - L"), ('W-XL', "Women's - XL"), ('W-XXL', "Women's - XXL"), ('XXS', 'Unisex - XXS'), ('XS', 'Unisex - XS'), ('S', 'Unisex - S'), ('M', 'Unisex - M'), ('L', 'Unisex - L'), ('XL', 'Unisex - XL'), ('XXL', 'Unisex - XXL')], default='M', max_length=5)),
('attendance', models.CharField(max_length=200)),
('company', models.CharField(max_length=100)),
('position', models.CharField(max_length=50)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='VolunteerApplication',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, editable=False)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='volunteerapplication_application', serialize=False, to=settings.AUTH_USER_MODEL)),
('submission_date', models.DateTimeField(default=django.utils.timezone.now)),
('status_update_date', models.DateTimeField(blank=True, null=True)),
('status', models.CharField(choices=[('P', 'Under review'), ('R', 'Wait listed'), ('I', 'Invited'), ('LR', 'Last reminder'), ('C', 'Confirmed'), ('X', 'Cancelled'), ('A', 'Attended'), ('E', 'Expired'), ('D', 'Dubious'), ('IV', 'Invalid')], default='P', max_length=2)),
('gender', models.CharField(choices=[('NA', 'Prefer not to answer'), ('M', 'Male'), ('F', 'Female'), ('NB', 'Non-binary'), ('X', 'Prefer to self-describe')], default='NA', max_length=23)),
('other_gender', models.CharField(blank=True, max_length=50, null=True)),
('under_age', models.BooleanField()),
('phone_number', models.CharField(blank=True, max_length=16, null=True, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format: '+#########'. Up to 15 digits allowed.", regex='^\\+?1?\\d{9,15}$')])),
('diet', models.CharField(choices=[('None', 'No requirements'), ('Vegeterian', 'Vegeterian'), ('Vegan', 'Vegan'), ('No pork', 'No pork'), ('Gluten-free', 'Gluten-free'), ('Others', 'Others')], default='None', max_length=300)),
('other_diet', models.CharField(blank=True, max_length=600, null=True)),
('tshirt_size', models.CharField(choices=[('W-XSS', "Women's - XXS"), ('W-XS', "Women's - XS"), ('W-S', "Women's - S"), ('W-M', "Women's - M"), ('W-L', "Women's - L"), ('W-XL', "Women's - XL"), ('W-XXL', "Women's - XXL"), ('XXS', 'Unisex - XXS'), ('XS', 'Unisex - XS'), ('S', 'Unisex - S'), ('M', 'Unisex - M'), ('L', 'Unisex - L'), ('XL', 'Unisex - XL'), ('XXL', 'Unisex - XXL')], default='M', max_length=5)),
('origin', models.CharField(max_length=300)),
('first_timer', models.BooleanField(default=False)),
('lennyface', models.CharField(default='-.-', max_length=300)),
('graduation_year', models.IntegerField(choices=[(2018, '2018'), (2019, '2019'), (2020, '2020'), (2021, '2021'), (2022, '2022'), (2023, '2023'), (2024, '2024')], default=2018)),
('university', models.CharField(max_length=300)),
('degree', models.CharField(max_length=300)),
('english_level', models.IntegerField(default=0)),
('attendance', models.CharField(max_length=200)),
('cool_skill', models.CharField(max_length=100)),
('first_time_volunteer', models.BooleanField()),
('quality', models.CharField(max_length=150)),
('weakness', models.CharField(max_length=150)),
('fav_movie', models.CharField(max_length=60, null=True)),
('friends', models.CharField(max_length=100, null=True)),
('invited_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='volunteerapplication_invited_applications', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='sponsorapplication',
name='invited_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='sponsorapplication_invited_applications', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='mentorapplication',
name='invited_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='mentorapplication_invited_applications', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='hackerapplication',
name='contacted_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='contacted_by', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='hackerapplication',
name='invited_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='hackerapplication_invited_applications', to=settings.AUTH_USER_MODEL),
),
migrations.RunPython(convert_forwards, convert_backwards)
]
| [
"[email protected]"
] | |
080b1e4286c8c2c84610c574e16d75accadb8136 | e2c4fc612bf0095b5a7e39540978f3f8626626c1 | /comparinglists.py | ead60691a262320b12205b8b2e35dbb83f6ce640 | [] | no_license | MauriceMorrey/Python | b0582478188a1b99fcfeffff85b42c8b8b7c70f1 | 5ca9f587e9d19627af0b06e57086dca0163e5f2d | refs/heads/master | 2020-03-08T06:27:44.297555 | 2018-04-03T21:48:26 | 2018-04-03T21:48:26 | 127,972,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | list_one = [1,2,5,6,2]
list_two = [1,2,5,6,2]
list_three = [1,2,5,6,5]
list_four = [1,2,5,6,5,3]
list_five = [1,2,5,6,5,16]
list_six = [1,2,5,6,5]
list_seven = ['celery','carrots','bread','milk']
list_eight= ['celery','carrots','bread','cream']
def compare_list(list_a,list_b):
if list_a == list_b:
print "The lists are the same"
else:
print "The lists are not the same."
compare_list(list_seven,list_eight)
| [
"[email protected]"
] | |
4cc5fa1c5d42c3b5b3744dc1eb24a06ed4c8e10c | 95495baeb47fd40b9a7ecb372b79d3847aa7a139 | /test/test_siurl_list.py | 094c867212270e09508ac2b3c62b72a81fd28872 | [] | no_license | pt1988/fmc-api | b1d8ff110e12c13aa94d737f3fae9174578b019c | 075f229585fcf9bd9486600200ff9efea5371912 | refs/heads/main | 2023-01-07T09:22:07.685524 | 2020-10-30T03:21:24 | 2020-10-30T03:21:24 | 308,226,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,152 | py | # coding: utf-8
"""
Cisco Firepower Management Center Open API Specification
**Specifies the REST URLs and methods supported in the Cisco Firepower Management Center API. Refer to the version specific [REST API Quick Start Guide](https://www.cisco.com/c/en/us/support/security/defense-center/products-programming-reference-guides-list.html) for additional information.** # noqa: E501
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.siurl_list import SIURLList # noqa: E501
from swagger_client.rest import ApiException
class TestSIURLList(unittest.TestCase):
"""SIURLList unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testSIURLList(self):
"""Test SIURLList"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.siurl_list.SIURLList() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
7ba0744fc18c12981135e6b474843ce4bdb643e4 | 14bca3c05f5d8de455c16ec19ac7782653da97b2 | /lib/requests_oauthlib/oauth2_auth.py | d8712fd46c49685f885010eb48c0e21094a5eea6 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | hovu96/splunk_as_a_service_app | 167f50012c8993879afbeb88a1f2ba962cdf12ea | 9da46cd4f45603c5c4f63ddce5b607fa25ca89de | refs/heads/master | 2020-06-19T08:35:21.103208 | 2020-06-16T19:07:00 | 2020-06-16T19:07:00 | 196,641,210 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,578 | py | from __future__ import unicode_literals
from oauthlib.oauth2 import WebApplicationClient, InsecureTransportError
from oauthlib.oauth2 import is_secure_transport
from requests.auth import AuthBase
class OAuth2(AuthBase):
"""Adds proof of authorization (OAuth2 token) to the request."""
def __init__(self, client_id=None, client=None, token=None):
"""Construct a new OAuth 2 authorization object.
:param client_id: Client id obtained during registration
:param client: :class:`oauthlib.oauth2.Client` to be used. Default is
WebApplicationClient which is useful for any
hosted application but not mobile or desktop.
:param token: Token dictionary, must include access_token
and token_type.
"""
self._client = client or WebApplicationClient(client_id, token=token)
if token:
for k, v in token.items():
setattr(self._client, k, v)
def __call__(self, r):
"""Append an OAuth 2 token to the request.
Note that currently HTTPS is required for all requests. There may be
a token type that allows for plain HTTP in the future and then this
should be updated to allow plain HTTP on a white list basis.
"""
if not is_secure_transport(r.url):
raise InsecureTransportError()
r.url, r.headers, r.body = self._client.add_token(r.url,
http_method=r.method, body=r.body, headers=r.headers)
return r
| [
"[email protected]"
] | |
262f3516ed9b97ec15c6b602731188fef2efd36d | aee7a6cca6a2674f044d7a1cacf7c72d7438b8b1 | /cup_skills/stats/average_rewardtest_score.py | c332ccef793a04daac6ef2eea3f62649d5182885 | [] | no_license | lagrassa/rl-erase | efd302526504c1157fa5810e886caccba8570f1b | 0df5c8ce4835c4641a2303d11095e9c27307f754 | refs/heads/master | 2021-05-13T13:36:12.901945 | 2019-08-01T02:13:15 | 2019-08-01T02:13:15 | 116,709,555 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,107 | py | 16.363636363636363,46.36363636363636,28.18181818181818,20.909090909090907,18.181818181818183,38.18181818181819,52.72727272727272,31.818181818181817,2.727272727272727,30.909090909090907,17.272727272727273,17.272727272727273,49.09090909090909,11.818181818181818,50.0,27.27272727272727,46.36363636363636,31.818181818181817,16.363636363636363,1.8181818181818181,38.18181818181819,51.81818181818182,41.81818181818181,19.090909090909093,30.909090909090907,23.636363636363637,52.72727272727272,11.818181818181818,16.363636363636363,27.27272727272727,30.909090909090907,46.36363636363636,13.636363636363635,50.90909090909091,44.54545454545455,50.90909090909091,26.36363636363636,30.0,52.72727272727272,49.09090909090909,11.818181818181818,20.909090909090907,49.09090909090909,49.09090909090909,14.545454545454545,30.909090909090907,51.81818181818182,16.363636363636363,50.90909090909091,44.54545454545455,27.27272727272727,50.90909090909091,7.2727272727272725,22.727272727272727,24.545454545454547,46.36363636363636,32.72727272727273,32.72727272727273,52.72727272727272,23.636363636363637,48.18181818181818,18.181818181818183,51.81818181818182,30.909090909090907,25.454545454545453,47.27272727272727,44.54545454545455,28.18181818181818,16.363636363636363,12.727272727272727,23.636363636363637,51.81818181818182,30.909090909090907,23.636363636363637,50.0,10.0,36.36363636363637,48.18181818181818,42.72727272727273,16.363636363636363,32.72727272727273,40.0,11.818181818181818,48.18181818181818,12.727272727272727,33.63636363636363,50.90909090909091,21.818181818181817,29.09090909090909,49.09090909090909,18.181818181818183,50.0,47.27272727272727,25.454545454545453,16.363636363636363,30.0,38.18181818181819,16.363636363636363,24.545454545454547,11.818181818181818,20.0,22.727272727272727,49.09090909090909,30.909090909090907,52.72727272727272,48.18181818181818,20.909090909090907,35.45454545454545,27.27272727272727,36.36363636363637,42.72727272727273,24.545454545454547,25.454545454545453,45.45454545454545,47.27272727272727,27.27272727272727,27.27272727272727,48.18181818181818,31.818181818181817,32.72727272727273, | [
"[email protected]"
] | |
387e5d4c1dec8530f2c1937bba41a86f238a0b49 | d517bed2e778072afe99e130d4f4eff4050adec7 | /venv/bin/jsonschema | cbd595a945ee73e8436c2d208a38cc58fee5ed17 | [] | no_license | Siyuan-gwu/K-mean-and-PCA-Human-Activity-Recognition | 7a08322d9d65d6e4939cc4f4b3a8c2eeca8f6be6 | 009bf87f253c2ba659b75503af69b991edc37286 | refs/heads/master | 2022-12-04T21:56:10.875929 | 2019-11-12T04:33:07 | 2019-11-12T04:33:07 | 221,090,912 | 1 | 1 | null | 2022-11-15T11:49:02 | 2019-11-11T23:47:29 | Python | UTF-8 | Python | false | false | 260 | #!/Users/zhangsiyuan/PycharmProjects/project3/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from jsonschema.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"imsiyuan.z@gmail"
] | imsiyuan.z@gmail |
|
c695b0339ee0c1738a0bf26630659bfdefc11fe2 | 9d938842ead461614d9e7a9d4d2dcff49cb96810 | /receptor/valle_libs/__init__.py | 7cbd11906b4e25b0d8ea07f2a797d891807fb060 | [
"Apache-2.0"
] | permissive | vallemrv/tpvB3 | 74e89b6b7c8e72875cf3551eaceed1052b2a8e92 | 9988a528b32692b01bd042cc6486188c4dc2109b | refs/heads/master | 2021-01-13T11:22:13.764243 | 2018-05-01T16:52:36 | 2018-05-01T16:52:36 | 76,935,092 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | # @Author: Manuel Rodriguez <valle>
# @Date: 05-Mar-2018
# @Email: [email protected]
# @Last modified by: valle
# @Last modified time: 05-Mar-2018
# @License: Apache license vesion 2.0
| [
"[email protected]"
] | |
ec4d20e2bca5302042900987f93525861d3a228b | 58f68405f9d5efb394044666873419dc9155cb47 | /Heap/HeapMax.py | 1624208522ea72ca63ddbdc21f01dd14384bc17c | [] | no_license | janak11111/Data-Structure-With-Python | 204ac0cfca3cbd0f3596669d96fdb6d4a3800e5e | adf7871c578972fc2a23097a84084606d20befdf | refs/heads/master | 2023-03-15T00:40:06.042525 | 2021-03-05T12:31:13 | 2021-03-05T12:31:13 | 277,209,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py | from heapq import heappop, heappush
array = [1, 4, 6, 2, 5, 3, 9, 8, 7]
heap = []
for item in array:
heappush(heap, item)
ordered = []
while heap:
ordered.append(heappop(heap))
print(array)
print(ordered)
array.sort()
print(array == ordered) | [
"[email protected]"
] | |
61e6bedda6a835f388a54f4c5a89689c8118b1a6 | 0eec9f07a208130afcaa275f3a625b02dec7bf6e | /sysprepi_front/urls.py | 4d5f2eeae0b669e7820f80085ab8318594c3dc61 | [] | no_license | rabeloAndre/sysprepi_front | 0a76bf26e5696347e5f72db143533783c5e157e7 | 3bbb3f2a59c831e2626e4f819e06ddc6051ad34e | refs/heads/main | 2023-02-23T06:05:23.070872 | 2021-01-26T01:19:59 | 2021-01-26T01:19:59 | 330,966,969 | 0 | 0 | null | 2021-01-19T12:12:38 | 2021-01-19T12:06:54 | null | UTF-8 | Python | false | false | 637 | py | """sysprepi_front URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
""" | [
"[email protected]"
] | |
3b81146f3186fdd1e173d2df67a9c142f415d844 | a7ae94eb4a109341aa5cba0837affa694d0f1783 | /pages/tests.py | 730c21e53d1856385d3bd8dfcf4e0911b53f7f45 | [] | no_license | nandreik/NetflixMoviePicker | 64dcf2e5497182a8f82711ed781b0b64efc49bcd | f4bd04e8805e4728cced02f3a6a453934140cfbd | refs/heads/master | 2023-03-15T08:19:41.760246 | 2021-02-17T22:40:37 | 2021-02-17T22:40:37 | 295,871,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,300 | py | from django.test import TestCase, SimpleTestCase
from .models import Movie, User
from django.contrib.auth import get_user_model
from webdriver import webscraper
from django.urls import reverse
# TestCase for db, SimpleTestCase for without db
class SimpleTests(SimpleTestCase):
# tested in homepagetests
# def test_home_page_status_code(self):
# response = self.client.get('/')
# self.assertEqual(response.status_code, 200)
def test_about_page_status_code(self):
response = self.client.get('/about/')
self.assertEqual(response.status_code, 200)
def test_findfriend_page_status_code(self):
response = self.client.get('/findfriend/')
self.assertEqual(response.status_code, 200)
def test_findmovie_page_status_code(self):
response = self.client.get('/findmovie/')
self.assertEqual(response.status_code, 200)
def test_login_page_status_code(self):
response = self.client.get('/accounts/login/')
self.assertEqual(response.status_code, 200)
# tested in signuppagetests
# def test_signup_page_status_code(self):
# response = self.client.get('/accounts/signup/')
# self.assertEqual(response.status_code, 200)
# def test_password_change_done_page_status_code(self): #
# response = self.client.get('/accounts/password_change/done/')
# self.assertEqual(response.status_code, 200)
#
# def test_password_change_from_page_status_code(self): #
# response = self.client.get('/accounts/password_change/')
# self.assertEqual(response.status_code, 200)
def test_password_reset_complete_page_status_code(self):
response = self.client.get('/accounts/reset/done/')
self.assertEqual(response.status_code, 200)
# def test_password_reset_confirm_page_status_code(self): #
# response = self.client.get('/accounts/reset/MQ/set-password/')
# self.assertEqual(response.status_code, 200)
def test_password_reset_done_page_status_code(self):
response = self.client.get('/accounts/password_reset/done/')
self.assertEqual(response.status_code, 200)
def test_password_reset_form_page_status_code(self):
response = self.client.get('/accounts/password_reset/')
self.assertEqual(response.status_code, 200)
class HomePageTests(SimpleTests):
def test_home_page_status_code(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
def test_view_url_by_name(self):
response = self.client.get(reverse('home'))
self.assertEqual(response.status_code, 200)
def test_view_uses_correct_template(self):
response = self.client.get(reverse('home'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'home.html')
class SignupPageTests(TestCase):
username = 'newuser'
email = '[email protected]'
def test_signup_page_status_code(self):
response = self.client.get('/accounts/signup/')
self.assertEqual(response.status_code, 200)
def test_view_url_by_name(self):
response = self.client.get(reverse('signup'))
self.assertEqual(response.status_code, 200)
def test_view_uses_correct_template(self): #
response = self.client.get(reverse('signup'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'registration/signup.html')
def test_signup_form(self):
new_user = get_user_model().objects.create_user(self.username, self.email)
self.assertEqual(get_user_model().objects.all().count(), 1)
self.assertEqual(get_user_model().objects.all()[0].username, self.username)
self.assertEqual(get_user_model().objects.all()[0].email, self.email)
class MovieModelTest(TestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(username="user",
email="[email protected]",
password="password")
self.movie = Movie.objects.create(user=self.user,
name="movie name",
year="year",
imdb="imdb",
rg="rg",
length="length",
genre="genre",
desc="desc",
image="image",
userChoice="choice")
# def tearDown(self):
# self.driver.quit()
def test_string_representation(self):
movie = Movie.objects.get(id=1)
expected = str(['user', 'movie name', 'year', 'imdb', 'rg', 'length', 'genre', 'desc', 'image', 'choice'])
self.assertEqual(str(movie), expected)
def test_user_content(self):
testuser = self.user
expected = [testuser.username, testuser.email]
self.assertEqual(['user', '[email protected]'], expected)
def test_movie_content(self):
self.assertEqual(f'{self.movie.user}', 'user')
self.assertEqual(f'{self.movie.name}', 'movie name')
self.assertEqual(f'{self.movie.year}', 'year')
self.assertEqual(f'{self.movie.imdb}', 'imdb')
self.assertEqual(f'{self.movie.rg}', 'rg')
self.assertEqual(f'{self.movie.length}', 'length')
self.assertEqual(f'{self.movie.genre}', 'genre')
self.assertEqual(f'{self.movie.desc}', 'desc')
self.assertEqual(f'{self.movie.image}', 'image')
self.assertEqual(f'{self.movie.userChoice}', 'choice')
# test find movie page, find friend page
class FindMovieTest(TestCase):
def setUp(self):
self.driver = webscraper.initDriver()
def tearDown(self):
self.driver.quit()
def test_find_movie(self):
movie = webscraper.findMovie(self.driver)
check = False
if movie:
check = True
self.assertTrue(check, True)
class FindFriendTest(TestCase):
def setUp(self):
self.user1 = get_user_model().objects.create_user(username="user1",
email="[email protected]",
password="password")
self.user2 = get_user_model().objects.create_user(username="user2",
email="[email protected]",
password="password")
self.movie1 = Movie.objects.create(user=self.user1,
name="movie name",
year="year",
imdb="imdb",
rg="rg",
length="length",
genre="genre",
desc="desc",
image="image",
userChoice="Yes")
self.movie2 = Movie.objects.create(user=self.user2,
name="movie name",
year="year",
imdb="imdb",
rg="rg",
length="length",
genre="genre",
desc="desc",
image="image",
userChoice="Yes")
def test_same_movie(self):
user1 = Movie.objects.filter(user=self.user1)
print("user1: ", user1)
user2 = Movie.objects.filter(user=self.user2)
movies = []
for uMovie in user1:
for fMovie in user2:
if uMovie.name == fMovie.name and uMovie.userChoice == "Yes" and fMovie.userChoice == "Yes":
movies.append(uMovie)
self.assertEqual(user1[0].name, movies[0].name)
| [
"[email protected]"
] | |
cca703832a386f8607f5a4d65cce34c685b8b4c8 | 0b17d3176aa5591d1f74172b05dad5f568e5211e | /m9/task9.1/task9.py | 2fad87fb7e28cc5c7982e47b2d61bfd1273ba1f2 | [] | no_license | alex-kay/DevOps_online_Kharkiv_2020Q42021Q1 | 270ce003c8d58c0d17865d7896c3c62a09ebbc34 | 8d12568adde9602e407b38c1852215b40e7435b2 | refs/heads/master | 2023-08-10T17:03:43.093823 | 2023-07-28T06:59:10 | 2023-07-28T06:59:10 | 320,379,455 | 0 | 1 | null | 2023-07-28T06:59:11 | 2020-12-10T20:13:54 | HCL | UTF-8 | Python | false | false | 1,311 | py | #!/usr/bin/env python
import unittest
def fizz_buzz(i):
if i % 3 == 0 and i % 5 == 0:
return "FizzBuzz"
elif i % 3 == 0:
return "Fizz"
elif i % 5 == 0:
return "Buzz"
else:
return ""
def count_vowels(string):
vowels = 0
for character in string:
if character in "aeiou":
vowels += 1
print(f"String \"{string}\" has {vowels} vowels.")
numbers = range(1,101)
for i in numbers:
print(fizz_buzz(i))
string1 = "annnmemmmtlo"
string2 = "kjjs"
count_vowels(string1)
count_vowels(string2)
class FizzBuzzTests(unittest.TestCase):
def test_fizz(self):
numbers=range(33,101, 33)
for i in numbers:
result = fizz_buzz(i)
with self.subTest(result=result):
self.assertEqual(result, 'Fizz')
def test_buzz(self):
numbers=range(55,111, 55)
for i in numbers:
result = fizz_buzz(i)
with self.subTest(result=result):
self.assertEqual(result, 'Buzz')
def test_fizzbuzz(self):
numbers=range(15,151, 15)
for i in numbers:
result = fizz_buzz(i)
with self.subTest(result=result):
self.assertEqual(result, 'FizzBuzz')
if __name__ == '__main__':
unittest.main() | [
"[email protected]"
] | |
2ff08ce98fe9ac05dce4fb5c1cdc0304678339eb | 2db7d724b27d34ea0b99b8197f4a744dea9ecf1a | /day 12/peeyush_goyal37.py | c69cd5460c899c729920cbcc6a8b61ebfbeb36db | [] | no_license | ashupeeyush/summertraining | b518441a16c0d0bc4f87a113cc47ca9bf205c6ef | 068c1476e12177023c9e264df0255a49daeb764c | refs/heads/master | 2020-03-21T07:19:13.247419 | 2018-06-22T07:59:36 | 2018-06-22T07:59:36 | 138,273,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | # -*- coding: utf-8 -*-
"""
Created on Fri May 25 11:48:40 2018
@author: Peeyush Goyal
"""
import numpy as np
import matplotlib.pyplot as plt
incomes = np.random.normal(100.0, 50.0, 10000)
print incomes
plt.hist(incomes, 50)
plt.show()
np.mean(incomes)
np.median(incomes)
incomes=np.append(incomes,[10000000])
np.mean(incomes)
np.median(incomes)
plt.hist(incomes, 50)
plt.show()
| [
"[email protected]"
] | |
2c497da33e3cd5556dea27e22b8051da20dae87c | a24d1d772d8e1e8cebbc2517adba40e63cc01669 | /spark-mooc/ML_lab4_ctr_student.py | a0af0aee8d9fd78692f7db464b3d11e548d2bca3 | [] | no_license | feamster/spark-mooc | 964bdd661d45a70902c61f8bc6ac3714e78eba38 | 3a29387fe61002f9b8a6ef0e15f1abc7d33c7147 | refs/heads/master | 2022-11-13T09:20:43.020131 | 2015-07-22T07:31:30 | 2015-07-22T07:31:30 | 275,682,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 55,523 | py |
# coding: utf-8
# 
# # **Click-Through Rate Prediction Lab**
# #### This lab covers the steps for creating a click-through rate (CTR) prediction pipeline. You will work with the [Criteo Labs](http://labs.criteo.com/) dataset that was used for a recent [Kaggle competition](https://www.kaggle.com/c/criteo-display-ad-challenge).
# #### ** This lab will cover: **
# + ####*Part 1:* Featurize categorical data using one-hot-encoding (OHE)
# + ####*Part 2:* Construct an OHE dictionary
# + ####*Part 3:* Parse CTR data and generate OHE features
# + #### *Visualization 1:* Feature frequency
# + ####*Part 4:* CTR prediction and logloss evaluation
# + #### *Visualization 2:* ROC curve
# + ####*Part 5:* Reduce feature dimension via feature hashing
# + #### *Visualization 3:* Hyperparameter heat map
#
# #### Note that, for reference, you can look up the details of the relevant Spark methods in [Spark's Python API](https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD) and the relevant NumPy methods in the [NumPy Reference](http://docs.scipy.org/doc/numpy/reference/index.html)
# In[1]:
labVersion = 'cs190_week4_v_1_3'
# ### ** Part 1: Featurize categorical data using one-hot-encoding **
# #### ** (1a) One-hot-encoding **
# #### We would like to develop code to convert categorical features to numerical ones, and to build intuition, we will work with a sample unlabeled dataset with three data points, with each data point representing an animal. The first feature indicates the type of animal (bear, cat, mouse); the second feature describes the animal's color (black, tabby); and the third (optional) feature describes what the animal eats (mouse, salmon).
# #### In a one-hot-encoding (OHE) scheme, we want to represent each tuple of `(featureID, category)` via its own binary feature. We can do this in Python by creating a dictionary that maps each tuple to a distinct integer, where the integer corresponds to a binary feature. To start, manually enter the entries in the OHE dictionary associated with the sample dataset by mapping the tuples to consecutive integers starting from zero, ordering the tuples first by featureID and next by category.
# #### Later in this lab, we'll use OHE dictionaries to transform data points into compact lists of features that can be used in machine learning algorithms.
# In[2]:
# Data for manual OHE
# Note: the first data point does not include any value for the optional third feature
sampleOne = [(0, 'mouse'), (1, 'black')]
sampleTwo = [(0, 'cat'), (1, 'tabby'), (2, 'mouse')]
sampleThree = [(0, 'bear'), (1, 'black'), (2, 'salmon')]
sampleDataRDD = sc.parallelize([sampleOne, sampleTwo, sampleThree])
# In[3]:
# TODO: Replace <FILL IN> with appropriate code
sampleOHEDictManual = {}
sampleOHEDictManual[(0,'bear')] = 0
sampleOHEDictManual[(0,'cat')] = 1
sampleOHEDictManual[(0,'mouse')] = 2
sampleOHEDictManual[(1,'black')] = 3
sampleOHEDictManual[(1,'tabby')] = 4
sampleOHEDictManual[(2,'mouse')] = 5
sampleOHEDictManual[(2,'salmon')] = 6
# In[4]:
# TEST One-hot-encoding (1a)
from test_helper import Test
Test.assertEqualsHashed(sampleOHEDictManual[(0,'bear')],
'b6589fc6ab0dc82cf12099d1c2d40ab994e8410c',
"incorrect value for sampleOHEDictManual[(0,'bear')]")
Test.assertEqualsHashed(sampleOHEDictManual[(0,'cat')],
'356a192b7913b04c54574d18c28d46e6395428ab',
"incorrect value for sampleOHEDictManual[(0,'cat')]")
Test.assertEqualsHashed(sampleOHEDictManual[(0,'mouse')],
'da4b9237bacccdf19c0760cab7aec4a8359010b0',
"incorrect value for sampleOHEDictManual[(0,'mouse')]")
Test.assertEqualsHashed(sampleOHEDictManual[(1,'black')],
'77de68daecd823babbb58edb1c8e14d7106e83bb',
"incorrect value for sampleOHEDictManual[(1,'black')]")
Test.assertEqualsHashed(sampleOHEDictManual[(1,'tabby')],
'1b6453892473a467d07372d45eb05abc2031647a',
"incorrect value for sampleOHEDictManual[(1,'tabby')]")
Test.assertEqualsHashed(sampleOHEDictManual[(2,'mouse')],
'ac3478d69a3c81fa62e60f5c3696165a4e5e6ac4',
"incorrect value for sampleOHEDictManual[(2,'mouse')]")
Test.assertEqualsHashed(sampleOHEDictManual[(2,'salmon')],
'c1dfd96eea8cc2b62785275bca38ac261256e278',
"incorrect value for sampleOHEDictManual[(2,'salmon')]")
Test.assertEquals(len(sampleOHEDictManual.keys()), 7,
'incorrect number of keys in sampleOHEDictManual')
# #### ** (1b) Sparse vectors **
# #### Data points can typically be represented with a small number of non-zero OHE features relative to the total number of features that occur in the dataset. By leveraging this sparsity and using sparse vector representations of OHE data, we can reduce storage and computational burdens. Below are a few sample vectors represented as dense numpy arrays. Use [SparseVector](https://spark.apache.org/docs/latest/api/python/pyspark.mllib.html#pyspark.mllib.linalg.SparseVector) to represent them in a sparse fashion, and verify that both the sparse and dense representations yield the same results when computing [dot products](http://en.wikipedia.org/wiki/Dot_product) (we will later use MLlib to train classifiers via gradient descent, and MLlib will need to compute dot products between SparseVectors and dense parameter vectors).
# #### Use `SparseVector(size, *args)` to create a new sparse vector where size is the length of the vector and args is either a dictionary, a list of (index, value) pairs, or two separate arrays of indices and values (sorted by index). You'll need to create a sparse vector representation of each dense vector `aDense` and `bDense`.
# In[5]:
import numpy as np
from pyspark.mllib.linalg import SparseVector
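# Illustrative aside (not part of the original lab exercise): the three constructor
# forms described above (a dictionary, a list of (index, value) pairs, or separate
# index and value arrays) all build the same SparseVector.
svFromDict = SparseVector(4, {1: 3., 3: 4.})
svFromPairs = SparseVector(4, [(1, 3.), (3, 4.)])
svFromArrays = SparseVector(4, [1, 3], [3., 4.])
print svFromDict == svFromPairs, svFromDict == svFromArrays  # True True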
# In[6]:
# TODO: Replace <FILL IN> with appropriate code
aDense = np.array([0., 3., 0., 4.])
aSparse = SparseVector(4, {1: 3., 3: 4.})
bDense = np.array([0., 0., 0., 1.])
bSparse = SparseVector(4, {3: 1.})
w = np.array([0.4, 3.1, -1.4, -.5])
print aDense.dot(w)
print aSparse.dot(w)
print bDense.dot(w)
print bSparse.dot(w)
# In[7]:
# TEST Sparse Vectors (1b)
Test.assertTrue(isinstance(aSparse, SparseVector), 'aSparse needs to be an instance of SparseVector')
Test.assertTrue(isinstance(bSparse, SparseVector), 'bSparse needs to be an instance of SparseVector')
Test.assertTrue(aDense.dot(w) == aSparse.dot(w),
'dot product of aDense and w should equal dot product of aSparse and w')
Test.assertTrue(bDense.dot(w) == bSparse.dot(w),
'dot product of bDense and w should equal dot product of bSparse and w')
# #### **(1c) OHE features as sparse vectors **
# #### Now let's see how we can represent the OHE features for points in our sample dataset. Using the mapping defined by the OHE dictionary from Part (1a), manually define OHE features for the three sample data points using SparseVector format. Any feature that occurs in a point should have the value 1.0. For example, the `DenseVector` for a point with features 2 and 4 would be `[0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0]`.
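# Illustrative aside (not part of the original lab): the sparse counterpart of that
# dense example stores only the two non-zero entries.
print SparseVector(7, {2: 1.0, 4: 1.0}).toArray()  # [ 0.  0.  1.  0.  1.  0.  0.]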
# In[8]:
# Reminder of the sample features
# sampleOne = [(0, 'mouse'), (1, 'black')]
# sampleTwo = [(0, 'cat'), (1, 'tabby'), (2, 'mouse')]
# sampleThree = [(0, 'bear'), (1, 'black'), (2, 'salmon')]
# In[9]:
# TODO: Replace <FILL IN> with appropriate code
sampleOneOHEFeatManual = SparseVector(7, {2: 1., 3: 1.})
sampleTwoOHEFeatManual = SparseVector(7, {1: 1., 4: 1., 5: 1.})
sampleThreeOHEFeatManual = SparseVector(7, {0: 1., 3: 1., 6: 1.})
# In[10]:
# TEST OHE Features as sparse vectors (1c)
Test.assertTrue(isinstance(sampleOneOHEFeatManual, SparseVector),
'sampleOneOHEFeatManual needs to be a SparseVector')
Test.assertTrue(isinstance(sampleTwoOHEFeatManual, SparseVector),
'sampleTwoOHEFeatManual needs to be a SparseVector')
Test.assertTrue(isinstance(sampleThreeOHEFeatManual, SparseVector),
'sampleThreeOHEFeatManual needs to be a SparseVector')
Test.assertEqualsHashed(sampleOneOHEFeatManual,
'ecc00223d141b7bd0913d52377cee2cf5783abd6',
'incorrect value for sampleOneOHEFeatManual')
Test.assertEqualsHashed(sampleTwoOHEFeatManual,
'26b023f4109e3b8ab32241938e2e9b9e9d62720a',
'incorrect value for sampleTwoOHEFeatManual')
Test.assertEqualsHashed(sampleThreeOHEFeatManual,
'c04134fd603ae115395b29dcabe9d0c66fbdc8a7',
'incorrect value for sampleThreeOHEFeatManual')
# #### **(1d) Define an OHE function **
# #### Next we will use the OHE dictionary from Part (1a) to programmatically generate OHE features from the original categorical data. First write a function called `oneHotEncoding` that creates OHE feature vectors in `SparseVector` format. Then use this function to create OHE features for the first sample data point and verify that the result matches the result from Part (1c).
# In[11]:
print sampleOneOHEFeatManual
print sampleOHEDictManual
print sampleOne
# In[12]:
# TODO: Replace <FILL IN> with appropriate code
def oneHotEncoding(rawFeats, OHEDict, numOHEFeats):
"""Produce a one-hot-encoding from a list of features and an OHE dictionary.
Note:
You should ensure that the indices used to create a SparseVector are sorted.
Args:
rawFeats (list of (int, str)): The features corresponding to a single observation. Each
feature consists of a tuple of featureID and the feature's value. (e.g. sampleOne)
OHEDict (dict): A mapping of (featureID, value) to unique integer.
numOHEFeats (int): The total number of unique OHE features (combinations of featureID and
value).
Returns:
        SparseVector: A SparseVector of length numOHEFeats with indices equal to the unique
identifiers for the (featureID, value) combinations that occur in the observation and
with values equal to 1.0.
"""
svDict = {}
for feat in rawFeats:
svDict[OHEDict[feat]] = 1
return SparseVector(numOHEFeats, svDict)
# Calculate the number of features in sampleOHEDictManual
numSampleOHEFeats = len(sampleOHEDictManual.keys())
# Run oneHotEncoding on sampleOne
sampleOneOHEFeat = oneHotEncoding(sampleOne, sampleOHEDictManual, numSampleOHEFeats)
print sampleOneOHEFeat
# In[13]:
# TEST Define an OHE Function (1d)
Test.assertTrue(sampleOneOHEFeat == sampleOneOHEFeatManual,
'sampleOneOHEFeat should equal sampleOneOHEFeatManual')
Test.assertEquals(sampleOneOHEFeat, SparseVector(7, [2,3], [1.0,1.0]),
'incorrect value for sampleOneOHEFeat')
Test.assertEquals(oneHotEncoding([(1, 'black'), (0, 'mouse')], sampleOHEDictManual,
numSampleOHEFeats), SparseVector(7, [2,3], [1.0,1.0]),
'incorrect definition for oneHotEncoding')
# #### **(1e) Apply OHE to a dataset **
# #### Finally, use the function from Part (1d) to create OHE features for all 3 data points in the sample dataset.
# In[14]:
# TODO: Replace <FILL IN> with appropriate code
sampleOHEData = (sampleDataRDD
.map(lambda l: oneHotEncoding (l, sampleOHEDictManual, numSampleOHEFeats)))
print sampleOHEData.collect()
# In[15]:
# TEST Apply OHE to a dataset (1e)
sampleOHEDataValues = sampleOHEData.collect()
Test.assertTrue(len(sampleOHEDataValues) == 3, 'sampleOHEData should have three elements')
Test.assertEquals(sampleOHEDataValues[0], SparseVector(7, {2: 1.0, 3: 1.0}),
'incorrect OHE for first sample')
Test.assertEquals(sampleOHEDataValues[1], SparseVector(7, {1: 1.0, 4: 1.0, 5: 1.0}),
'incorrect OHE for second sample')
Test.assertEquals(sampleOHEDataValues[2], SparseVector(7, {0: 1.0, 3: 1.0, 6: 1.0}),
'incorrect OHE for third sample')
# ### ** Part 2: Construct an OHE dictionary **
# #### **(2a) Pair RDD of `(featureID, category)` **
# #### To start, create an RDD of distinct `(featureID, category)` tuples. In our sample dataset, the 7 items in the resulting RDD are `(0, 'bear')`, `(0, 'cat')`, `(0, 'mouse')`, `(1, 'black')`, `(1, 'tabby')`, `(2, 'mouse')`, `(2, 'salmon')`. Notably `'black'` appears twice in the dataset but only contributes one item to the RDD: `(1, 'black')`, while `'mouse'` also appears twice and contributes two items: `(0, 'mouse')` and `(2, 'mouse')`. Use [flatMap](https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.flatMap) and [distinct](https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.distinct).
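# Illustrative aside (not part of the original lab): flatMap flattens the per-point
# lists of tuples into a single RDD, and distinct then removes the duplicates.
print sc.parallelize([[1, 2], [2, 3]]).flatMap(lambda l: l).collect()             # [1, 2, 2, 3]
print sc.parallelize([[1, 2], [2, 3]]).flatMap(lambda l: l).distinct().collect()  # [1, 2, 3] in some order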
# In[16]:
# TODO: Replace <FILL IN> with appropriate code
sampleDistinctFeats = (sampleDataRDD
.flatMap(lambda l : l)
.distinct()
)
# In[17]:
# TEST Pair RDD of (featureID, category) (2a)
Test.assertEquals(sorted(sampleDistinctFeats.collect()),
[(0, 'bear'), (0, 'cat'), (0, 'mouse'), (1, 'black'),
(1, 'tabby'), (2, 'mouse'), (2, 'salmon')],
'incorrect value for sampleDistinctFeats')
# #### ** (2b) OHE Dictionary from distinct features **
# #### Next, create an `RDD` of key-value tuples, where each `(featureID, category)` tuple in `sampleDistinctFeats` is a key and the values are distinct integers ranging from 0 to (number of keys - 1). Then convert this `RDD` into a dictionary, which can be done using the `collectAsMap` action. Note that there is no unique mapping from keys to values, as all we require is that each `(featureID, category)` key be mapped to a unique integer between 0 and the number of keys. In this exercise, any valid mapping is acceptable. Use [zipWithIndex](https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.zipWithIndex) followed by [collectAsMap](https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.collectAsMap).
# #### In our sample dataset, one valid list of key-value tuples is: `[((0, 'bear'), 0), ((2, 'salmon'), 1), ((1, 'tabby'), 2), ((2, 'mouse'), 3), ((0, 'mouse'), 4), ((0, 'cat'), 5), ((1, 'black'), 6)]`. The dictionary defined in Part (1a) illustrates another valid mapping between keys and integers.
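# Illustrative aside (not part of the original lab): zipWithIndex pairs each element
# with a consecutive integer starting at zero, and collectAsMap turns an RDD of
# key-value pairs into a local Python dictionary.
print sc.parallelize(['a', 'b', 'c']).zipWithIndex().collect()       # [('a', 0), ('b', 1), ('c', 2)]
print sc.parallelize(['a', 'b', 'c']).zipWithIndex().collectAsMap()  # {'a': 0, 'b': 1, 'c': 2}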
# In[18]:
# TODO: Replace <FILL IN> with appropriate code
sampleOHEDict = (sampleDistinctFeats
.zipWithIndex()
.collectAsMap()
)
print sampleOHEDict
# In[19]:
# TEST OHE Dictionary from distinct features (2b)
Test.assertEquals(sorted(sampleOHEDict.keys()),
[(0, 'bear'), (0, 'cat'), (0, 'mouse'), (1, 'black'),
(1, 'tabby'), (2, 'mouse'), (2, 'salmon')],
'sampleOHEDict has unexpected keys')
Test.assertEquals(sorted(sampleOHEDict.values()), range(7), 'sampleOHEDict has unexpected values')
# #### **(2c) Automated creation of an OHE dictionary **
# #### Now use the code from Parts (2a) and (2b) to write a function that takes an input dataset and outputs an OHE dictionary. Then use this function to create an OHE dictionary for the sample dataset, and verify that it matches the dictionary from Part (2b).
# In[20]:
# TODO: Replace <FILL IN> with appropriate code
def createOneHotDict(inputData):
"""Creates a one-hot-encoder dictionary based on the input data.
Args:
inputData (RDD of lists of (int, str)): An RDD of observations where each observation is
made up of a list of (featureID, value) tuples.
Returns:
dict: A dictionary where the keys are (featureID, value) tuples and map to values that are
unique integers.
"""
return inputData.flatMap(lambda l : l).distinct().zipWithIndex().collectAsMap()
sampleOHEDictAuto = createOneHotDict(sampleDataRDD)
print sampleOHEDictAuto
# In[21]:
# TEST Automated creation of an OHE dictionary (2c)
Test.assertEquals(sorted(sampleOHEDictAuto.keys()),
[(0, 'bear'), (0, 'cat'), (0, 'mouse'), (1, 'black'),
(1, 'tabby'), (2, 'mouse'), (2, 'salmon')],
'sampleOHEDictAuto has unexpected keys')
Test.assertEquals(sorted(sampleOHEDictAuto.values()), range(7),
'sampleOHEDictAuto has unexpected values')
# ### **Part 3: Parse CTR data and generate OHE features**
# #### Before we can proceed, you'll first need to obtain the data from Criteo. If you have already completed this step in the setup lab, just run the cells below and the data will be loaded into the `rawData` variable.
# #### Below is Criteo's data sharing agreement. After you accept the agreement, you can obtain the download URL by right-clicking on the "Download Sample" button and clicking "Copy link address" or "Copy Link Location", depending on your browser. Paste the URL into the `# TODO` cell below. The file is 8.4 MB compressed. The script below will download the file to the virtual machine (VM) and then extract the data.
# #### If running the cell below does not render a webpage, open the [Criteo agreement](http://labs.criteo.com/downloads/2014-kaggle-display-advertising-challenge-dataset/) in a separate browser tab. After you accept the agreement, you can obtain the download URL by right-clicking on the "Download Sample" button and clicking "Copy link address" or "Copy Link Location", depending on your browser. Paste the URL into the `# TODO` cell below.
# #### Note that the download could take a few minutes, depending upon your connection speed.
# In[22]:
# Run this code to view Criteo's agreement
from IPython.lib.display import IFrame
IFrame("http://labs.criteo.com/downloads/2014-kaggle-display-advertising-challenge-dataset/",
600, 350)
# In[23]:
# TODO: Replace <FILL IN> with appropriate code
# Just replace <FILL IN> with the url for dac_sample.tar.gz
import glob
import os.path
import tarfile
import urllib
import urlparse
# Paste url, url should end with: dac_sample.tar.gz
url = 'http://labs.criteo.com/wp-content/uploads/2015/04/dac_sample.tar.gz'
url = url.strip()
baseDir = os.path.join('data')
inputPath = os.path.join('cs190', 'dac_sample.txt')
fileName = os.path.join(baseDir, inputPath)
inputDir = os.path.split(fileName)[0]
def extractTar(check = False):
# Find the zipped archive and extract the dataset
tars = glob.glob('dac_sample*.tar.gz*')
if check and len(tars) == 0:
return False
if len(tars) > 0:
try:
tarFile = tarfile.open(tars[0])
except tarfile.ReadError:
if not check:
print 'Unable to open tar.gz file. Check your URL.'
return False
tarFile.extract('dac_sample.txt', path=inputDir)
print 'Successfully extracted: dac_sample.txt'
return True
else:
print 'You need to retry the download with the correct url.'
print ('Alternatively, you can upload the dac_sample.tar.gz file to your Jupyter root ' +
'directory')
return False
if os.path.isfile(fileName):
print 'File is already available. Nothing to do.'
elif extractTar(check = True):
print 'tar.gz file was already available.'
elif not url.endswith('dac_sample.tar.gz'):
print 'Check your download url. Are you downloading the Sample dataset?'
else:
# Download the file and store it in the same directory as this notebook
try:
urllib.urlretrieve(url, os.path.basename(urlparse.urlsplit(url).path))
except IOError:
print 'Unable to download and store: {0}'.format(url)
extractTar()
# In[24]:
import os.path
baseDir = os.path.join('data')
inputPath = os.path.join('cs190', 'dac_sample.txt')
fileName = os.path.join(baseDir, inputPath)
if os.path.isfile(fileName):
rawData = (sc
.textFile(fileName, 2)
.map(lambda x: x.replace('\t', ','))) # work with either ',' or '\t' separated data
print rawData.take(1)
# #### **(3a) Loading and splitting the data **
# #### We are now ready to start working with the actual CTR data, and our first task involves splitting it into training, validation, and test sets. Use the [randomSplit method](https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.randomSplit) with the specified weights and seed to create RDDs storing each of these datasets, and then [cache](https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.cache) each of these RDDs, as we will be accessing them multiple times in the remainder of this lab. Finally, compute the size of each dataset.
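# Illustrative aside (not part of the original lab): randomSplit returns a list of RDDs
# whose sizes are roughly proportional to the supplied weights, and fixing the seed
# makes the split reproducible.
toyTrain, toyTest = sc.parallelize(range(100)).randomSplit([.8, .2], seed=42)
print toyTrain.count(), toyTest.count()  # roughly 80 and 20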
# In[25]:
# TODO: Replace <FILL IN> with appropriate code
weights = [.8, .1, .1]
seed = 42
# Use randomSplit with weights and seed
rawTrainData, rawValidationData, rawTestData = rawData.randomSplit(weights,seed)
# Cache the data
rawTrainData.cache()
rawValidationData.cache()
rawTestData.cache()
nTrain = rawTrainData.count()
nVal = rawValidationData.count()
nTest = rawTestData.count()
print nTrain, nVal, nTest, nTrain + nVal + nTest
print rawData.take(1)
# In[26]:
# TEST Loading and splitting the data (3a)
Test.assertTrue(all([rawTrainData.is_cached, rawValidationData.is_cached, rawTestData.is_cached]),
'you must cache the split data')
Test.assertEquals(nTrain, 79911, 'incorrect value for nTrain')
Test.assertEquals(nVal, 10075, 'incorrect value for nVal')
Test.assertEquals(nTest, 10014, 'incorrect value for nTest')
# #### ** (3b) Extract features **
# #### We will now parse the raw training data to create an RDD that we can subsequently use to create an OHE dictionary. Note from the `take()` command in Part (3a) that each raw data point is a string containing several fields separated by some delimiter. For now, we will ignore the first field (which is the 0-1 label), and parse the remaining fields (or raw features). To do this, complete the implementation of the `parsePoint` function.
# In[27]:
# TODO: Replace <FILL IN> with appropriate code
def parsePoint(point):
"""Converts a comma separated string into a list of (featureID, value) tuples.
Note:
featureIDs should start at 0 and increase to the number of features - 1.
Args:
point (str): A comma separated string where the first value is the label and the rest
are features.
Returns:
list: A list of (featureID, value) tuples.
"""
    # Drop the label (the first field) and pair each remaining raw feature with its featureID
    featureList = point.split(',')[1:]
    return list(enumerate(featureList))
parsedTrainFeat = rawTrainData.map(parsePoint)
print parsedTrainFeat.take(1)
numCategories = (parsedTrainFeat
.flatMap(lambda x: x)
.distinct()
.map(lambda x: (x[0], 1))
.reduceByKey(lambda x, y: x + y)
.sortByKey()
.collect())
print numCategories[2][1]
# In[28]:
# TEST Extract features (3b)
Test.assertEquals(numCategories[2][1], 855, 'incorrect implementation of parsePoint')
Test.assertEquals(numCategories[32][1], 4, 'incorrect implementation of parsePoint')
# #### **(3c) Create an OHE dictionary from the dataset **
# #### Note that parsePoint returns a data point as a list of `(featureID, category)` tuples, which is the same format as the sample dataset studied in Parts 1 and 2 of this lab. Using this observation, create an OHE dictionary using the function implemented in Part (2c). Note that we will assume for simplicity that all features in our CTR dataset are categorical.
# In[29]:
# TODO: Replace <FILL IN> with appropriate code
ctrOHEDict = createOneHotDict(rawTrainData.map(parsePoint))
numCtrOHEFeats = len(ctrOHEDict.keys())
print numCtrOHEFeats
print ctrOHEDict[(0, '')]
# In[30]:
# TEST Create an OHE dictionary from the dataset (3c)
Test.assertEquals(numCtrOHEFeats, 233286, 'incorrect number of features in ctrOHEDict')
Test.assertTrue((0, '') in ctrOHEDict, 'incorrect features in ctrOHEDict')
# #### ** (3d) Apply OHE to the dataset **
# #### Now let's use this OHE dictionary by starting with the raw training data and creating an RDD of [LabeledPoint](http://spark.apache.org/docs/1.3.1/api/python/pyspark.mllib.html#pyspark.mllib.regression.LabeledPoint) objects using OHE features. To do this, complete the implementation of the `parseOHEPoint` function. Hint: `parseOHEPoint` is an extension of the `parsePoint` function from Part (3b) and it uses the `oneHotEncoding` function from Part (1d).
# In[31]:
from pyspark.mllib.regression import LabeledPoint
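# Illustrative aside (not part of the original lab): a LabeledPoint simply pairs a
# numeric label with a feature vector, which may be dense or sparse.
examplePoint = LabeledPoint(1.0, SparseVector(7, {2: 1.0, 3: 1.0}))
print examplePoint.label, examplePoint.features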
# In[32]:
# TODO: Replace <FILL IN> with appropriate code
def parseOHEPoint(point, OHEDict, numOHEFeats):
"""Obtain the label and feature vector for this raw observation.
Note:
You must use the function `oneHotEncoding` in this implementation or later portions
of this lab may not function as expected.
Args:
point (str): A comma separated string where the first value is the label and the rest
are features.
OHEDict (dict of (int, str) to int): Mapping of (featureID, value) to unique integer.
numOHEFeats (int): The number of unique features in the training dataset.
Returns:
LabeledPoint: Contains the label for the observation and the one-hot-encoding of the
raw features based on the provided OHE dictionary.
"""
label = point.split(',')[0]
return LabeledPoint(label, oneHotEncoding(parsePoint(point),OHEDict,numOHEFeats))
OHETrainData = rawTrainData.map(lambda point: parseOHEPoint(point, ctrOHEDict, numCtrOHEFeats))
OHETrainData.cache()
print OHETrainData.take(1)
# Check that oneHotEncoding function was used in parseOHEPoint
backupOneHot = oneHotEncoding
oneHotEncoding = None
withOneHot = False
try: parseOHEPoint(rawTrainData.take(1)[0], ctrOHEDict, numCtrOHEFeats)
except TypeError: withOneHot = True
oneHotEncoding = backupOneHot
# In[33]:
# TEST Apply OHE to the dataset (3d)
numNZ = sum(parsedTrainFeat.map(lambda x: len(x)).take(5))
numNZAlt = sum(OHETrainData.map(lambda lp: len(lp.features.indices)).take(5))
Test.assertEquals(numNZ, numNZAlt, 'incorrect implementation of parseOHEPoint')
Test.assertTrue(withOneHot, 'oneHotEncoding not present in parseOHEPoint')
# #### **Visualization 1: Feature frequency **
# #### We will now visualize the number of times each of the 233,286 OHE features appears in the training data. We first compute the number of times each feature appears, then bucket the features by these counts. The buckets are sized by powers of 2, so the first bucket corresponds to features that appear exactly once ( $ \scriptsize 2^0 $ ), the second to features that appear twice ( $ \scriptsize 2^1 $ ), the third to features that occur between three and four ( $ \scriptsize 2^2 $ ) times, the fourth to features that occur between five and eight ( $ \scriptsize 2^3 $ ) times, and so on. The scatter plot below shows the logarithm of the bucket thresholds versus the logarithm of the number of features that have counts that fall in the buckets.
# In[34]:
def bucketFeatByCount(featCount):
"""Bucket the counts by powers of two."""
for i in range(11):
size = 2 ** i
if featCount <= size:
return size
return -1
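# (Added illustration, not part of the original lab) A few sample bucket assignments; note that a
# count larger than 2 ** 10 falls outside every bucket and returns -1, which is filtered out below.
print bucketFeatByCount(1), bucketFeatByCount(6), bucketFeatByCount(5000)  # expected: 1 8 -1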
featCounts = (OHETrainData
.flatMap(lambda lp: lp.features.indices)
.map(lambda x: (x, 1))
.reduceByKey(lambda x, y: x + y))
featCountsBuckets = (featCounts
.map(lambda x: (bucketFeatByCount(x[1]), 1))
.filter(lambda (k, v): k != -1)
.reduceByKey(lambda x, y: x + y)
.collect())
print featCountsBuckets
# In[35]:
import matplotlib.pyplot as plt
x, y = zip(*featCountsBuckets)
x, y = np.log(x), np.log(y)
def preparePlot(xticks, yticks, figsize=(10.5, 6), hideLabels=False, gridColor='#999999',
gridWidth=1.0):
"""Template for generating the plot layout."""
plt.close()
fig, ax = plt.subplots(figsize=figsize, facecolor='white', edgecolor='white')
ax.axes.tick_params(labelcolor='#999999', labelsize='10')
for axis, ticks in [(ax.get_xaxis(), xticks), (ax.get_yaxis(), yticks)]:
axis.set_ticks_position('none')
axis.set_ticks(ticks)
axis.label.set_color('#999999')
if hideLabels: axis.set_ticklabels([])
plt.grid(color=gridColor, linewidth=gridWidth, linestyle='-')
map(lambda position: ax.spines[position].set_visible(False), ['bottom', 'top', 'left', 'right'])
return fig, ax
# generate layout and plot data
fig, ax = preparePlot(np.arange(0, 10, 1), np.arange(4, 14, 2))
ax.set_xlabel(r'$\log_e(bucketSize)$'), ax.set_ylabel(r'$\log_e(countInBucket)$')
plt.scatter(x, y, s=14**2, c='#d6ebf2', edgecolors='#8cbfd0', alpha=0.75)
pass
# #### **(3e) Handling unseen features **
# #### We naturally would like to repeat the process from Part (3d), e.g., to compute OHE features for the validation and test datasets. However, we must be careful, as some categorical values will likely appear in new data that did not exist in the training data. To deal with this situation, update the `oneHotEncoding()` function from Part (1d) to ignore previously unseen categories, and then compute OHE features for the validation data.
# In[36]:
# TODO: Replace <FILL IN> with appropriate code
def oneHotEncoding(rawFeats, OHEDict, numOHEFeats):
"""Produce a one-hot-encoding from a list of features and an OHE dictionary.
Note:
If a (featureID, value) tuple doesn't have a corresponding key in OHEDict it should be
ignored.
Args:
rawFeats (list of (int, str)): The features corresponding to a single observation. Each
feature consists of a tuple of featureID and the feature's value. (e.g. sampleOne)
OHEDict (dict): A mapping of (featureID, value) to unique integer.
numOHEFeats (int): The total number of unique OHE features (combinations of featureID and
value).
Returns:
SparseVector: A SparseVector of length numOHEFeats with indices equal to the unique
identifiers for the (featureID, value) combinations that occur in the observation and
with values equal to 1.0.
"""
svDict = {}
for feat in rawFeats:
if feat in OHEDict:
svDict[OHEDict[feat]] = 1
return SparseVector(numOHEFeats, svDict)
OHEValidationData = rawValidationData.map(lambda point: parseOHEPoint(point, ctrOHEDict, numCtrOHEFeats))
OHEValidationData.cache()
print OHEValidationData.take(1)
# In[37]:
# TEST Handling unseen features (3e)
numNZVal = (OHEValidationData
.map(lambda lp: len(lp.features.indices))
.sum())
Test.assertEquals(numNZVal, 372080, 'incorrect number of features')
# ### ** Part 4: CTR prediction and logloss evaluation **
# #### ** (4a) Logistic regression **
# #### We are now ready to train our first CTR classifier. A natural classifier to use in this setting is logistic regression, since it models the probability of a click-through event rather than returning a binary response, and when working with rare events, probabilistic predictions are useful. First use [LogisticRegressionWithSGD](https://spark.apache.org/docs/latest/api/python/pyspark.mllib.html#pyspark.mllib.classification.LogisticRegressionWithSGD) to train a model using `OHETrainData` with the given hyperparameter configuration. `LogisticRegressionWithSGD` returns a [LogisticRegressionModel](https://spark.apache.org/docs/latest/api/python/pyspark.mllib.html#pyspark.mllib.regression.LogisticRegressionModel). Next, use the `LogisticRegressionModel.weights` and `LogisticRegressionModel.intercept` attributes to print out the model's parameters. Note that these are the names of the object's attributes and should be called using a syntax like `model.weights` for a given `model`.
# In[38]:
from pyspark.mllib.classification import LogisticRegressionWithSGD
# fixed hyperparameters
numIters = 50
stepSize = 10.
regParam = 1e-6
regType = 'l2'
includeIntercept = True
# In[39]:
# TODO: Replace <FILL IN> with appropriate code
model0 = LogisticRegressionWithSGD.train(OHETrainData, iterations=numIters, step=stepSize, regParam=regParam,
regType=regType, intercept=includeIntercept)
sortedWeights = sorted(model0.weights)
print sortedWeights[:5], model0.intercept
# In[40]:
# TEST Logistic regression (4a)
Test.assertTrue(np.allclose(model0.intercept, 0.56455084025), 'incorrect value for model0.intercept')
Test.assertTrue(np.allclose(sortedWeights[0:5],
[-0.45899236853575609, -0.37973707648623956, -0.36996558266753304,
-0.36934962879928263, -0.32697945415010637]), 'incorrect value for model0.weights')
# #### ** (4b) Log loss **
# #### Throughout this lab, we will use log loss to evaluate the quality of models. Log loss is defined as: $$ \begin{align} \scriptsize \ell_{log}(p, y) = \begin{cases} -\log (p) & \text{if } y = 1 \\\ -\log(1-p) & \text{if } y = 0 \end{cases} \end{align} $$ where $ \scriptsize p$ is a probability between 0 and 1 and $ \scriptsize y$ is a label of either 0 or 1. Log loss is a standard evaluation criterion when predicting rare-events such as click-through rate prediction (it is also the criterion used in the [Criteo Kaggle competition](https://www.kaggle.com/c/criteo-display-ad-challenge)). Write a function to compute log loss, and evaluate it on some sample inputs.
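# #### (Added worked example, not part of the graded exercise) Plugging a few values into the formula: an uninformative prediction of $ \scriptsize p = 0.5 $ costs $ \scriptsize -\log(0.5) \approx 0.693 $ for either label; a confident correct prediction ( $ \scriptsize p = 0.99, y = 1 $ ) costs $ \scriptsize -\log(0.99) \approx 0.010 $; and a confident wrong one ( $ \scriptsize p = 0.99, y = 0 $ ) costs $ \scriptsize -\log(0.01) \approx 4.605 $, so overconfident mistakes are penalized sharply. These values match the sample printouts further down.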
# In[41]:
# TODO: Replace <FILL IN> with appropriate code
from math import log
def computeLogLoss(p, y):
"""Calculates the value of log loss for a given probabilty and label.
Note:
log(0) is undefined, so when p is 0 we need to add a small value (epsilon) to it
and when p is 1 we need to subtract a small value (epsilon) from it.
Args:
p (float): A probability between 0 and 1.
y (int): A label. Takes on the values 0 and 1.
Returns:
float: The log loss value.
"""
epsilon = 10e-12
if y == 0.0:
if p == 1.0:
return -log(epsilon)
else:
return -log(1-p)
else:
if y == 1.0:
if p == 0.0:
return -log(epsilon)
else:
return -log(p)
print computeLogLoss(.5, 1)
print computeLogLoss(.5, 0)
print computeLogLoss(.99, 1)
print computeLogLoss(.99, 0)
print computeLogLoss(.01, 1)
print computeLogLoss(.01, 0)
print computeLogLoss(0, 1)
print computeLogLoss(1, 1)
print computeLogLoss(1, 0)
# In[42]:
# TEST Log loss (4b)
Test.assertTrue(np.allclose([computeLogLoss(.5, 1), computeLogLoss(.01, 0), computeLogLoss(.01, 1)],
[0.69314718056, 0.0100503358535, 4.60517018599]),
'computeLogLoss is not correct')
Test.assertTrue(np.allclose([computeLogLoss(0, 1), computeLogLoss(1, 1), computeLogLoss(1, 0)],
[25.3284360229, 1.00000008275e-11, 25.3284360229]),
'computeLogLoss needs to bound p away from 0 and 1 by epsilon')
# #### ** (4c) Baseline log loss **
# #### Next we will use the function we wrote in Part (4b) to compute the baseline log loss on the training data. A very simple yet natural baseline model is one where we always make the same prediction independent of the given datapoint, setting the predicted value equal to the fraction of training points that correspond to click-through events (i.e., where the label is one). Compute this value (which is simply the mean of the training labels), and then use it to compute the training log loss for the baseline model. The log loss for multiple observations is the mean of the individual log loss values.
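# #### (Added note) Since the labels are 0/1, the click-through fraction is just the mean of the label column, so an equivalent way to compute it would be something like `OHETrainData.map(lambda lp: lp.label).mean()` (assuming the RDD `mean()` helper in this Spark version); the explicit filter-and-count version below spells out the same computation.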
# In[43]:
# TODO: Replace <FILL IN> with appropriate code
# Note that our dataset has a very high click-through rate by design
# In practice click-through rate can be one to two orders of magnitude lower
classOneFracTrain = (OHETrainData
.map(lambda l: l.label)
.filter(lambda l: l == 1)
.count()) / float(OHETrainData.count())
print classOneFracTrain
logLossTrBase = (OHETrainData
.map(lambda l: computeLogLoss(classOneFracTrain,l.label))
.reduce(lambda x, y: x + y)) / float(OHETrainData.count())
print 'Baseline Train Logloss = {0:.3f}\n'.format(logLossTrBase)
# In[44]:
# TEST Baseline log loss (4c)
Test.assertTrue(np.allclose(classOneFracTrain, 0.22717773523), 'incorrect value for classOneFracTrain')
Test.assertTrue(np.allclose(logLossTrBase, 0.535844), 'incorrect value for logLossTrBase')
# #### ** (4d) Predicted probability **
# #### In order to compute the log loss for the model we trained in Part (4a), we need to write code to generate predictions from this model. Write a function that computes the raw linear prediction from this logistic regression model and then passes it through a [sigmoid function](http://en.wikipedia.org/wiki/Sigmoid_function) $ \scriptsize \sigma(t) = (1+ e^{-t})^{-1} $ to return the model's probabilistic prediction. Then compute probabilistic predictions on the training data.
# #### Note that when incorporating an intercept into our predictions, we simply add the intercept to the value of the prediction obtained from the weights and features. Alternatively, if the intercept was included as the first weight, we would need to add a corresponding feature to our data where the feature has the value one. This is not the case here.
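# #### (Added note) The bounding of the raw prediction to [-20, 20] mentioned in the docstring below is purely numerical: it prevents `exp(-rawPrediction)` from overflowing for very large negative raw predictions, and since $ \scriptsize \sigma(20) \approx 1 - 2 \times 10^{-9} $ the clipping changes the returned probability only negligibly.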
# In[45]:
# TODO: Replace <FILL IN> with appropriate code
from math import exp # exp(-t) = e^-t
def getP(x, w, intercept):
"""Calculate the probability for an observation given a set of weights and intercept.
Note:
We'll bound our raw prediction between 20 and -20 for numerical purposes.
Args:
x (SparseVector): A vector with values of 1.0 for features that exist in this
observation and 0.0 otherwise.
w (DenseVector): A vector of weights (betas) for the model.
intercept (float): The model's intercept.
Returns:
float: A probability between 0 and 1.
"""
rawPrediction = intercept + w.dot(x)
# Bound the raw prediction value
rawPrediction = min(rawPrediction, 20)
rawPrediction = max(rawPrediction, -20)
return 1. / (1 + exp(-rawPrediction))
trainingPredictions = (OHETrainData
.map(lambda l: getP(l.features, model0.weights, model0.intercept)))
print trainingPredictions.take(5)
# In[46]:
# TEST Predicted probability (4d)
Test.assertTrue(np.allclose(trainingPredictions.sum(), 18135.4834348),
'incorrect value for trainingPredictions')
# #### ** (4e) Evaluate the model **
# #### We are now ready to evaluate the quality of the model we trained in Part (4a). To do this, first write a general function that takes as input a model and data, and outputs the log loss. Then run this function on the OHE training data, and compare the result with the baseline log loss.
# In[48]:
# TODO: Replace <FILL IN> with appropriate code
def evaluateResults(model, data):
"""Calculates the log loss for the data given the model.
Args:
model (LogisticRegressionModel): A trained logistic regression model.
data (RDD of LabeledPoint): Labels and features for each observation.
Returns:
float: Log loss for the data.
"""
return (data
.map(lambda l: computeLogLoss(getP(l.features , model.weights, model.intercept),
l.label))
.reduce(lambda x, y: x + y)) / data.count()
logLossTrLR0 = evaluateResults(model0, OHETrainData)
print ('OHE Features Train Logloss:\n\tBaseline = {0:.3f}\n\tLogReg = {1:.3f}'
.format(logLossTrBase, logLossTrLR0))
# In[49]:
# TEST Evaluate the model (4e)
Test.assertTrue(np.allclose(logLossTrLR0, 0.456903), 'incorrect value for logLossTrLR0')
# #### ** (4f) Validation log loss **
# #### Next, following the same logic as in Parts (4c) and (4e), compute the validation log loss for both the baseline and logistic regression models. Notably, the baseline model for the validation data should still be based on the label fraction from the training dataset.
# In[50]:
# TODO: Replace <FILL IN> with appropriate code
logLossValBase = (OHEValidationData
.map(lambda l: computeLogLoss(classOneFracTrain,l.label))
.reduce(lambda x, y: x + y)) / float(OHEValidationData.count())
logLossValLR0 = evaluateResults(model0, OHEValidationData)
print ('OHE Features Validation Logloss:\n\tBaseline = {0:.3f}\n\tLogReg = {1:.3f}'
.format(logLossValBase, logLossValLR0))
# In[51]:
# TEST Validation log loss (4f)
Test.assertTrue(np.allclose(logLossValBase, 0.527603), 'incorrect value for logLossValBase')
Test.assertTrue(np.allclose(logLossValLR0, 0.456957), 'incorrect value for logLossValLR0')
# #### **Visualization 2: ROC curve **
# #### We will now visualize how well the model predicts our target. To do this we generate a plot of the ROC curve. The ROC curve shows us the trade-off between the false positive rate and true positive rate, as we liberalize the threshold required to predict a positive outcome. A random model is represented by the dashed line.
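# #### (Added note) The code below builds the curve without library support: the validation points are sorted by predicted score in decreasing order, so after the first k points the cumulative sum of the 0/1 labels is the number of true positives obtained by thresholding just below the k-th score, and k minus that sum is the corresponding number of false positives; dividing by the total number of positives and negatives gives the two rates that are plotted.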
# In[52]:
labelsAndScores = OHEValidationData.map(lambda lp:
(lp.label, getP(lp.features, model0.weights, model0.intercept)))
labelsAndWeights = labelsAndScores.collect()
labelsAndWeights.sort(key=lambda (k, v): v, reverse=True)
labelsByWeight = np.array([k for (k, v) in labelsAndWeights])
length = labelsByWeight.size
truePositives = labelsByWeight.cumsum()
numPositive = truePositives[-1]
falsePositives = np.arange(1.0, length + 1, 1.) - truePositives
truePositiveRate = truePositives / numPositive
falsePositiveRate = falsePositives / (length - numPositive)
# Generate layout and plot data
fig, ax = preparePlot(np.arange(0., 1.1, 0.1), np.arange(0., 1.1, 0.1))
ax.set_xlim(-.05, 1.05), ax.set_ylim(-.05, 1.05)
ax.set_ylabel('True Positive Rate (Sensitivity)')
ax.set_xlabel('False Positive Rate (1 - Specificity)')
plt.plot(falsePositiveRate, truePositiveRate, color='#8cbfd0', linestyle='-', linewidth=3.)
plt.plot((0., 1.), (0., 1.), linestyle='--', color='#d6ebf2', linewidth=2.) # Baseline model
pass
# ### **Part 5: Reduce feature dimension via feature hashing**
# #### ** (5a) Hash function **
# #### As we just saw, using a one-hot-encoding featurization can yield a model with good statistical accuracy. However, the number of distinct categories across all features is quite large -- recall that we observed 233K categories in the training data in Part (3c). Moreover, the full Kaggle training dataset includes more than 33M distinct categories, and the Kaggle dataset itself is just a small subset of Criteo's labeled data. Hence, featurizing via a one-hot-encoding representation would lead to a very large feature vector. To reduce the dimensionality of the feature space, we will use feature hashing.
# #### Below is the hash function that we will use for this part of the lab. We will first use this hash function with the three sample data points from Part (1a) to gain some intuition. Specifically, run code to hash the three sample points using two different values for `numBuckets` and observe the resulting hashed feature dictionaries.
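# #### (Added note) Because `numBuckets` is much smaller than the number of distinct (featureID, value) pairs, two different raw features can hash to the same bucket; when that happens the bucket's value is simply the count of colliding features (e.g. 2.0) rather than 1.0. This loses a little information, but it fixes the feature dimension in advance, handles unseen categories automatically, and removes the need to store an OHE dictionary.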
# In[53]:
from collections import defaultdict
import hashlib
def hashFunction(numBuckets, rawFeats, printMapping=False):
"""Calculate a feature dictionary for an observation's features based on hashing.
Note:
Use printMapping=True for debug purposes and to better understand how the hashing works.
Args:
numBuckets (int): Number of buckets to use as features.
rawFeats (list of (int, str)): A list of features for an observation. Represented as
(featureID, value) tuples.
printMapping (bool, optional): If true, the mappings of featureString to index will be
printed.
Returns:
dict of int to float: The keys will be integers which represent the buckets that the
features have been hashed to. The value for a given key will contain the count of the
(featureID, value) tuples that have hashed to that key.
"""
mapping = {}
for ind, category in rawFeats:
featureString = category + str(ind)
mapping[featureString] = int(int(hashlib.md5(featureString).hexdigest(), 16) % numBuckets)
if(printMapping): print mapping
sparseFeatures = defaultdict(float)
for bucket in mapping.values():
sparseFeatures[bucket] += 1.0
return dict(sparseFeatures)
# Reminder of the sample values:
# sampleOne = [(0, 'mouse'), (1, 'black')]
# sampleTwo = [(0, 'cat'), (1, 'tabby'), (2, 'mouse')]
# sampleThree = [(0, 'bear'), (1, 'black'), (2, 'salmon')]
# In[54]:
# TODO: Replace <FILL IN> with appropriate code
# Use four buckets
sampOneFourBuckets = hashFunction(4, sampleOne, True)
sampTwoFourBuckets = hashFunction(4, sampleTwo, True)
sampThreeFourBuckets = hashFunction(4, sampleThree, True)
# Use one hundred buckets
sampOneHundredBuckets = hashFunction(100, sampleOne, True)
sampTwoHundredBuckets = hashFunction(100, sampleTwo, True)
sampThreeHundredBuckets = hashFunction(100, sampleThree, True)
print '\t\t 4 Buckets \t\t\t 100 Buckets'
print 'SampleOne:\t {0}\t\t {1}'.format(sampOneFourBuckets, sampOneHundredBuckets)
print 'SampleTwo:\t {0}\t\t {1}'.format(sampTwoFourBuckets, sampTwoHundredBuckets)
print 'SampleThree:\t {0}\t {1}'.format(sampThreeFourBuckets, sampThreeHundredBuckets)
# In[55]:
# TEST Hash function (5a)
Test.assertEquals(sampOneFourBuckets, {2: 1.0, 3: 1.0}, 'incorrect value for sampOneFourBuckets')
Test.assertEquals(sampThreeHundredBuckets, {72: 1.0, 5: 1.0, 14: 1.0},
'incorrect value for sampThreeHundredBuckets')
# #### ** (5b) Creating hashed features **
# #### Next we will use this hash function to create hashed features for our CTR datasets. First write a function that uses the hash function from Part (5a) with numBuckets = $ \scriptsize 2^{15} \approx 33K $ to create a `LabeledPoint` with hashed features stored as a `SparseVector`. Then use this function to create new training, validation and test datasets with hashed features. Hint: `parseHashPoint` is similar to `parseOHEPoint` from Part (3d).
# In[56]:
# TODO: Replace <FILL IN> with appropriate code
def parseHashPoint(point, numBuckets):
"""Create a LabeledPoint for this observation using hashing.
Args:
point (str): A comma separated string where the first value is the label and the rest are
features.
numBuckets: The number of buckets to hash to.
Returns:
LabeledPoint: A LabeledPoint with a label (0.0 or 1.0) and a SparseVector of hashed
features.
"""
label = point.split(',')[0]
return LabeledPoint(label, SparseVector(numBuckets, hashFunction(numBuckets, parsePoint(point))))
numBucketsCTR = 2 ** 15
hashTrainData = rawTrainData.map(lambda l: parseHashPoint(l, numBucketsCTR))
hashTrainData.cache()
hashValidationData = rawValidationData.map(lambda l: parseHashPoint(l, numBucketsCTR))
hashValidationData.cache()
hashTestData = rawTestData.map(lambda l: parseHashPoint(l, numBucketsCTR))
hashTestData.cache()
print hashTrainData.take(1)
# In[57]:
# TEST Creating hashed features (5b)
hashTrainDataFeatureSum = sum(hashTrainData
.map(lambda lp: len(lp.features.indices))
.take(20))
hashTrainDataLabelSum = sum(hashTrainData
.map(lambda lp: lp.label)
.take(100))
hashValidationDataFeatureSum = sum(hashValidationData
.map(lambda lp: len(lp.features.indices))
.take(20))
hashValidationDataLabelSum = sum(hashValidationData
.map(lambda lp: lp.label)
.take(100))
hashTestDataFeatureSum = sum(hashTestData
.map(lambda lp: len(lp.features.indices))
.take(20))
hashTestDataLabelSum = sum(hashTestData
.map(lambda lp: lp.label)
.take(100))
Test.assertEquals(hashTrainDataFeatureSum, 772, 'incorrect number of features in hashTrainData')
Test.assertEquals(hashTrainDataLabelSum, 24.0, 'incorrect labels in hashTrainData')
Test.assertEquals(hashValidationDataFeatureSum, 776,
'incorrect number of features in hashValidationData')
Test.assertEquals(hashValidationDataLabelSum, 16.0, 'incorrect labels in hashValidationData')
Test.assertEquals(hashTestDataFeatureSum, 774, 'incorrect number of features in hashTestData')
Test.assertEquals(hashTestDataLabelSum, 23.0, 'incorrect labels in hashTestData')
# #### ** (5c) Sparsity **
# #### Since we have 33K hashed features versus 233K OHE features, we should expect OHE features to be sparser. Verify this hypothesis by computing the average sparsity of the OHE and the hashed training datasets.
# #### Note that if you have a `SparseVector` named `sparse`, calling `len(sparse)` returns the total number of features, not the number of features with entries. `SparseVector` objects have the attributes `indices` and `values` that contain information about which features are nonzero. Continuing with our example, these can be accessed using `sparse.indices` and `sparse.values`, respectively.
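# #### (Added illustration) For instance, `SparseVector(5, {1: 1.0, 3: 1.0})` has `len(...)` equal to 5 (the total number of features) but only 2 entries in `.indices`, so the sparsity contributed by that single vector would be 2 / 5.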
# In[59]:
# TODO: Replace <FILL IN> with appropriate code
def computeSparsity(data, d, n):
"""Calculates the average sparsity for the features in an RDD of LabeledPoints.
Args:
data (RDD of LabeledPoint): The LabeledPoints to use in the sparsity calculation.
d (int): The total number of features.
n (int): The number of observations in the RDD.
Returns:
float: The average of the ratio of features in a point to total features.
"""
return (data
.map(lambda l: len(l.features.indices))
.reduce(lambda x, y: x + y)) / float(d*n)
averageSparsityHash = computeSparsity(hashTrainData, numBucketsCTR, nTrain)
averageSparsityOHE = computeSparsity(OHETrainData, numCtrOHEFeats, nTrain)
print 'Average OHE Sparsity: {0:.7e}'.format(averageSparsityOHE)
print 'Average Hash Sparsity: {0:.7e}'.format(averageSparsityHash)
# In[60]:
# TEST Sparsity (5c)
Test.assertTrue(np.allclose(averageSparsityOHE, 1.6717677e-04),
'incorrect value for averageSparsityOHE')
Test.assertTrue(np.allclose(averageSparsityHash, 1.1805561e-03),
'incorrect value for averageSparsityHash')
# #### ** (5d) Logistic model with hashed features **
# #### Now let's train a logistic regression model using the hashed features. Run a grid search to find suitable hyperparameters for the hashed features, evaluating via log loss on the validation data. Note: This may take a few minutes to run. Use `1` and `10` for `stepSizes` and `1e-6` and `1e-3` for `regParams`.
# In[61]:
numIters = 500
regType = 'l2'
includeIntercept = True
# Initialize variables using values from initial model training
bestModel = None
bestLogLoss = 1e10
# In[62]:
# TODO: Replace <FILL IN> with appropriate code
stepSizes = [1, 10]
regParams = [1e-6, 1e-3]
for stepSize in stepSizes:
for regParam in regParams:
model = (LogisticRegressionWithSGD
.train(hashTrainData, numIters, stepSize, regParam=regParam, regType=regType,
intercept=includeIntercept))
logLossVa = evaluateResults(model, hashValidationData)
print ('\tstepSize = {0:.1f}, regParam = {1:.0e}: logloss = {2:.3f}'
.format(stepSize, regParam, logLossVa))
if (logLossVa < bestLogLoss):
bestModel = model
bestLogLoss = logLossVa
print ('Hashed Features Validation Logloss:\n\tBaseline = {0:.3f}\n\tLogReg = {1:.3f}'
.format(logLossValBase, bestLogLoss))
# In[63]:
# TEST Logistic model with hashed features (5d)
Test.assertTrue(np.allclose(bestLogLoss, 0.4481683608), 'incorrect value for bestLogLoss')
# #### **Visualization 3: Hyperparameter heat map**
# #### We will now perform a visualization of an extensive hyperparameter search. Specifically, we will create a heat map where the brighter colors correspond to lower values of `logLoss`.
# #### The search was run using six step sizes and six values for regularization, which required the training of thirty-six separate models. We have included the results below, but omitted the actual search to save time.
# In[64]:
from matplotlib.colors import LinearSegmentedColormap
# Saved parameters and results. Eliminate the time required to run 36 models
stepSizes = [3, 6, 9, 12, 15, 18]
regParams = [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2]
logLoss = np.array([[ 0.45808431, 0.45808493, 0.45809113, 0.45815333, 0.45879221, 0.46556321],
[ 0.45188196, 0.45188306, 0.4518941, 0.4520051, 0.45316284, 0.46396068],
[ 0.44886478, 0.44886613, 0.44887974, 0.44902096, 0.4505614, 0.46371153],
[ 0.44706645, 0.4470698, 0.44708102, 0.44724251, 0.44905525, 0.46366507],
[ 0.44588848, 0.44589365, 0.44590568, 0.44606631, 0.44807106, 0.46365589],
[ 0.44508948, 0.44509474, 0.44510274, 0.44525007, 0.44738317, 0.46365405]])
numRows, numCols = len(stepSizes), len(regParams)
logLoss = np.array(logLoss)
logLoss.shape = (numRows, numCols)
fig, ax = preparePlot(np.arange(0, numCols, 1), np.arange(0, numRows, 1), figsize=(8, 7),
hideLabels=True, gridWidth=0.)
ax.set_xticklabels(regParams), ax.set_yticklabels(stepSizes)
ax.set_xlabel('Regularization Parameter'), ax.set_ylabel('Step Size')
colors = LinearSegmentedColormap.from_list('blue', ['#0022ff', '#000055'], gamma=.2)
image = plt.imshow(logLoss,interpolation='nearest', aspect='auto',
cmap = colors)
pass
# #### ** (5e) Evaluate on the test set **
# #### Finally, evaluate the best model from Part (5d) on the test set. Compare the resulting log loss with the baseline log loss on the test set, which can be computed in the same way that the validation log loss was computed in Part (4f).
# In[66]:
# TODO: Replace <FILL IN> with appropriate code
# Log loss for the best model from (5d)
OHETestData = rawTestData.map(lambda point: parseOHEPoint(point, ctrOHEDict, numCtrOHEFeats))
logLossTest = evaluateResults(bestModel, hashTestData)
# Log loss for the baseline model
logLossTestBaseline = (OHETestData
.map(lambda l :computeLogLoss(classOneFracTrain,l.label))
.reduce(lambda x, y: x + y)) / float(OHETestData.count())
print ('Hashed Features Test Log Loss:\n\tBaseline = {0:.3f}\n\tLogReg = {1:.3f}'
.format(logLossTestBaseline, logLossTest))
# In[67]:
# TEST Evaluate on the test set (5e)
Test.assertTrue(np.allclose(logLossTestBaseline, 0.537438),
'incorrect value for logLossTestBaseline')
Test.assertTrue(np.allclose(logLossTest, 0.455616931), 'incorrect value for logLossTest')
| [
"[email protected]"
] | |
20407bfda932d7e6b053febedd6a5e1883e14e76 | b3c070597742904f963f44414e9195511770520b | /venv/lib/python3.8/site-packages/openapi_client/models/first_last_name_origined_out.py | 2563afe3a3c76fa871e515944e62259fec07abc6 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | akshitgoyal/NLP-Research-Project | 7d98cf0bccd8fdfcc13a23e5f17fcc703aa4b565 | 6adf80cb7fa3737f88faf73a6e818da495b95ab4 | refs/heads/master | 2022-12-11T05:51:08.601512 | 2020-09-03T18:05:56 | 2020-09-03T18:05:56 | 270,881,124 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,677 | py | # coding: utf-8
"""
NamSor API v2
NamSor API v2 : endpoints to process personal names (gender, cultural origin or ethnicity) in all alphabets or languages. Use GET methods for small tests, but prefer POST methods for higher throughput (batch processing of up to 100 names at a time). Need something you can't find here? We have many more features coming soon. Let us know, we'll do our best to add it! # noqa: E501
OpenAPI spec version: 2.0.10
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class FirstLastNameOriginedOut(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'str',
'first_name': 'str',
'last_name': 'str',
'country_origin': 'str',
'country_origin_alt': 'str',
'countries_origin_top': 'list[str]',
'score': 'float',
'region_origin': 'str',
'top_region_origin': 'str',
'sub_region_origin': 'str',
'probability_calibrated': 'float',
'probability_alt_calibrated': 'float'
}
attribute_map = {
'id': 'id',
'first_name': 'firstName',
'last_name': 'lastName',
'country_origin': 'countryOrigin',
'country_origin_alt': 'countryOriginAlt',
'countries_origin_top': 'countriesOriginTop',
'score': 'score',
'region_origin': 'regionOrigin',
'top_region_origin': 'topRegionOrigin',
'sub_region_origin': 'subRegionOrigin',
'probability_calibrated': 'probabilityCalibrated',
'probability_alt_calibrated': 'probabilityAltCalibrated'
}
def __init__(self, id=None, first_name=None, last_name=None, country_origin=None, country_origin_alt=None, countries_origin_top=None, score=None, region_origin=None, top_region_origin=None, sub_region_origin=None, probability_calibrated=None, probability_alt_calibrated=None): # noqa: E501
"""FirstLastNameOriginedOut - a model defined in OpenAPI""" # noqa: E501
self._id = None
self._first_name = None
self._last_name = None
self._country_origin = None
self._country_origin_alt = None
self._countries_origin_top = None
self._score = None
self._region_origin = None
self._top_region_origin = None
self._sub_region_origin = None
self._probability_calibrated = None
self._probability_alt_calibrated = None
self.discriminator = None
if id is not None:
self.id = id
if first_name is not None:
self.first_name = first_name
if last_name is not None:
self.last_name = last_name
if country_origin is not None:
self.country_origin = country_origin
if country_origin_alt is not None:
self.country_origin_alt = country_origin_alt
if countries_origin_top is not None:
self.countries_origin_top = countries_origin_top
if score is not None:
self.score = score
if region_origin is not None:
self.region_origin = region_origin
if top_region_origin is not None:
self.top_region_origin = top_region_origin
if sub_region_origin is not None:
self.sub_region_origin = sub_region_origin
if probability_calibrated is not None:
self.probability_calibrated = probability_calibrated
if probability_alt_calibrated is not None:
self.probability_alt_calibrated = probability_alt_calibrated
@property
def id(self):
"""Gets the id of this FirstLastNameOriginedOut. # noqa: E501
:return: The id of this FirstLastNameOriginedOut. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this FirstLastNameOriginedOut.
:param id: The id of this FirstLastNameOriginedOut. # noqa: E501
:type: str
"""
self._id = id
@property
def first_name(self):
"""Gets the first_name of this FirstLastNameOriginedOut. # noqa: E501
:return: The first_name of this FirstLastNameOriginedOut. # noqa: E501
:rtype: str
"""
return self._first_name
@first_name.setter
def first_name(self, first_name):
"""Sets the first_name of this FirstLastNameOriginedOut.
:param first_name: The first_name of this FirstLastNameOriginedOut. # noqa: E501
:type: str
"""
self._first_name = first_name
@property
def last_name(self):
"""Gets the last_name of this FirstLastNameOriginedOut. # noqa: E501
:return: The last_name of this FirstLastNameOriginedOut. # noqa: E501
:rtype: str
"""
return self._last_name
@last_name.setter
def last_name(self, last_name):
"""Sets the last_name of this FirstLastNameOriginedOut.
:param last_name: The last_name of this FirstLastNameOriginedOut. # noqa: E501
:type: str
"""
self._last_name = last_name
@property
def country_origin(self):
"""Gets the country_origin of this FirstLastNameOriginedOut. # noqa: E501
Most likely country of Origin # noqa: E501
:return: The country_origin of this FirstLastNameOriginedOut. # noqa: E501
:rtype: str
"""
return self._country_origin
@country_origin.setter
def country_origin(self, country_origin):
"""Sets the country_origin of this FirstLastNameOriginedOut.
Most likely country of Origin # noqa: E501
:param country_origin: The country_origin of this FirstLastNameOriginedOut. # noqa: E501
:type: str
"""
self._country_origin = country_origin
@property
def country_origin_alt(self):
"""Gets the country_origin_alt of this FirstLastNameOriginedOut. # noqa: E501
Second best alternative : country of Origin # noqa: E501
:return: The country_origin_alt of this FirstLastNameOriginedOut. # noqa: E501
:rtype: str
"""
return self._country_origin_alt
@country_origin_alt.setter
def country_origin_alt(self, country_origin_alt):
"""Sets the country_origin_alt of this FirstLastNameOriginedOut.
Second best alternative : country of Origin # noqa: E501
:param country_origin_alt: The country_origin_alt of this FirstLastNameOriginedOut. # noqa: E501
:type: str
"""
self._country_origin_alt = country_origin_alt
@property
def countries_origin_top(self):
"""Gets the countries_origin_top of this FirstLastNameOriginedOut. # noqa: E501
List countries of Origin (top 10) # noqa: E501
:return: The countries_origin_top of this FirstLastNameOriginedOut. # noqa: E501
:rtype: list[str]
"""
return self._countries_origin_top
@countries_origin_top.setter
def countries_origin_top(self, countries_origin_top):
"""Sets the countries_origin_top of this FirstLastNameOriginedOut.
List countries of Origin (top 10) # noqa: E501
:param countries_origin_top: The countries_origin_top of this FirstLastNameOriginedOut. # noqa: E501
:type: list[str]
"""
self._countries_origin_top = countries_origin_top
@property
def score(self):
"""Gets the score of this FirstLastNameOriginedOut. # noqa: E501
Compatibility to NamSor_v1 Origin score value # noqa: E501
:return: The score of this FirstLastNameOriginedOut. # noqa: E501
:rtype: float
"""
return self._score
@score.setter
def score(self, score):
"""Sets the score of this FirstLastNameOriginedOut.
Compatibility to NamSor_v1 Origin score value # noqa: E501
:param score: The score of this FirstLastNameOriginedOut. # noqa: E501
:type: float
"""
self._score = score
@property
def region_origin(self):
"""Gets the region_origin of this FirstLastNameOriginedOut. # noqa: E501
Most likely region of Origin (based on countryOrigin ISO2 code) # noqa: E501
:return: The region_origin of this FirstLastNameOriginedOut. # noqa: E501
:rtype: str
"""
return self._region_origin
@region_origin.setter
def region_origin(self, region_origin):
"""Sets the region_origin of this FirstLastNameOriginedOut.
Most likely region of Origin (based on countryOrigin ISO2 code) # noqa: E501
:param region_origin: The region_origin of this FirstLastNameOriginedOut. # noqa: E501
:type: str
"""
self._region_origin = region_origin
@property
def top_region_origin(self):
"""Gets the top_region_origin of this FirstLastNameOriginedOut. # noqa: E501
Most likely region of Origin (based on countryOrigin ISO2 code) # noqa: E501
:return: The top_region_origin of this FirstLastNameOriginedOut. # noqa: E501
:rtype: str
"""
return self._top_region_origin
@top_region_origin.setter
def top_region_origin(self, top_region_origin):
"""Sets the top_region_origin of this FirstLastNameOriginedOut.
Most likely region of Origin (based on countryOrigin ISO2 code) # noqa: E501
:param top_region_origin: The top_region_origin of this FirstLastNameOriginedOut. # noqa: E501
:type: str
"""
self._top_region_origin = top_region_origin
@property
def sub_region_origin(self):
"""Gets the sub_region_origin of this FirstLastNameOriginedOut. # noqa: E501
Most likely region of Origin (based on countryOrigin ISO2 code) # noqa: E501
:return: The sub_region_origin of this FirstLastNameOriginedOut. # noqa: E501
:rtype: str
"""
return self._sub_region_origin
@sub_region_origin.setter
def sub_region_origin(self, sub_region_origin):
"""Sets the sub_region_origin of this FirstLastNameOriginedOut.
Most likely region of Origin (based on countryOrigin ISO2 code) # noqa: E501
:param sub_region_origin: The sub_region_origin of this FirstLastNameOriginedOut. # noqa: E501
:type: str
"""
self._sub_region_origin = sub_region_origin
@property
def probability_calibrated(self):
"""Gets the probability_calibrated of this FirstLastNameOriginedOut. # noqa: E501
:return: The probability_calibrated of this FirstLastNameOriginedOut. # noqa: E501
:rtype: float
"""
return self._probability_calibrated
@probability_calibrated.setter
def probability_calibrated(self, probability_calibrated):
"""Sets the probability_calibrated of this FirstLastNameOriginedOut.
:param probability_calibrated: The probability_calibrated of this FirstLastNameOriginedOut. # noqa: E501
:type: float
"""
self._probability_calibrated = probability_calibrated
@property
def probability_alt_calibrated(self):
"""Gets the probability_alt_calibrated of this FirstLastNameOriginedOut. # noqa: E501
:return: The probability_alt_calibrated of this FirstLastNameOriginedOut. # noqa: E501
:rtype: float
"""
return self._probability_alt_calibrated
@probability_alt_calibrated.setter
def probability_alt_calibrated(self, probability_alt_calibrated):
"""Sets the probability_alt_calibrated of this FirstLastNameOriginedOut.
:param probability_alt_calibrated: The probability_alt_calibrated of this FirstLastNameOriginedOut. # noqa: E501
:type: float
"""
self._probability_alt_calibrated = probability_alt_calibrated
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FirstLastNameOriginedOut):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
beb923b2521bb0f9e00e5a892115a68855650a54 | a4ea525e226d6c401fdb87a6e9adfdc5d07e6020 | /src/azure-cli/azure/cli/command_modules/servicebus/aaz/latest/servicebus/namespace/private_link_resource/_show.py | eae4030227ceaa104f1f40abb9b01954ee7cdcc9 | [
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] | permissive | Azure/azure-cli | 13340eeca2e288e66e84d393fa1c8a93d46c8686 | a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca | refs/heads/dev | 2023-08-17T06:25:37.431463 | 2023-08-17T06:00:10 | 2023-08-17T06:00:10 | 51,040,886 | 4,018 | 3,310 | MIT | 2023-09-14T11:11:05 | 2016-02-04T00:21:51 | Python | UTF-8 | Python | false | false | 6,208 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"servicebus namespace private-link-resource show",
)
class Show(AAZCommand):
"""List lists of resources that supports Privatelinks.
"""
_aaz_info = {
"version": "2022-10-01-preview",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.servicebus/namespaces/{}/privatelinkresources", "2022-10-01-preview"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
return self.build_paging(self._execute_operations, self._output)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.namespace_name = AAZStrArg(
options=["--namespace-name"],
help="The namespace name",
required=True,
fmt=AAZStrArgFormat(
max_length=50,
min_length=6,
),
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.PrivateLinkResourcesGet(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True)
next_link = self.deserialize_output(self.ctx.vars.instance.next_link)
return result, next_link
class PrivateLinkResourcesGet(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/privateLinkResources",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"namespaceName", self.ctx.args.namespace_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2022-10-01-preview",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.next_link = AAZStrType(
serialized_name="nextLink",
)
_schema_on_200.value = AAZListType()
value = cls._schema_on_200.value
value.Element = AAZObjectType()
_element = cls._schema_on_200.value.Element
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType()
properties = cls._schema_on_200.value.Element.properties
properties.group_id = AAZStrType(
serialized_name="groupId",
)
properties.required_members = AAZListType(
serialized_name="requiredMembers",
)
properties.required_zone_names = AAZListType(
serialized_name="requiredZoneNames",
)
required_members = cls._schema_on_200.value.Element.properties.required_members
required_members.Element = AAZStrType()
required_zone_names = cls._schema_on_200.value.Element.properties.required_zone_names
required_zone_names.Element = AAZStrType()
return cls._schema_on_200
class _ShowHelper:
"""Helper class for Show"""
__all__ = ["Show"]
| [
"[email protected]"
] | |
780226916dc06308a04f61617df5d245870357b4 | 30c4c80abb019a86f7830fd95745653994ef3862 | /2018/6/code.py | f29ec4acc510265e6cecf1b61448711e67591ff6 | [] | no_license | dmyers2020/adventofcode | 844a81572fa5522b385c68c1da9e6e228bc4df1c | 58c58b718fc33e4b12e1f2379e8ecac364778236 | refs/heads/master | 2021-10-08T06:10:19.484192 | 2018-12-08T22:17:08 | 2018-12-08T22:17:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,220 | py | # Advent of code Year 2018 Day 6 solution
# Author = Alexe Simon
# Date = December 2018
with open((__file__.rstrip("code.py")+"input.txt"), 'r') as input_file:
input = input_file.read().split("\n")
# By turning the problem around (starting from the coordinates and expanding through the territory) it was actually a lot more complicated to code but the end result is a lot more optimized and interesting.
class Coordinate:
def __init__ (self, x, y, id, territory):
self.x = x
self.y = y
self.id = id
self.infinite = False
self.size = 0
self.territory = territory
def recursive_expend(self, x, y, dist, going_up, going_right, state, dist_left):
if state and dist_left > 0:
if going_up :
self.recursive_expend(x, y+1, dist+1, going_up, going_right, self.territory.claim(x, y+1, self, dist+1), dist_left-1)
if not going_up :
self.recursive_expend(x, y-1, dist+1, going_up, going_right, self.territory.claim(x, y-1, self, dist+1), dist_left-1)
if going_right:
self.recursive_expend(x+1, y, dist+1, going_up, going_right, self.territory.claim(x+1, y, self, dist+1), dist_left-1)
if not going_right:
self.recursive_expend(x-1, y, dist+1, going_up, going_right, self.territory.claim(x-1, y, self, dist+1), dist_left-1)
def expend(self, max_dist = float('inf')):
self.recursive_expend(self.x+1, self.y, 1, False, True, self.territory.claim(self.x+1, self.y, self, 1), max_dist-1)
self.recursive_expend(self.x, self.y+1, 1, True, True, self.territory.claim(self.x, self.y+1, self, 1), max_dist-1)
self.recursive_expend(self.x-1, self.y, 1, True, False, self.territory.claim(self.x-1, self.y, self, 1), max_dist-1)
self.recursive_expend(self.x, self.y-1, 1, False, False, self.territory.claim(self.x, self.y-1, self, 1), max_dist-1)
class Territory:
def __init__(self, min_x, max_x, min_y, max_y):
self.distance = [[-1 for y in range(max_y-min_y+1)] for x in range(max_x-min_x+1)]
self.ownership = [[None for y in range(max_y-min_y+1)] for x in range(max_x-min_x+1)]
self.min_x = min_x
self.max_x = max_x
self.min_y = min_y
self.max_y = max_y
def claim(self, x, y, owner, dist):
if x <= self.max_x and x >= self.min_x and y >= self.min_y and y <= self.max_y:
if self.distance[x-self.min_x][y-self.min_y] == -1:
self.distance[x-self.min_x][y-self.min_y] = dist
self.ownership[x-self.min_x][y-self.min_y] = owner
owner.size += 1
return True
elif self.ownership[x-self.min_x][y-self.min_y] == owner:
if dist < self.distance[x-self.min_x][y-self.min_y]:
self.distance[x-self.min_x][y-self.min_y] = dist
return True
else:
return False
elif dist < self.distance[x-self.min_x][y-self.min_y]:
self.distance[x-self.min_x][y-self.min_y] = dist
if self.ownership[x-self.min_x][y-self.min_y] != None:
self.ownership[x-self.min_x][y-self.min_y].size -= 1
self.ownership[x-self.min_x][y-self.min_y] = owner
owner.size += 1
return True
elif dist == self.distance[x-self.min_x][y-self.min_y]:
if self.ownership[x-self.min_x][y-self.min_y] != None:
self.ownership[x-self.min_x][y-self.min_y].size -= 1
self.ownership[x-self.min_x][y-self.min_y] = None
return False
else:
return False
else:
return False
coordinates = [Coordinate(int(line[:line.find(',')]), int(line[line.find(',')+2:]), idx, None) for idx, line in enumerate(input)]
max_x = max(coordinates, key = lambda coord : coord.x).x
min_x = min(coordinates, key = lambda coord : coord.x).x
min_y = min(coordinates, key = lambda coord : coord.y).y
max_y = max(coordinates, key = lambda coord : coord.y).y
territory = Territory(min_x, max_x, min_y, max_y)
for coord in coordinates:
coord.territory = territory
coord.territory.claim(coord.x, coord.y, coord, 0)
for coord in coordinates:
coord.expend()
# set all 4 borders to infinity :
for i in range(max_x-min_x+1):
if territory.ownership[i][0] != None:
territory.ownership[i][0].infinite = True
if territory.ownership[i][max_y-min_y] != None:
territory.ownership[i][max_y-min_y].infinite = True
for i in range(max_y-min_y+1):
if territory.ownership[0][i] != None:
territory.ownership[0][i].infinite = True
if territory.ownership[max_x-min_x][i] != None:
territory.ownership[max_x-min_x][i].infinite = True
# Find biggest, not infinite
best_size = 0
for coord in coordinates:
if not coord.infinite:
if coord.size > best_size:
best_size = coord.size
print("Part One : "+ str(best_size))
# Quick sum of all manhattan distances abusing masks and lists instead of loops
def quick_man(x, y, x_sorted, y_sorted):
x_mask = [1 if x > i else -1 for i in x_sorted]
y_mask = [1 if y > i else -1 for i in y_sorted]
return sum(x_mask)*x + sum(y_mask)*y - sum([i*j for i,j in zip(x_sorted,x_mask)]) - sum([i*j for i,j in zip(y_sorted,y_mask)])
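# (Added note) This works because |x - xi| equals s*(x - xi) with s = +1 when x > xi and s = -1
# otherwise, so sum(|x - xi|) = sum(mask)*x - sum(mask*xi); quick_man applies that identity to the
# x and y coordinates separately and adds the two sums, avoiding an explicit loop over coordinates.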
x_sorted = sorted([coord.x for coord in coordinates])
y_sorted = sorted([coord.y for coord in coordinates])
total_good = 0
for i in range(min_x, max_x):
for j in range(min_y, max_y):
if quick_man(i, j, x_sorted, y_sorted) < 10000:
total_good += 1
print("Part Two : "+ str(total_good))
# if you want to print the areas distances, it's pretty beautiful
with open((__file__.rstrip("code.py")+"output.txt"), 'w+') as output_file:
for i in range(max_x-min_x+1):
for j in range(max_y-min_y+1):
output_file.write('{0:3d} '.format(territory.distance[i][j]))
output_file.write("\n") | [
"[email protected]"
] | |
d19143232beee0ca95b690e77300f2564b439978 | 91a1f6e5ea33772ffeba3d9540d68d5dd625f09c | /main.py | 0840e6b7f6a64afcfd843ea91a6aa4759343674d | [] | no_license | joaovieira23/email-bot | 0422a5d2be5ac9839d3a0ecbf818f9a9a17e467d | 694593ce1dc3119832de6fb2c3a986820eaf7f7a | refs/heads/master | 2023-02-06T10:24:49.245630 | 2020-12-22T18:12:30 | 2020-12-22T18:12:30 | 323,662,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,407 | py | import smtplib
import speech_recognition as sr
import pyttsx3
from email.message import EmailMessage
listener = sr.Recognizer()
engine = pyttsx3.init()
def talk(text):
engine.say(text)
engine.runAndWait()
def get_info():
try:
with sr.Microphone() as source:
print('Ouvindo...')
voice = listener.listen(source)
info = listener.recognize_google(voice)
print(info)
return info.lower()
except:
pass
def send_email(receiver, subject, message):
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login('[email protected]', 'senhafalsa')
email = EmailMessage()
email['From'] = '[email protected]'
email['To'] = receiver
email['Subject'] = subject
email.set_content(message)
server.send_message(email)
email_list = {
'black': '[email protected]',
'test': '[email protected]',
'programming': '[email protected]',
'lisa': '[email protected]',
'pink': '[email protected]'
}
def get_email_info():
talk('Para quem você deseja enviar o e-mail?')
name = get_info()
receiver = email_list[name]
print(receiver)
talk('Qual é o assunto do seu e-mail?')
subject = get_info()
talk('Diga-me o próximo em seu e-mail')
message = get_info()
send_email(receiver, subject, message)
talk('Ei preguiçoso. Seu email foi enviado')
talk('Quer enviar mais email?')
get_email_info() | [
"[email protected]"
] | |
77bd1762c4aaac19096157edc60a32d1f6d81374 | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/uribv6/db.py | b29be36943d31f0ffadcc44b5729663096da2e21 | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 4,200 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class Db(Mo):
"""
"""
meta = ClassMeta("cobra.model.uribv6.Db")
meta.moClassName = "uribv6Db"
meta.rnFormat = "db-%(type)s"
meta.category = MoCategory.REGULAR
meta.label = "Database"
meta.writeAccessMask = 0x8008421042001
meta.readAccessMask = 0x8008421042001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.uribv6.Route")
meta.childNamesAndRnPrefix.append(("cobra.model.uribv6.Route", "rt-"))
meta.parentClasses.add("cobra.model.uribv6.Dom")
meta.superClasses.add("cobra.model.l3.Db")
meta.superClasses.add("cobra.model.nw.Db")
meta.superClasses.add("cobra.model.nw.Conn")
meta.superClasses.add("cobra.model.rib.Db")
meta.superClasses.add("cobra.model.nw.GEp")
meta.superClasses.add("cobra.model.nw.Item")
meta.rnPrefixes = [
('db-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "name", "name", 16436, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 128)]
meta.props.add("name", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "type", "type", 17496, PropCategory.REGULAR)
prop.label = "Type"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.defaultValue = 1
prop.defaultValueStr = "rt"
prop._addConstant("adj", "adjacency-database", 3)
prop._addConstant("nh", "nexthop-database", 2)
prop._addConstant("rt", "route-database", 1)
meta.props.add("type", prop)
meta.namingProps.append(getattr(meta.props, "type"))
def __init__(self, parentMoOrDn, type, markDirty=True, **creationProps):
namingVals = [type]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
38df3e8b48e43618b3e5ab052a99c7efa76be283 | ae111b84c908a2a3483a9aded33aded9a3116494 | /final_project/tester.py | f698a6fbff5409ece29d262101d1f2a88a255b12 | [] | no_license | vojtob/personal.ud120-MachineLearning | 13aaaf23e0d5f3ba8606e400b37672966800f9a3 | 48a74937232f58e540d36c8829174aa581a191d5 | refs/heads/master | 2020-04-08T11:20:31.224158 | 2018-11-27T08:42:01 | 2018-11-27T08:42:01 | 159,302,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,515 | py | #!/usr/bin/pickle
""" a basic script for importing student's POI identifier,
and checking the results that they get from it
requires that the algorithm, dataset, and features list
be written to my_classifier.pkl, my_dataset.pkl, and
my_feature_list.pkl, respectively
that process should happen at the end of poi_id.py
"""
import pickle
import sys
from sklearn.cross_validation import StratifiedShuffleSplit
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
PERF_FORMAT_STRING = "\
\tAccuracy: {:>0.{display_precision}f}\tPrecision: {:>0.{display_precision}f}\t\
Recall: {:>0.{display_precision}f}\tF1: {:>0.{display_precision}f}\tF2: {:>0.{display_precision}f}"
RESULTS_FORMAT_STRING = "\tTotal predictions: {:4d}\tTrue positives: {:4d}\tFalse positives: {:4d}\
\tFalse negatives: {:4d}\tTrue negatives: {:4d}"
def test_classifier(clf, dataset, feature_list, folds = 1000):
data = featureFormat(dataset, feature_list, sort_keys = True)
labels, features = targetFeatureSplit(data)
cv = StratifiedShuffleSplit(labels, folds, random_state = 42)
true_negatives = 0
false_negatives = 0
true_positives = 0
false_positives = 0
for train_idx, test_idx in cv:
features_train = []
features_test = []
labels_train = []
labels_test = []
for ii in train_idx:
features_train.append( features[ii] )
labels_train.append( labels[ii] )
for jj in test_idx:
features_test.append( features[jj] )
labels_test.append( labels[jj] )
### fit the classifier using training set, and test on test set
clf.fit(features_train, labels_train)
predictions = clf.predict(features_test)
for prediction, truth in zip(predictions, labels_test):
if prediction == 0 and truth == 0:
true_negatives += 1
elif prediction == 0 and truth == 1:
false_negatives += 1
elif prediction == 1 and truth == 0:
false_positives += 1
elif prediction == 1 and truth == 1:
true_positives += 1
else:
print("Warning: Found a predicted label not == 0 or 1.")
print("All predictions should take value 0 or 1.")
print("Evaluating performance for processed predictions:")
break
try:
total_predictions = true_negatives + false_negatives + false_positives + true_positives
accuracy = 1.0*(true_positives + true_negatives)/total_predictions
precision = 1.0*true_positives/(true_positives+false_positives)
recall = 1.0*true_positives/(true_positives+false_negatives)
f1 = 2.0 * true_positives/(2*true_positives + false_positives+false_negatives)
f2 = (1+2.0*2.0) * precision*recall/(4*precision + recall)
        print(clf)
        print(PERF_FORMAT_STRING.format(accuracy, precision, recall, f1, f2, display_precision = 5))
        print(RESULTS_FORMAT_STRING.format(total_predictions, true_positives, false_positives, false_negatives, true_negatives))
print("")
except:
        print("Got a divide by zero when trying out:", clf)
        print("Precision or recall may be undefined due to a lack of true positive predictions.")
CLF_PICKLE_FILENAME = "my_classifier.pkl"
DATASET_PICKLE_FILENAME = "my_dataset.pkl"
FEATURE_LIST_FILENAME = "my_feature_list.pkl"
def dump_classifier_and_data(clf, dataset, feature_list):
with open(CLF_PICKLE_FILENAME, "w") as clf_outfile:
pickle.dump(clf, clf_outfile)
with open(DATASET_PICKLE_FILENAME, "w") as dataset_outfile:
pickle.dump(dataset, dataset_outfile)
with open(FEATURE_LIST_FILENAME, "w") as featurelist_outfile:
pickle.dump(feature_list, featurelist_outfile)
def load_classifier_and_data():
with open(CLF_PICKLE_FILENAME, "r") as clf_infile:
clf = pickle.load(clf_infile)
with open(DATASET_PICKLE_FILENAME, "r") as dataset_infile:
dataset = pickle.load(dataset_infile)
with open(FEATURE_LIST_FILENAME, "r") as featurelist_infile:
feature_list = pickle.load(featurelist_infile)
return clf, dataset, feature_list
def main():
### load up student's classifier, dataset, and feature_list
clf, dataset, feature_list = load_classifier_and_data()
### Run testing script
test_classifier(clf, dataset, feature_list)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
240469b64e179b7d6b65efba99fe6dfb5ea81a1e | faa0a02ec5de01ed031c6f10f9a30a184cc9dbdb | /ltebands/urls.py | 1000d4ed13fe7c8e409df5ff193019c4317229d1 | [] | no_license | favososo/PyScrapASUS | 54433ed5b844e6c33a9c383d418534c82426ef09 | 2df38201296a8b5f075fdec89d642f6888d3628d | refs/heads/master | 2021-08-14T09:50:18.506271 | 2017-11-15T08:46:33 | 2017-11-15T08:46:33 | 110,773,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,099 | py | """ltebands URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
admin.autodiscover()
from rest_framework.routers import DefaultRouter
from LTE import views
router = DefaultRouter()
router.register(r'bands', views.bandsViewSet)
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^api/', include(router.urls)),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^$', views.index, name='index'),
]
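# With the DefaultRouter registration above, and this file included under the
# "api/" prefix, the bands viewset is served at roughly these URLs
# (illustrative, following DefaultRouter's documented routing):
#   /api/               browsable API root
#   /api/bands/         list / create
#   /api/bands/<pk>/    retrieve / update / destroy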
| [
"[email protected]"
] | |
de04ff18aa543550c8a41b7c046605778e5f1f33 | 3d9caed98443a5f3dd68e26b169dc52c97a401d4 | /xmlcls/__init__.py | 6c75a86f84d9d5c0780f76937c67e578fc4228de | [
"MIT"
] | permissive | chsergey/xmlcls | 567436f48e392d7acc887d68fce1e204cc18acb8 | bddaa8612fcede767635c14f66d1add2893b9cad | refs/heads/master | 2020-03-08T15:24:26.562661 | 2018-07-06T07:20:16 | 2018-07-06T07:20:16 | 128,211,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88 | py | # -*- coding: utf-8 -*-
from .xml_elem import XMLElement
from .xml_file import XMLFile
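# Re-exporting both classes here lets callers import them straight from the
# package, e.g. (illustrative): from xmlcls import XMLFile, XMLElement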
| [
"[email protected]"
] | |
6e582ff2da36a22f18e22eb29dba3dabff92a0d8 | 37968f9d017dde05c231515abc713e8bffd74f1c | /watchlist/interface.py | 59925eae37770448ed0407b2eb4617abb1307e3d | [] | no_license | freedomskying/mysite | c03c08ea001e21da360a3ae4636dc414019d8ec8 | 93b3d4d5753e1019979a32a4d62d3ff532223793 | refs/heads/master | 2021-08-09T01:54:34.084463 | 2018-11-19T13:38:20 | 2018-11-19T13:38:20 | 143,217,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | class IdentifyResult:
record_name = ''
id_value = ''
id_type = ''
result = 0
def __init__(self, record_name, id_value, id_type, result):
self.record_name = record_name
self.id_value = id_value
self.id_type = id_type
self.result = result
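# Illustrative usage (field values and their meaning are made-up examples):
#   hit = IdentifyResult('John Doe', 'AB123456', 'passport', 1)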
| [
"[email protected]"
] | |
51b4d394824411e8488ff400df0a553116936ee9 | 9d3b8d5f45e5407f3275542cf5792fd2510abfe4 | /Chapter8-Practice/test_8.3.3.py | be0013a975a93a720f793d9350d06c6bec324a9c | [] | no_license | Beautyi/PythonPractice | 375767583870d894801013b775c493bbd3c36ebc | 9104006998a109dcab0848d5540fb963b20f5b02 | refs/heads/master | 2020-04-23T09:58:50.065403 | 2019-04-08T02:55:52 | 2019-04-08T02:55:52 | 171,088,504 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | #返回字典
def build_person(first_name, last_name):
    """Return a dictionary containing information about a person."""
person = {'first': first_name, 'last': last_name}
return person
musician = build_person('jimi', 'hendrix')
print(musician)
def build_person(first_name, last_name, age=''):
    """Return a dictionary containing information about a person."""
person = {'first': first_name, 'last': last_name, 'age': age}
return person
musician = build_person('jimi', 'hendrix', '27')
print(musician)
def build_person(first_name, last_name, age=''):
    """Return a dictionary containing information about a person."""
    person = {'first': first_name, 'last': last_name}
if age:
person['age'] = age
return person
musician = build_person('jimi', 'hendrix', age=27)
print(musician)
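# Expected output of the three print(musician) calls above (illustrative;
# key order may differ on older Python versions):
#   {'first': 'jimi', 'last': 'hendrix'}
#   {'first': 'jimi', 'last': 'hendrix', 'age': '27'}
#   {'first': 'jimi', 'last': 'hendrix', 'age': 27}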
| [
"[email protected]"
] |