blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 5 283 | content_id stringlengths 40 40 | detected_licenses sequencelengths 0 41 | license_type stringclasses 2 values | repo_name stringlengths 7 96 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 58 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 12.7k 662M ⌀ | star_events_count int64 0 35.5k | fork_events_count int64 0 20.6k | gha_license_id stringclasses 11 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 43 values | src_encoding stringclasses 9 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 5.88M | extension stringclasses 30 values | content stringlengths 7 5.88M | authors sequencelengths 1 1 | author stringlengths 0 73 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
def552ff40437456847e9a0eed95a3aeaeaacfba | 36cd2d04eafe969726a5db3a6ede37e3bcc3ec61 | /unzip/aws-landing-zone-add-on-config-deployer/lib/params.py | cc8281bb4c70e12b37236a214ead3bef588dc311 | [] | no_license | XHPSi/landing-zone | 48fbe8db39f7850eaa7fc530cfc7f8cd3d37c2f3 | 1687581c9f43eb6f32386836e6407826f95cdca9 | refs/heads/master | 2021-02-05T15:56:09.624317 | 2020-03-02T23:19:37 | 2020-03-02T23:19:37 | 243,799,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,516 | py | from lib.ssm import SSM
from lib.sts import STS
from lib.ec2 import EC2
from lib.kms import KMS
from lib.assume_role_helper import AssumeRole
from lib.helper import sanitize
from os import environ
import random
import string
import time
import inspect
class ParamsHandler(object):
def __init__(self, logger):
self.logger = logger
self.ssm = SSM(self.logger)
self.kms = KMS(self.logger)
self.assume_role = AssumeRole()
def _session(self, region, account_id):
# instantiate EC2 sessions
return EC2(self.logger, region, credentials=self.assume_role(self.logger, account_id))
def _extract_string(self, str, search_str):
return str[len(search_str):]
def _get_ssm_params(self, ssm_parm_name):
try:
return self.ssm.get_parameter(ssm_parm_name)
except Exception as e:
raise Exception("Missing SSM parameter value for: {} in the SSM Parameter Store.".format(ssm_parm_name))
def _get_kms_key_id(self):
alias_name = environ.get('kms_key_alias_name')
response = self.kms.describe_key(alias_name)
self.logger.debug(response)
key_id = response.get('KeyMetadata', {}).get('KeyId')
return key_id
def get_azs_from_member_account(self, region, qty, account, key_az=None):
"""gets a predefined quantity of (random) az's from a specified region
Args:
region (str): region name
qty: quantity of az's to return
account: account id of the member account
key_az: [optional] name of an SSM parameter used to cache and reuse the selected AZ list
Returns:
list: availability zone names
"""
try:
if key_az:
self.logger.info("Looking up values in SSM parameter:{}".format(key_az))
existing_param = self.ssm.describe_parameters(key_az)
if existing_param:
self.logger.info('Found existing SSM parameter, returning existing AZ list.')
return self.ssm.get_parameter(key_az)
if account is not None:
ec2 = self._session(region, account)
self.logger.info("Getting list of AZs in region: {} from account: {}".format(region, account))
return self._get_az(ec2, key_az, qty)
else:
self.logger.info("Creating EC2 Session in {} region".format(region))
ec2 = EC2(self.logger, region)
return self._get_az(ec2, key_az, qty)
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
def _get_az(self, ec2, key_az, qty):
# Get AZs
az_list = ec2.describe_availability_zones()
self.logger.info("_get_azs output: %s" % az_list)
random_az_list = ','.join(random.sample(az_list, qty))
description = "Contains random AZs selected by Landing Zone Solution"
if key_az:
self.ssm.put_parameter(key_az, random_az_list, description)
return random_az_list
def create_key_pair(self, account, region, param_key_material=None, param_key_fingerprint=None, param_key_name=None):
if param_key_name:
self.logger.info("Looking up values in SSM parameter:{}".format(param_key_name))
existing_param = self.ssm.describe_parameters(param_key_name)
if existing_param:
return self.ssm.get_parameter(param_key_name)
key_name = sanitize("%s_%s_%s_%s" % ('lz', account, region, time.strftime("%Y-%m-%dT%H-%M-%S")))
try:
ec2 = self._session(region, account)
# create EC2 key pair in member account
self.logger.info("Create key pair in the member account {} in region: {}".format(account, region))
response = ec2.create_key_pair(key_name)
# add key material and fingerprint in the SSM Parameter Store
self.logger.info("Adding Key Material and Fingerprint to SSM PS")
description = "Contains EC2 key pair asset created by Landing Zone Solution: " \
"EC2 Key Pair Custom Resource."
# Get Landing Zone KMS Key ID
key_id = self._get_kms_key_id()
if param_key_fingerprint:
self.ssm.put_parameter_use_cmk(param_key_fingerprint, response.get('KeyFingerprint'),
key_id, description)
if param_key_material:
self.ssm.put_parameter_use_cmk(param_key_material, response.get('KeyMaterial'),
key_id, description)
if param_key_name:
self.ssm.put_parameter(param_key_name, key_name, description)
return key_name
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
def random_password(self, length, key_password=None, alphanum=True):
"""Generates a random string, by default only including letters and numbers
Args:
length (int): length of string to generate
alphanum (bool): [optional] if False it will also include ';:=+!@#%^&*()[]{}' in the character set
"""
try:
response = '_get_ssm_secure_string_' + key_password
if key_password:
self.logger.info("Looking up values in SSM parameter:{}".format(key_password))
existing_param = self.ssm.describe_parameters(key_password)
if existing_param:
return response
additional = ''
if not alphanum:
additional = ';:=+!@#%^&*()[]{}'
chars = string.ascii_uppercase + string.ascii_lowercase + string.digits + additional
# Making sure the password has two numbers and symbols at the very least
password = ''.join(random.SystemRandom().choice(chars) for _ in range(length-4)) + \
''.join(random.SystemRandom().choice(string.digits) for _ in range(2)) + \
''.join(random.SystemRandom().choice(additional) for _ in range(2))
self.logger.info("Adding Random password to SSM PS")
description = "Contains random password created by Landing Zone Solution"
if key_password:
key_id = self._get_kms_key_id()
self.ssm.put_parameter_use_cmk(key_password, password, key_id, description)
return response
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
def update_params(self, params_in, account = None, region = None, substitute_ssm_values = True):
"""
Args:
params_in (list): Python List of dict of input params e.g.
[{
"ParameterKey": "LoggingAccountId",
"ParameterValue": "$[alfred_ssm_/org/member/logging/account_id]"
},{
"ParameterKey": "foo",
"ParameterValue": "bar"
}]
Return:
params_out (dict): Python dict of output params e.g.
{
"LoggingAccountId": "${AWS::AccountId}",
"foo": "bar"
}
"""
try:
self.logger.info("params in : {}".format(params_in))
params_out = {}
for param in params_in:
key = param.get("ParameterKey")
value = param.get("ParameterValue")
if not isinstance(value, list):
if value.startswith('$[') and value.endswith(']'):
# Apply transformations
keyword = value[2:-1]
# Check if supported keyword e.g. alfred_ssm_, alfred_genaz_, alfred_getaz_, alfred_genuuid, etc.
if keyword.startswith('alfred_ssm_'):
ssm_param_name = self._extract_string(keyword, 'alfred_ssm_')
if ssm_param_name:
# If this flag is True, it will replace the SSM parameter name i.e. /org/member/ss/directory-name with its
# value i.e. example, whereas if its False, it will leave the parameter name as-is
if substitute_ssm_values:
value = self._get_ssm_params(ssm_param_name)
else:
raise Exception("Missing SSM parameter name for: {} in the parameters JSON file.".format(key))
elif keyword.startswith('alfred_genkeypair'):
keymaterial_param_name = None
keyfingerprint_param_name = None
keyname_param_name = None
ssm_parameters = param.get('ssm_parameters', [])
if type(ssm_parameters) is list:
for ssm_parameter in ssm_parameters:
val = ssm_parameter.get('value')[2:-1]
if val.lower() == 'keymaterial':
keymaterial_param_name = ssm_parameter.get('name')
elif val.lower() == 'keyfingerprint':
keyfingerprint_param_name = ssm_parameter.get('name')
elif val.lower() == 'keyname':
keyname_param_name = ssm_parameter.get('name')
value = self.create_key_pair(account, region, keymaterial_param_name, keyfingerprint_param_name, keyname_param_name)
elif keyword.startswith('alfred_genpass_'):
sub_string = self._extract_string(keyword, 'alfred_genpass_')
if sub_string:
pw_length = int(sub_string)
else:
pw_length = 8
password_param_name = None
ssm_parameters = param.get('ssm_parameters', [])
if type(ssm_parameters) is list:
for ssm_parameter in ssm_parameters:
val = ssm_parameter.get('value')[2:-1]
if val.lower() == 'password':
password_param_name = ssm_parameter.get('name')
value = self.random_password(pw_length, password_param_name, False)
elif keyword.startswith('alfred_genaz_'):
sub_string = self._extract_string(keyword, 'alfred_genaz_')
if sub_string:
no_of_az = int(sub_string)
else:
no_of_az = 2
az_param_name = None
ssm_parameters = param.get('ssm_parameters', [])
if type(ssm_parameters) is list:
for ssm_parameter in ssm_parameters:
val = ssm_parameter.get('value')[2:-1]
if val.lower() == 'az':
az_param_name = ssm_parameter.get('name')
value = self.get_azs_from_member_account(region, no_of_az, account, az_param_name)
else:
value = keyword
params_out.update({key: value})
self.logger.info("params out : {}".format(params_out))
return params_out
except Exception as e:
message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
self.logger.exception(message)
raise
| [
"[email protected]"
] | |
2d6a5abed2c76cfec59eb17ce5607c81c9fe669b | 0d22b1231fc1d66b0bcbda463a723dc44e1cd628 | /lesson_002/04_my_family.py | a1e7a012016ba479f00f52bf374c8691a6705209 | [] | no_license | Vladis90/pythonProject | 03133c59a77d2bbf1c20321f19fd8fbd970d4d2c | e3a306034e20fd7985b3e28b951c249306b4f5ee | refs/heads/master | 2023-08-19T19:50:32.046744 | 2021-10-29T16:01:35 | 2021-10-29T16:01:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 944 | py | # -*- coding: utf-8 -*-
# Create the lists:
# my family (at least 3 members; there are also grandmothers and grandfathers, if need be)
my_family = ['father', 'mother', 'son']
# a list of lists with the approximate height of each family member
my_family_height = [['father', 183], ['mother', 165], ['son', 95]]
# Print the father's height to the console in the format:
# Father's height - XX cm
print("Father's height -", my_family_height[0][1], 'cm')
# Print the total height of the family as the sum of all members' heights:
# Total height of my family - XX cm
sum_height = sum(height for _, height in my_family_height)
print('Total height of my family -', sum_height, 'cm')
| [
"[email protected]"
] | |
fd3c08bf7ff6f7e08533bb9129c270b01118af5b | 565c69222d1736f98cf83c1a4e2d88416091c0cf | /tests/test_sort_complex_number.py | 1277de1278615bff19345d2f440b4cf6729cf30d | [] | no_license | lnarasim/250_problems | 466e9ac0a8845eb83c275591992552c4c143645e | 561efe86f06aad8b2ce4b7659323be4d730bde3d | refs/heads/master | 2022-11-05T15:24:31.493816 | 2020-06-04T16:56:46 | 2020-06-04T16:56:46 | 264,413,351 | 0 | 1 | null | 2020-06-04T16:56:48 | 2020-05-16T10:33:28 | Python | UTF-8 | Python | false | false | 1,170 | py | from pyproblems.complex_number_sorter import sort_complex_numbers
import pytest
def test_sort_complex_number():
assert sort_complex_numbers(4+5j, 5+8j,3j,4) == (3j, (4+5j), 4, (5+8j))
assert sort_complex_numbers(1,2,4,5) == (1,2,4,5)
assert sort_complex_numbers(1+4j,2j,4.5 + 3j,5j) == (2j,5j,1+4j,4.5+3j)
assert sort_complex_numbers(1+2j,1.5+9j,2.5+7j,2+4j,1+8j) == (1+2j,1+8j,1.5+9j,2+4j,2.5+7j)
assert sort_complex_numbers(-1-2j,4-5j,3j) == (-1-2j,3j,4-5j)
assert sort_complex_numbers() == ()
def test_sort_complex_number_errors_1():
with pytest.raises(TypeError):
sort_complex_numbers(True,5+8j,3j,4)
def test_sort_complex_number_errors_2():
with pytest.raises(TypeError):
sort_complex_numbers(False,5+8j,-2+3j,4-6j)
def test_sort_complex_number_errors_3():
with pytest.raises(TypeError):
sort_complex_numbers([True,5+8j,3j,4])
def test_sort_complex_number_errors_4():
with pytest.raises(TypeError):
sort_complex_numbers((1+2j,1.5+9j),[2.5+7j,2+4j],1+8j)
def test_sort_complex_number_errors_5():
with pytest.raises(TypeError):
sort_complex_numbers({-1-2j,4-5j,3j})
| [
"[email protected]"
] | |
90c673e500897ed1e29191a096b9044e21532118 | 9af3dc963bf937b9320261eb74874661d97caa40 | /client.py | 8c27c1a8f741c22df535d6072988de2fe2728e63 | [] | no_license | Camp-Butterfly/backendAPI | 85ee68345908dbf199e2b8cc4b476fe49aca7215 | bf9ca3cb687ef050d5dca10a3ec77787bdeb13c2 | refs/heads/locally-working | 2022-12-10T17:12:39.521402 | 2019-12-17T20:36:18 | 2019-12-17T20:36:18 | 228,474,526 | 0 | 0 | null | 2022-12-08T03:19:07 | 2019-12-16T20:57:19 | Python | UTF-8 | Python | false | false | 2,718 | py | from flask import Flask
from flask import request
from flask_cors import CORS
from flask_cors import cross_origin
#from gevent.pywsgi import WSGIServer
import grpc
import numpy as np
import requests
import tensorflow as tf
import os
import base64
import io
import PIL
import json
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
from tensorflow.keras.preprocessing import image
from PIL import Image
app = Flask(__name__)
CORS(app, resources={r"/api/*": {"origins": "*"}})
app.config['CORS_HEADERS'] = 'Content-Type'
@app.route('/api/v1/model', methods=['POST'])
@cross_origin()
def image_post():
#get base-64 from json object
test = request.get_json(force=True)
print(test)
img_c = test['image_content']
#preprocessing for the base64-encoded image: replicate what
#image.load_img does => open the file, resize to the target size, then map to a Keras array
###
img_c = base64.b64decode(img_c)
buf = io.BytesIO(img_c)
img = Image.open(buf)
img = img.resize([150,150])
###
img_tensor = image.img_to_array(img)
img_tensor = np.expand_dims(img_tensor, axis=0)
data = img_tensor
print(data)
#instantiate request
channel = grpc.insecure_channel('35.193.112.218:8500')
grpc.channel_ready_future(channel).result()
# create variable for service that sends object to channel
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
# assign values to props of request
req = predict_pb2.PredictRequest()
req.model_spec.name = 'model'
req.model_spec.signature_name = 'serving_default'
req.inputs['conv2d_input'].CopyFrom(
tf.make_tensor_proto(data,shape=[1,150,150,3])
)
#make request to docker image container
result = stub.Predict(req,10.0)
#response from model as tensorflow array
floats = np.array(list(result.outputs['dense_1'].float_val))
#empty response catch
#if(not floats):
# max_ = 4
#if(not floats.argmax()):
# max_ = 4
#else:
max_ = floats.argmax()
print("\n")
print(floats)
print("\n")
print(max_)
print("\n")
# convert numpy integer to json; response to React app
res = json.dumps(int(max_))  # cast numpy int64 to a plain int so json can serialize it
return res
@app.route("/", methods=['GET'])
@cross_origin()
def helloWorld():
return "Hello, cross-origin-world!"
#@app.after_request
#def after_request(response):
# response.headers.add('Access-Control-Allow-Origin', '*')
# response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
# response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')
# return response
#if __name__=='__main__':
app.run(host='146.95.184.180', port=5000)
#http_server = WSGIServer(('146.95.184.180', 5000), app)
#http_server.serve_forever()
| [
"[email protected]"
] | |
480042958ddbf7fdce1f8de076f6e2e1849f75b3 | 1280843d603d367e589277fac54f571c0cb3a1e6 | /ML0101_Kmeans_clustering.py | 8aab80792065cd91806ab8813a3d3b4e89c6595d | [] | no_license | m-mgh/Project_DataScience | 8afa6b4fd61959e73a37862ecc927150b95cdabc | d674446d8615c45e309e95ad9e611892ea41ddc3 | refs/heads/master | 2023-01-03T19:07:31.169800 | 2020-10-28T02:18:05 | 2020-10-28T02:18:05 | 280,716,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,262 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 27 23:12:13 2020
@author: Mercedeh_Mgh
"""
import random
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
#creating our own random data set
#first we need to creat a random seed
np.random.seed(0)
#generate random clusters of points. n_samples defines the number of points.centers is the number of centers to be generated or location of fixed centers.cluster_std is the standard deviation of the clusters.
X, y=make_blobs(n_samples=5000, centers=[[4,4],[-2,-1],[2,-3],[1,1]],cluster_std=0.9)
plt.scatter(X[:,0], X[:,1], marker='.')
#parameters of kmeans class of sklearn.cluster are init which is the method of selecting initial centroids, n_clusters which is the number of centroids/clusters, and n-init which is the number of times the algorithm will run with different centroid points
k_means=KMeans(n_clusters=4,init="k-means++",n_init=12)
k_means.fit(X)
#get labels for each point
k_means_labels=k_means.labels_
print(k_means_labels)
#get coordinates of centroids
k_means_cluster_centers=k_means.cluster_centers_
print(k_means_cluster_centers)
# plotting
fig=plt.figure(figsize=(6,4))
# Colors uses a color map, which will produce an array of colors based on the number of labels there are. We use set(k_means_labels) to get the unique labels.
colors=plt.cm.Spectral(np.linspace(0,1,len(set(k_means_labels))))
ax=fig.add_subplot(1,1,1)
# For loop that plots the data points and centroids. k will range from 0-3, matching the possible clusters each data point can be in.
for k, col in zip(range(len([[4,4],[-2,-1],[2,-3],[1,-1]])),colors):
# Create a list of all data points, where the data poitns that are in the cluster (ex. cluster 0) are labeled as true, else they are labeled as false.
my_members=(k_means_labels==k)
# Define the centroid, or cluster center.
cluster_center=k_means_cluster_centers[k]
# Plots the datapoints with color col.
ax.plot(X[my_members,0],X[my_members,1],'w',markerfacecolor=col, marker='.')
# Plots the centroids with specified color, but with a darker outline
ax.plot(cluster_center[0],cluster_center[1],'o',markerfacecolor=col,markeredgecolor='k',markersize=6)
# Title of the plot
ax.set_title('KMeans')
# Remove x-axis ticks
ax.set_xticks(())
# Remove y-axis ticks
ax.set_yticks(())
# Show the plot
plt.show()
#running KMeans on real data set next
cust_df=pd.read_csv(r'https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/Cust_Segmentation.csv')
print(cust_df.head())
print(cust_df.columns)
print(cust_df['Edu'].unique())
print(cust_df['Address'].unique())
#because KMeans is not applicable to categorical variables- because euclidean distance is not meaningful for discrete variables- we need to drop the feature 'Address' which is categorical.
df=cust_df.drop('Address',axis=1)
print(df.head(5))
#next we normalize the data over the std. "Normalization is a statistical method that helps mathematical-based algorithms to interpret features with different magnitudes and distributions equally.We use StandardScaler() to normalize our dataset."
from sklearn.preprocessing import StandardScaler
X=df.values[:,1:]
X=np.nan_to_num(X)
Clus_dataSet=StandardScaler().fit_transform(X)
print(Clus_dataSet)
#apply Kmeans clustering algorithm to the preprocessed dataset
ClusterNum=3
k_means=KMeans(n_clusters=ClusterNum,init='k-means++',n_init=12)
k_means.fit(X)
labels=k_means.labels_
print (labels)
# assign labels to each row
df['clus_km']=labels
print(df.head(5))
#check centroids by averaging the features in each cluster
print(df.groupby('clus_km').mean())
#check distribution of customers based on age and income through plotting
area=np.pi*(X[:,1])**2
plt.scatter(X[:,0],X[:,3], s=area,c=labels.astype(np.float), alpha=0.5)
plt.xlabel('age',fontsize=18)
plt.ylabel('income',fontsize=16)
plt.show()
from mpl_toolkits.mplot3d import Axes3D
fig=plt.figure(1,figsize=(8,6))
plt.clf()
ax=Axes3D(fig,rect=[0,0,0.95,1],elev=48,azim=134)
plt.cla()
ax.set_xlabel('education')
ax.set_ylabel('age')
ax.set_zlabel('income')
ax.scatter(X[:,1],X[:,0],X[:,3],c=labels.astype(np.float))
| [
"[email protected]"
] | |
d8e06bb45fd1f90be90bb45e0c0cc52f227b3187 | 551b75f52d28c0b5c8944d808a361470e2602654 | /huaweicloud-sdk-eps/huaweicloudsdkeps/v1/model/link.py | a9a92750cec83aea4939f5cad6e9fa7a51be5167 | [
"Apache-2.0"
] | permissive | wuchen-huawei/huaweicloud-sdk-python-v3 | 9d6597ce8ab666a9a297b3d936aeb85c55cf5877 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | refs/heads/master | 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 | NOASSERTION | 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null | UTF-8 | Python | false | false | 3,044 | py | # coding: utf-8
import pprint
import re
import six
class Link:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'href': 'str',
'rel': 'str'
}
attribute_map = {
'href': 'href',
'rel': 'rel'
}
def __init__(self, href=None, rel=None):
"""Link - a model defined in huaweicloud sdk"""
self._href = None
self._rel = None
self.discriminator = None
self.href = href
self.rel = rel
@property
def href(self):
"""Gets the href of this Link.
The URL of the API.
:return: The href of this Link.
:rtype: str
"""
return self._href
@href.setter
def href(self, href):
"""Sets the href of this Link.
The URL of the API.
:param href: The href of this Link.
:type: str
"""
self._href = href
@property
def rel(self):
"""Gets the rel of this Link.
The link relation (e.g. "self").
:return: The rel of this Link.
:rtype: str
"""
return self._rel
@rel.setter
def rel(self, rel):
"""Sets the rel of this Link.
self。
:param rel: The rel of this Link.
:type: str
"""
self._rel = rel
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Link):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
f5a23884d1ffe0ba6d5609805a5f59ad9e44b1d7 | eca7e12e50032db0fe77703449d8ba2247a9a75f | /spliter.py | 8693d1f914405e3b2726ce43c8cb93a360ef39f7 | [] | no_license | doannamthai/cpsc501-machine-learning | d44f563b010e7b54b16b6bffbc895fe094815a67 | 765d2cc7068d1a523a4c4465e878a39d3aad4cf6 | refs/heads/master | 2020-09-21T20:26:11.804869 | 2019-12-07T01:53:53 | 2019-12-07T01:53:53 | 224,916,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | import pandas as pd
import numpy as np
df = pd.read_csv('heart.csv')
msk = np.random.rand(len(df)) <= 0.8
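# Boolean mask gives an approximate (not exact) 80/20 split; seed numpy first (np.random.seed) for reproducibility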
train = df[msk]
test = df[~msk]
train.to_csv('heart_train.csv')
test.to_csv('heart_test.csv') | [
"[email protected]"
] | |
2e445e4f56c622f6f5d41a6de407c6c9d92f5b20 | 83b8b30ebb633eecd29ca0a7a20cc43a293c9333 | /tests/basics/subclass_native2_list.py | 9ad0b77ef6dd1c7659097492eec0ebb77099b017 | [
"MIT",
"GPL-1.0-or-later"
] | permissive | adafruit/circuitpython | 430ec895149d1eb814b505db39b4977a35ee88a7 | 506dca71b0cbb7af749bb51f86b01021db5483b3 | refs/heads/main | 2023-08-21T16:30:46.781068 | 2023-08-20T00:39:44 | 2023-08-20T00:39:44 | 66,166,069 | 3,806 | 1,560 | MIT | 2023-09-14T19:23:51 | 2016-08-20T20:10:40 | C | UTF-8 | Python | false | false | 587 | py | class Base1:
def __init__(self, *args):
print("Base1.__init__", args)
class Clist1(Base1, list):
pass
a = Clist1()
print(len(a))
# Not compliant - list assignment should happen in list.__init__, which is not called
# because there's Base1.__init__, but we assign in list.__new__
#a = Clist1([1, 2, 3])
#print(len(a))
print("---")
class Clist2(list, Base1):
pass
# Not compliant - should call list.__init__, but we don't have it
#a = Clist2()
#print(len(a))
# Not compliant - should call list.__init__, but we don't have it
#a = Clist2([1, 2, 3])
#print(len(a))
| [
"[email protected]"
] | |
8b1c1de589cf6f3b813d6b93b2000fe3d8819ce0 | 83dc061068ea8f83038d78178a04268e6b4d0e0d | /app.py | a153a650eb8e76f15b6552d77f4b4b009fe8da78 | [] | no_license | sanghunkang/scenario-autocompletion-service | 20a31c09c01cf8bcd42e8297d238b1afb2c3f157 | 2f42f9d279c15d6a163abeda03e963232b73b5c8 | refs/heads/master | 2023-01-14T17:06:11.497781 | 2019-12-23T14:20:24 | 2019-12-23T14:20:24 | 225,888,979 | 0 | 0 | null | 2023-01-05T03:16:39 | 2019-12-04T14:41:51 | JavaScript | UTF-8 | Python | false | false | 1,132 | py | from flask import Flask
from flask_api import status
from data_saver import save_data
from model_functions import Predictor
from data_management import DataManager
app = Flask(__name__)
# Load all data needed to provide service
predictor = Predictor(model_name=config["dataset_name"]) # TODO: Add env reader
# Routes
@app.route("/")
def hello_world():
return "Hello, World!"
@app.route("/api/predict")
def predict():
try:
# terminate and response when one of action has reached
prediction, reasoning = predictor.predict_next_action()
# Pack something with action
# resp = make_response("Record not found", status.HTTP_400_BAD_REQUEST)
# resp.headers["X-Something"] = "A value"
return response
except:
response = make_response("INTERNAL SERVER ERROR", status.HTTP_500_INTERNAL_SERVER_ERROR)
return response
@app.route("/api/add_record")
def add_record():
try:
save_data()
return "ok"
except:
response = make_response("INTERNAL SERVER ERROR", status.HTTP_500_INTERNAL_SERVER_ERROR)
return response
| [
"[email protected]"
] | |
5a66a6cfa1ee27858fb8d075bb59744127503cf4 | a64a465700bbae8443ff3b38c45e6f37b89e2f59 | /overview/admin.py | a3e0735ba663daf92b009f7e9db2c312af3c3c7e | [] | no_license | Bencbabcock/4dafuture-website | 8debcdc8bf9bd8dab7407b976bced1fe7b7213bc | 73206b916699d9a1398e6af95036477c7fdeb236 | refs/heads/main | 2023-03-12T15:35:41.297864 | 2021-03-05T16:00:46 | 2021-03-05T16:00:46 | 344,527,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | from django.contrib import admin
from .models import Announcement
# Register your models here.
admin.site.register(Announcement)
| [
"bencbabcock01"
] | bencbabcock01 |
1943cb6e60e864e2e178eb3f9d8f20d70a05a0e5 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/securitydevops/azure-mgmt-securitydevops/generated_samples/azure_dev_ops_connector_get.py | c2e7527e4a6368c1b2ba061346255a98ba052933 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,627 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.securitydevops import MicrosoftSecurityDevOps
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-securitydevops
# USAGE
python azure_dev_ops_connector_get.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = MicrosoftSecurityDevOps(
credential=DefaultAzureCredential(),
subscription_id="00000000-0000-0000-0000-000000000000",
)
response = client.azure_dev_ops_connector.get(
resource_group_name="westusrg",
azure_dev_ops_connector_name="testconnector",
)
print(response)
# x-ms-original-file: specification/securitydevops/resource-manager/Microsoft.SecurityDevOps/preview/2022-09-01-preview/examples/AzureDevOpsConnectorGet.json
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
027ecc9822a4c10c051d49ba1e8e6de3e57e3d7e | bda338cf60a78da84a617f2b4bcaf34cca43d387 | /app.py | a8dcc9aed87c0a0d80af5f6e5ac6973dd6fae018 | [] | no_license | abitb/snacks-flask | 02189dc36033f8a6a87fd9047ad5c32488485e29 | 32658e63d545f49a0057c567dc98618fa781402f | refs/heads/master | 2021-04-15T17:11:31.860265 | 2018-03-27T03:22:39 | 2018-03-27T03:22:39 | 126,539,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,446 | py | from datetime import date
from flask import Flask, render_template, request, session, redirect, url_for, flash
from config import config
import webservice_client
import model
from forms import *
# Default port is 5000.
app = Flask(__name__)
# Take configuration from config.py.
app.config.from_object(config["development"])
# Set sqlite database location
model.Models.DB_FILE = app.config["DATABASE_URI"]
webservice_client.APIKEY = app.config["API_KEY"]
@app.route("/", methods=["GET","POST"])
def index():
# Make sure any user who can see the pages have provided their nerdery email.
if "email" not in session:
return redirect(url_for("register"))
print session["email"]
# Initialize container for keyword args to be passed to the view
snacks = {}
# Initialize model for this user
model_vote = model.Votes(session["email"])
# The allowed vote count left for user is need for both GET and POST
allowed_vote = model_vote.get_allowed_votes()
# When user request to view the page, get data to display.
if request.method == "GET":
resp = webservice_client.get_snacks_from_web_service()
# When web service is down, redirect.
if not resp:
return redirect(url_for("no_ws"))
today = date.today()
always_purchased, optional_snacks = webservice_client.separate_optional_snacks(resp)
suggested_snacks = model_vote.get_suggestion(year=today.year, month=today.month)
if len(suggested_snacks) == 0:
snacks["error_no_suggestion"] = "Please suggest some snacks!"
# Use optional snacks from webservice to create vote form
snacks_pruchase_date = {s["name"]: s["lastPurchaseDate"] for s in optional_snacks}
# Dynamically add fields (VoteSnackForm's class attribute) corresponding to each snack
VoteSnackForm.add_dynamic_fields(suggested_snacks)
form = VoteSnackForm()
# Get this month's suggestion vote
ranked_snacks = model_vote.get_tally(year=today.year, month=today.month)
snacks.update({
"allowed_vote": allowed_vote,
"always_purchased": always_purchased,
"ranked_snacks": ranked_snacks,
"snacks_pruchase_date": snacks_pruchase_date,
"form": form
})
# GET will render view
return render_template("pages/index.html", **snacks)
# When user submitted vote, process the interaction.
if request.method == "POST":
form = VoteSnackForm()
if form.validate():
# Get votes from user input
votes = []
for field in form:
if "snack" in field.name and field.data == True:
votes.append(field.label.text)
# Success: pass all validations and record the votes
if len(votes) <= allowed_vote:
model_vote.register_votes(votes)
# Error1:
else:
flash("You exceed the maximum allowed votes for this month.", "error_vote")
# Error2: Form input is not valid, not doing anything to database
else:
print form.errors
# POST will always redirect
return redirect(url_for("index"))
@app.route("/suggestions", methods=["GET","POST"])
def suggestions():
# Make sure any user who can see the pages have provided their nerdery email.
if "email" not in session:
return redirect(url_for("register"))
# Initialize container for keyword args to be passed to the view
snacks = {}
# Initialize model for this user
model_vote = model.Votes(session["email"])
today = date.today()
# Prepare not yet voted on suggestions using web service
resp = webservice_client.get_snacks_from_web_service()
# When web service is down, redirect.
if not resp:
return redirect(url_for("no_ws"))
# Get optional snacks from web service, subtract the already suggested snacks
optional_snacks = webservice_client.separate_optional_snacks(resp)[1]
l_optional_snacks = [s["name"] for s in optional_snacks]
suggested_snacks = model_vote.get_suggestion(year=today.year, month=today.month)
optional_not_suggested = list(set(l_optional_snacks)-set(suggested_snacks))
# Construct form for the view
form_suggestion = SuggestionDropdown()
choices = [("","Please select")]+[(s, s) for s in optional_not_suggested]
form_suggestion.snack_options.choices = choices
# When user request to view the suggestion page
if request.method == "GET":
# If already voted this month, set error message
if today.strftime("%Y-%m") == model_vote.get_last_suggest_date():
snacks["error_suggestion"] = "There is a total of one allowed suggestion per month."
snacks.update({
"form": form_suggestion
})
# GET will render view
return render_template("pages/suggestions.html", **snacks)
if request.method == "POST":
dropdown_input = form_suggestion.snack_options.data
text_suggestion = form_suggestion.suggestion_input.data
text_location = form_suggestion.suggestion_location.data
# Error0 : already suggested, don't process form
if today.strftime("%Y-%m") == model_vote.get_last_suggest_date():
flash("You have attempted to add more than the allowed number of suggestions per month!", "error_suggestion")
return redirect(url_for("suggestions"))
if form_suggestion.validate():
dropdown_input = form_suggestion.snack_options.data
text_suggestion = form_suggestion.suggestion_input.data
text_location = form_suggestion.suggestion_location.data
# Error1: more than one suggestion
if dropdown_input and text_suggestion:
flash("Please choose one between selecting from drop-down or entering a new suggestion.", "error_suggestion")
# Success1: user submits one drop-down item, record suggestion by updating db
if dropdown_input and (not text_suggestion):
model_vote.suggest(dropdown_input)
return redirect(url_for("index"))
# User manually inputs a new suggestion
if (not dropdown_input) and text_suggestion:
# With location, Post to web service
if text_location:
post_resp = webservice_client.post_snack_to_web_service(name=text_suggestion, location=text_location)
# Success2: If success, record suggestion for this month too
if post_resp == 200:
model_vote.suggest(text_suggestion)
return redirect(url_for("index"))
# Error2: duplicate
elif post_resp == 409:
flash("You have attempted to add a suggestion that already exists!", "error_duplicate")
# Error3: not enough info
else:
flash("You have not completed information requested.", "error_completion")
if (not dropdown_input) and (not text_suggestion):
flash("You have not completed information requested.", "error_completion")
else:
print form_suggestion.errors
# POST will always redirect
return redirect(url_for("suggestions"))
@app.route("/register", methods=["GET","POST"])
def register():
if "email" in session:
return redirect(url_for("index"))
# Create form object to display, and use
form = IndentifyUserForm()
# When user summited email
if form.validate_on_submit():
# If input is validate, set session, and redirect to vote page
user_email = form.email.data.lower()
session["email"] = user_email
return redirect(url_for("index"))
return render_template("pages/register.html", form=form)
@app.route("/servicedown")
def no_ws():
return render_template("pages/servicedown.html")
@app.route("/logout")
def log_out():
session.clear()
return redirect(url_for("index"))
@app.errorhandler(404)
def page_not_found(e):
return render_template('pages/404.html'), 404
@app.errorhandler(500)
def internal_server_error(e):
return render_template('pages/500.html'), 500
| [
"[email protected]"
] | |
33b06f62c68a5ba0684eed829b6c8036fced5339 | 6020d9fca971194717a50eb9d118374a80d3ca1c | /backend/user/urls.py | 9ab95535e219911b09b035de5d5185269d5aa0af | [] | no_license | eve-klopfenstein/krypstock | 8f4a3714adc44eabcf86dcf39862395ddd6a176a | 712e598286a596329f547f0f1ae8c47984f79eaa | refs/heads/main | 2023-04-28T15:48:32.372025 | 2021-04-23T08:49:35 | 2021-04-23T08:49:35 | 367,724,267 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 118 | py | from django.urls import path
from user.views import MyUserView
urlpatterns = [
path('', MyUserView.as_view()),
] | [
"[email protected]"
] | |
66e0564ca35bd073cd0e03d4d4e9c7c945624a01 | fe8aff6f59d7b96c3a78fa9a9f564ff0e0ccc036 | /Turtlebot_ROSSharp/ROS/src/unity_simulation_scene/scripts/odometry_publisher.py | c4dd7d83a2383e2b4e3b50fb67342cfa526f5e05 | [] | no_license | kjwoo31/ROS_unity | b2149539f9e9244be2bade7ab5826ecaa1784a57 | e8348eb1438cfb6d4c7dca6dc3fda6b1ab16e89a | refs/heads/main | 2023-07-10T09:57:20.015121 | 2021-08-19T09:24:14 | 2021-08-19T09:24:14 | 374,883,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 942 | py | #!/usr/bin/env python
import rospy
# Because of transformations
import tf_conversions
import tf2_ros
import geometry_msgs.msg
def publish_odom_frame(msg):
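# Republish the incoming pose (geometry_msgs/PoseStamped) as the odom -> base_footprint TF transform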
br = tf2_ros.TransformBroadcaster()
t = geometry_msgs.msg.TransformStamped()
t.header.stamp = rospy.Time.now()
t.header.frame_id = "odom"
t.child_frame_id = "base_footprint"
t.transform.translation.x = msg.pose.position.x
t.transform.translation.y = msg.pose.position.y
t.transform.translation.z = msg.pose.position.z
t.transform.rotation.x = msg.pose.orientation.x
t.transform.rotation.y = msg.pose.orientation.y
t.transform.rotation.z = msg.pose.orientation.z
t.transform.rotation.w = msg.pose.orientation.w
br.sendTransform(t)
if __name__ == '__main__':
rospy.init_node('odometry_frame_publisher')
rospy.Subscriber('odometry_frame', geometry_msgs.msg.PoseStamped, publish_odom_frame)
rospy.spin()
| [
"[email protected]"
] | |
26e2176af4f3535cb88f8d4a8b4e890c0adf5c2a | 883c09284c28311371561b7b14a0494396d010a8 | /symkala_env/lib/python2.7/site-packages/tinys3/util.py | 3c95a282f80031d25c511eab72b12f45f73ab7fe | [
"Apache-2.0"
] | permissive | bopopescu/symkalaResearch | dd2e5c799aed2bef89c213fac9a84b7858b68ed4 | 5c2bc918f8f4598ed92e0ba482a77acca25ba3ba | refs/heads/master | 2022-09-30T15:18:58.028066 | 2019-12-03T00:17:30 | 2019-12-03T00:17:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,825 | py | import os
class LenWrapperStream(object):
"""
A simple class to wrap a stream and provide length capability
for streams like cStringIO
We do it because requests will try to fall back to chunked transfer if
it can't extract the len attribute of the object it gets, and S3 doesn't
support chunked transfer.
In some cases, like cStringIO, it may cause some issues, so we wrap the stream
with a class of our own, that will proxy the stream and provide a proper
len attribute
"""
def __init__(self, stream):
"""
Creates a new wrapper from the given stream
Params:
- stream The baseline stream
"""
self.stream = stream
def read(self, n=-1):
"""
Proxy for reading the stream
"""
return self.stream.read(n)
def __iter__(self):
"""
Proxy for iterating the stream
"""
return self.stream
def seek(self, pos, mode=0):
"""
Proxy for the `seek` method of the underlying stream
"""
return self.stream.seek(pos, mode)
def tell(self):
"""
Proxy for the `tell` method of the underlying stream
"""
return self.stream.tell()
def __len__(self):
"""
Calculate the stream length in a fail-safe way
"""
o = self.stream
# If we have a '__len__' method
if hasattr(o, '__len__'):
return len(o)
# If we have a len property
if hasattr(o, 'len'):
return o.len
# If we have a fileno property
if hasattr(o, 'fileno'):
try:
return os.fstat(o.fileno()).st_size
except IOError:
pass # fallback to the manual way, this is useful when using something like BytesIO
# calculate based on bytes to end of content
# get our start position
start_pos = o.tell()
# move to the end
o.seek(0, os.SEEK_END)
# Our len is end - start position
size = o.tell() - start_pos
# Seek the stream back to the start position
o.seek(start_pos)
# Return the size
return size
def __eq__(self, other):
"""
Make sure equal method works as expected (comparing the underlying stream and not the wrapper)
"""
if self.stream == other:
return True
if isinstance(other, LenWrapperStream) and other.stream == self.stream:
return True
@property
def closed(self):
"""
Proxy for the underlying stream closed property
"""
return self.stream.closed
def __repr__(self):
"""
Proxy for the repr of the stream
"""
return repr(self.stream) | [
"[email protected]"
] | |
157e2681aa253486361337452c0f4aea6ef2ef86 | 4e3442af8867f09a16fe56b4523ddb5b068604e3 | /core/main.py | 98e24622fe1e1af62ff15a30302f52bdf0a9d46a | [
"MIT"
] | permissive | RakeshRam/service_mesh_poc | 58aca17318fb703bbb16aabe038cb98e57b0bed5 | 7285edba96e28b109d032641ece7bdd099184066 | refs/heads/main | 2023-04-07T01:06:53.323634 | 2021-04-24T15:48:18 | 2021-04-24T15:48:18 | 338,132,198 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,025 | py | import os
import sys
import random
import logging
import requests
from flask import Flask, request, render_template, jsonify, abort
from flask_cors import CORS
app = Flask(__name__, instance_relative_config=True)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config["SQLALCHEMY_DATABASE_URI"] = 'mysql://root:root@db/main' # On Docker
# app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///main.sqlite3" # To test on local
CORS(app)
logger = logging.getLogger(f'Custom App {os.environ.get("VER")}')
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
try:
from db import db, Books
except: # TODO fix circular imports !!!
from .db import db, Books
db.init_app(app)
@app.route('/')
def index():
# To simulate a 500 Internal Server Error.
logger.info("Demo view to test Error handling")
enable_retry = os.environ.get('RT')
if enable_retry == "Y":
c = random.choice((0,1))
logger.warn(f"Selected Choice {c}")
if c == 1:
logger.error("500 Internal Server Error")
logger.critical('Internal Server Error To Test Istio Retry Logic')
abort(500, 'Internal Server Error To Test Istio Retry Logic')
logger.debug(f'App Version: {os.environ.get("VER")}')
return render_template('main.html', books=Books.query.all(), version=os.environ.get('VER'))
@app.route('/get_userinfo', methods=['GET'])
def get_userinfo():
logger.info("Demo view to test GET API from email service")
try:
email_svc = f'http://{os.environ.get("email_svc")}:{os.environ.get("email_svc_port")}/email_svc/get_emails'
logger.debug(f"EMAIL SVC: {email_svc}")
data = requests.get(email_svc).json()
except Exception as e:
logger.critical(f'Error: {str(e)}')
data = []
logger.debug(f'Total Records: {len(data)}')
return jsonify({'result': data, "status": 200})
@app.route('/get_search', methods=['GET'])
def get_search():
logger.info("Demo view to test GET API from search service")
try:
search_svc = f'http://{os.environ.get("search_svc")}:{os.environ.get("search_svc_port")}/search_svc/search'
logger.debug(f"SEARCH SVC: {search_svc}")
data = requests.get(search_svc).json()
except Exception as e:
logger.critical(f'Error: {str(e)}')
data = []
logger.debug(f'Total Records: {len(data)}')
return jsonify({'result': data, "status": 200})
@app.route('/book/add_book/', methods=['POST'])
def add_book():
"""
Example Request:
----------------
{
"name": "MyBook",
"author": "John",
"publisher": "Macmillan",
"is_available": true
}
"""
content = request.get_json(silent=True)
try:
book = Books(**content)
db.session.add(book)
db.session.commit()
except Exception as e:
abort(400, str(e))
return jsonify({
'message': 'success',
'version': os.environ.get('VER')
})
@app.route('/book/edit_book/<int:id>/', methods=['PUT', 'DELETE'])
def edit_book(id):
request_action = 'Update'
# Update Book
if request.method == 'PUT':
content = request.get_json(silent=True)
try:
book = Books.query.filter_by(id=id).update(content)
db.session.commit()
except Exception as e:
abort(400, str(e))
# Delete Book
elif request.method == 'DELETE':
request_action = 'Delete'
try:
book = Books.query.get(id)
db.session.delete(book)
db.session.commit()
except Exception as e:
abort(400, str(e))
return jsonify({
'message': f'{request_action} success',
'version': os.environ.get('VER')
})
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0') | [
"[email protected]"
] | |
f0923ac629aee511e023158bffcd945b408a625c | 04737b18f760915ea7753d45582b23d8cf39faac | /main.py | 9f0cff55fe96fb62359b5bbfa52cf2b70ecd612e | [] | no_license | Orange-Cake/Yandex | 81454cad98eb8e9f3c63b54033cf8c60693806eb | 8644b180a295dca22f3e6b0f0ee2710fa1217d98 | refs/heads/main | 2023-02-13T18:59:21.607287 | 2021-01-06T18:59:16 | 2021-01-06T18:59:16 | 327,401,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,257 | py | import sys
from PyQt5.QtWidgets import QWidget, QApplication, QPushButton
from PyQt5.QtGui import QPainter, QColor, QImage
from random import randint
class Example(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.resize(500, 500)
self.image = QImage(self.width(), self.height(), QImage.Format_ARGB32)
self.image.fill(QColor(255, 255, 255))
self.btn = QPushButton('Кнопка', self)
self.btn.resize(60, 60)
self.btn.clicked.connect(self.click)
self.show()
def paintEvent(self, e):
paint = QPainter(self)
paint.drawImage(0, 0, self.image)
def click(self):
paint = QPainter(self.image)  # paint onto the off-screen image
x, y = [randint(10, 400) for _ in range(2)]
w = randint(10, 100)
paint.setBrush(QColor('yellow'))
paint.drawEllipse(x, y, w, w)
paint.end()  # release the painter before the widget is repainted
self.update()
app = QApplication(sys.argv)
w = Example()
sys.exit(app.exec_())
| [
"[email protected]"
] | |
34a1e201add585aa04483afc9282d5dd3ebcab53 | 60d5ea4f007d49768d250ef394003f554003e4d0 | /python/Linked List/148.Sort List.py | df0485a4e3990534fe5b2bb38f8196871282c2ac | [] | no_license | EvanJamesMG/Leetcode | dd7771beb119ea1250dbb3b147a09053298cd63b | fa638c7fda3802e9f4e0751a2c4c084edf09a441 | refs/heads/master | 2021-01-10T17:11:10.896393 | 2017-12-01T16:04:44 | 2017-12-01T16:04:44 | 46,968,756 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,437 | py | # coding=utf-8
'''
Sort a linked list in O(n log n) time using constant space complexity.
'''
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
'''
Merge sort: best-case time complexity O(n log n), worst-case time complexity O(n log n).
Since the problem places strict requirements on time and space complexity, after reviewing
various approaches, merge sort is the best solution. Unlike merging arrays, merging linked
lists does not need a temporary array, so the space complexity is constant (ignoring the
call stack produced by the recursion).
This uses a common linked-list operation: the fast/slow pointer technique. Set slow and
fast pointers, both starting at the head; fast moves two steps at a time, slow moves one.
When fast reaches the tail, slow is exactly at the middle, which splits the list in two.
'''
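# Worked example (illustrative): for 4 -> 2 -> 1 -> 3, fast/slow stop with slow on 2,
# splitting into 4 -> 2 and 1 -> 3; each half is sorted recursively, and merge()
# then zips them into 1 -> 2 -> 3 -> 4.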
class Solution:
# @param head, a ListNode
# @return a ListNode
def merge(self, head1, head2):
if head1 == None: return head2
if head2 == None: return head1
dummy = ListNode(0) # create a dummy head node for the merge
p = dummy
while head1 and head2:
if head1.val <= head2.val:
p.next = head1
head1 = head1.next
p = p.next
else:
p.next = head2
head2 = head2.next
p = p.next
if head1 == None:
p.next = head2
if head2 == None:
p.next = head1
return dummy.next
def sortList(self, head):
if head == None or head.next == None:
return head
slow = head; fast = head # fast/slow pointer technique, used to split the list
while fast.next and fast.next.next:
slow = slow.next
fast = fast.next.next
head1 = head
head2 = slow.next
slow.next = None #head1和head2为截为两条链表的表头
head1 = self.sortList(head1)
head2 = self.sortList(head2)
head = self.merge(head1, head2)
return head
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
#
# if __name__ == "__main__":
#
# result = Solution().numTrees(3)
# print result
| [
"[email protected]"
] | |
23080bb922bfbab575f93190df5d6d3d056bfdf9 | a8790cc9fba25aa587bc1f11638917252acae49f | /RDS/layer3_central_services/research_manager/src/lib/EnumStatus.py | bf43573c3f417a3f140d9939be40ee3594794f32 | [
"MIT"
] | permissive | Sciebo-RDS/Sciebo-RDS | e4aa96cfbf93f6601272c5ff0ce38ff19c750393 | dea6b22e8fe0fe26a40b1ac158a40f5650c5a232 | refs/heads/develop | 2023-08-31T04:13:17.611973 | 2023-08-18T16:00:13 | 2023-08-18T16:00:13 | 221,629,298 | 14 | 8 | MIT | 2023-09-12T13:08:40 | 2019-11-14T06:38:51 | Python | UTF-8 | Python | false | false | 545 | py | from enum import Enum, auto
class Status(Enum):
"""
The order represents the workflow through the states. So the successor of each status is the next in line.
"""
CREATED = auto()
WORK = auto()
DONE = auto()
DELETED = auto()
def succ(self):
if self.hasNext():
return Status(self.value + 1)
raise IndexError("out of status. You are already at the last state.")
def hasNext(self):
return self.value != len(Status)
def getDict(self):
return self.value
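# Example workflow walk (illustrative):
#     s = Status.CREATED
#     while s.hasNext():
#         s = s.succ()   # CREATED -> WORK -> DONE -> DELETED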
| [
"[email protected]"
] | |
33edbe9b248dcf9ef45e2ae90adb5d10ee394dee | 76ae437d71e9bb3bf428d3e474dcd01cfa4309aa | /bme280.py | bdf409ce837d1ba56a52846c346f2e41b68c03f2 | [] | no_license | HaiQ31/Gartenhaus | df8a07fb50d87f59dfadfaeb3a23972ed1e562fd | 73abf8b813b12b772137fa032afd6b04599bf611 | refs/heads/master | 2020-03-17T21:38:55.825491 | 2018-07-20T15:46:21 | 2018-07-20T15:46:21 | 133,967,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,066 | py | #!/usr/bin/python
#--------------------------------------
# ___ ___ _ ____
# / _ \/ _ \(_) __/__ __ __
# / , _/ ___/ /\ \/ _ \/ // /
# /_/|_/_/ /_/___/ .__/\_, /
# /_/ /___/
#
# bme280.py
# Read data from a digital pressure sensor.
#
# Official datasheet available from :
# https://www.bosch-sensortec.com/bst/products/all_products/bme280
#
# Author : Matt Hawkins
# Date : 25/07/2016
#
# http://www.raspberrypi-spy.co.uk/
#
#--------------------------------------
import smbus
import time
from ctypes import c_short
from ctypes import c_byte
from ctypes import c_ubyte
DEVICE = 0x76 # Default device I2C address
bus = smbus.SMBus(1) # Rev 2 Pi, Pi 2 & Pi 3 uses bus 1
# Rev 1 Pi uses bus 0
def getShort(data, index):
# return two bytes from data as a signed 16-bit value
return c_short((data[index+1] << 8) + data[index]).value
def getUShort(data, index):
# return two bytes from data as an unsigned 16-bit value
return (data[index+1] << 8) + data[index]
def getChar(data,index):
# return one byte from data as a signed char
result = data[index]
if result > 127:
result -= 256
return result
def getUChar(data,index):
# return one byte from data as an unsigned char
result = data[index] & 0xFF
return result
def readBME280ID(addr=DEVICE):
# Chip ID Register Address
REG_ID = 0xD0
(chip_id, chip_version) = bus.read_i2c_block_data(addr, REG_ID, 2)
return (chip_id, chip_version)
def readBME280All(addr=DEVICE):
# Register Addresses
REG_DATA = 0xF7
REG_CONTROL = 0xF4
REG_CONFIG = 0xF5
REG_CONTROL_HUM = 0xF2
REG_HUM_MSB = 0xFD
REG_HUM_LSB = 0xFE
# Oversample setting - page 27
OVERSAMPLE_TEMP = 2
OVERSAMPLE_PRES = 2
MODE = 1
# Oversample setting for humidity register - page 26
OVERSAMPLE_HUM = 2
bus.write_byte_data(addr, REG_CONTROL_HUM, OVERSAMPLE_HUM)
control = OVERSAMPLE_TEMP<<5 | OVERSAMPLE_PRES<<2 | MODE
bus.write_byte_data(addr, REG_CONTROL, control)
# Read blocks of calibration data from EEPROM
# See Page 22 data sheet
cal1 = bus.read_i2c_block_data(addr, 0x88, 24)
cal2 = bus.read_i2c_block_data(addr, 0xA1, 1)
cal3 = bus.read_i2c_block_data(addr, 0xE1, 7)
# Convert byte data to word values
dig_T1 = getUShort(cal1, 0)
dig_T2 = getShort(cal1, 2)
dig_T3 = getShort(cal1, 4)
dig_P1 = getUShort(cal1, 6)
dig_P2 = getShort(cal1, 8)
dig_P3 = getShort(cal1, 10)
dig_P4 = getShort(cal1, 12)
dig_P5 = getShort(cal1, 14)
dig_P6 = getShort(cal1, 16)
dig_P7 = getShort(cal1, 18)
dig_P8 = getShort(cal1, 20)
dig_P9 = getShort(cal1, 22)
dig_H1 = getUChar(cal2, 0)
dig_H2 = getShort(cal3, 0)
dig_H3 = getUChar(cal3, 2)
dig_H4 = getChar(cal3, 3)
dig_H4 = (dig_H4 << 24) >> 20
dig_H4 = dig_H4 | (getChar(cal3, 4) & 0x0F)
dig_H5 = getChar(cal3, 5)
dig_H5 = (dig_H5 << 24) >> 20
dig_H5 = dig_H5 | (getUChar(cal3, 4) >> 4 & 0x0F)
dig_H6 = getChar(cal3, 6)
# Wait in ms (Datasheet Appendix B: Measurement time and current calculation)
wait_time = 1.25 + (2.3 * OVERSAMPLE_TEMP) + ((2.3 * OVERSAMPLE_PRES) + 0.575) + ((2.3 * OVERSAMPLE_HUM)+0.575)
time.sleep(wait_time/1000) # Wait the required time
# Read temperature/pressure/humidity
data = bus.read_i2c_block_data(addr, REG_DATA, 8)
pres_raw = (data[0] << 12) | (data[1] << 4) | (data[2] >> 4)
temp_raw = (data[3] << 12) | (data[4] << 4) | (data[5] >> 4)
hum_raw = (data[6] << 8) | data[7]
#Refine temperature
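# Integer compensation formulas from the Bosch BME280 datasheet; t_fine carries the
# fine temperature reused by the pressure and humidity compensation below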
var1 = ((((temp_raw>>3)-(dig_T1<<1)))*(dig_T2)) >> 11
var2 = (((((temp_raw>>4) - (dig_T1)) * ((temp_raw>>4) - (dig_T1))) >> 12) * (dig_T3)) >> 14
t_fine = var1+var2
temperature = float(((t_fine * 5) + 128) >> 8)
# Refine pressure and adjust for temperature
var1 = t_fine / 2.0 - 64000.0
var2 = var1 * var1 * dig_P6 / 32768.0
var2 = var2 + var1 * dig_P5 * 2.0
var2 = var2 / 4.0 + dig_P4 * 65536.0
var1 = (dig_P3 * var1 * var1 / 524288.0 + dig_P2 * var1) / 524288.0
var1 = (1.0 + var1 / 32768.0) * dig_P1
if var1 == 0:
pressure=0
else:
pressure = 1048576.0 - pres_raw
pressure = ((pressure - var2 / 4096.0) * 6250.0) / var1
var1 = dig_P9 * pressure * pressure / 2147483648.0
var2 = pressure * dig_P8 / 32768.0
pressure = pressure + (var1 + var2 + dig_P7) / 16.0
# Refine humidity
humidity = t_fine - 76800.0
humidity = (hum_raw - (dig_H4 * 64.0 + dig_H5 / 16384.0 * humidity)) * (dig_H2 / 65536.0 * (1.0 + dig_H6 / 67108864.0 * humidity * (1.0 + dig_H3 / 67108864.0 * humidity)))
humidity = humidity * (1.0 - dig_H1 * humidity / 524288.0)
if humidity > 100:
humidity = 100
elif humidity < 0:
humidity = 0
return temperature/100.0,pressure/100.0,humidity
def main():
(chip_id, chip_version) = readBME280ID()
print( "Chip ID :", chip_id)
print( "Version :", chip_version)
temperature,pressure,humidity = readBME280All()
print( "Temperature : ", temperature, "C")
print( "Pressure : ", pressure, "hPa")
print( "Humidity : ", humidity, "%")
if __name__=="__main__":
main()
| [
"[email protected]"
] | |
2a2f3078f350a78d7f5ac02b2f4aff19d0095d41 | 38d9ac28d08b796fe8725e0f532cc326038ff562 | /venv/Scripts/pip3.6-script.py | b53249424812d97c603150bed5859402e9c26399 | [] | no_license | jgmanzanas/Prueba | fb3a68119a5dd38568310022de16dcb59080e381 | 2f6fc02dcc5368da64d6c7e9ee0c6e0e5b93bcf1 | refs/heads/master | 2020-03-07T07:22:43.172787 | 2018-03-29T21:09:26 | 2018-03-29T21:09:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | #!C:\Users\SrMan\PycharmProjects\Prueba\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3.6'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip3.6')()
)
| [
"[email protected]"
] | |
15a2a90627d1501751f45454be341a000e2d5b91 | d7fe81d21b46706b427ba38def6d0f4d6dbbfc2a | /sample_code/node-test/node-test.py | 59420d6d5c3ec4acc9312ca2f1524d5e4b5e39a6 | [
"BSD-2-Clause"
] | permissive | pibara-utopian/asyncsteem | c9e7dc8272dace8a87c3dc99a78c44246da0d8d5 | c6a2c1ef7bcf15cf4a15bb7f2152dca506d26935 | refs/heads/master | 2021-05-10T19:31:09.173699 | 2020-05-29T13:03:13 | 2020-05-29T13:03:13 | 118,157,494 | 8 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,086 | py | #!/usr/bin/python
import sys
import json
import socket
from twisted.internet import reactor
from twisted.logger import Logger, textFileLogObserver
from asyncsteem import RpcClient
def process_account_info(event,client):
obj = json.loads(event[0]["json_metadata"])
candidates = []
for rep in obj["report"]:
candidates.append(rep["node"])
candidates += obj["failing_nodes"].keys()
candidates += obj["nodes"]
    candidates_set = set(candidates)  # de-duplicate; the same node can be listed several times
    candidate_list = []
    for candidate in candidates_set:
if candidate[:8] == "https://" and len(candidate.split(":")) == 2:
host = str(candidate[8:])
candidate_list.append(host)
for apinode in candidate_list:
print apinode
def process_error(errno,msg,client):
print msg
obs = textFileLogObserver(sys.stdout)
log = Logger(observer=obs,namespace="node-test")
client = RpcClient(reactor,log,stop_when_empty=False,rpc_timeout=15)
opp = client.get_accounts(["fullnodeupdate"])
opp.on_result(process_account_info)
opp.on_error(process_error)
client()
reactor.run()
| [
"[email protected]"
] | |
fd394c02d2b2cee07e518e829a25fca4ea11bebb | 39ac12bf42f5137562ed12f63fc3c4ff455501cc | /python-advanced/Shallow-deep-copying/shallow-deep-copy.py | de69542e4b605e0b3fc2d52d3ff8e0ff43d8309a | [] | no_license | tiwariutkarsh422/python-advanced | f4f545bac120e143c8f77d566bcda13065c36085 | 1567a727deb561a2a7446550dd619718ad5b779f | refs/heads/master | 2022-10-31T22:53:53.667154 | 2020-06-04T13:47:34 | 2020-06-04T13:47:34 | 267,061,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,602 | py | import copy
original_list = [2, 4 ,['ada lovelace'], 1965, [7,8,9], [11, 12, 13]]
''' Shallow copies can be created using factory functions such as list(), dict(), etc.
However, a shallow copy only creates references to the child objects present in the original list.'''
shallow_copy_list = list(original_list)
print('original list before append:', original_list)
print('shallow copied list before append', shallow_copy_list)
print()
original_list.append(['new_append'])
print('original list after append:', original_list)
print('shallow copied list after append', shallow_copy_list)  # appending values to the original list does not affect the shallow copy
print()
original_list[2][0] = 'Ed'
print('original list after specified change', original_list)
print('shallow copied list after specified change:', shallow_copy_list)
print()
''' As we can see above, since a shallow copy is only one level deep, it is not
truly independent of the original list: mutating a child object of the original
list changes the shallow copied list as well, because the copy only holds
references to the child objects of original_list that existed before the append.'''
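# A minimal sketch (not part of the original demo): copy.copy() from the copy
# module gives the same one-level-deep behaviour as the list() factory function.
shallow_copy_via_module = copy.copy(original_list)
original_list[2][0] = 'Ada'
print('copy.copy() also shares child objects:', shallow_copy_via_module[2])
print()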
original_list = [2, 4 ,['ada lovelace'], 1965, [7,8,9], [11, 12, 13]]
''' A deep copy can be created using the deepcopy() function of the copy module.'''
deep_copy_list = copy.deepcopy(original_list)
print('original list before change:', original_list)
print('deep copied list before change', deep_copy_list)
print()
original_list[2][0] = 'Ed'
print('original list after specified change', original_list)
print('deep copied list after specified change:', deep_copy_list)
| [
"[email protected]"
] | |
6ed42cb0f9267d97d80c1b28fb1c9290a328327e | 09379dea7ec9f69ee34dab2c4e5f906d636848a2 | /GV_Catalogue_Gen.py | 76c08f706e665e5da58268bfa58cc5609a793bd4 | [
"MIT"
] | permissive | TheMonitorBeep/STADS---Star-Matching | 619f3dac073542536d28494bb0dcd2a224d7dd43 | 0a96885a168b8de86eb4f51ba401980969023452 | refs/heads/master | 2022-03-17T05:20:19.202165 | 2019-12-10T16:00:39 | 2019-12-10T16:00:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,694 | py | import numpy as np
import pandas as pd
import time, gc
pi = np.pi
cos = np.cos
sin = np.sin
acos = np.arccos
degrees = np.degrees
radians = np.radians
def angularDistance(row, col_names):
'''
Computes the angular distance (in degrees) between two points on the celestial
sphere with a given right-ascension and declination values
<Formula> - http://spiff.rit.edu/classes/phys373/lectures/radec/radec.html
Parameters
----------
row : pd.Dataframe - series
Input right-ascension in (hrs) and declination in (degrees) format
col_names: list of strings
The names of the columns on which the function will be applied
### SHOULD FOLLOW THIS CONVENTION
c1 = right-ascension_1; c2 = right-ascension_2
c3 = declination_1; c4 = declination_2
Returns
-------
y : pd.Dataframe - series
The corresponding angular distance in degree value.
'''
# Unpack column names
c1, c2, c3, c4 = col_names
# Assert datatypes
assert type(c1) == str and type(c2) == str and type(c3) == str and type(c4) == str, 'TypeError: input should be str'
# Units of right-ascension is in (hours) format
alpha1, alpha2 = radians(15*row[c1]), radians(15*row[c2])
# Units of declination is in (degrees) format
delta1, delta2 = radians(row[c3]), radians(row[c4])
# Given Formula
temp = cos(pi/2 - delta1)*cos(pi/2 - delta2) + sin(pi/2 - delta1)*sin(pi/2 - delta2)*cos(alpha1 - alpha2)
return np.degrees(acos(temp))
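# Quick sanity check for angularDistance (hypothetical values, not from the
# actual catalogue): opposite points on the celestial equator are 180 deg apart.
def _angular_distance_sanity_check():
    row = pd.Series({'RA_1': 0.0, 'RA_2': 12.0, 'Dec_1': 0.0, 'Dec_2': 0.0})
    # Right-ascension is given in hours, so a 12 hr separation is 180 degrees on the sky
    assert np.isclose(angularDistance(row, ['RA_1', 'RA_2', 'Dec_1', 'Dec_2']), 180.0)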
def genRefCatalogue(CATALOGUE, mag_limit, no_iter = -1, gen_csv = True):
'''
Generates the reference star catalogue for Geometric Voting Algorithm where each row
of the table has two unique stars and the corresponding angular distance in degrees,
for all pairs of stars with a specified upper magnitude limit
Parameters
----------
CATALOGUE : pd.Dataframe
The 'master' star catalogue on which the function works
mag_limit : floating-point number
The upper magnitude limit of stars that are required in the reference catalogue
no_iter : integer, default = -1
Specifies the number of iterations, thereby allowing it to be reduced
Default value = -1, allows for the completion of the entire catalogue
gen_csv : boolean, default = True
If True generates csv files of the reference catalogues
Returns
-------
OB_CATALOGUE : pd.Dataframe
        The reference catalogue: one row per unique pair of stars, with the
        angular distance between them in degrees.
'''
# Start clock-1
start1 = time.time()
# Generate restricted catalogue based on upper magnitude limit
temp0 = CATALOGUE[CATALOGUE.Mag <= mag_limit]
# Number of rows in the resticted catalogue
rows = temp0.shape[0]
# Resets the index of <temp0>
temp0.index = list(range(rows))
# Prints total number of stars in <temp0> and the (n)C(2) - combinations
print('Number of stars - ', rows)
print('Number of unique combinations = ', (rows-1)*rows/2)
# Initialize the number of iterations to take place
no_iter = (rows-1) if no_iter == -1 else no_iter
for i in range(no_iter):
# Throws error if an iteration runs beyond number of available rows in <temp0>
assert i<(rows-1), 'IndexError: iterating beyond available number of rows'
        # The final iteration is redundant, as <temp2> will have zero rows
'''
if (rows-1-i)==0:
continue
'''
# Generates <temp1> dataframe which has the (i - th) star of <temp0>
# repetated (rows-1-i) times
temp1 = pd.DataFrame(columns = ['Star_ID1','RA_1', 'Dec_1', 'Mag_1'])
s1, ra, dec, mag = temp0.iloc[i]
temp1.loc[0] = [s1] + [ra] + [dec] + [mag]
temp1 = pd.concat([temp1]*(rows-1-i), ignore_index=True)
# Generates <temp2> dataframe by copying values of <temp0> and dropping the first
# (i + 1) number of stars
temp2 = temp0
temp2 = temp2.drop(list(range(i+1)), axis = 0)
# Resets the index
temp2.index = list(range(0, rows-1-i))
# Concatenates <temp1> & <temp2> side-by-side such that resulting <temp3> has (8) columns altogether
temp3 = pd.concat([temp1, temp2], axis=1)
# Initializes <temp4> in the first iteratation
if i == 0:
temp4 = temp3
# Append subsequent <temp4> with <temp3> after first iteration
else:
temp4 = pd.concat([temp4, temp3], axis = 0, ignore_index=True)
# Releases memory back to OS
if i%40 == 0:
gc.collect()
gc.collect()
# Rename columns
temp4.columns = ['Star_ID1','RA_1', 'Dec_1', 'Mag_1', 'Star_ID2', 'RA_2', 'Dec_2', 'Mag_2']
if gen_csv == True:
#Generates CSV of <temp4>
temp4.to_csv('Processed_Catalogue1.csv', index = False)
# Stop clock-1
end1 = time.time() - start1
# Print time taken
print('Process 1 - ', end1)
# Start clock-2
start2 = time.time()
# Initialize <OB_CATALOGUE>
OB_CATALOGUE = temp4
# Calculate angular distance between the two stars present in every row
cols = ['RA_1', 'RA_2', 'Dec_1', 'Dec_2']
OB_CATALOGUE['Ang_Distance'] = OB_CATALOGUE.apply(angularDistance, axis = 1, col_names = cols)
if gen_csv == True:
# Generates CSV of <OB_CATALOGUE>
OB_CATALOGUE.to_csv('Processed_Catalogue2.csv', index = False)
# Stop clock-2
end2 = time.time() - start2
# Print time taken
print('Process 2 - ', end2)
print('Total Process ', end1+ end2)
return OB_CATALOGUE
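# Example invocation (illustrative values only): with n stars below the magnitude
# limit, the returned dataframe has n*(n-1)/2 rows, e.g.
#     ref = genRefCatalogue(CATALOGUE, mag_limit=6.0, no_iter=-1, gen_csv=False)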
def main():
'''
main function
'''
# Reads 'Master' star catalogue
CATALOGUE = pd.read_csv(r"F:\IIT Bombay\SatLab\Star Tracker\Programs\Catalogues\Modified Star Catalogue.csv")
# StarID: The database primary key from a larger "master database" of stars
# Mag: The star's apparent visual magnitude
# RA, Dec: The star's right ascension and declination, for epoch 2000.0 (Unit: RA - hrs; Dec - degrees)
# Sorts <CATALOGUE>
CATALOGUE.sort_values('Mag', inplace=True)
# Run function
REF_DF = genRefCatalogue(CATALOGUE, mag_limit=1, no_iter=-1, gen_csv=False)
    # Sort <REF_DF> (sort_values returns a new dataframe, so the result must be reassigned)
    REF_DF = REF_DF.sort_values('Ang_Distance')
# Generates CSV of <REF_DF>
REF_DF.to_csv('Processed_Catalogue3.csv', index = False)
print('Done')
if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
04e940e749fb6dd0721f27b24375989edc516388 | ed2dd2354d56c986857e1331fd9efce1fafa53e2 | /pat1106.py | ecf1c6f39248d5b92269bb1b34d27f18161990d8 | [] | no_license | aceDwill/PAT | f10f3bdb87ad01a6bc30582376a43a59cb5720f8 | 6c54630cd0f32cc9680c87debc5ff2de418c9a9b | refs/heads/master | 2021-01-01T18:40:30.578907 | 2018-11-26T06:55:40 | 2018-11-26T06:55:40 | 98,403,099 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 709 | py | """
Approach: see PAT 1090 for the details.
Note: test cases 2, 3, 5, 6 and 7 exceed the time limit.
"""
origin = input().split()
N = int(origin[0])
price = float(origin[1])
rate = float(origin[2]) / 100
tree = []
for i in range(N):
child = [int(x) for x in input().split()]
tree.append(child[1:])
min_depth = 100000
num = 0
def dfs(root, depth):
global min_depth, num
if len(tree[root]) == 0:
if depth < min_depth:
min_depth = depth
num = 1
elif depth == min_depth:
num += 1
else:
for child in tree[root]:
dfs(child, depth + 1)
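# Note (added): very deep supply chains can hit CPython's default recursion limit;
# an iterative BFS over (node, depth) pairs would avoid that and may also run faster,
# which is relevant given the time-outs mentioned above.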
dfs(0,0)
result = price * (1 + rate) ** min_depth
print("{:.4f}".format(result) + " " + str(num))
| [
"[email protected]"
] | |
3ecf9b834c4eb9b27f4030875f86d478ca91f7a7 | f8dd1dfb0f81de16b9c8f681c85c6995b63ce037 | /tensorflow/contrib/estimator/__init__.py | 6b9f9575b606f1822d760e8597c55994dd8af04c | [
"Apache-2.0"
] | permissive | DandelionCN/tensorflow | 74688926778ae06da1f406967baf6b251b3f3c4e | 1712002ad02f044f7569224bf465e0ea00e6a6c4 | refs/heads/master | 2020-03-06T19:10:37.847848 | 2018-03-27T17:11:49 | 2018-03-27T17:11:49 | 127,022,134 | 1 | 0 | Apache-2.0 | 2018-03-27T17:24:51 | 2018-03-27T17:24:51 | null | UTF-8 | Python | false | false | 2,130 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental utilities re:tf.estimator.*."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long,wildcard-import
from tensorflow.contrib.estimator.python.estimator.dnn import *
from tensorflow.contrib.estimator.python.estimator.dnn_linear_combined import *
from tensorflow.contrib.estimator.python.estimator.extenders import *
from tensorflow.contrib.estimator.python.estimator.head import *
from tensorflow.contrib.estimator.python.estimator.linear import *
from tensorflow.contrib.estimator.python.estimator.logit_fns import *
from tensorflow.contrib.estimator.python.estimator.multi_head import *
from tensorflow.contrib.estimator.python.estimator.replicate_model_fn import *
from tensorflow.python.util.all_util import remove_undocumented
# pylint: enable=unused-import,line-too-long,wildcard-import
_allowed_symbols = [
'add_metrics',
'binary_classification_head',
'clip_gradients_by_norm',
'forward_features',
'multi_class_head',
'multi_head',
'multi_label_head',
'poisson_regression_head',
'regression_head',
'DNNEstimator',
'DNNLinearCombinedEstimator',
'LinearEstimator',
'call_logit_fn',
'dnn_logit_fn_builder',
'linear_logit_fn_builder',
'replicate_model_fn',
'TowerOptimizer',
]
remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
| [
"[email protected]"
] | |
f25ef6a9d029c2611592714256c32d631ad3e526 | bd48e5dc2c52a3d942b7b10f5bfd4f7563b8ed9a | /Python Essentials/Laura's solutions/String.py | 73c08430d2aff9da1b480a5794f1be97d96b5b72 | [] | no_license | yawwusugh/Enthought-Training-on-Demand | da8ea3d9339fc14c1e0dd7517702f47d3f20e95b | a19609d96efeb7efd2fe732ff91cf84bf741890a | refs/heads/master | 2022-11-16T08:24:08.644608 | 2015-06-29T20:18:59 | 2015-06-29T20:18:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | fst_name = 'Laura'
last_name = "Moretti"
h = "Hello"
print (h + " " + fst_name + " " + last_name)
number = len(h) + 1 + len(fst_name) + 1 + len(last_name)
print "="*number | [
"[email protected]"
] | |
8fe0d5d5df785b0103ed4d20d458b03dd04298c1 | 3f80e93acf60049087a27bd7de80fbaca57a605d | /setup.py | d55b768d9a95e6b0950e9a235e111e7af91e16dc | [
"MIT"
] | permissive | prompteus/style-transfer | f66d8782be3c36280837c8ab5131d8c06e3e7a79 | 9ff06fd7a17494ac51a5d2ce238e164558985952 | refs/heads/main | 2023-06-28T00:06:48.090767 | 2021-07-30T12:28:43 | 2021-07-30T12:28:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | from setuptools import setup, find_packages
setup(
package_dir={'': 'src'},
packages=find_packages(where='src'),
)
| [
"[email protected]"
] | |
0fe6b1643589f8e1b07a45256b6936a9e6d92168 | 73575daf314262e354b5839610fb59bbd922d229 | /projects/07/virtual_machine_part_1/write_file.py | 3ab9f7ebbd22d33e7e5d399f2ad4fa0ba72e281a | [] | no_license | hoosierEE/nand2tetris | d3a58cd2de441afc611bb8d9a641b970b0a2b415 | 5ca1ad8af654151bc5120890c6c78b62999c1d9f | refs/heads/master | 2023-03-15T23:31:39.546974 | 2015-10-09T21:42:17 | 2015-10-09T21:42:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | # import file parsing function
from parser import parse
def write_file(vmCode, filename):
    # derive the output filename from the input .vm file
outputFilename = filename.replace('.vm', '.asm')
    # write assembly to file (the with-block closes the file automatically)
    with open(outputFilename, 'w') as f:
        f.write('\n'.join(vmCode))
| [
"[email protected]"
] | |
0508b18ea031c12502a6dff30485a63fa71a0660 | d17a8870ff8ac77b82d0d37e20c85b23aa29ca74 | /lite/tests/unittest_py/pass/common/test_conv_scale_fuse_pass_base.py | a071233f2ff5d5725c9fc9aede18f373c5baff9c | [
"Apache-2.0"
] | permissive | PaddlePaddle/Paddle-Lite | 4ab49144073451d38da6f085a8c56822caecd5b2 | e241420f813bd91f5164f0d9ee0bc44166c0a172 | refs/heads/develop | 2023-09-02T05:28:14.017104 | 2023-09-01T10:32:39 | 2023-09-01T10:32:39 | 104,208,128 | 2,545 | 1,041 | Apache-2.0 | 2023-09-12T06:46:10 | 2017-09-20T11:41:42 | C++ | UTF-8 | Python | false | false | 3,376 | py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('..')
sys.path.append('.')
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import numpy as np
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
from test_conv_util import UpdatePaddingAndDilation, ConvOutputSize
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume, reproduce_failure
import hypothesis.strategies as st
def sample_program_configs(draw):
in_shape = draw(
st.lists(
st.integers(
min_value=1, max_value=64), min_size=4, max_size=4))
weight_shape = draw(
st.lists(
st.integers(
min_value=1, max_value=64), min_size=4, max_size=4))
paddings = draw(st.sampled_from([[1, 2], [4, 2]]))
dilations = draw(st.sampled_from([[1, 1]]))
groups = draw(st.sampled_from([1, 2, in_shape[1]]))
padding_algorithm = draw(st.sampled_from(["VALID", "SAME"]))
strides = draw(st.sampled_from([[1, 1], [2, 2]]))
scale = draw(st.floats(min_value=0.5, max_value=5))
scale_bias = draw(st.floats(min_value=0.0, max_value=1.0))
assume(in_shape[1] == weight_shape[1] * groups)
assume(weight_shape[0] % groups == 0)
paddings_, dilations_ = UpdatePaddingAndDilation(
in_shape, weight_shape, paddings, dilations, groups, padding_algorithm,
strides)
out_shape = [in_shape[0], weight_shape[0]]
oh, ow = ConvOutputSize(in_shape, weight_shape, dilations_, paddings_,
strides)
out_shape = out_shape + [oh, ow]
assume(oh > 0 and ow > 0)
conv_op = OpConfig(
type="conv2d",
inputs={
"Input": ["input_data"],
"Filter": ["weight_data"],
"Bias": ["conv_bias"]
},
outputs={"Output": ["conv_output_data"]},
attrs={
"data_format": 'nchw',
"dilations": dilations,
"padding_algorithm": padding_algorithm,
"groups": groups,
"paddings": paddings,
"strides": strides
})
scale_op = OpConfig(
type="scale",
inputs={"X": ["conv_output_data"]},
outputs={"Out": ["output_data"]},
attrs={"scale": scale,
"bias": scale_bias,
"bias_after_scale": True})
ops = [conv_op, scale_op]
program_config = ProgramConfig(
ops=ops,
weights={
"conv_bias": TensorConfig(shape=[weight_shape[0]]),
"weight_data": TensorConfig(shape=weight_shape)
},
inputs={"input_data": TensorConfig(shape=in_shape)},
outputs=["output_data"])
return program_config
| [
"[email protected]"
] | |
6f5f7cf02c38f40c6146cd65d7c587a38aa251a3 | 30f84b4fbd7832f9c8b1221b9b542dec65bcabb8 | /main.py | 07c1475887041bd64f226387b835ece61a2e88d0 | [] | no_license | egdw/crawler_weehui | 8c7235d0a4d3c8c77b1571c4f4d839f46298009e | b41bf28db10791b9d8132d49c8af51b8fbaff17b | refs/heads/master | 2020-03-27T05:50:59.460177 | 2018-08-25T07:21:40 | 2018-08-25T07:21:40 | 146,056,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,945 | py | import requests
import re
from bs4 import BeautifulSoup
import os
# Global session
rq = requests.Session()
# Comic id
cartoon_id = None
# Log in
def login(username, password, cartoonUrl):
pattern = re.compile(r'[a-z 0-9]{32}')
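    # The comic id is a 32-character hash embedded in the URL (the space in the
    # character class comes from the original pattern; [a-f0-9] would be stricter)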
id = pattern.search(cartoonUrl)
if id:
global cartoon_id
cartoon_id = id.group(0)
        print('Extracted id: ' + cartoon_id)
payload = {'name': username, 'password': password}
headers = {'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1',
'Host': 'www.weehui.com', 'Origin': 'http://www.weehui.com', 'Referer': 'http://www.weehui.com/login/login'}
rq.post('http://www.weehui.com/login/login',
data=payload, headers=headers)
global cartoonStartIndex
get(cartoonStartIndex)
else:
print('no match')
def get(index):
print("正在下载第"+str(index)+'章内容..')
global cartoon_id
url = 'http://www.weehui.com/cartoon/read/' + \
cartoon_id+'/'+str(index)
r = rq.get(url)
print(r)
soup = BeautifulSoup(r.text, 'html5lib')
# print(soup.prettify())
div = soup.findAll('div', attrs={"class": 'contentNovel'})[0]
# print(div)
soup = BeautifulSoup(str(div), 'html5lib')
i = 0
for src in soup.findAll("img"):
data_original = src.get('data-original')
if(data_original == None):
data_original = src.get('src')
print(data_original)
i = i + 1
headers = {'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1',
'Referer': url}
ir = requests.get(data_original, headers=headers)
global save_url
if ir.status_code == 200:
open(save_url +
str(index)+"_"+str(i)+".jpg", 'wb').write(ir.content)
index = index+1
global cartoonEndIndex
if(index < cartoonEndIndex):
get(index)
# Username
username = input('漫小说 (weehui) username: ')
# Password
password = input('漫小说 (weehui) password: ')
# Comic link address
cartoonUrl = input('Comic link address: ')
# Total number of chapters in the comic
cartoonPagesNum = input('Total number of chapters (e.g. enter 57 for a 57-chapter comic): ')
# Directory the comic is saved to, e.g. "/Users/hdy/Desktop/pic/"
save_url = input('Save location for the comic (must be a folder): ')
# Chapter to start downloading from
cartoonStartIndex = input('Start from which chapter? Defaults to 1 if left blank: ')
if(cartoonStartIndex):
cartoonStartIndex = int(cartoonStartIndex)
else:
cartoonStartIndex = 1
# Chapter to stop downloading at
cartoonEndIndex = input('End at which chapter? Defaults to the last chapter if left blank: ')
if(cartoonEndIndex):
cartoonEndIndex = int(cartoonEndIndex)
else:
cartoonEndIndex = int(cartoonPagesNum)
# Start the download
login(username, password, cartoonUrl)
| [
"[email protected]"
] | |
859ea5495b24a8f17514422448cf086173fb93fa | 4ad464e04cb6d78d68ef79ebbfe7c848bee84248 | /setup.py | 8212e7ecbb5b6076215ad7cefeb8ac0ce45c1540 | [] | no_license | fpbattaglia/data_navigator | deecb2ea9f3ea2a23cde8f22ca40d95cc8bb036a | 9b2f4b0348bf589295e6dcace6fd5f76d15c47fa | refs/heads/master | 2020-05-04T23:15:10.978972 | 2019-04-10T16:28:31 | 2019-04-10T16:28:31 | 179,537,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | from setuptools import setup
setup(
name='data_navigator',
version='0.1',
packages=['data_navigator'],
url='https://www.github.com/fpbattaglia/data_navigator',
license='GPLv3',
    author='Francesco Battaglia',
author_email='[email protected]',
description='Basic data navigation facilities'
)
| [
"[email protected]"
] | |
5b956d94c1dd1ae6bc83a24c5571ebfeaddcdeba | 0f1f580f7e0a68abe1a261b696206f5a1a723c45 | /Week 1/grok/samples/1b/20.visualising fits image.py | 02e914a010983d89dc2bd740fe19254420b059f2 | [
"MIT"
] | permissive | Alfiesan/Assignments-Data-Driven-Astronomy-from-University-of-sydney-on-coursera- | 0e5489a33c65f20870d3e5c28044eaacd4d4c403 | 58fab1c413d7ad5693b1d63f14be05b0f5ec448c | refs/heads/master | 2022-04-10T20:31:38.007417 | 2019-02-05T00:40:43 | 2019-02-05T00:40:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | from astropy.io import fits
import matplotlib.pyplot as plt
x = fits.open('image0.fits')
data = x[0].data
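# x is an HDUList; index 0 is the primary HDU, whose .data attribute is a 2D numpy array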
# Plot the 2D array
plt.imshow(data, cmap=plt.cm.viridis)
plt.xlabel('x-pixels (RA)')
plt.ylabel('y-pixels (Dec)')
plt.colorbar()
plt.show()
| [
"[email protected]"
] | |
fec483ec7ffc645dc6d83b08f1f7592805d9a5fc | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /Jx4mjwEoFdfYuF9ky_10.py | a723d6a308abbe962a25372403471fc9bbe9f518 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | """
Write a function that takes an integer and:
* If the number is a multiple of 3, return `"Hello"`.
* If the number is a multiple of 5, return `"World"`.
* If the number is a multiple of both 3 and 5, return `"Hello World"`.
### Examples
hello_world(3) ➞ "Hello"
hello_world(5) ➞ "World"
hello_world(15) ➞ "Hello World"
### Notes
Don't forget to `return` the result.
"""
def hello_world(num):
if num%15==0:
return ("Hello World")
elif num%5==0:
return ("World")
elif num%3==0:
return ("Hello")
| [
"[email protected]"
] | |
da389f031fcf53136f23f5f78a7381189e3a6ea4 | 92c281a941fd1727723de9e086de29488a11a2bd | /CreateRatesInExcel.py | 2d7f973fd3c6b2d571339a1c6a627933ad20ad12 | [] | no_license | gtohill/MSOffice | 39c822434c4a0eb57780aada79c1d52540f58946 | 48b99e5717670d2084f019ae45fbe1502b05ef34 | refs/heads/master | 2021-07-09T01:35:20.022863 | 2019-02-04T15:00:57 | 2019-02-04T15:00:57 | 164,893,466 | 0 | 1 | null | 2020-07-23T08:26:45 | 2019-01-09T15:58:48 | Python | UTF-8 | Python | false | false | 1,529 | py | import names
from openpyxl import Workbook
from msxlsxparsing.Node import *
import random
def createExcelSpreadSheet():
wb = Workbook()
# grab the active worksheet
ws = wb.active
# Data can be assigned directly to cells
'''
ws['A1'] = 'Company'
ws['B1'] = '1 year'
ws['C1'] = '2 year'
ws['D1'] = '3 year'
ws['E1'] = '4 year'
ws['F1'] = '5 year'
'''
blist = open('banklist', 'r').read()
bank_list = blist.split(',')
banks = list()
for x in range(len(bank_list)):
if x%2 == 1:
banks.append(bank_list[x])
    # Base rates, used to calculate annual rates for each bank; a random factor is added or subtracted.
oneYearRate = .015
twoYearRate = .0175
threeYearRate = .02
fourYearRate = .0225
fiveYearRate = .025
r = 1
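    # openpyxl cell references are 1-indexed, so the row counter starts at 1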
for bank in banks:
one = round(oneYearRate+random.uniform(-.005, .005), 4)
two = round(twoYearRate+random.uniform(-.005, .005), 4)
three = round(threeYearRate + random.uniform(-.005, .005), 4)
four = round(fourYearRate + random.uniform(-.005, .005), 4)
five = round(fiveYearRate + random.uniform(-.005, .005), 4)
ws['A'+str(r)] = bank
ws['B'+str(r)] = one
ws['C'+str(r)] = two
ws['D'+str(r)] = three
ws['E'+str(r)] = four
ws['F'+str(r)] = five
# increment row counter(r)
r += 1
# Save the file
wb.save("rates.xlsx")
if __name__ == '__main__':
createExcelSpreadSheet()
| [
"Gt153328@"
] | Gt153328@ |
07d2f150b748c87902b6120e6bda4b00867f5af6 | ca554bba8b2796ef8863e79ac9db692f506dc85a | /src/pnl/__init__.py | 2be16fa8603fc1e712204627bceec6b195eef9ea | [] | no_license | gregasmaximus/pnl | 40cae446bf790ed39ead795758f11e7181553dde | 058fb0f13a3d0cc5821a264816c5a69705b4a9d7 | refs/heads/master | 2022-07-20T09:38:21.558882 | 2020-05-18T14:45:58 | 2020-05-18T14:45:58 | 262,369,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18 | py | from .pnl import * | [
"[email protected]"
] | |
9094e4d07aed08c64e5414f70929e4734ab6bfb8 | 6864b1f7480d81328b3f4d7a837fca44ca86947b | /kNN-MapReduce-Journal-2014/perf2-noconflit/script/A3-pgbj-loadbalancing.py | 26e4ab00c86ce12007c977978cb6eb365e4f949b | [] | no_license | lea92/knn-MapReduce | f3ec5e6f971c729d1fb0ec4b42251c407e80fc44 | 9c824aa973809b803f9adb425ac80d1d2f7081e9 | refs/heads/master | 2021-08-28T15:01:59.305003 | 2017-12-12T14:20:00 | 2017-12-12T14:20:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,138 | py | #!/usr/bin/python
from pylab import *
from readfile import *
from mycolor import *
CPT=0
CPT_S=1
CPT_R=2
TIME=3
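# Column indices into the data matrix: reducer id, #S element count, #R element count, elapsed time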
rc('font', size=26)
rc('legend', fontsize=21)
#-------------------------------------------------------------------------------
mark =['d','v','o','^','>','h','s','p','*','+']
colors = ['red','green','blue','silver','aqua','magenta','gold','black']
#-------------------------------------------------------------------------------
tab = ["geo_20r_400","greedy_20r_400","greedy_50r_400"];
nametab = ["geo 20 reducers","greedy 20 reducers","greedy 50 reducers"];
width =1
def func(path,colorfond='aqua', colorfond2='blue',cptindtab = 0) :
label,mat = file2matrix(path,4,';')
#-----------------------------------------------
fig =figure(figsize=(10, 8), dpi=80)
#fig.subplots_adjust(bottom=0.2, left=0.15, top = 0.9, right=0.85)
#fig.subplots_adjust(hspace=0.5)
ax = fig.add_subplot(1,1,1)
cpt=0
print mat[:,TIME]
ax.bar(mat[:,CPT]-width/2.0,mat[:,CPT_S]/1000,width,color=colorfond,alpha=0.7, edgecolor='black',label="#S")
ax.bar(mat[:,CPT]-width/2.0,mat[:,CPT_R]/1000,width,color=colorfond2, edgecolor='black',label="#R")
ax.grid()
#ax2 = ax.twinx()
#ax2.plot(mat[:,CPT],mat[:,TIME]/1000,color='red')
#ax2.grid()
ax.set_xlabel('Reducer number')
ax.set_ylabel('Number of Elements ($*10^{3}$)')
#for tl in ax.get_yticklabels():
#tl.set_color(colorfond2)
#ax2.set_ylabel('time(s)',color="red")
#for tl in ax2.get_yticklabels():
# tl.set_color('r')
#figtext(.02, .02, "An example of bad balancing for 4x10^5 elements by file\n")
xlabel('Reducer number')
#title(nametab[cptindtab].upper(),fontweight="bold"),
cptindtab+=1
leg = ax.legend(loc='upper left', fancybox=True, shadow=True)
    show()
    fig.savefig('../../img-perf/perso/pgbj/'+name+'.pdf',dpi=200)  # 'name' is the module-level loop variable below
#-----------------------------------------------
for name in tab :
path= "../data/loadbalancing/voronoi/"+name+".txt"
func(path,'#85c895','green')
#func( "../data/loadbalancing/dim/386-rand.txt",'#85c895','green')
#func( "../data/loadbalancing/dim/386-realtxt",'#85c895','green')
| [
"[email protected]"
] | |
a182bb0f67bd066ba32c09ab3fab8ceb823c6aff | b35643fad3490b2b0f3ae69da035aa5da771a6b2 | /pages/urls.py | 7b806b7e5a94317792f561634c7dbf4d04e19c9e | [] | no_license | joohongkim1/Django | 78034abf83a59f0374785c6ab4258a994e31375b | 793b3ed82b6525f089fd065ea13a39a865ad7388 | refs/heads/master | 2020-07-23T23:59:41.586587 | 2019-09-11T06:56:51 | 2019-09-11T06:56:51 | 207,744,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | from django.urls import path
from . import views  # import views from the current directory (this app's package)
# domain.com/pages/____
urlpatterns = [
path('greeting/<str:name>/', views.greeting),
path('', views.index),
]
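# Example (illustrative): a request to /pages/greeting/alice/ is routed to
# views.greeting(request, name='alice'); the empty pattern maps /pages/ to views.index.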
| [
"[email protected]"
] | |
5f8c96a265627cec49242877fb003b478ceee9d7 | d1d6f64055caecf7b2833eb055d2c5b0dbe1ba9d | /src/leilao/principal.py | 39a1ac64a8bdef08b2befcda2194c76cdc168ae8 | [] | no_license | RodolfoSouza96/Leilao | 74154f69e1cea0cea6430526ae60aecd31ef7eee | 414d0c57aedd9098912365c7e0b45353c87b36b8 | refs/heads/master | 2023-09-03T22:32:50.315246 | 2021-11-08T20:28:26 | 2021-11-08T20:28:26 | 425,984,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | from dominio import Usuario, Lance, Leilao, Avaliador
gui = Usuario('Gui')
yuri = Usuario('Yuri')
lance_do_yuri = Lance(yuri, 100.0)
lance_do_gui = Lance(gui, 150.0)
leilao = Leilao('Celular')
leilao.lances.append(lance_do_yuri)
leilao.lances.append(lance_do_gui)
for lance in leilao.lances:
    print(f'User {lance.usuario.nome} placed a bid of {lance.valor}')
avaliador = Avaliador()
avaliador.avalia(leilao)
print(f'The lowest bid was {avaliador.menor_lance} and the highest was {avaliador.maior_lance}')
| [
"[email protected]"
] | |
cb4da288a2ec492bb5cbfe83995a1a4b91336e75 | 94824c3e6d12ace4c88125930cc3539b718d2fa2 | /primes.py | ce47f52f30bb57b07e55bd4cc8dcefed4a9fbc92 | [] | no_license | ctwtruscottwatters/primes | 9bf832a9e2c8f8d06041f64343c5048d15ccf2a8 | 4c082b05246367328244be0c4b40df48e3d7ea43 | refs/heads/main | 2023-08-14T17:10:01.579213 | 2021-09-24T22:13:22 | 2021-09-24T22:13:22 | 410,114,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,685 | py | #!/usr/bin/python
# From my Python lectures. Trying to get a bit more practical by transfering from C/C++ to Python even to compile to an .EXE PORTABLE EXECUTABLE FORMAT BINARY
# Eager to learn pywin32 API, really drill down desktop GUI development and the real ins and outs of the Python standard library
# as well as object orientation, polymorphism, inheritance, and tricky classful things about the Python language as de-facto for development
# As I delve more into C and C++ however, instead of going the GNU way, I'm most certainly trying to think up large scale projects
# When I read Stephen Hawking's 'Large Scale Structure of Spacetime' it changed my life.
# Code borrowed from a circa 2007 Python 3 Lecture
# In C would love to write instead of 100 lines 100 headers or 100 multiple-.c-linkages
# Very interested in writing code that spans multiple headers, uses ingenuity and fine-grained and tailored approaches to solving CS problems
# e.g. source code for the TCP/IP protocol suite, their protocols in RFCs or MSDNs, file-formats or assembly language
# e.g. device driver programming, DLL programming, compiler, memory based / processor exploit mitigation technologies
# e.g. kernel compilation, race conditions and use-after-frees, heap overflows instead of just stack overflows
# recently wrote up an x64 stack overflow for my own code, really do need to stick the null byte in, e.g. 00007FFFFFFFFFFF
# ASLR
def isprime(n):
    if n < 2:  # 0 and 1 are not prime (the original check only rejected 1)
        return False
    for x in range(2, n):
        if n % x == 0:
            print("{} equals {} x {}".format(n, x, n//x))
            return False
    else:  # the loop completed without finding a divisor
        print("{} is a prime number :-)".format(n))
        return True
tai = 0
while tai < 20000:
isprime(tai)
tai = tai + 1
| [
"[email protected]"
] | |
d35bd9c2c278422fd660e3847b92ab37e1b9496e | 1b7530b4548a6d025cf250aa353ffd9fa75e44a5 | /main.py | 2884bf74ee7c5bc3552aeb21cc365aa084bae70c | [] | no_license | ReinieRSrtiboS/snake-master | 9c630838b962c50fb2f554070ff1e823f0b5dc71 | 5c7d0ce496ea91e0c29329fa9029b7073f0da368 | refs/heads/master | 2021-09-05T18:38:48.354701 | 2018-01-30T09:46:17 | 2018-01-30T09:46:17 | 117,999,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,204 | py | from tkinter import *
import time
from snake import Snake
from board import Board
root = None
canvas = None
scale = None
canvas_width = 800
canvas_height = 800
tics_per_second = 250
""" BEGIN GAME SETTINGS """
# Board width and height
board_width = 15
board_height = 15
# Maximum number of food blocks on the board
food_blocks_max = 3
# Maximum number of wall blocks on the board
wall_blocks_max = 2
# Indicates whether the test setup needs to be used; turn to False to use wall_blocks_max for spawning random walls
test_config = False
# Number of turns to starve, -1 for disabled
starvation_tics = -1
""" END GAME SETTINGS """
# game objects
snake = None
board = None
def callback():
update()
def main():
global root, canvas, canvas_height, canvas_width, board, snake, scale, start
start = time.time()
root = Tk()
root.title("Snake")
canvas = Canvas(root, width=canvas_width, height=canvas_height)
scale = Scale(root, from_=0, to=250, orient=HORIZONTAL, length=canvas_width, tickinterval=25,
label="Turns Per Second")
scale.set(tics_per_second)
scale.bind("<ButtonRelease-1>", on_slider_update)
canvas.pack()
scale.pack(side=LEFT)
b = Button(root, text="Next Step", command=callback)
b.pack()
snake = Snake(board_width, board_height, starvation_tics)
board = Board(board_width, board_height, canvas_width, canvas_height, snake, food_blocks_max, wall_blocks_max,
test_config)
board.draw(canvas)
canvas.after(int(1000 / tics_per_second), game_loop)
mainloop()
def game_loop():
global canvas, tics_per_second
if tics_per_second > 0:
update()
canvas.after(int(1000 / tics_per_second), game_loop)
else:
canvas.after(int(1000), game_loop)
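# With the slider at 0 the loop idles, re-polling once per second so play resumes when the slider is raised.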
def update():
global tics_per_second, board, snake, canvas
# update gamestate
if snake.update(board):
print(str(time.time() - start))
snake.reset(board)
# clear canvas
canvas.delete("all")
# draw new state
board.draw(canvas)
def on_slider_update(event):
global scale, tics_per_second
tics_per_second = scale.get()
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
4d1e52f60ebc4c8d4a60d85a6e0d46289da1a4c4 | f176975a314b6f8f4c7b931c6057caf20988d12d | /problems/uri_2448_postman/uri_2448_postman.py | dbf9f4c68510cb044c5e4c8853107f7c203b51a4 | [] | no_license | fgmacedo/problems_ads | 4b3226307e66a37fd1848dcc25f3fa6c78567d98 | d510a9f8788f99c2559efddd54235cb3a134989a | refs/heads/main | 2023-05-20T03:29:57.814018 | 2021-06-15T01:54:56 | 2021-06-15T01:54:56 | 352,163,858 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | #!/usr/bin/env python3
import sys
rl = sys.stdin.readline
rl()  # discard the first line (n, m header info)
houses = {x: idx for idx, x in enumerate(rl().split())}
cum_time = 0
current_house_index = 0
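# Each delivery costs the absolute difference between consecutive house indices, so the total is O(m).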
for order in rl().split():
order_house_index = houses[order]
cum_time = cum_time + abs(order_house_index - current_house_index)
current_house_index = order_house_index
sys.stdout.write(f"{cum_time}\n")
| [
"[email protected]"
] | |
54d230e035660f1c04dc37607042960a7e45f3ef | 77175384fe787daf8b0c90ce4fdc4a42c528910b | /statistics/separatePreSex.py | 42d8ac5e9160e2e3ad0c41a1b2ac3d84ff2f35ae | [] | no_license | FloreU/MobileData | 837683b29180c14c464523b37da50a39744afbee | 7ab2709ff3506d5da7df1829f35a836bd30d6fed | refs/heads/master | 2021-01-19T21:40:59.102450 | 2018-01-03T04:41:16 | 2018-01-03T04:41:16 | 88,687,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 814 | py | # -*- coding: UTF-8 -*-
# Split the overall sex table into pre-split sex tables, one per date range
import arcpy
import sys
from statistics import separate
reload(sys)
sys.setdefaultencoding('utf-8')
table_name = "GRID_SEX"
pre_table_name = [table_name + "_0_10",
table_name + "_11_20",
table_name + "_21_30"]
time_range_list = [["2016-06-01", "2016-06-11"],
["2016-06-11", "2016-06-21"],
["2016-06-21", "2016-06-31"]]
date_filed = "TIME_DUR"
arcpy.env.workspace = "E:/InformationCenter/Time_Sex.gdb"
print("前期导入 -- 100%")
try:
arcpy.env.overwriteOutput = True
for i in range(len(pre_table_name)):
        separate.separate_table_days_range(table_name, pre_table_name[i], date_field, time_range_list[i])
except Exception as err:
print(err.args[0])
| [
"[email protected]"
] | |
66c4b75268efaf39bcd2cb3ab76830c23863246a | df7267a5200400c18dd35d358d33fffcaf6d4623 | /crime_term_project/data.py | 5f8cf89870ada5695f33e660c733332c004ce5c5 | [
"MIT"
] | permissive | gSuranjan/eece2300_termproject | f4f3bf0ec07eba899b758b4989d95f39b4e8b3e6 | a4631409bbda347fbbc867f35059908ed558ec08 | refs/heads/master | 2020-04-02T03:10:19.305237 | 2018-10-20T13:05:25 | 2018-10-20T13:05:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,570 | py |
"""
Module for data handling. Specifically, Crime Dataset.
"""
import pandas as pd
import numpy as np
def load_data(filename):
"""
Function to load data and attribute
:param filename: raw data
:return: creates dataframe from raw data
"""
df = pd.read_csv(filename, header=None)
return (df)
def summarize_data (df) :
"""
Function to summarize data and find number of missing datapoints for each column..
:param df: dataframe
:return: new df with an additional row with coonts of missing values
"""
count = 0
n_a = 0
df.loc['Total_Stat_Count'] = None
for col in range(0, len(df.columns)):
count = 0
n_a = 0
for row in range(0, len(df.index) - 1):
if df[col][row] == '?':
n_a = n_a + 1
else:
count = count + 1
df[col][row + 1] = count
return (df)
def label_data (attributesfile, df):
"""
Function that loads the attributes file and adds it to dataframe as column labels
:param attributesfile: file with attributes
:param df: dataframe with data
:return: labeled dataframe
"""
attributes = pd.read_csv(attributesfile, header=None) #creates dataframe from attributes file
temp = pd.DataFrame() #temporary dataframe to store attributes in
attribute = attributes[0].str.split(' ').str[1] #gets just the attribute name
temp[0] = attribute
temp = temp.transpose()
header = temp.iloc[0] #gets the row of attributes from temp
labeled_df = df.rename(columns = header) #sets the attributes as column labels
return(labeled_df)
def clean_data (df2):
"""
Function to delete columns with 10% data missing
:param df2: summarized dataframe
:return: cleaned df
"""
num_rows = int(len(df2.index.values)-1) #gets number of attributes
for column in df2:
if df2.loc['Total_Stat_Count'][column] < (.90*num_rows): #gets rows with more than 10% data points missing
df2 = df2.drop([column], axis=1) #deletes rows
df2 = df2.drop(['Total_Stat_Count'], axis=0)
return df2
def select_attributes(list_attributes):
"""
Function to delete all unwanted attributes
:param list_attributes: list of attributes we want to analyze
:return: df with selected attributes
"""
pass
#potential fcn to delete rows with missing data points
def decision_tree_model(df):
"""
Function to call model on our dataframe and classifies cities
:param df: dataframe
:return: classified cities based on model
"""
pass
def naive_bayesian_model(df):
"""
Function to call model on our dataframe and classifies cities
:param df: dataframe
:return: classifies cities based on model
"""
pass
def perf_eval (our_results, actual_results):
"""
Function to compare our results to the original data
:param our_results: results from model
:param actual_results: results from original dataframe
:return: evaluation of the accuracy of the models
"""
pass
def main():
df = load_data('C:\\Users\\catic\\Documents\\EECE 2300\\python\\crime_term_project\\data\\raw\\communities.data.txt')
#print (df)
df2 = summarize_data(df)
#print(df2)
df_attributes = label_data('C:\\Users\\catic\\Documents\\EECE 2300\\python\\crime_term_project\\data\\raw\\communities.attributes.txt', df2)
#print(df_attributes)
cleaned_df = clean_data(df_attributes)
#print (cleaned_df)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
110d8236d06e04f87bbdde93efe2a53ff6c8bc7d | 132bd3619b2ddd2fed5ffd96c63acba4c431b82d | /light-communication-signaling/src/utils/dtw.py | ead589a6b7b0536485c86e1f0cc74a13f2374203 | [] | no_license | TUM-cm/iPresence | 56927e326c2f8c6a15878bc11289613ea81f3ecc | 23bde5449817d2fb846f0e3ffe8ad1de28aeab68 | refs/heads/master | 2022-09-19T02:51:03.775798 | 2020-06-05T14:35:23 | 2020-06-05T14:35:23 | 267,056,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,046 | py | from __future__ import division
import numpy
import rpy2.robjects.numpy2ri
rpy2.robjects.numpy2ri.activate()
from rpy2.robjects import r
from rpy2.robjects.packages import importr
'''
To use external R packages with rpy2 under Conda, install those packages into the Conda
environment; packages installed from a standalone R session will not be visible. To install
the dtw library, for example, do the following:
https://stackoverflow.com/questions/32983365/rpy2-cannot-find-installed-external-r-packages
https://cran.r-project.org/web/packages/dtw/dtw.pdf
conda install rpy2
install.packages(file.choose(), repos=NULL)
remove.packages("...")
packageurl <- "https://cran.r-project.org/src/contrib/Archive/proxy/proxy_0.4-20.tar.gz"
install.packages(packageurl, repos=NULL, type="source")
library(dtw)
query <- c(0, 0, 1, 2, 1, 0, 1, 0, 0)
template <- c(0, 1, 2, 0, 0, 0, 0, 0, 0)
alignment <- dtw(query, template, keep=TRUE)
alignment$distance
alignment$normalizedDistance
'''
class Dtw:
def __init__(self):
# Set up R namespaces
try:
r('memory.limit(size=16000)')
self.R = rpy2.robjects.r
self.DTW = importr('dtw')
except Exception as err:
print(err)
    # dissimilarity matrix; dist works on rows by default, so transpose to compare columns
def dissimilarity(self, data):
return self.R.dist(data.transpose(), data.transpose(), method="DTW")[1]
def apply_warp_query(self, alignment, query):
warp_query = self.R.warp(alignment, index=False)
warp_query_idx = numpy.array(warp_query, dtype=int) - 1
return query[warp_query_idx]
def apply_warp(self, alignment, query, template):
warp_query = self.R.warp(alignment, index=False)
warp_query_idx = numpy.array(warp_query, dtype=int) - 1
warp_template = self.R.warp(alignment, index=True)
warp_template_idx = numpy.array(warp_template, dtype=int) - 1
return query[warp_query_idx], template[warp_template_idx]
def calculate_alignment(self, query, template):
return self.R.dtw(query, template, keep=True)
def get_normalized_distance(self, alignment):
return alignment.rx('normalizedDistance')[0][0]
def get_distance(self, alignment):
return alignment.rx('distance')[0][0]
def install_dtw_package():
utils = importr("utils")
#utils.install_packages("dtw")
utils.install_packages("dtw", repos="http://cran.us.r-project.org")
def platform_info():
base = importr('base')
print(base._libPaths())
print(r('memory.limit()'))
def test_dtw():
dtw = Dtw()
template = numpy.array([0, 0, 1, 2, 1, 0, 1, 0, 0], dtype=numpy.double)
query = numpy.array([0.0, 1, 2, 0, 0, 0, 0, 0, 0])
alignment = dtw.calculate_alignment(query, template)
print("distance:", dtw.get_distance(alignment))
print("normalized distance:", dtw.get_normalized_distance(alignment))
def main():
#install_dtw_package()
#platform_info()
test_dtw()
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
7f6b9f5f0f268f913fed26fad0d7a716a96b9d2b | 0dc66a62fcb12d393c2dc832c87baf184439017f | /skews/x5_5_extract_fig.py | 062e53b4c945dfc1ea4cfaef9434e762f5d344cd | [
"MIT"
] | permissive | Conxz/brainskew | bccd1863bf2ae2e08679f1ca41a91031c83ccff3 | 5b7ec6642f3f7440f201c3ac2005733e56cc0d71 | refs/heads/main | 2023-04-28T22:03:06.674219 | 2021-05-09T08:46:40 | 2021-05-09T08:46:40 | 365,706,284 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,902 | py |
import os
from glob import glob
import numpy as np
sidListFile = '../doc/sidList.txt'
sidList = [sid.strip() for sid in open(sidListFile)]
niiDir = '/data/clusterfs/lag/users/xiakon/torque/dat'
niiFileName = 'T1/T1_brain_to_MNIsymm_dof9.nii.gz'
outDir = '/data/clusterfs/lag/users/xiakon/torque/dat'
z_slice = 73
z_value = str(round(z_slice/192.0, 2))
figFileName_z = 'dof9_z'+str(z_slice)+'.png'
y_slice = 105
y_value = str(round(y_slice/228.0, 2))
figFileName_y = 'dof9_y'+str(y_slice)+'.png'
x_slice = 90
x_value = str(round(x_slice/192.0, 2))
figFileName_x = 'dof9_x'+str(x_slice)+'.png'
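# FSL's slicer takes -x/-y/-z as a fraction of the volume, hence slice index / dimension size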
docDir = '../doc'
run_sge_str = 'fsl_sub -q single.q -l ./sgelog '
#run_sge_str = ''
sidError = []
for sid in sidList:
print sid
niiFile = os.path.join(niiDir, sid, niiFileName)
if os.path.exists(niiFile):
sidDir = os.path.join(outDir, sid)
if not os.path.exists(sidDir):
os.mkdir(sidDir)
outFileDir = os.path.join(sidDir, 'T1')
if not os.path.exists(outFileDir):
os.mkdir(outFileDir)
outFile_z = os.path.join(outFileDir, figFileName_z)
outFile_y = os.path.join(outFileDir, figFileName_y)
outFile_x = os.path.join(outFileDir, figFileName_x)
if not os.path.exists(outFile_z):
os.system(run_sge_str + 'slicer ' + niiFile + ' -u -z ' + str(z_value) + ' ' + outFile_z)
if not os.path.exists(outFile_y):
os.system(run_sge_str + 'slicer ' + niiFile + ' -u -y ' + str(y_value) + ' ' + outFile_y)
if not os.path.exists(outFile_x):
os.system(run_sge_str + 'slicer ' + niiFile + ' -u -x ' + str(x_value) + ' ' + outFile_x)
#print run_sge_str + 'slicer ' + niiFile + ' -u -z ' + str(z_value) + ' ' + outFile
else:
sidError.append(sid)
np.savetxt(os.path.join(docDir, 'sidError_dof9_slicer.txt'), sidError, fmt='%s')
| [
"[email protected]"
] | |
3b8c37a8c630f9d66a9ad1fb72a9cf67136da6b6 | 3371510b9bfb3afb15f0e0855cda1ad49eb1dd67 | /URI-ES/MATEMÁTICAS/1193 - Base Conversion.py | 91566b2e0f0037da89559a15de3b751f45d4d466 | [] | no_license | felipesdias/Extractor-URI-Online-Judge | 30d031dbf8d55b62a83124e7b20d121ff98fee92 | 74717bd2e51939e7be67b552802b1a1b9612849e | refs/heads/master | 2021-08-17T08:20:54.696524 | 2017-11-21T00:23:33 | 2017-11-21T00:23:33 | 111,474,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 707 | py | # Autor: Felipe Souza Dias <[email protected]>
# Name: Base Conversion
# Level: %d
# Category: MATEMÁTICAS (Mathematics)
# URL: https://www.urionlinejudge.com.br/judge/es/problems/view/1193
n = int(input())
case = 1
while n:
numero, tipo = input().split(' ')
print("Case {0}:".format(case))
if(tipo == 'dec'):
val = int(numero)
elif(tipo == 'bin'):
val = int(numero, 2)
else:
val = int(numero, 16)
if(tipo != 'dec'):
print("{0:d} dec".format(val))
if(tipo != 'hex'):
print("{0:x} hex".format(val))
if(tipo != 'bin'):
print("{0:b} bin".format(val))
print('')
n -= 1
case += 1
| [
"[email protected]"
] | |
5a1f74ad3676d0f2a76bd6d1585b841542c8d2f3 | 352373d78a863a9566b21c61e08a0b92fc3f37b8 | /Segundo proyecto/holaMundo/__init__.py | a70ae121e83b5cbccfa4d16335a5585e49a38f49 | [] | no_license | Unainigo/eclipsePortafolio | fd7af2efd44895b51847392a6971462b7424fdec | 64f87c3ec40838f4610d38d7e9193fe03d15f936 | refs/heads/master | 2023-04-14T03:56:13.662153 | 2021-04-22T22:30:26 | 2021-04-22T22:30:26 | 360,691,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82 | py | print("este es un proyecto para probar a subir dos proyectos diferentes a github") | [
"[email protected]"
] | |
3f09cad1ee5ac1ecec7a509039e62e427a824490 | 388a76cc54c2e818298d58245cc47543d0e747ed | /wav2bin/src/draw_graph.py | 13803de7a48c625befc94c94760b5b370249dc8e | [
"MIT"
] | permissive | jvanderen1/WAV2BIN | f297884d8781dacda2fecf8356fba4281d6a0df2 | 84b60a984b66cc05c8811e799fffc7aa97ceb1a9 | refs/heads/master | 2021-09-08T01:54:58.069932 | 2018-03-05T17:46:38 | 2018-03-05T17:46:38 | 105,501,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,971 | py | import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
import scipy.signal
import warnings
from collections import OrderedDict
warnings.simplefilter('ignore', np.RankWarning) # Turns off warning for large polynomial degrees . . .
# The variables below are set for quick changes without the hassle of sifting through code . . .
POLY_DEG = 25
x_AXIS_TITLE = "Sample # (ROM Address)"
y_AXIS_TITLE = "Amplitude"
x_MIN, x_MAX = 0, 255
y_MIN, y_MAX = 0, 255
x_MINOR_TICKS, x_MAJOR_TICKS = 61, 4
y_MINOR_TICKS, y_MAJOR_TICKS = 29, 8
WAVEFORM_COUNT = 32
DRAW_WINDOW = 1.5 # Used to give user leeway when drawing on graph . . .
PAGES = 4
FIG_COUNT = 8
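# PDF export layout: PAGES * FIG_COUNT = 32 subplots, one per waveform . . .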
class DrawGraph(object):
"""Used in conjunction with tkinter to allow hand-drawn graphs to be generated
Components:
:param self.__Enter_cid: CID for entering axis
:param self.__Exit_cid: CID for exiting axis
:param self.__Motion_cid: CID for moving mouse
:param self.ax: Holds the axis within self.fig
:param self.canvas: The visual plot on top of self.ax
:param self.current_waveform: Index to keep track of current waveform
:param self.current_x: Temp variable for hand drawing (for x)
:param self.current_y: Temp variable for hand drawing (for y)
:param self.fig: Holds figure lines will be in
:param self.line: Line plotted on axis
:param self.line_set: List of 'LinePoints' objects
:param self.x_max: Upper x bound
:param self.x_min: Lower x bound
:param self.y_max: Upper y bound
:param self.y_mid_point: Mid point location on y axis
:param self.y_min: Lower y bound
"""
def __init__(self):
""" Initializes all necessary variables """
self.fig = plt.figure() # Generates a figure for the plot to lie on . . .
self.ax = create_graph(x_axis=x_AXIS_TITLE, y_axis=y_AXIS_TITLE,
x_min=x_MIN, x_max=x_MAX,
y_min=y_MIN, y_max=y_MAX,
x_major_ticks=x_MAJOR_TICKS, x_minor_ticks=x_MINOR_TICKS,
y_major_ticks=y_MAJOR_TICKS, y_minor_ticks=y_MINOR_TICKS,
fig=self.fig, subplot_section=[1, 1, 1])
# The minimum/maximum values for x and y plot points are recorded . . .
self.x_min = x_MIN
self.x_max = x_MAX
self.y_min = y_MIN
self.y_max = y_MAX
self.y_mid_point = (self.y_max + self.y_min) / 2
# To better differentiate plot points, a list of lines are kept . . .
self.line_set = [LinePoints() for i in range(WAVEFORM_COUNT)]
self.line = self.ax.plot(0, 0)[0] # Returns 1st (and only) line generated to graph . . .
# Components not yet initialized in this class are listed below . . .
self.canvas = None # Canvas used for the user to draw graph on . . .
self.current_waveform = None # Index used for keeping track of working waveform . . .
# Each event id is tracked for enabling/disabling proper events . . .
self.__Motion_cid = None
self.__Enter_cid = None
self.__Exit_cid = None
# Variable used to reduce component tracing . . .
self.current_x = None
self.current_y = None
# END def __init__() #
def change_amp(self, amp: float):
"""Changes the current waveform's amplitude
Keyword arguments:
:param amp: Amplitude factor used
"""
        # Multiplies the data by the given amplitude factor . . .
self.line_set[self.current_waveform].y *= amp
# Changes if data is still in bounds (and takes action, if needed) . . .
self.__check_plot_details()
# END def change_amp() #
def change_freq(self, freq: int):
"""Changes the current waveform's frequency
Keyword arguments:
:param freq: Frequency factor used
"""
append_data = 0 # Used for appending any missing points . . .
y_point = 0 # Keeps track of new y data point . . .
y_array = np.array([]) # Keeps track of set of new y data points . . .
# Performs an averaging of the data points for frequency change . . .
for i in range(self.x_max - self.x_min + 1):
y_point += self.line_set[self.current_waveform].y[i]
if (i + 1) % freq == 0: # Captures set of points and puts them in np.array . . .
y_array = np.append(y_array, [y_point / freq])
y_point = 0
# Creates any multiple copies of line for frequency change . . .
self.line_set[self.current_waveform].y = np.tile(y_array, [freq])
# Fills in any missing data points (if needed) . . .
while self.line_set[self.current_waveform].y.size < (self.x_max - self.x_min + 1):
self.line_set[self.current_waveform].y = np.append(self.line_set[self.current_waveform].y,
self.line_set[self.current_waveform].y[append_data])
append_data += 1
# Changes if data is still in bounds (and takes action, if needed) . . .
self.__check_plot_details()
# END def change_freq() #
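    # Illustration (added, not in the original source): with freq=2 the loop above
    # averages consecutive pairs of samples into a half-length array, and np.tile then
    # repeats it twice, doubling the number of cycles across the same 256-sample window . . .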
def change_function(self, name: str, mix_func: bool, cycles: float, wav_num: int = None):
"""Changes the current waveform by either mixing or overwriting waveform with function
Keyword arguments:
:param name: Name of function being used
:param mix_func: Boolean used to control whether user mixes function or not
        :param cycles: Number of cycles the function will complete across the window
:param wav_num: Used to index another waveform user created
"""
x_array = np.linspace(self.x_min,
self.x_max,
self.x_max - self.x_min + 1)
y_array = np.array([])
# Looks at what function user has selected . . .
if name in {"Sine", "Cosine", "Square", "Sawtooth"}:
# To fill the graph, a custom frequency is generated (using name and cycles as an input)
freq = (cycles * 2 * np.pi) / (self.x_max - self.x_min)
y_array = (self.y_max - self.y_mid_point) * FUNCTIONS[name](freq * x_array) + self.y_mid_point
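            # e.g. cycles=1.0 spans exactly one period over samples 0..255, scaled to the full 0..255 range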
elif name == "Random":
# To use random, cycles will be casted as an int . . .
cycles = self.__round_int(cycles)
if cycles < 1:
cycles = 1
y_array = (self.y_max - self.y_min) * np.random.random_sample((x_array.size // cycles,)) + self.y_min
y_array = np.tile(y_array, [cycles])
# Makes sure there's enough y data points . . .
append_index = 0
while y_array.size < (self.x_max - self.x_min + 1):
y_array = np.append(y_array, y_array[append_index])
else: # name == "Waveform"
# Checks to see if there is a waveform that can be copied . . .
if not self.line_set[wav_num].drawn:
return # Does nothing if array is empty . . .
# To use random, cycles will be casted as an int . . .
cycles = self.__round_int(cycles)
if cycles < 1:
cycles = 1
y_point = 0
# Performs an averaging of the data points for frequency change . . .
for i in range(self.x_max - self.x_min + 1):
y_point += self.line_set[wav_num].y[i]
if (i + 1) % cycles == 0: # Captures set of points and puts them in np.array . . .
y_array = np.append(y_array, [y_point / cycles])
y_point = 0
# Creates any multiple copies of line for frequency change . . .
y_array = np.tile(y_array, [cycles])
# Makes sure there's enough y data points . . .
append_index = 0
while y_array.size < (self.x_max - self.x_min + 1):
y_array = np.append(y_array, y_array[append_index])
# Checks whether the user selected to mix and if the line is drawn . . .
if mix_func and self.line_set[self.current_waveform].drawn:
self.line_set[self.current_waveform].y += y_array
else:
self.line_set[self.current_waveform].x = x_array
self.line_set[self.current_waveform].y = y_array
self.line_set[self.current_waveform].drawn = True
if self.__Enter_cid is not None:
self.canvas.mpl_disconnect(self.__Enter_cid)
self.__Enter_cid = None
self.__check_plot_details()
self.plot_current_data()
# END def change_function() #
def change_level(self, level: int):
"""Changes the level of the current plot
Keyword Arguments:
:param level: amount graph needs to move
"""
self.line_set[self.current_waveform].y += level # Adds level value to graph . . .
self.__check_plot_details()
self.plot_current_data()
# END def change_level() #
def clear_graph(self):
"""Clears 'LinePoints' data"""
# No need to clear if graph is already cleared . . .
if not self.line_set[self.current_waveform].drawn:
return
self.line_set[self.current_waveform] = LinePoints() # Resets current line . . .
# Re-references current_x and current_y for drawing . . .
self.current_x = self.line_set[self.current_waveform].x
self.current_y = self.line_set[self.current_waveform].y
self.plot_current_data()
# Re-enable entering axis . . .
self.__Enter_cid = self.canvas.mpl_connect('axes_enter_event', self.__enter_axes)
# END def clear_graph() #
def export_data(self) -> list:
"""Exports data from graph to a file provided
returns: list of integer y data for each waveform on the graph
"""
data_to_return = []
for line in self.line_set:
if not line.drawn:
data_to_return.append([self.y_min] * (self.x_max - self.x_min + 1))
else:
data_to_return.append(np.rint(line.y).astype(int)) # Ensures ints are being received . . .
return data_to_return
# END def export_data() #
def print_to_pdf(self, file_name: str):
"""Exports graph data to pdfs
Keyword arguments:
:param file_name: Name of file being saved to
"""
# Opens pdf for printing graphs to . . .
pp = PdfPages(file_name)
fig = plt.figure()
# Creates 'fig_count' amount of axis' for printing on same page . . .
ax = []
for i in range(FIG_COUNT):
# Creates graphs that look the same . . .
ax.append(create_graph(x_axis='', y_axis='',
x_min=x_MIN, x_max=x_MAX,
y_min=y_MIN, y_max=y_MAX,
x_major_ticks=x_MAJOR_TICKS, x_minor_ticks=x_MINOR_TICKS,
y_major_ticks=y_MAJOR_TICKS, y_minor_ticks=y_MINOR_TICKS,
fig=fig, subplot_section=[4, 2, i + 1]))
ax[i].set_yticklabels([])
ax[i].set_xticklabels([])
# Prepares each axis for each page . . .
for page in range(PAGES):
# Plots data . . .
for current_figure in range(FIG_COUNT):
ax[current_figure].plot(self.line_set[page * FIG_COUNT + current_figure].x,
self.line_set[page * FIG_COUNT + current_figure].y,
color='b')
# Saves current subplots to page . . .
pp.savefig()
# Removes last plotted data . . .
for current_figure in range(FIG_COUNT):
ax[current_figure].lines.pop(0)
pp.close()
# END def print_to_pdf() #
def plot_current_data(self):
"""Plots current data"""
self.line.set_data(self.line_set[self.current_waveform].x, self.line_set[self.current_waveform].y)
self.canvas.draw()
# END def plot_current_data() #
def set_current_plot(self, current_waveform: int):
"""Plots current line and reflects changes through canvas
Keyword arguments:
:param current_waveform: Index of waveform desired to be used
"""
self.current_waveform = current_waveform # Current waveform number is updated . . .
self.ax.set_title("Waveform %d" % current_waveform) # Axis title is updated for current waveform . . .
self.plot_current_data()
# Will only allow the user to draw a line if LinePoints.drawn is True . . .
if not self.line_set[self.current_waveform].drawn:
# Variables current_x and current_y are only used for hand-drawing . . .
self.current_x = self.line_set[self.current_waveform].x
self.current_y = self.line_set[self.current_waveform].y
# Reset most cid values . . .
self.__Motion_cid = None
self.__Exit_cid = None
# If 'axes_enter_event' already enabled, no need to re-enable . . .
if self.__Enter_cid is None:
self.__Enter_cid = self.canvas.mpl_connect('axes_enter_event', self.__enter_axes)
# END def set_current_plot() #
def __check_plot_details(self):
"""Checks to make sure plot is right size and is made up of integers"""
# Only rescale when a y value overflows the desired boundaries . . .
if self.line_set[self.current_waveform].y.max() > self.y_max or \
self.line_set[self.current_waveform].y.min() < self.y_min:
self.__rescale_to_fit()
# END def __check_plot_details() #
def __curve_fit(self):
"""Creates a line of best fit for the current plotted data"""
# Converts x and y points to numpy array . . .
self.line_set[self.current_waveform].x = np.array(self.line_set[self.current_waveform].x)
self.line_set[self.current_waveform].y = np.array(self.line_set[self.current_waveform].y)
coefficients = np.polyfit(self.line_set[self.current_waveform].x, # Creates coefficients for a polynomial of
self.line_set[self.current_waveform].y, # of degree POLY_DEG . . .
POLY_DEG)
# Creates a function using the coefficients . . .
f = np.poly1d(coefficients)
self.line_set[self.current_waveform].x = np.linspace(self.x_min, # Creates an equally spaced set of x points
self.x_max, # at every integer . . .
self.x_max - self.x_min + 1)
self.line_set[self.current_waveform].y = f(self.line_set[self.current_waveform].x)
self.__check_plot_details()
self.plot_current_data()
self.line_set[self.current_waveform].drawn = True # A waveform is considered drawn at this point . . .
# END def __curve_fit() #
def __enter_axes(self, event):
"""Method called after axis has been entered
Keyword arguments:
:param event: Holds event data
"""
# Makes sure user enters from left side of window . . .
if event.xdata <= self.x_min + DRAW_WINDOW:
self.current_x.append(event.xdata)
self.current_y.append(event.ydata)
if self.__Motion_cid is None:
self.__Motion_cid = self.canvas.mpl_connect('motion_notify_event', self.__hand_draw_on_graph)
if self.__Exit_cid is None:
self.__Exit_cid = self.canvas.mpl_connect('axes_leave_event', self.__exit_axes)
# END def __enter_axes() #
def __exit_axes(self, event):
"""Method called after axis has been left
Keyword arguments:
:param event: Holds event data (unused)
"""
# All events are disabled when user leaves axis . . .
self.canvas.mpl_disconnect(self.__Motion_cid)
self.canvas.mpl_disconnect(self.__Enter_cid)
self.canvas.mpl_disconnect(self.__Exit_cid)
# Points are processed once the cursor leaves the axis . . .
self.__curve_fit()
self.__Motion_cid = None
self.__Enter_cid = None
self.__Exit_cid = None
# END def __exit_axes() #
def __hand_draw_on_graph(self, event):
"""Allows the user to draw proper functions on graph
Keyword arguments:
:param event: Holds event data
"""
# Prevents the user from plotting non-functions . . .
# self.current_x[-1] returns the maximum x recorded so far, in this case . . .
if event.xdata > self.current_x[-1]:
# A list append is much faster than a numpy append . . .
self.current_x.append(event.xdata)
self.current_y.append(event.ydata)
self.line.set_data(self.current_x, self.current_y)
self.canvas.draw()
# END def __hand_draw_on_graph() #
def __rescale_to_fit(self):
"""Corrects plot data that overflows over the y boundaries"""
# Below, this algorithm is used to compress the graph . . .
overflow = (np.absolute(self.line_set[self.current_waveform].y - self.y_mid_point)).max()
self.line_set[self.current_waveform].y -= self.y_mid_point
self.line_set[self.current_waveform].y *= (self.y_max - self.y_mid_point) / overflow
self.line_set[self.current_waveform].y += self.y_mid_point
# END def __rescale_to_fit() #
@staticmethod
def __round_int(num: float) -> int:
"""Rounds numbers to nearest integer
Keyword arguments:
:param num: Number to be rounded
:returns: Rounded integer
"""
return int(num + .5)  # rounds half up; assumes the non-negative inputs used here . . .
# END def __round_int() #
class LinePoints(object):
"""
Holds coordinates for x and y plots (along with if they were drawn or not)
Components:
:param self.x: Holds all x plot data (first as a list, for speed reasons, then converted to numpy array)
:param self.y: Holds all y plot data (first as a list, for speed reasons, then converted to numpy array)
:param self.drawn: Indicates whether or not graph has been drawn
"""
def __init__(self):
"""Initializes all necessary variables"""
self.x = []
self.y = []
self.drawn = False
# END def __init__() #
def create_graph(x_axis: str, y_axis: str,
x_min: int, x_max: int,
y_min: int, y_max: int,
x_major_ticks: int, x_minor_ticks: int,
y_major_ticks: int, y_minor_ticks: int,
fig, subplot_section) -> object:
"""
Creates a graph
Keyword arguments:
:param x_axis: x axis title
:param y_axis: y axis title
:param x_min: lower x bound
:param x_max: upper x bound
:param y_min: lower y bound
:param y_max: upper y bound
:param x_major_ticks: major x ticks count
:param x_minor_ticks: minor x ticks count
:param y_major_ticks: major y ticks count
:param y_minor_ticks: minor y ticks count
:param fig: figure to be plotted on
:param subplot_section: section of figure for plot
:returns: axis to that figure
"""
ax = fig.add_subplot(*subplot_section) # Places the figure in a specific spot . . .
# The x and y axis titles are set here . . .
ax.set_xlabel(x_axis)
ax.set_ylabel(y_axis)
# Prevents the graph axes from changing . . .
ax.set_autoscale_on(False)
# Sets background ticks in the graph, for better visual appearance . . .
x_minor_ticks = np.linspace(x_min, x_max, x_minor_ticks)
x_major_ticks = np.linspace(x_min, x_max, x_major_ticks)
y_minor_ticks = np.linspace(y_min, y_max, y_minor_ticks)
y_major_ticks = np.linspace(y_min, y_max, y_major_ticks)
ax.set_xticks(x_major_ticks)
ax.set_xticks(x_minor_ticks, minor=True)
ax.set_yticks(y_major_ticks)
ax.set_yticks(y_minor_ticks, minor=True)
plt.grid(which='both')
ax.grid(which='minor', alpha=0.2)
ax.grid(which='major', alpha=0.5)
# Sets the graphs boundaries . . .
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
return ax
# END def create_graph() #
# Dictionary used to hold all functions used . . .
FUNCTIONS = OrderedDict([("Sine", np.sin),
("Cosine", np.cos),
("Square", scipy.signal.square),
("Sawtooth", scipy.signal.sawtooth),
("Random", None),
("Waveform", None)])
| [
"[email protected]"
] | |
4e45a26e2453afb3dbb3328c6abf244953d7639c | e78d40f4a522dd11f631326002b2f4d417cc1ef0 | /neuro.py | 5c00ad01dcfae486e9f2b02c5828f4aa6dc74ebb | [
"MIT"
] | permissive | ParaDogs/predicting-coronawaves | 2fe7cc05f906b99392cc802ea3222e33f4097ad3 | 8db622dbd5c5f7c40e922bbae089639b42104d99 | refs/heads/master | 2022-04-16T12:13:52.059133 | 2020-04-19T13:16:29 | 2020-04-19T13:16:29 | 257,003,203 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,091 | py | import numpy as np
import scipy.special as sci
class NeuralNetwork:
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# number of input, hidden, and output nodes
self.inodes = input_nodes
self.hnodes = hidden_nodes
self.onodes = output_nodes
# learning rate
self.lr = learning_rate
# weight matrices
# self.wih = (np.random.rand(self.hnodes, self.inodes)- 0.5)
# self.who = (np.random.rand(self.onodes, self.hnodes)- 0.5)
# self.wih = (np.random.rand(self.hnodes, self.inodes)*2.0 - 1.0)
# self.who = (np.random.rand(self.onodes, self.hnodes)*2.0 - 1.0)
self.wih = np.random.normal(0.0, pow(self.hnodes, -0.5), (self.hnodes, self.inodes))
self.who = np.random.normal(0.0, pow(self.onodes, -0.5), (self.onodes, self.hnodes))
pass
def activation_function(self, x):
return sci.expit(x)  # sigmoid
def train(self, inputs_list, targets_list):
inputs = np.array(inputs_list, ndmin=2).T
targets = np.array(targets_list, ndmin=2).T
hidden_inputs = np.dot(self.wih, inputs)
hidden_outputs = self.activation_function(hidden_inputs)
final_inputs = np.dot(self.who, hidden_outputs)
final_outputs = self.activation_function(final_inputs)
output_errors = targets - final_outputs
hidden_errors = np.dot(self.who.T, output_errors)
self.who += self.lr * np.dot((output_errors * final_outputs * (1 - final_outputs)), np.transpose(hidden_outputs))
self.wih += self.lr * np.dot((hidden_errors * hidden_outputs * (1 - hidden_outputs)), np.transpose(inputs))
pass
def query(self, inputs_list):
# convert the list of input values
# into a two-dimensional array
inputs = np.array(inputs_list, ndmin=2).T
hidden_inputs = np.dot(self.wih, inputs)
hidden_outputs = self.activation_function(hidden_inputs)
final_inputs = np.dot(self.who, hidden_outputs)
final_outputs = self.activation_function(final_inputs)
return final_outputs
# Hedged usage sketch (illustrative addition, not in the original file): the layer
# sizes, learning rate, and training pair below are arbitrary assumptions.
if __name__ == "__main__":
    nn = NeuralNetwork(input_nodes=3, hidden_nodes=5, output_nodes=2, learning_rate=0.3)
    for _ in range(100):
        nn.train([0.1, 0.5, 0.9], [0.01, 0.99])
    print(nn.query([0.1, 0.5, 0.9]))
| [
"[email protected]"
] | |
5e28b92bfbaa311a1a87374710b57a049c0ee5d1 | 9bb55f7bd1415f1840a4d380bb6d84447f1bae2f | /Aufgabe 2.9.1 copy.py | bb43354f312e2d5d3787d882636cd799c7f16a02 | [] | no_license | CodingPippo/TigerJython-Gturtle | 620f093178116501c986d698bab5d2082eed4547 | 603a41daeadf1f571363beb3d5876449232ec2fd | refs/heads/main | 2023-01-30T12:29:36.390433 | 2020-12-08T19:32:18 | 2020-12-08T19:32:18 | 319,739,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | """FigA is a last-line recursion, since it only calls itself again on the last line.
FigB is a first-line recursion, since it calls itself already on the first line.
FigA also starts on the inside and is mirrored relative to FigB."""
from gturtle import *
def figA(s):
if s > 200:
return
forward(s)
right(90)
figA(s + 10)
makeTurtle()
figA(100)
from gturtle import *
def figB(s):
if s > 200:
return
figB(s + 10)
forward(s)
right(90)
makeTurtle()
figB(100) | [
"[email protected]"
] | |
33176477beb2b5e2706c7d4663e92bbe43f7fe7a | 5bb1addbd71a7ea2d4c7e157cc49632e45fbe4dc | /django_test1/blog/admin.py | f544e1fa05702f804996d7f3b00d4427b9565406 | [] | no_license | KVVaisakh/django_test | 646c96e7692acea77c68cc0f9c0711f878b67b9b | b4665d71985cc9bf215a990801b01d65c4ca7984 | refs/heads/master | 2020-03-19T15:56:38.142012 | 2018-06-09T10:59:45 | 2018-06-09T10:59:45 | 136,692,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | from __future__ import unicode_literals
from django.contrib import admin
# Register your models here.
from .models import Post
admin.site.register(Post)
| [
"[email protected]"
] | |
d9c4bcb42f64d9a0e02dffb96098a43012fe89b8 | 2f5010d8e459d05d8b299eb9d986067ceb1d2b04 | /utils/vect2d.py | 5c52dbfb6f2a246f916fcc572be1b8eae7be7800 | [] | no_license | santids/advgame | 68f1bd52669c4c6d42df5815b3ab7e331a1d1eb1 | 114db10c3b12acc69fc34b1fa4c1b577eabcfe6b | refs/heads/master | 2020-04-06T04:05:01.101285 | 2017-06-20T04:41:56 | 2017-06-20T04:41:56 | 83,057,081 | 1 | 0 | null | 2017-06-20T04:51:31 | 2017-02-24T15:47:32 | Python | UTF-8 | Python | false | false | 1,093 | py | # Common 2D vector functions applied to tuples
from math import sqrt
class Vect:
def __init__(self, (x, y)):
self.x = x
self.y = y
def tple(self):
"""Return a tuple representing the vect"""
return (self.x,self.y)
def suma(p1,p2):
"""Suma de dos vectores p1 y p2"""
if type(p1) != tuple or type(p2) != tuple:
raise TypeError('Is not tuple',p1,p2)
if len(p1) != 2 or len(p2) != 2:
raise ValueError ('tupple lenght different from 2',p1,p2)
x = p1[0]+p2[0]
y = p1[1]+p2[1]
return (x,y)
def div(v,n):
return (v[0]/n,v[1]/n)
def modulo(v):
return sqrt(v[0]*v[0]+v[1]*v[1])
def dist(p1,p2):
"""Distancia entre dos puntos"""
return modulo((p1[0]-p2[0],p1[1]-p2[1]))
def isInsideRect(v,rect):
"""is the point v inside the rect ( 0,0,rect.x,rect.y)"""
return v[0] < rect[0] and v[0]>= 0 and v[1] < rect[1] and v[1] >= 0
def inverse(v):
return (v[0]*(-1),v[1]*(-1))
def resta(p1,p2):
return suma(p1,inverse(p2))
if __name__ == '__main__':
p1 = (1,1)
p2 = (2,2)
print dist(p1,p2)
| [
"[email protected]"
] | |
6e85a5c512217490a88a8e92c2dbb8c7a4f6a8f0 | 4ae4f1957e34e42b63c913d6f824a6a2d3846ebf | /src/yunta/models.py | 4515d7c42616dd57f41fdd41623643e0dfbb91c9 | [] | no_license | pynef/yunta | a0f2a924b128600b20bc43541c005703f17b5063 | 6b571f3c1692a4bf85a8016c977c02d291782199 | refs/heads/master | 2022-12-03T03:52:18.777552 | 2019-12-13T20:35:08 | 2019-12-13T20:35:08 | 227,558,368 | 0 | 0 | null | 2022-11-22T04:54:43 | 2019-12-12T08:36:05 | JavaScript | UTF-8 | Python | false | false | 5,926 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from django.contrib.auth.models import User
from django.db import models
from .choices import TIPO_FRECUENCIA, GENERO_CHOICES
def generar_ruta_imagen(instance, filename):
return os.path.join("Profile", filename)
class Usuario(models.Model):
user = models.OneToOneField(User, on_delete=models.PROTECT)
dni = models.CharField('D.N.I.', max_length=8, unique=True, null=True, blank=True)
nombres = models.CharField('Nombres', max_length=150)
apellido_paterno = models.CharField('Apellido paterno', max_length=150)
apellido_materno = models.CharField('Apellido materno', max_length=150)
genero = models.CharField('Genero', max_length=10, choices=GENERO_CHOICES, default='F')
correo = models.EmailField('Correo Electronico', max_length=254, null=True, blank=True)
imagen = models.ImageField(upload_to=generar_ruta_imagen, default="Profile/default.png")
celular = models.CharField('Celular', max_length=50, null=True, blank=True)
fecha_creacion = models.DateTimeField(auto_now_add=True)
fecha_modificacion = models.DateTimeField(auto_now=True)
usuario_creacion = models.ForeignKey(User, on_delete=models.PROTECT, related_name='+', null=True, blank=True)
usuario_modificacion = models.ForeignKey(User, on_delete=models.PROTECT, related_name='+', null=True, blank=True)
def __str__(self):
return u'{}| {}, {}'.format(self.user.username, self.user.last_name.capitalize(), self.user.first_name.capitalize())
def nombre_completo(self):
return u'{}| {}, {}'.format(self.user.username, self.apellido_paterno.upper(), self.apellido_materno.upper())
class Meta:
ordering = ['dni']
verbose_name = 'Usuario'
verbose_name_plural = 'Usuarios'
class Junta(models.Model):
nombre = models.CharField(max_length=200)
monto = models.DecimalField(decimal_places=2, max_digits=8)
nro_cuotas = models.IntegerField()
puja = models.DecimalField(decimal_places=2, max_digits=8, default=0)
clave = models.CharField(max_length=200, null=True)
nro_participantes = models.IntegerField(default=2)
frecuencia = models.CharField('Frecuencia', max_length=10, choices=TIPO_FRECUENCIA, default='M', db_index=True)
creador = models.ForeignKey(User, on_delete=models.PROTECT)
activo = models.BooleanField(default=False)
abierto = models.BooleanField(default=False)
iniciar = models.BooleanField(default=False)
fecha_creacion = models.DateTimeField(auto_now_add=True)
fecha_modificacion = models.DateTimeField(auto_now=True)
usuario_creacion = models.ForeignKey(User, on_delete=models.PROTECT, related_name='+', null=True, blank=True)
usuario_modificacion = models.ForeignKey(User, on_delete=models.PROTECT, related_name='+', null=True, blank=True)
def __str__(self):
return u'{}'.format(self.nombre)
class Meta:
ordering = ['-fecha_creacion']
verbose_name = u'Junta'
verbose_name_plural = u'Juntas'
class ParticipanteJunta(models.Model):
junta = models.ForeignKey(Junta, on_delete=models.PROTECT)
participante = models.ForeignKey(User, on_delete=models.PROTECT)
monto = models.DecimalField(decimal_places=2, max_digits=8, default=0)
nro_cuotas = models.IntegerField()
cuota = models.IntegerField()
mi_cuota = models.DecimalField(decimal_places=2, max_digits=8, default=0)
puja = models.IntegerField(default=0)
mi_puja = models.DecimalField(decimal_places=2, max_digits=8, default=0)
fecha = models.DateField(null=True)
es_activo = models.BooleanField(default=True)
es_creador = models.BooleanField(default=False)
iniciar = models.BooleanField(default=False)
estado = models.BooleanField(default=True)
fecha_creacion = models.DateTimeField(auto_now_add=True)
fecha_modificacion = models.DateTimeField(auto_now=True)
usuario_creacion = models.ForeignKey(User, on_delete=models.PROTECT, related_name='+', null=True, blank=True)
usuario_modificacion = models.ForeignKey(User, on_delete=models.PROTECT, related_name='+', null=True, blank=True)
def __str__(self):
return u'{}'.format(self.monto)
class Meta:
ordering = ['-fecha_creacion']
verbose_name = u'Junta'
verbose_name_plural = u'Juntas'
unique_together = (('junta', 'participante'),)
class DetalleParticipanteJunta(models.Model):
participante_junta = models.ForeignKey(ParticipanteJunta, on_delete=models.PROTECT)
monto = models.IntegerField()
nro_cuotas = models.IntegerField()
nro_cuota_actual = models.IntegerField()
cuota = models.IntegerField()
cuota_actual = models.IntegerField()
puja = models.IntegerField(default=0)
puja_actual = models.IntegerField(default=0)
fecha_pago = models.DateField(null=True)
esta_pagado = models.BooleanField(default=False)
estado = models.BooleanField(default=True)
fecha_creacion = models.DateTimeField(auto_now_add=True)
fecha_modificacion = models.DateTimeField(auto_now=True)
usuario_creacion = models.ForeignKey(User, on_delete=models.PROTECT, related_name='+', null=True, blank=True)
usuario_modificacion = models.ForeignKey(User, on_delete=models.PROTECT, related_name='+', null=True, blank=True)
def __str__(self):
return u'{}'.format(self.cuota)
class Meta:
ordering = ['-fecha_creacion']
verbose_name = u'Junta'
verbose_name_plural = u'Juntas'
class Monedero(models.Model):
usuario = models.OneToOneField(User, on_delete=models.PROTECT)
saldo_actual = models.IntegerField()
saldo_contable = models.IntegerField()
def __str__(self):
return u'{} - {}'.format(self.usuario.username, self.saldo_actual)
class Meta:
ordering = ['-saldo_actual']
verbose_name = u'Monedero'
verbose_name_plural = u'Monederos'
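# Hedged usage sketch (illustrative addition, not in the original file). Field
# values below are placeholder assumptions; `user` stands for some existing
# django.contrib.auth User instance.
#
#   junta = Junta.objects.create(nombre="Junta demo", monto=1000, nro_cuotas=10,
#                                creador=user)
#   ParticipanteJunta.objects.create(junta=junta, participante=user,
#                                    nro_cuotas=10, cuota=1)
#   # unique_together on (junta, participante) ensures a user joins a Junta only once.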
| [
"[email protected]"
] | |
3aa4f597847a981fc4c28f61c442c768e551b919 | 2d3aba0bf1d3a5e018ded78218859b31dd0930dd | /3.문자열/str_to_int.py | 253d92331801b779fa6170d23a73a965f2dfaee0 | [] | no_license | CS-for-non-CS/Data-Structure | 7018203de7d14a0be7da2308963082b93fac8e21 | efce4c13578bd3d143aa570e9317c505b6424c40 | refs/heads/master | 2022-12-17T14:04:05.521164 | 2020-09-21T02:18:24 | 2020-09-21T02:18:24 | 297,205,549 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 400 | py |
str1 = "123"
str2 = "12.3"
print(int(str1),type(int(str1))) # 123
print(float(str2),type(float(str2))) # 12.3
str3 = "1+2"
print(str3)                    # 1+2
print(repr(str3))              # '1+2'
print(eval(str3))              # 3
print(eval(repr(str3)))        # 1+2
print(eval(eval(repr(str3))))  # 3
num1 = 123
num2 = 12.3
print(str(num1),type(str(num1)))
print(repr(num1),type(repr(num1)))
print(str(num2),type(str(num2)))
print(repr(num2),type(repr(num2))) | [
"[email protected]"
] | |
93fbe2030f9680ec93f24a90c8369df453beb9e8 | 4492bf5e04ada88ff8a230236ad424a3a2099660 | /buttonwood/MarketObjects/Events/OrderEvents.py | 76e29911334860534d6a3a6c94409d2c131733a0 | [
"MIT"
] | permissive | denhartog/Buttonwood | 135def28fd86be054cbec1fc464c01a0ef5cd193 | 478408d064246de990a4a51a0d407ab31c5b6c6b | refs/heads/master | 2022-04-06T18:53:25.050288 | 2020-02-20T21:42:44 | 2020-02-20T21:42:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,463 | py | """
This file is part of Buttonwood.
Buttonwood is a python software package created to help quickly create, (re)build, or
analyze markets, market structures, and market participants.
MIT License
Copyright (c) 2016-2019 Peter F. Nabicht
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from buttonwood.MarketObjects.CancelReasons import CANCEL_TYPES_STRINGS, CANCEL_REASON_STRINGS
from buttonwood.MarketObjects.Events.BasicEvents import BasicEvent
from buttonwood.MarketObjects.Price import Price
from buttonwood.MarketObjects.Market import Market
from buttonwood.MarketObjects.RejectReasons import REJECT_REASON_STRINGS
from buttonwood.MarketObjects.Side import Side
from buttonwood.MarketObjects.Events import OrderEventConstants
class OrderEvent(BasicEvent):
"""
An OrderEvent is the base class for matching engine order events, which are the messages a customer/user/participant
sends into the matching engine and the resulting executions the matching engine sends out.
OrderEvent has two sub categories that are both classes inheriting from it:
* Commands
* Execution Reports
Commands are the events sent into the matching engine. They are the events that express a desire that the matching
engine then tries to execute on.
Execution Reports are the events the matching engine sends back after acting on a Command. Every Execution Report
should have a causing OrderEvent.
A series of events can be strung together to make up the life cycle of an order. For example:
* New Order
* Acknowledgement
* Cancel Replace
* Acknowledgement
* Partial Fill
* Cancel
* Cancel Confirm
This is referred to as an order chain. Events have an order chain ID that
is used to clearly identify all events that belong to the same order chain.
"""
def __init__(self, event_id, timestamp, chain_id, user_id, market, other_key_values=None):
"""
The initializer of the base OrderEvent class.
While it seems like one wouldn't need product, it is great to have here
for filtering, comparisons, etc.
`other_key_values` is used for keeping key/value pairs around that don't
fit the defined arguments. This is a quick and easy way to track meta
data that a particular matching venue uses/keeps without having to
create custom versions of the OrderEvent just to keep this data.
:param event_id: unique identifier of the event
:param timestamp: float. microsecond time stamp of event. Expecting format of seconds.microseconds (ex: 1234.001123)
:param chain_id: str or int. the unique identifier fo the orderchain
:param user_id: str or int. unique identifier of the user who sent the command
:param market: MarketObjects.Market
:param other_key_values: dict
"""
assert isinstance(chain_id, str) or isinstance(chain_id, int)
assert isinstance(user_id, str) or isinstance(user_id, int)
assert isinstance(market, Market)
assert other_key_values is None or isinstance(other_key_values,
dict), "other_key_values must be none or of type dict"
BasicEvent.__init__(self, event_id, timestamp)
self._user_id = user_id
self._chain_id = chain_id
self._market = market
self._other_key_values = {} if other_key_values is None else other_key_values
def market(self):
"""
The market of the order event
:return:
"""
return self._market
def user_id(self):
"""
The user id of the order event
:return: int or str
"""
return self._user_id
def chain_id(self):
"""
The order chain id of the order event
:return: int or str
"""
return self._chain_id
def get_other_value(self, key):
"""
Returns the value for the given key from `other_key_values`.
If the key does not exist in `other_key_values` then returns `None`
:param key: object
:return: object. Can be `None`.
"""
return self._other_key_values.get(key)
def other_data(self):
"""
Gets the dictionary of optional other key/value pairs that are stored with the event.
:return: dict. Can be None
"""
return self._other_key_values
def _other_values_json(self):
d = {}
if self._other_key_values is not None:
for key, value in self._other_key_values.items():
if isinstance(value, Market):
d[str(key)] = value.to_json()
elif isinstance(value, Price):
d[str(key)] = str(value.price())
elif hasattr(value, '__dict__'): # cheap hack to figure out if a primitive or not
d[str(key)] = str(value)
else:
d[str(key)] = value
return d
class OrderCommand(OrderEvent):
"""
The base class for an order's commands that go into a matching engine. These
are the events that express a desire
from a participant that the matching engine attempts to execute.
"""
def __init__(self, event_id, timestamp, chain_id, user_id, market, other_key_values=None):
OrderEvent.__init__(self, event_id, timestamp, chain_id, user_id, market, other_key_values=other_key_values)
class NewOrderCommand(OrderCommand):
"""
A new order. A new order is necessarily the first event in an order chain.
If iceberg_peak_qty is None then not taking advantage of iceberg functionality.
"""
def __init__(self, event_id, timestamp, chain_id, user_id, market, side, time_in_force,
price, qty, iceberg_peak_qty=None, limit_or_market=OrderEventConstants.LIMIT, other_key_values=None):
# TODO documentation
assert isinstance(side, Side)
assert isinstance(price, Price)
assert isinstance(qty, int)
assert isinstance(limit_or_market, int)
assert limit_or_market in [OrderEventConstants.MARKET, OrderEventConstants.LIMIT]
assert iceberg_peak_qty is None or isinstance(iceberg_peak_qty, int)
assert isinstance(time_in_force, int)
assert qty > 0, "Qty must be greater than 0"
assert iceberg_peak_qty is None or iceberg_peak_qty >= 0, "Iceberg Peak Qty must be None or an int >= 0"
OrderCommand.__init__(self, event_id, timestamp, chain_id, user_id, market, other_key_values=other_key_values)
assert market.is_valid_price(price), "Price %s is not valid for Product %s" % (str(price), str(market))
self._side = side
self._price = price
self._qty = qty
self._limit_or_market = limit_or_market
self._iceberg_peak_qty = iceberg_peak_qty
if self._iceberg_peak_qty is None:
self._iceberg_peak_qty = qty
self._time_in_force = time_in_force
def side(self):
"""
Get the side.
:return: MarketObjects.Side
"""
return self._side
def price(self):
"""
Get the price.
:return: MarketObjects.Price
"""
return self._price
def qty(self):
"""
Get the qty of the new order.
:return: int
"""
return self._qty
def iceberg_peak_qty(self):
"""
Get the iceberg_peak_qty of the new order
:return: int
"""
return self._iceberg_peak_qty
def time_in_force(self):
"""
Gets the integer that identifies the order type.
:return: int
"""
return self._time_in_force
def is_far(self):
"""
Helper function to say if it is a FAR order or not.
:return: bool
"""
return self._time_in_force == OrderEventConstants.FAR
def is_fak(self):
"""
Helper function to say if it is a FAK order or not.
:return: bool
"""
return self._time_in_force == OrderEventConstants.FAK
def is_fok(self):
"""
Helper function to say if it is a FOK order or not.
:return: bool
"""
return self._time_in_force == OrderEventConstants.FOK
def is_limit_order(self):
"""
Returns True if the new order is a limit order; false if it is not.
:return: bool
"""
return self._limit_or_market == OrderEventConstants.LIMIT
def is_market_order(self):
"""
Returns True if the new order is a market order; false if it is not.
:return: bool
"""
return self._limit_or_market == OrderEventConstants.MARKET
def event_type_str(self):
return "New Order Command"
def to_json(self):
return {self.__class__.__name__: {'user_id': self.user_id(),
'chain_id': self.chain_id(),
'event_id': self.event_id(),
'timestamp': self.timestamp(),
'product_name': self.market().product().name(),
'endpoint_name': self.market().endpoint().name(),
'side': int(self.side()),
'price': str(self.price()),
'qty': self.qty(),
'iceberg_peak_qty': self.iceberg_peak_qty(),
'time_in_force': OrderEventConstants.TIME_IN_FORCE_STRINGS[
self.time_in_force()],
'other_key_values': self._other_values_json()}}
class CancelReplaceCommand(OrderCommand):
def __init__(self, event_id, timestamp, chain_id, user_id, market, side, price, qty, iceberg_peak_qty=None,
other_key_values=None):
# TODO document
assert isinstance(price, Price)
assert isinstance(qty, int)
assert iceberg_peak_qty is None or isinstance(iceberg_peak_qty, int)
assert iceberg_peak_qty is None or iceberg_peak_qty >= 0, "iceberg_peak_qty cannot be negative."
assert qty >= 0, "Qty must be greater than 0"
assert market.is_valid_price(price), "Price %s is not valid for Market %s" % (str(price), str(market))
OrderCommand.__init__(self, event_id, timestamp, chain_id, user_id, market, other_key_values=other_key_values)
self._side = side
self._price = price
self._iceberg_peak_qty = iceberg_peak_qty
if self._iceberg_peak_qty is None:
self._iceberg_peak_qty = qty
self._qty = qty
def price(self):
"""
Get the price of the cancel replace.
:return: MarketObjects.Price
"""
return self._price
def side(self):
"""
Get the side.
:return: MarketObjects.Side
"""
return self._side
def qty(self):
"""
Get the qty of the cancel replace request.
:return: int
"""
return self._qty
def iceberg_peak_qty(self):
"""
Get the iceberg_peak_qty of the cancel replace request
:return: int
"""
return self._iceberg_peak_qty
def event_type_str(self):
return "Cancel Replace Command"
def to_json(self):
"""
Get the json dictionary of the cancel replace message
:return: dict
"""
return {self.__class__.__name__: {'user_id': self.user_id(),
'chain_id': self.chain_id(),
'event_id': self.event_id(),
'timestamp': self.timestamp(),
'product_name': self.market().product().name(),
'endpoint_name': self.market().endpoint().name(),
'price': str(self.price()),
'qty': self.qty(),
'iceberg_peak_qty': self.iceberg_peak_qty(),
'other_key_values': self._other_values_json()}}
class CancelCommand(OrderCommand):
def __init__(self, event_id, timestamp, chain_id, user_id, market, cancel_type, other_key_values=None):
# TODO document
assert isinstance(cancel_type, int)
OrderCommand.__init__(self, event_id, timestamp, chain_id, user_id, market, other_key_values=other_key_values)
self._cancel_type = cancel_type
def cancel_type(self):
"""
Get the cancel type identifier from the Cancel.
:return: int
"""
return self._cancel_type
def cancel_type_str(self):
"""
Get the cancel type human readable string from the Cancel.
:return: str
"""
if self.cancel_type() in CANCEL_TYPES_STRINGS:
return CANCEL_TYPES_STRINGS[self.cancel_type()]
return "%d is an unknown cancel reason." % (self.cancel_type())
def event_type_str(self):
return "Cancel Command"
def to_json(self):
"""
Get the json dictionary of the cancel message
:return: dict
"""
return {self.__class__.__name__: {'user_id': self.user_id(),
'chain_id': self.chain_id(),
'event_id': self.event_id(),
'timestamp': self.timestamp(),
'product_name': self.market().product().name(),
'endpoint_name': self.market().endpoint().name(),
'cancel_type': self.cancel_type(),
'cancel_type_str': self.cancel_type_str(),
'other_key_values': self._other_values_json()}}
class ExecutionReport(OrderEvent):
"""
The base class for the execution reports that come out of the matching engine. These are the events that respond
to the order commands.
"""
def __init__(self, event_id, timestamp, chain_id, user_id, market, causing_command, other_key_values=None):
OrderEvent.__init__(self, event_id, timestamp, chain_id, user_id, market, other_key_values=other_key_values)
self._causing_command = causing_command
def causing_command(self):
"""
Get the command that caused the execution report.
:return: MarketObjects.Events.OrderEvents.OrderCommand
"""
return self._causing_command
class AcknowledgementReport(ExecutionReport):
def __init__(self, event_id, timestamp, chain_id, user_id, market, response_to_command, price,
qty, iceberg_peak_qty, other_key_values=None):
"""
An acknowledgement execution report. Has all the standard data for identifying the message, order, etc.
Also includes the command it is acknowledging (this is for ease of use for listeners to the data as well as to
systems and use cases that may not be using order chain tracking).
Price, qty, and iceberg_peak_qty are contained here since what gets ack'd will not always match what was
requested based on the situation (ex: aggressive orders getting partially filled pre-ack) or the matching
engine's rules, or the order type (ex: pegged orders starting off at different prices).
:param event_id: int
:param timestamp: float
:param chain_id: int or str
:param user_id: int or str
:param market: MarketObjects.Market.Market
:param response_to_command: MarketObjects.Events.OrderEvents.OrderCommand
:param price: MarketObjects.Price.Price
:param qty: int
:param iceberg_peak_qty: int
:param other_key_values: dict
"""
# TODO finish param documentation above
# TODO finish asserts below
assert isinstance(price, Price)
assert isinstance(qty, int)
assert iceberg_peak_qty is None or isinstance(iceberg_peak_qty, int)
ExecutionReport.__init__(self, event_id, timestamp, chain_id, user_id, market, response_to_command,
other_key_values=other_key_values)
self._price = price
self._qty = qty
self._iceberg_peak_qty = iceberg_peak_qty
if self._iceberg_peak_qty is None:
self._iceberg_peak_qty = qty
def acknowledged_command(self):
"""
Get the event that is being acknowledged. Helper function that returns the same as causing_command()
:return: MarketObjects.Events.OrderEvents.OrderCommand
"""
return self._causing_command
def price(self):
"""
Get the price.
:return: MarketObjects.Price
"""
return self._price
def qty(self):
"""
Get the qty
:return: int
"""
return self._qty
def iceberg_peak_qty(self):
"""
Get the iceberg peak qty
:return: int
"""
return self._iceberg_peak_qty
def event_type_str(self):
return "Acknowledgement Report"
def to_json(self):
"""
Get the json dictionary of the acknowledgement
:return: dict
"""
return {self.__class__.__name__: {'user_id': self.user_id(),
'chain_id': self.chain_id(),
'event_id': self.event_id(),
'timestamp': self.timestamp(),
'product_name': self.market().product().name(),
'endpoint_name': self.market().endpoint().name(),
'response_to_command': self.causing_command().to_json(),
'price': str(self.price()),
'qty': self.qty(),
'iceberg_peak_qty': self.iceberg_peak_qty(),
'other_key_values': self._other_values_json()}}
class RejectReport(ExecutionReport):
def __init__(self, event_id, timestamp, chain_id, user_id, market, response_to_command, reject_reason,
other_key_values=None):
# TODO document
assert isinstance(reject_reason, int)
ExecutionReport.__init__(self, event_id, timestamp, chain_id, user_id, market, response_to_command,
other_key_values=other_key_values)
self._reject_reason = reject_reason
def rejected_command(self):
"""
Get the event that is being rejected. Helper function that returns the same as causing_command()
:return: MarketObjects.Events.OrderEvents.OrderCommand
"""
return self._causing_command
def reject_reason(self):
"""
Get the reject reason's identifier, which is an int
:return: int
"""
return self._reject_reason
def reject_reason_str(self):
"""
Get the human readable reject reason string.
:return: str
"""
if self.reject_reason() in REJECT_REASON_STRINGS:
return REJECT_REASON_STRINGS[self.reject_reason()]
return "%d is an unknown reject reason." % (self.reject_reason())
def event_type_str(self):
return "Reject Report"
def to_json(self):
"""
Get the json dictionary of the reject report
:return: dict
"""
return {self.__class__.__name__: {'user_id': self.user_id(),
'chain_id': self.chain_id(),
'event_id': self.event_id(),
'timestamp': self.timestamp(),
'product_name': self.market().product().name(),
'endpoint_name': self.market().endpoint().name(),
'reject_reason': self.reject_reason(),
'reject_reason_str': self.reject_reason_str(),
'response_to_command': self.causing_command().to_json(),
'other_key_values': self._other_values_json()}}
class CancelReport(ExecutionReport):
def __init__(self, event_id, timestamp, chain_id, user_id, market, cancel_command, cancel_reason,
other_key_values=None):
# TODO document
assert isinstance(cancel_reason, int)
ExecutionReport.__init__(self, event_id, timestamp, chain_id, user_id, market, cancel_command,
other_key_values=other_key_values)
self._cancel_reason = cancel_reason
def cancel_command(self):
"""
Get the cancel command that the report is confirming. This is a helper function that is just here for logical
naming. It simply returns the causing command.
:return:
"""
return self.causing_command()
def cancel_reason(self):
"""
Get the cancel reason's identifier, which is an int
:return: int
"""
return self._cancel_reason
def cancel_reason_str(self):
"""
Get the human readable cancel reason string.
:return: str
"""
if self.cancel_reason() in CANCEL_REASON_STRINGS:
return CANCEL_REASON_STRINGS[self.cancel_reason()]
return "%d is an unknown cancel reason." % (self.cancel_reason())
def event_type_str(self):
return "Cancel Report"
def to_json(self):
"""
Get the json dictionary of the cancel message
:return: dict
"""
return {self.__class__.__name__: {'user_id': self.user_id(),
'chain_id': self.chain_id(),
'event_id': self.event_id(),
'timestamp': self.timestamp(),
'product_name': self.market().product().name(),
'endpoint_name': self.market().endpoint().name(),
'cancel_reason': self.cancel_reason(),
'cancel_reason_str': self.cancel_reason_str(),
'cancel_command': "None" if self.causing_command() is None else self.cancel_command().to_json(),
'other_key_values': self._other_values_json()}}
class FillReport(ExecutionReport):
def __init__(self, event_id, timestamp, chain_id, user_id, market, aggressing_command, fill_qty, fill_price,
side, match_id, other_key_values=None):
# TODO document
assert isinstance(fill_price, Price)
assert isinstance(fill_qty, int)
assert isinstance(match_id, int) or isinstance(match_id, str)
assert isinstance(side, Side)
ExecutionReport.__init__(self, event_id, timestamp, chain_id, user_id, market, aggressing_command,
other_key_values=other_key_values)
self._fill_price = fill_price
self._fill_qty = fill_qty
self._side = side
self._match_ids = match_id
def aggressing_command(self):
"""
Get the aggressing command that the fill was triggered by. This is a helper function that is just here
for logical naming. It simply returns the causing command.
:return:
"""
return self.causing_command()
def is_aggressor(self):
"""
Returns True if this fill is for the aggressor that caused the match; returns False otherwise.
:return: bool
"""
return self.aggressing_command().chain_id() == self.chain_id()
def match_id(self):
"""
Get the match id that the fill is part of.
:return: int or str.
"""
return self._match_ids
def fill_price(self):
"""
Get the price of the fill.
:return: MarketObjects.Price
"""
return self._fill_price
def fill_qty(self):
"""
Get the qty filled.
:return: int
"""
return self._fill_qty
def side(self):
"""
Get the side of the fill.
:return: MarketObjects.Side
"""
return self._side
def event_type_str(self):
return "Fill Report"
def to_json(self):
"""
Get the json dictionary of the fill message.
:return: dict
"""
return {self.__class__.__name__: {'user_id': self.user_id(),
'chain_id': self.chain_id(),
'event_id': self.event_id(),
'timestamp': self.timestamp(),
'product_name': self.market().product().name(),
'endpoint_name': self.market().endpoint().name(),
'side': int(self.side()),
'fill_price': str(self.fill_price()),
'fill_qty': self.fill_qty(),
'match_id': self.match_id(),
'aggressing_command': self.aggressing_command().to_json(),
'other_key_values': self._other_values_json()}}
class PartialFillReport(FillReport):
def __init__(self, event_id, timestamp, chain_id, user_id, market, aggressing_command, fill_qty, fill_price,
side, match_id, leaves_qty, other_key_values=None):
# TODO document
assert isinstance(leaves_qty, int)
FillReport.__init__(self, event_id, timestamp, chain_id, user_id, market, aggressing_command, fill_qty,
fill_price, side, match_id, other_key_values=other_key_values)
self._leaves_qty = leaves_qty
def leaves_qty(self):
"""
Gets the leaves qty that results from the fill.
:return:
"""
return self._leaves_qty
def event_type_str(self):
return "Partial Fill Report"
def to_json(self):
"""
Get the json dictionary of the partial fill message.
:return: dict
"""
return {self.__class__.__name__: {'user_id': self.user_id(),
'chain_id': self.chain_id(),
'event_id': self.event_id(),
'timestamp': self.timestamp(),
'product_name': self.market().product().name(),
'endpoint_name': self.market().endpoint().name(),
'side': int(self.side()),
'fill_price': str(self.fill_price()),
'fill_qty': self.fill_qty(),
'match_id': self.match_id(),
'leaves_qty': self.leaves_qty(),
'aggressing_command': self.aggressing_command().to_json(),
'other_key_values': self._other_values_json()}}
class FullFillReport(FillReport):
def __init__(self, event_id, timestamp, chain_id, user_id, market, aggressing_command, fill_qty, fill_price,
side, match_id, other_key_values=None):
# TODO document
FillReport.__init__(self, event_id, timestamp, chain_id, user_id, market, aggressing_command, fill_qty,
fill_price, side, match_id, other_key_values=other_key_values)
def event_type_str(self):
return "Full Fill Report"
| [
"[email protected]"
] | |
fc7fd4340228c0eb18ca72833e39c6bde03e87cf | 174c6b81276e8816a2035a571bc1e15250a89f85 | /blog/views.py | ea02957b380323fa4bb353a63954aeb9367a133d | [] | no_license | Vanyali/personalportfolio | 36799bbd5c6e3719d6e9262c209ee2f5a0dd9cbb | ffcd25afd5989aaf58265beacfd6e22b57a3d035 | refs/heads/master | 2020-03-25T23:48:50.450996 | 2018-08-10T14:03:12 | 2018-08-10T14:03:12 | 144,294,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | from django.shortcuts import render, get_object_or_404
from .models import Blog
# Create your views here.
def allblogs(request):
blogs = Blog.objects
return render(request, 'blog/allblogs.html', {'blogs':blogs})
def detail(request, blog_id):
detailblog = get_object_or_404(Blog, pk=blog_id)
return render(request, 'blog/detail.html', {'blog':detailblog}) | [
"[email protected]"
] | |
fac70365a8f81a9aa5b55a9cd5d7d9b6467d9779 | 70d7e6f3f08b4928d7b69148996550dc93c2423c | /2016/apr/code/distribution.py | 425881e0ecc966f0fc7ec109402e04e5d43b5126 | [] | no_license | pythonnortheast/slides | 5078145dcd670896b394c758ec21cd66084375ee | 076fd70462560e58a43e0db1577a1ca179dc5893 | refs/heads/master | 2021-06-05T01:44:31.627884 | 2017-12-19T08:59:29 | 2017-12-19T08:59:29 | 5,814,944 | 3 | 4 | null | 2017-12-19T08:59:30 | 2012-09-14T21:21:46 | Jupyter Notebook | UTF-8 | Python | false | false | 1,109 | py | #!/usr/bin/env python
"""
Visualisation - distribution.
"""
import pandas
import seaborn
seaborn.set_style("whitegrid")
seaborn.set_context("paper")
# read data from a CSV file
data = pandas.read_csv("january_2016.csv", skiprows=2, thousands=",")
# replace spaces in column names with underscores
data.columns = data.columns.str.replace(" ", "_")
# convert strings to date (DD/MM/YYYY) using null for invalid/missing values
data.Paid_Date = pandas.to_datetime(data.Paid_Date, format="%d/%m/%Y", errors="coerce")
# remove all rows with missing values
data = data.dropna()
small_payments = data.query("0 < Total < 10000")
# histogram + kernel density estimation
plot = seaborn.distplot(small_payments.Total)
plot.figure.savefig("distribution.pdf", bbox_inches="tight")
plot.figure.clear()
# box plots
plot = seaborn.boxplot(x="Total", y="Directorate", data=small_payments)
plot.figure.savefig("box_plot.pdf", bbox_inches="tight")
plot.figure.clear()
# violin plots
plot = seaborn.violinplot(x="Total", y="Directorate", data=small_payments)
plot.figure.savefig("violin_plot.pdf", bbox_inches="tight")
| [
"momat@blaszak"
] | momat@blaszak |
bc650166674848aae2e76eed7a9ef526e9a008f4 | c906e707a68c4314a8fd65321cef494b52cb3b8b | /2018/day2/PuzzleD2P2.py | 66d7db9dc30b81d564eb34eab30012d9f84f4d6d | [] | no_license | vladimir-ionita/AdventOfCode | 6306aedf2fec17a181d441e0490b8da10b1469eb | 3d825436f012c342034b5273e407ba0af5447fe3 | refs/heads/master | 2020-04-10T10:29:48.457561 | 2018-12-10T06:57:50 | 2018-12-10T06:57:50 | 160,324,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 952 | py | from utilities import FileUtilities
def get_difference(word_a, word_b):
difference = 0
for c in range(len(word_a)):
difference += 1 if word_a[c] != word_b[c] else 0
return difference
def get_similar_words(words):
for word_index_a in range(len(words)):
for word_index_b in range(word_index_a + 1, len(words)):
word_a = words[word_index_a]
word_b = words[word_index_b]
if get_difference(word_a, word_b) == 1:
return word_a, word_b
def get_common_letters(word_a, word_b):
common = []
for c in range(len(word_a)):
if word_a[c] == word_b[c]:
common.append(word_a[c])
return ''.join(common)
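# Worked example (from the Advent of Code 2018 day 2 puzzle statement):
# "fghij" and "fguij" differ at exactly one position, so
# get_difference("fghij", "fguij") == 1 and
# get_common_letters("fghij", "fguij") == "fgij".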
if __name__ == "__main__":
input_file_path = "puzzle.in"
file_content = FileUtilities.get_sanitized_content_from_file(input_file_path)
word_a, word_b = get_similar_words(file_content)
print(get_common_letters(word_a, word_b))
| [
"[email protected]"
] | |
4ffe88ba899c6533dbf898c44501f57ee3a17dcc | 714b28c006b3c60aa87714f8777a37486b94e995 | /accounts/migrations/0006_auto_20210522_1401.py | 992a110f8f0095db91399a635e9b3b4465af91f9 | [] | no_license | kyrios213/django_tutorial | 3f0bdce5c0e5faa4f7e08a238ac6d77bba35c92e | 771d209c4b198df9361254deefd1c9a49c4a0746 | refs/heads/main | 2023-04-25T23:11:06.356823 | 2021-05-30T05:31:32 | 2021-05-30T05:31:32 | 368,026,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | # Generated by Django 3.2.3 on 2021-05-22 06:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0005_auto_20210519_1035'),
]
operations = [
migrations.AddField(
model_name='order',
name='note',
field=models.CharField(max_length=255, null=True),
),
migrations.AlterField(
model_name='product',
name='description',
field=models.TextField(blank=True, null=True),
),
]
| [
"[email protected]"
] | |
16a18aa3c5735f7fbeb02b8769f5d1279b47ec27 | 42e386e873fa0669bfb6dd05a71e0847dceacf4d | /tasks/color_constancy_multi/test.py | 3ae7c974d526170cf0e1e3e1b29c37c73fbbcea2 | [] | no_license | YiKeYaTu/Experiments | 93869acc312f9d88aa4fb188b28f7e8a8ff7d25f | 45b694b78a57f5b9ad81d66ee52178b7e26853dc | refs/heads/main | 2023-03-19T23:58:18.919005 | 2021-03-09T06:11:22 | 2021-03-09T06:11:22 | 307,280,892 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,271 | py |
from dataloaders.multi_color_constancy.ImageNet import ImageNet
from models.multi_color_constancy.StackedNCRF import StackedNCRF
from torch.utils.data import DataLoader
from constant import DEVICE, TMP_ROOT
from utils.StatisticalValue import StatisticalValue
from loss_functions.multi_angular_loss import multi_angular_loss
from torchvision import transforms
import torch
import torchvision
import os
import time
from thop import profile
dataset = ImageNet(
train=False,
transform=transforms.Compose([
transforms.ToTensor()
]),
target_transform=transforms.Compose([
transforms.ToTensor()
]),
)
testloader = DataLoader(
dataset,
batch_size=1,
shuffle=False,
num_workers=8
)
model = StackedNCRF()
model.to(device=DEVICE)
macs, params = profile(model, inputs=(torch.randn(1, 3, 224, 224).to(DEVICE), ))
print("Model's macs is %f, params is %f" % (macs, params))
def run():
statistical_angular_errors = StatisticalValue()
sub_dir = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
print('Test start.')
with torch.no_grad():
for idx, (images, labels, names) in enumerate(testloader):
images, labels = images.to(DEVICE), labels.to(DEVICE)
predictions = model(images)
angular_error = multi_angular_loss(predictions[-1], labels)
statistical_angular_errors.update(angular_error.item(), names, sort=True)
view_data = torch.zeros((4, *images.shape[1:]))
view_data[0, :, :, :] = images.squeeze()
view_data[1, :, :, :] = images.squeeze() / predictions[-1].squeeze()
view_data[2, :, :, :] = predictions[-1].squeeze()
view_data[3, :, :, :] = labels.squeeze()
if not os.path.isdir(os.path.join(TMP_ROOT, 'test', sub_dir)):
os.makedirs(os.path.join(TMP_ROOT, 'test', sub_dir))
torchvision.utils.save_image(
view_data,
os.path.join(TMP_ROOT, 'test/%s/%s' % (sub_dir, names[0]))
)
print(
'Angular Error: mean: {errors.avg}, mid: {errors.mid}, worst: {errors.max[0]}, best: {errors.min[0]}'.format(
errors=statistical_angular_errors))
print('Test end.')
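# Hedged addition (not in the original file): run the evaluation when this script
# is executed directly. Loading trained weights is assumed to happen elsewhere; as
# written this evaluates the randomly initialised model.
if __name__ == '__main__':
    model.eval()  # switch layers like dropout/batch-norm to inference mode
    run()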
| [
"[email protected]"
] | |
afe2e3497fcf2748a39df150b3000ee0cd199b92 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/documentdb/v20210301preview/get_sql_resource_sql_stored_procedure.py | ad0982c8a942b15814d0f916e6958ee808ba44f3 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,150 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetSqlResourceSqlStoredProcedureResult',
'AwaitableGetSqlResourceSqlStoredProcedureResult',
'get_sql_resource_sql_stored_procedure',
'get_sql_resource_sql_stored_procedure_output',
]
@pulumi.output_type
class GetSqlResourceSqlStoredProcedureResult:
"""
An Azure Cosmos DB storedProcedure.
"""
def __init__(__self__, id=None, identity=None, location=None, name=None, resource=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if resource and not isinstance(resource, dict):
raise TypeError("Expected argument 'resource' to be a dict")
pulumi.set(__self__, "resource", resource)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
The unique resource identifier of the ARM resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.ManagedServiceIdentityResponse']:
"""
Identity for the resource.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
The location of the resource group to which the resource belongs.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the ARM resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def resource(self) -> Optional['outputs.SqlStoredProcedureGetPropertiesResponseResource']:
return pulumi.get(self, "resource")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of Azure resource.
"""
return pulumi.get(self, "type")
class AwaitableGetSqlResourceSqlStoredProcedureResult(GetSqlResourceSqlStoredProcedureResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSqlResourceSqlStoredProcedureResult(
id=self.id,
identity=self.identity,
location=self.location,
name=self.name,
resource=self.resource,
tags=self.tags,
type=self.type)
def get_sql_resource_sql_stored_procedure(account_name: Optional[str] = None,
container_name: Optional[str] = None,
database_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
stored_procedure_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSqlResourceSqlStoredProcedureResult:
"""
An Azure Cosmos DB storedProcedure.
:param str account_name: Cosmos DB database account name.
:param str container_name: Cosmos DB container name.
:param str database_name: Cosmos DB database name.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str stored_procedure_name: Cosmos DB storedProcedure name.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['containerName'] = container_name
__args__['databaseName'] = database_name
__args__['resourceGroupName'] = resource_group_name
__args__['storedProcedureName'] = stored_procedure_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:documentdb/v20210301preview:getSqlResourceSqlStoredProcedure', __args__, opts=opts, typ=GetSqlResourceSqlStoredProcedureResult).value
return AwaitableGetSqlResourceSqlStoredProcedureResult(
id=__ret__.id,
identity=__ret__.identity,
location=__ret__.location,
name=__ret__.name,
resource=__ret__.resource,
tags=__ret__.tags,
type=__ret__.type)
@_utilities.lift_output_func(get_sql_resource_sql_stored_procedure)
def get_sql_resource_sql_stored_procedure_output(account_name: Optional[pulumi.Input[str]] = None,
container_name: Optional[pulumi.Input[str]] = None,
database_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
stored_procedure_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSqlResourceSqlStoredProcedureResult]:
"""
An Azure Cosmos DB storedProcedure.
:param str account_name: Cosmos DB database account name.
:param str container_name: Cosmos DB container name.
:param str database_name: Cosmos DB database name.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str stored_procedure_name: Cosmos DB storedProcedure name.
"""
...
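# Example usage (a minimal sketch with hypothetical resource names, assuming a
# configured azure-native provider and an existing Cosmos DB account):
#
#   result = get_sql_resource_sql_stored_procedure(
#       account_name="my-cosmos-account",
#       container_name="my-container",
#       database_name="my-database",
#       resource_group_name="my-resource-group",
#       stored_procedure_name="my-sproc")
#   pulumi.export("stored_procedure_id", result.id)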
| [
"[email protected]"
] | |
52bd7c4c86d98ce1b576f8ff77c0c657aedad1ad | a61caaadd55502c044d5719f0ac8f24d6ebed0a8 | /CollisionTypes/GameSetup.py | 6d8b6798698546aad8a866fdb06c5cdcea3bdb8f | [] | no_license | IcecreaO-omnfire/CollisionTypes | 454ef278d87ea2183f55dfba04e51f7b615fee8c | 5f58cb9d3e24a8459278d45de40eecc2be991fa7 | refs/heads/master | 2023-07-22T01:50:58.790937 | 2021-08-29T14:37:04 | 2021-08-29T14:37:04 | 401,060,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,771 | py | import GlobalVariables
import CharacterCollisionNode
class GameSetup():
def __init__(self):
self.customcollisions={}
self.game=GlobalVariables.ShowBase()
self.game.cTrav=GlobalVariables.CollisionTraverser()
self.queuegame=GlobalVariables.CollisionHandlerQueue()
taskMgr.doMethodLater(0.01,self.collisiontraverse,"CollisionTraverse")
def collisiontraverse(self,task):
self.game.cTrav.traverse(render)
for each in self.game.cTrav.getColliders():
try:
collisionnode=self.customcollisions[each.node()]
if isinstance(collisionnode,CharacterCollisionNode.CharacterCollisionNode):
collisionnode.model.setZ(collisionnode.model,-0.1)
except Exception as exception:
pass
self.game.cTrav.showCollisions(render)
if len(self.queuegame.entries)>0:
for each in self.queuegame.entries:
collisiontype=None
fromnodepath=each.getFromNodePath()
try:
if isinstance(self.customcollisions[fromnodepath.node()],CharacterCollisionNode.CharacterCollisionNode):
collisiontype=self.customcollisions[fromnodepath.node()]
#print(type(self.customcollisions[fromnodepath.node()]))
if collisiontype.isghost==True:
print("Ghosting")
else:
fromnodepath.getParent().setZ(fromnodepath.getParent(),each.getSurfacePoint(fromnodepath.getParent()).z+.9)
except Exception as exception:
print(exception)
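        # Per-frame behaviour: every registered CharacterCollisionNode was
        # nudged 0.1 units downward above (a simple stand-in for gravity);
        # each queued contact then snaps the colliding node back above the
        # surface point, with 0.9 acting as a hand-tuned standing offset.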
return task.again | [
"[email protected]"
] | |
d615b760898802dc9155d05c5fee311838b3ece0 | 485be21ebe0a956b7f4a681968e160a463903ecc | /KnowledgedRank/BoePRFReranker.py | 59319910dfd622e0a334dfd716a1ba920c9b8fb2 | [] | no_license | xiongchenyan/cxPyLib | e49da79345006d75a4261a8bbd4cc9a7f730fad2 | 8d87f5a872458d56276a2a2b0533170ede4d5851 | refs/heads/master | 2021-01-10T20:43:20.147286 | 2016-01-14T04:02:45 | 2016-01-14T04:02:45 | 17,610,431 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,418 | py | '''
Created on Dec 7, 2015 7:24:56 PM
@author: cx
what I do:
I rerank doc in the BOE space
with simple PRF re-ranking
what's my input:
doc with hEntity
what's my output:
evaluation results
'''
import site
site.addsitedir('/bos/usr0/cx/PyCode/cxPyLib')
from cxBase.base import cxBaseC
from cxBase.Conf import cxConfC
import logging,json
import math
from KnowledgedRank.BoeReranker import *
class BoePRFRerankerC(BoeLmRankerC):
def Init(self):
BoeLmRankerC.Init(self)
self.WOrigQ = 0.5
self.NumOfExpEntity = 20
def SetConf(self, ConfIn):
BoeLmRankerC.SetConf(self, ConfIn)
self.WOrigQ = float(self.conf.GetConf('worigq', self.WOrigQ))
self.NumOfExpEntity = int(self.conf.GetConf('numofexp', self.NumOfExpEntity))
@staticmethod
def ShowConf():
BoeLmRankerC.ShowConf()
print 'worigq 0.5\nnumofexp 20'
def QExp(self,qid,query,lDoc):
hEntityScore = {} #ObjId -> prf score
for doc in lDoc:
if not doc.DocNo in self.hDocKg:
continue
hDocEntity = self.hDocKg[doc.DocNo]
for ObjId,score in hDocEntity.items():
score += doc.score #log(a) + log(b)
if not ObjId in hEntityScore:
hEntityScore[ObjId] = math.exp(score)
else:
hEntityScore[ObjId] += math.exp(score)
lEntityScore = hEntityScore.items()
lEntityScore.sort(key=lambda item:item[1],reverse = True)
lEntityScore = lEntityScore[:self.NumOfExpEntity]
Z = sum([item[1] for item in lEntityScore])
if Z == 0:
lEntityScore = []
else:
lEntityScore = [[item[0],item[1] / float(Z)] for item in lEntityScore]
logging.info(
'[%s][%s] exp entity: %s',
qid,
query,
json.dumps(lEntityScore)
)
return lEntityScore
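    # QExp performs pseudo-relevance feedback in entity space: doc.score and
    # the doc->entity weights are log values, so exp(entity score + doc score)
    # accumulates roughly sum_d P(e|d) * P(d) over the feedback documents; the
    # top NumOfExpEntity entities are then kept and L1-normalized.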
def RankScoreForDoc(self,lQObjScore,doc):
if not doc.DocNo in self.hDocKg:
return self.Inferencer.MinWeight
hDocEntity = self.hDocKg[doc.DocNo]
score = 0
for ObjId,weight in lQObjScore:
ObjScore = self.Inferencer.inference(ObjId, hDocEntity,doc)
score += ObjScore * weight
# logging.info('[%s] [%s] - [%s] obj score: %f',qid,doc.DocNo,ObjId,ObjScore)
# logging.info('[%s] [%s] ranking score: %f',qid,doc.DocNo,score)
return score
def Rank(self, qid, query, lDoc):
lQObj = []
if qid in self.hQObj:
lQObj = self.hQObj[qid]
lExpEntityScore = self.QExp(qid, query, lDoc)
lQExpObjScore = [[ObjId,self.WOrigQ * score] for ObjId,score in lQObj]
lQExpObjScore += [
[ObjId,score * (1.0 - self.WOrigQ)]
for ObjId,score in lExpEntityScore
]
lScore = [self.RankScoreForDoc(lQExpObjScore, doc) for doc in lDoc]
lMid = zip(lDoc,lScore)
lDocNoScore = [[item[0].DocNo,item[1],item[0].score] for item in lMid]
#sort doc by two keys, if boe scores tie, use original ranking score
lDocNoScore.sort(key=lambda item: (item[1],item[2]), reverse = True)
lRankRes = [item[0] for item in lDocNoScore]
return lRankRes
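    # Rank interpolates the original query entities (weight WOrigQ) with the
    # expanded ones (weight 1 - WOrigQ), scores every document against the
    # combined distribution, and breaks ties using the initial retrieval score.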
if __name__=='__main__':
import sys,os
from AdhocEva.RankerEvaluator import RankerEvaluatorC
if 2 != len(sys.argv):
print 'I evaluate Boe exp model '
print 'in\nout'
BoePRFRerankerC.ShowConf()
RankerEvaluatorC.ShowConf()
sys.exit()
root = logging.getLogger()
root.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
# ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
conf = cxConfC(sys.argv[1])
QIn = conf.GetConf('in')
EvaOut = conf.GetConf('out')
Ranker = BoePRFRerankerC(sys.argv[1])
Evaluator = RankerEvaluatorC(sys.argv[1])
Evaluator.Evaluate(QIn, Ranker.Rank, EvaOut)
| [
"[email protected]"
] | |
11d1d4f7de37fdfe1f4084bddb41715fac9c2fe2 | 48877f86e02cdd90deb7d44f2309037e5427f76a | /nerd2.py | e7820b7302c1c93292208e21c82798396f98363d | [] | no_license | pinetree408/apss | 46ec76cb5faa21236c610bdb83906aee6ca53750 | 6e12cce0b58cd6091daeae57dc930d121b9e9416 | refs/heads/master | 2021-07-14T14:43:45.179885 | 2020-06-08T07:35:09 | 2020-06-08T07:35:09 | 164,045,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,083 | py | import collections
def is_dominated(x, y, coords):
idx = 0
sorted_coords = collections.OrderedDict(sorted(coords.items()))
for i, key in enumerate(sorted_coords.keys()):
idx = i
if key > x:
break
if len(sorted_coords.keys()) == 0 or idx == len(sorted_coords.keys())-1:
return False
return y < coords[sorted_coords.keys()[idx]]
def remove_dominated(x, y, coords):
idx = 0
sorted_coords = collections.OrderedDict(sorted(coords.items()))
for i, key in enumerate(sorted_coords.keys()):
idx = i
if key > x:
if idx != 0:
idx = idx-1
break
if idx == 0:
return
while True:
if coords[sorted_coords.keys()[idx]] > y:
break
if idx == 0:
coords.pop(sorted_coords.keys()[idx], None)
break
else:
new_idx = idx
new_idx = new_idx-1
coords.pop(sorted_coords.keys()[idx], None)
idx = new_idx
def registered(x, y, coords):
if is_dominated(x, y, coords):
        return len(coords.keys())
remove_dominated(x, y, coords)
coords[x] = y
return len(coords.keys())
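# coords maintains the current Pareto front keyed by x-coordinate: a newcomer
# is kept only if no stored point dominates it, and any stored points it
# dominates are evicted first, so registered() returns the size of the front
# after each arrival.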
def solve(people):
ret = 0
coords = {}
for person in people:
ret = ret+registered(person[0], person[1], coords)
return ret
def nerd2(input_case):
input_list = list(
map(
lambda x: [float(i) for i in x.strip().split(' ')],
input_case.split('\n')
)
)
case_num = int(input_list[0][0])
input_list = input_list[1:]
case_start = 0
for i in range(case_num):
start = case_start
people_num = int(input_list[start][0])
people_list = input_list[start+1:start+1+people_num]
print solve(people_list)
case_start = start+1+people_num
if __name__ == '__main__':
input_case = \
'''2
4
72 50
57 67
74 55
64 60
5
1 5
2 4
3 3
4 2
5 1'''
nerd2(input_case)
| [
"[email protected]"
] | |
590397fe9381bcf57530d1bb9e192b13de61ebd2 | 3abff95fb50ecacf0c68d2f2a60890f4d3d5f50e | /build-c-ext.py | 313ae53c37cea0edcecc423e44931ffe98cd1d97 | [
"MIT"
] | permissive | orende/intro-till-python | bc8587fef11313f3d38fa4cc0f010c03ef836832 | d2fccc7aaeabf88d6b040b7e199326a748dee0fa | refs/heads/master | 2021-01-10T05:58:03.984026 | 2015-11-23T08:02:46 | 2015-11-23T08:02:46 | 45,859,749 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | from shell import shell
cmd1 = 'gcc -DNDEBUG -g -O3 -Wall -Wstrict-prototypes -fPIC -I/usr/include/python2.7 -c spammodule.c -o spammodule.o'
result1 = shell(cmd1)
if result1.code != 0:
print result1.errors()
cmd2 = 'gcc -shared spammodule.o -L/usr/local/lib -o spammodule.so'
result2 = shell(cmd2)
if result2.code != 0:
print result2.errors()
| [
"[email protected]"
] | |
e8973cda288820446681e4f974d0c59e7a39bf3f | 7e6d3c6669181fd9314b051cad9a38781fb935a5 | /chapter3/3_5.py | 10c1a2b3da7400e3193e11ec1b72aacb44fb44e3 | [] | no_license | YMilton/data_mining | 1d3e6bcf1a0c73a4ece80651d82e06d001311c6d | 35e6890c222d8db2bcf4155daceba4efe21cd8f0 | refs/heads/master | 2020-04-11T23:04:00.120679 | 2019-04-03T13:23:34 | 2019-04-03T13:23:34 | 161,468,283 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,387 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def LDA(positive, negative):
'''
Linear Discriminant Analysis, 线性判别分析
J = w.T*Sb*w/w.T*Sw*w
Sb = (mu0-mu1)*(mu0-mu1).T
Sw = sigma0+sigma1
w = Sw-1(mu0 - mu1)
:return:
'''
# the mean of positive and negative
mu1 = np.mean(positive, axis=0).reshape((-1,1))
mu0 = np.mean(negative, axis=0).reshape((-1,1))
# the cov of positive and negative
sigma1 = np.cov(positive, rowvar=False)
sigma0 = np.cov(negative, rowvar=False)
Sw = sigma0 + sigma1
# return omega
return np.linalg.inv(Sw).dot(mu0 - mu1)
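# Note: at this problem size the explicit inverse is fine, but the same LDA
# direction can be computed more stably as np.linalg.solve(Sw, mu0 - mu1).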
if __name__ == '__main__':
# read the data (编号,密度,含糖量,是否好瓜)
workbook = pd.read_csv('data/watermelon_3a.csv', header=None)
# delete the row number
data = np.array(workbook.values[:, 1:])
# classify the positive and negative
positive = data[data[:, -1] == 1, :-1]
negative = data[data[:, -1] == 0, :-1]
omega = LDA(positive, negative)
# plot the LDA
plt.plot(positive[:,0], positive[:,1], 'bo')
plt.plot(negative[:,0], negative[:,1], 'r+')
lda_left = -(omega[0]*0) / omega[1]
lda_right = -(omega[0]*0.9) / omega[1]
plt.plot([0,0.9], [lda_left, lda_right], 'g-')
plt.xlabel('density')
plt.ylabel('sugar rate')
plt.title('LDA')
plt.show() | [
"[email protected]"
] | |
ec0e013992f3a32768a38fca75858408bb77e362 | 3c5ffbe12bda41cd96025ba4667f2236ec585751 | /05/fashion_masks.py | c1b1cf08631339764912cadaab92d001076a9d52 | [] | no_license | ericlief/deep-learning | 7a9fd64ac22cbc082290ba303db300756299c7d6 | 24fc0d2d5b0fe00baa5ea66f37e3a7f80faab198 | refs/heads/master | 2020-03-08T08:32:47.079825 | 2018-07-08T10:09:42 | 2018-07-08T10:09:42 | 128,024,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,817 | py | #!/usr/bin/env python3
#Team: Felipe Vianna and Yuu Sakagushi
# Felipe Vianna: 72ef319b-1ef9-11e8-9de3-00505601122b
# Yuu Sakagushi: d9fbf49b-1c71-11e8-9de3-00505601122b
import numpy as np
import tensorflow as tf
class Dataset:
def __init__(self, filename, shuffle_batches = True):
data = np.load(filename)
self._images = data["images"]
self._labels = data["labels"] if "labels" in data else None
self._masks = data["masks"] if "masks" in data else None
self._shuffle_batches = shuffle_batches
self._permutation = np.random.permutation(len(self._images)) if self._shuffle_batches else range(len(self._images))
@property
def images(self):
return self._images
@property
def labels(self):
return self._labels
@property
def masks(self):
return self._masks
def next_batch(self, batch_size):
batch_size = min(batch_size, len(self._permutation))
batch_perm, self._permutation = self._permutation[:batch_size], self._permutation[batch_size:]
return self._images[batch_perm], self._labels[batch_perm] if self._labels is not None else None, self._masks[batch_perm] if self._masks is not None else None
def epoch_finished(self):
if len(self._permutation) == 0:
self._permutation = np.random.permutation(len(self._images)) if self._shuffle_batches else range(len(self._images))
return True
return False
class Network:
HEIGHT = 28
WIDTH = 28
LABELS = 10
def __init__(self, threads, seed=42):
# Create an empty graph and a session
graph = tf.Graph()
graph.seed = seed
self.session = tf.Session(graph = graph, config=tf.ConfigProto(inter_op_parallelism_threads=threads,
intra_op_parallelism_threads=threads))
def construct(self, args):
with self.session.graph.as_default():
# Inputs
self.images = tf.placeholder(tf.float32, [None, self.HEIGHT, self.WIDTH, 1], name="images")
self.labels = tf.placeholder(tf.int64, [None], name="labels")
self.masks = tf.placeholder(tf.float32, [None, self.HEIGHT, self.WIDTH, 1], name="masks")
self.is_training = tf.placeholder(tf.bool, [], name="is_training")
# TODO: Computation and training.
#
# The code below assumes that:
# - loss is stored in `loss`
# - training is stored in `self.training`
# - label predictions are stored in `self.labels_predictions` of shape [None] and type tf.int64
# - mask predictions are stored in `self.masks_predictions` of shape [None, 28, 28, 1] and type tf.float32
# with values 0 or 1
# Network
#both classification and masks
conv_1 = self.images
for i in range(10):
conv_1 = tf.layers.conv2d(conv_1, filters=args.cnn_dim_1, kernel_size=[3,3], strides=1, padding="same", activation=tf.nn.sigmoid, name="conv_1_"+str(i))
print(conv_1)
if args.dropout_1:
conv_1 = tf.layers.dropout(conv_1, rate=args.dropout_1, training=self.is_training, name="dropout_1")
if args.bn_1:
conv_1 = tf.layers.batch_normalization(conv_1, name='bn_1')
if args.pool:
conv_1 = tf.layers.average_pooling2d(conv_1, [2,2], 2, 'valid', name='pool')
#for i in range(2):
#conv_2 = tf.layers.conv2d(conv_1, filters=args.cnn_dim_2, kernel_size=[3,3], strides=2, padding="same", activation=tf.nn.sigmoid, name="conv_2_"+str(i))
#print(conv_2)
#if args.pool:
#conv_2 = tf.layers.average_pooling2d(conv_2, [2,2], 2, 'valid', name='pool')
##conv_2 = tf.layers.conv2d(conv_1, args.cnn_dim_2, kernel_size=[3, 3], strides=2, padding="same", activation=tf.nn.relu, name="conv_2")
##print(conv_2)
#if args.dropout_2:
#conv_2 = tf.layers.dropout(conv_2, rate=args.dropout_2, training=self.is_training, name="dropout_2")
#if args.bn_2:
#conv_2 = tf.layers.batch_normalization(conv_2, name='bn_2')
#conv_3 = tf.layers.conv2d(conv_2, args.cnn_dim_3, kernel_size=[2, 2], strides=2, padding="same", activation=tf.nn.relu, name="conv_3")
#print(conv_3)
#if args.dropout_3:
#conv_3 = tf.layers.dropout(conv_3, rate=args.dropout_2, training=self.is_training, name="dropout_3")
#if args.bn_3:
#conv_3 = tf.layers.batch_normalization(conv_2, name='bn_3')
# conv = tf.layers.conv2d(conv2, filters=10, kernel_size=[5, 5], strides=2, padding="same", use_bias=False, activation=None, name="conv")
# bn = tf.layers.batch_normalization(inputs=conv, axis=-1, training = self.is_training, name="bn")
# relu = tf.nn.relu(bn)
# classification
#pool1 = tf.layers.max_pooling2d(conv2, pool_size=[2, 2], strides=1, name="pool1")
flat = tf.layers.flatten(conv_1, name="flatten")
print('flat1', flat)
fcl_1 = tf.layers.dense(flat, 256, activation=tf.nn.relu, name="fcl_1")
#if args.dropout_4:
#fc_1 = tf.layers.dropout(fcl_1, rate=args.dropout_4, training=self.is_training, name="dropout_4")
#fcl_2 = tf.layers.dense(fcl_1, 512, activation=tf.nn.relu, name="fcl_2")
#output_layer_1 = tf.layers.dense(fcl_2, self.LABELS, activation=None, name="output_layer_1")
output_layer_1 = tf.layers.dense(fcl_1, self.LABELS, activation=None, name="output_layer_1")
# Masks output
flat_2 = tf.layers.flatten(conv_1, name="flatten2")
#dense3 = tf.layers.dense(flatten2, 900, activation=tf.nn.relu, name="dense3")
fcl_mascs_1 = tf.layers.dense(flat_2, 384, activation=tf.nn.relu, name="fcl_mascs_1")
fcl_mascs_2 = tf.layers.dense(fcl_mascs_1, 784, activation=None, name="fcl_mascs_2")
output_layer_2 = tf.reshape(fcl_mascs_2, [-1, 28, 28, 1])
self.labels_predictions = tf.argmax(output_layer_1, axis=1)
#self.masks_predictions = (tf.round(tf.tanh(output_layer2))+1)*0.5
self.masks_predictions = (tf.sign(output_layer_2) + 1) * 0.5
# Training
loss1 = tf.losses.sparse_softmax_cross_entropy(self.labels, output_layer_1, scope="loss1")
loss2 = tf.losses.sigmoid_cross_entropy(self.masks, output_layer_2, scope="loss2")
loss = loss1 + loss2
global_step = tf.train.create_global_step()
self.training1 = tf.train.AdamOptimizer().minimize(loss1, global_step=global_step, name="training1")
self.training2 = tf.train.AdamOptimizer().minimize(loss2, global_step=global_step, name="training2")
# Summaries
self.accuracy = tf.reduce_mean(tf.cast(tf.equal(self.labels, self.labels_predictions), tf.float32))
only_correct_masks = tf.where(tf.equal(self.labels, self.labels_predictions),
self.masks_predictions, tf.zeros_like(self.masks_predictions))
intersection = tf.reduce_sum(only_correct_masks * self.masks, axis=[1,2,3])
self.iou = tf.reduce_mean(
intersection / (tf.reduce_sum(only_correct_masks, axis=[1,2,3]) + tf.reduce_sum(self.masks, axis=[1,2,3]) - intersection)
)
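            # IoU is measured only on examples whose label was predicted
            # correctly (masks of misclassified examples are zeroed above), so
            # the metric couples classification and segmentation quality.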
summary_writer = tf.contrib.summary.create_file_writer(args.logdir, flush_millis=10 * 1000)
self.summaries = {}
with summary_writer.as_default(), tf.contrib.summary.record_summaries_every_n_global_steps(100):
self.summaries["train"] = [tf.contrib.summary.scalar("train/loss", loss),
tf.contrib.summary.scalar("train/accuracy", self.accuracy),
tf.contrib.summary.scalar("train/iou", self.iou),
tf.contrib.summary.image("train/images", self.images),
tf.contrib.summary.image("train/masks", self.masks_predictions)]
with summary_writer.as_default(), tf.contrib.summary.always_record_summaries():
for dataset in ["dev", "test"]:
self.summaries[dataset] = [tf.contrib.summary.scalar(dataset+"/loss", loss),
tf.contrib.summary.scalar(dataset+"/accuracy", self.accuracy),
tf.contrib.summary.scalar(dataset+"/iou", self.iou),
tf.contrib.summary.image(dataset+"/images", self.images),
tf.contrib.summary.image(dataset+"/masks", self.masks_predictions)]
# Initialize variables
self.session.run(tf.global_variables_initializer())
with summary_writer.as_default():
tf.contrib.summary.initialize(session=self.session, graph=self.session.graph)
def train(self, images, labels, masks):
self.session.run([self.training1, self.summaries["train"]],
{self.images: images, self.labels: labels, self.masks: masks, self.is_training: True})
self.session.run([self.training2, self.summaries["train"]],
{self.images: images, self.labels: labels, self.masks: masks, self.is_training: True})
def evaluate(self, dataset, images, labels, masks):
return self.session.run([self.summaries[dataset], self.accuracy, self.iou],
{self.images: images, self.labels: labels, self.masks: masks, self.is_training: False})
def predict(self, images):
return self.session.run([self.labels_predictions, self.masks_predictions],
{self.images: images, self.is_training: False})
if __name__ == "__main__":
import argparse
import datetime
import os
import re
# Fix random seed
np.random.seed(42)
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", default=50, type=int, help="Batch size.")
parser.add_argument("--epochs", default=20, type=int, help="Number of epochs.")
parser.add_argument("--threads", default=1, type=int, help="Maximum number of threads to use.")
parser.add_argument("--dropout_1", default=0, type=float, help="Dropout rate.")
parser.add_argument("--dropout_2", default=0, type=float, help="Dropout rate.")
parser.add_argument("--dropout_3", default=0, type=float, help="Dropout rate.")
parser.add_argument("--dropout_4", default=0, type=float, help="Dropout rate.")
parser.add_argument("--bn_1", default=False, type=bool, help="Batch normalization.")
parser.add_argument("--bn_2", default=False, type=bool, help="Batch normalization.")
parser.add_argument("--bn_3", default=False, type=bool, help="Batch normalization.")
parser.add_argument("--cnn_dim_1", default=64, type=int, help="RNN cell dimension.")
parser.add_argument("--cnn_dim_2", default=128, type=int, help="RNN cell dimension.")
parser.add_argument("--cnn_dim_3", default=256, type=int, help="RNN cell dimension.")
parser.add_argument("--pool", default=False, type=bool, help="Pooling.")
args = parser.parse_args()
# Create logdir name
args.logdir = "logs/{}-{}-{}".format(
os.path.basename(__file__),
datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S"),
",".join(("{}={}".format(re.sub("(.)[^_]*_?", r"\1", key), value)
for key, value in sorted(vars(args).items()))).replace("/", "-")
)
if not os.path.exists("logs"): os.mkdir("logs") # TF 1.6 will do this by itself
# Load the data
train = Dataset("fashion-masks-train.npz")
print(len(train.masks[0]))
dev = Dataset("fashion-masks-dev.npz")
test = Dataset("fashion-masks-test.npz", shuffle_batches=False)
# Construct the network
network = Network(threads=args.threads)
network.construct(args)
# Train
best_acc = 0
best_mask = 0
for i in range(args.epochs):
while not train.epoch_finished():
images, labels, masks = train.next_batch(args.batch_size)
network.train(images, labels, masks)
result = network.evaluate("dev", dev.images, dev.labels, dev.masks)
if i%5==0:
print("----------------------", i, "/", args.epochs, "epochs")
if result[1] >= best_acc:
best_acc = result[1]
print("Accuracy: {:.2f}".format(100 * result[1]), 'Best!!!')
else:
print("Accuracy: {:.2f}".format(100 * result[1]))
if result[2] >= best_mask:
best_mask = result[2]
print("Mask iou: {:.2f}".format(100 * result[2]), 'Best!!!')
else:
print("Mask iou: {:.2f}".format(100 * result[2]))
print("---------------")
# Predict test data
#with open("fashion_masks_test.txt", "w") as test_file:
with open("{}/fashion_masks_test.txt".format(args.logdir), "w") as test_file:
while not test.epoch_finished():
images, _, _ = test.next_batch(args.batch_size)
labels, masks = network.predict(images)
for i in range(len(labels)):
print(labels[i], *masks[i].astype(np.uint8).flatten(), file=test_file)
| [
"ericlief@bossa"
] | ericlief@bossa |
decc0276a133d3ca4d2bfdc0f34fc1ff7ee92055 | a1730de4b50c17ecd388a995a1526c2eab80cb7d | /Plugins/Aspose-Cells-Java-for-Python/setup.py | 49e93716140f94069b1d526135d2a7a8348415f5 | [
"MIT"
] | permissive | aspose-cells/Aspose.Cells-for-Java | 2dcba41fc99b0f4b3c089f2ff1a3bcd32591eea1 | 42d501da827058d07df7399ae104bb2eb88929c3 | refs/heads/master | 2023-09-04T21:35:15.198721 | 2023-08-10T09:26:41 | 2023-08-10T09:26:41 | 2,849,714 | 133 | 89 | MIT | 2023-03-07T09:39:29 | 2011-11-25T13:16:33 | Java | UTF-8 | Python | false | false | 705 | py | __author__ = 'fahadadeel'
from setuptools import setup, find_packages
setup(
name = 'aspose-cells-java-for-python',
packages = find_packages(),
version = '1.0',
description = 'Aspose.cells Java for Python is a project that demonstrates / provides the Aspose.Cells for Java API usage examples in Python.',
author='Fahad Adeel',
author_email='[email protected]',
url='https://github.com/asposecells/Aspose_Cells_Java/tree/master/Plugins/Aspose-Cells-Java-for-Python',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent'
]
)
| [
"[email protected]"
] | |
9a497a06ee18928dfc7bc17f59d25523f920e47e | 671067c93d251635ed1360936c7ec84a59ece10c | /doublecop.py | 419ad0c817fd5955ddadc9233606416bb494dcd7 | [
"BSD-2-Clause"
] | permissive | nd1511/ccw_tutorial_theano | 48773052ec99da95aa50300399c943834ca29435 | f92aa8edbb567c9ac09149a382858f841a4a7749 | refs/heads/master | 2020-04-03T13:10:35.753232 | 2017-02-01T21:54:14 | 2017-02-01T21:54:14 | 155,276,374 | 1 | 0 | BSD-2-Clause | 2018-10-29T20:25:01 | 2018-10-29T20:25:01 | null | UTF-8 | Python | false | false | 577 | py | from theano import Apply
from theano.gof import COp
from theano.tensor import as_tensor_variable
class DoubleCOp(COp):
__props__ = ()
def __init__(self):
COp.__init__(self, ["doublecop.c"],
"APPLY_SPECIFIC(doublecop)")
def make_node(self, x):
x = as_tensor_variable(x)
if x.ndim != 1:
raise TypeError("DoubleCOp only works with 1D")
return Apply(self, [x], [x.type()])
    def infer_shape(self, node, input_shapes):
        # Theano passes the Apply node along with the input shapes; doubling
        # is elementwise, so the output shape equals the input shape.
        return input_shapes
def grad(self, inputs, g):
return [g[0] * 2]
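# Since the op computes y = 2 * x elementwise, the gradient with respect to x
# is simply 2 * g for an incoming output gradient g, which is what grad()
# returns above.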
| [
"[email protected]"
] | |
fb6487f6c0dde6e50b419b0a00ed08704ef76d23 | c74ef12fa57c54de1ccfa4a63e91069aca5a245c | /mysite/polls/migrations/0001_initial.py | d04e17a799718a92ee9150db1a5d77117090b733 | [] | no_license | alex-mark/django-polls | c21ffcc4100c510d9bfb88edeae836f7a9a732c1 | cae1a456e9bb2d97a8a92b94783b02c33ef10173 | refs/heads/master | 2023-05-02T20:06:43.382588 | 2020-05-08T17:32:11 | 2020-05-08T17:32:11 | 184,925,766 | 0 | 0 | null | 2023-04-21T20:33:19 | 2019-05-04T17:35:26 | Python | UTF-8 | Python | false | false | 3,735 | py | # Generated by Django 2.2 on 2019-05-10 12:12
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
],
),
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
('votes', models.IntegerField(default=0)),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question')),
],
),
migrations.CreateModel(
name='CustomUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| [
"[email protected]"
] | |
583582288cc22190bd1acb06dfb58b86cd717ddc | b1b4a323fd0c22a55c1ce88317a26041d37ec2cf | /w2/d5_stats_and_prob/test/.ipynb_checkpoints/question_01-checkpoint.py | c824e685054be622153266722c3148e68b3b80c3 | [] | no_license | LilaKelland/data_bootcamp | 5ee5cbadc879c30e8d70c9c923abbd4ffb2bbcb0 | 86cd887f940310589b76d7df74631ac96e26e29b | refs/heads/master | 2023-09-02T13:55:54.794979 | 2021-11-18T19:56:05 | 2021-11-18T19:56:05 | 406,582,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | """
Create a function that returns the mean of all digits.
Example:
mean(42) ➞ 3.0
mean(12345) ➞ 3.0
mean(666) ➞ 6.0
Notes:
- Function should always return float
"""
def mean(digits):
str_digits = str(digits)
split_digits = []
for dgt in str_digits:
int_dgt = int(dgt)
split_digits.append(int_dgt)
return(sum(split_digits)/ len(split_digits)) | [
"[email protected]"
] | |
e758759b714c65ed9bcc448e5fe5615004c2826b | 336d52bb53eb24d09e8433018525fa54aa7f1592 | /Agents/Actor_Critic_Agents/DDPG.py | ad6aa0593f8c9d0c9925aaa9282afb929428cf7d | [] | no_license | crashmatt/Deep-Reinforcement-Learning-Algorithms-with-PyTorch | 8a1901344df0fc499731515cbd53670c77c9c677 | 9c487dc51a483d2130cb9bb2a4d771f9748949cb | refs/heads/master | 2020-05-16T06:20:14.048294 | 2019-04-22T16:38:02 | 2019-04-22T16:38:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,653 | py | import copy
import torch
import torch.nn.functional as functional
from nn_builder.pytorch.NN import NN
from torch import optim
from Base_Agent import Base_Agent
from Replay_Buffer import Replay_Buffer
from Utilities.OU_Noise import OU_Noise
class DDPG(Base_Agent):
"""A DDPG Agent"""
agent_name = "DDPG"
def __init__(self, config):
Base_Agent.__init__(self, config)
self.hyperparameters = config.hyperparameters
self.critic_local = self.create_NN(input_dim=self.state_size + self.action_size, output_dim=1, key_to_use="Critic")
self.critic_target = self.create_NN(input_dim=self.state_size + self.action_size, output_dim=1, key_to_use="Critic")
self.critic_target.load_state_dict(copy.deepcopy(self.critic_local.state_dict()))
self.critic_optimizer = optim.Adam(self.critic_local.parameters(),
lr=self.hyperparameters["Critic"]["learning_rate"])
self.memory = Replay_Buffer(self.hyperparameters["Critic"]["buffer_size"], self.hyperparameters["batch_size"],
self.config.seed)
self.actor_local = self.create_NN(input_dim=self.state_size, output_dim=self.action_size, key_to_use="Actor")
self.actor_target = self.create_NN(input_dim=self.state_size, output_dim=self.action_size, key_to_use="Actor")
self.actor_target.load_state_dict(copy.deepcopy(self.actor_local.state_dict()))
self.actor_optimizer = optim.Adam(self.actor_local.parameters(),
lr=self.hyperparameters["Actor"]["learning_rate"])
self.noise = OU_Noise(self.action_size, self.config.seed, self.hyperparameters["mu"],
self.hyperparameters["theta"], self.hyperparameters["sigma"])
def reset_game(self):
"""Resets the game information so we are ready to play a new episode"""
Base_Agent.reset_game(self)
self.noise.reset()
def step(self):
"""Runs a step in the game"""
while not self.done:
self.action = self.pick_action()
self.conduct_action(self.action)
if self.time_for_critic_and_actor_to_learn():
for _ in range(self.hyperparameters["learning_updates_per_learning_session"]):
states, actions, rewards, next_states, dones = self.memory.sample() # Sample experiences
self.critic_learn(states, actions, rewards, next_states, dones)
self.actor_learn(states)
self.save_experience()
self.state = self.next_state #this is to set the state for the next iteration
self.global_step_number += 1
self.episode_number += 1
def pick_action(self):
"""Picks an action using the actor network and then adds some noise to it to ensure exploration"""
state = torch.from_numpy(self.state).float().unsqueeze(0).to(self.device)
self.actor_local.eval()
with torch.no_grad():
action = self.actor_local(state).cpu().data.numpy()
self.actor_local.train()
action += self.noise.sample()
return action.squeeze(0)
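    # Exploration comes from the Ornstein-Uhlenbeck process: temporally
    # correlated noise is added to the deterministic policy output, which
    # typically suits continuous control better than independent per-step
    # noise.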
def critic_learn(self, states, actions, rewards, next_states, dones):
"""Runs a learning iteration for the critic"""
loss = self.compute_loss(states, next_states, rewards, actions, dones)
self.take_optimisation_step(self.critic_optimizer, self.critic_local, loss, self.hyperparameters["Critic"]["gradient_clipping_norm"])
self.soft_update_of_target_network(self.critic_local, self.critic_target, self.hyperparameters["Critic"]["tau"])
def compute_loss(self, states, next_states, rewards, actions, dones):
"""Computes the loss for the critic"""
with torch.no_grad():
critic_targets = self.compute_critic_targets(next_states, rewards, dones)
critic_expected = self.compute_expected_critic_values(states, actions)
loss = functional.mse_loss(critic_expected, critic_targets)
return loss
def compute_critic_targets(self, next_states, rewards, dones):
"""Computes the critic target values to be used in the loss for the critic"""
critic_targets_next = self.compute_critic_values_for_next_states(next_states)
critic_targets = self.compute_critic_values_for_current_states(rewards, critic_targets_next, dones)
return critic_targets
def compute_critic_values_for_next_states(self, next_states):
"""Computes the critic values for next states to be used in the loss for the critic"""
with torch.no_grad():
actions_next = self.actor_target(next_states)
critic_targets_next = self.critic_target(torch.cat((next_states, actions_next), 1))
return critic_targets_next
def compute_critic_values_for_current_states(self, rewards, critic_targets_next, dones):
"""Computes the critic values for current states to be used in the loss for the critic"""
critic_targets_current = rewards + (self.hyperparameters["discount_rate"] * critic_targets_next * (1.0 - dones))
return critic_targets_current
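    # This is the one-step Bellman target r + gamma * Q'(s', pi'(s')), with
    # the bootstrap term masked out on terminal transitions via (1.0 - dones).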
def compute_expected_critic_values(self, states, actions):
"""Computes the expected critic values to be used in the loss for the critic"""
critic_expected = self.critic_local(torch.cat((states, actions), 1))
return critic_expected
def time_for_critic_and_actor_to_learn(self):
"""Returns boolean indicating whether there are enough experiences to learn from and it is time to learn for the
actor and critic"""
return self.enough_experiences_to_learn_from() and self.global_step_number % self.hyperparameters["update_every_n_steps"] == 0
def actor_learn(self, states):
"""Runs a learning iteration for the actor"""
if self.done: #we only update the learning rate at end of each episode
self.update_learning_rate(self.hyperparameters["Actor"]["learning_rate"], self.actor_optimizer)
actor_loss = self.calculate_actor_loss(states)
self.take_optimisation_step(self.actor_optimizer, self.actor_local, actor_loss,
self.hyperparameters["Actor"]["gradient_clipping_norm"])
self.soft_update_of_target_network(self.actor_local, self.actor_target, self.hyperparameters["Actor"]["tau"])
def calculate_actor_loss(self, states):
"""Calculates the loss for the actor"""
actions_pred = self.actor_local(states)
actor_loss = -self.critic_local(torch.cat((states, actions_pred), 1)).mean()
return actor_loss | [
"[email protected]"
] | |
0980ec9b29cae8ca8eb4d166d4157dbe4b3c392b | 4ce5022078c53b3bd75493b12a38237618b52fc8 | /prodsys/migrations/0068_job_number_of_events.py | c18fcdcbc318a34d2627aee7d52bbe11aa900c43 | [] | no_license | virthead/COMPASS-ProdSys | 90180e32c3a23d9fd05b252a6f8ded234525a780 | 6dfaa3e9ca40845282d3004ac61f386db5abdbe9 | refs/heads/master | 2023-02-23T18:16:02.789709 | 2022-09-28T09:37:59 | 2022-09-28T09:37:59 | 144,685,667 | 0 | 1 | null | 2018-10-13T10:07:42 | 2018-08-14T07:38:34 | Python | UTF-8 | Python | false | false | 452 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-12-14 13:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('prodsys', '0067_task_files_source'),
]
operations = [
migrations.AddField(
model_name='job',
name='number_of_events',
field=models.IntegerField(default=0),
),
]
| [
"[email protected]"
] | |
1dd1b3235d84748e650b23f239c85f7d7acee5dd | 18b0d83b5daadbc926a88e320c5a7b337649f824 | /K-means.py | 04158fd0125b731c9042dddff1dac84ef638b6c6 | [] | no_license | adityadas8888/k-means | e122c44895f612ed952ca3071c1d75956526b134 | 8ac21491d61e590d5745bbc0f24d0484cf346350 | refs/heads/master | 2021-08-27T21:39:32.577823 | 2021-08-14T03:07:56 | 2021-08-14T03:07:56 | 180,055,845 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,576 | py | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import copy
def center_assignment(df, centers):
for i in centers.keys():
df['distance_from_{}'.format(i)] = (
np.sqrt(
(df['x'] - centers[i][0]) ** 2
+ (df['y'] - centers[i][1]) ** 2
)
)
centroid_distance_cols = ['distance_from_{}'.format(i) for i in centers.keys()]
df['closest'] = df.loc[:, centroid_distance_cols].idxmin(axis=1)
df['closest'] = df['closest'].map(lambda x: int(x.lstrip('distance_from_')))
df['color'] = df['closest'].map(lambda x: colmap[x])
return df
def update_center(center):
for i in centers.keys():
centers[i][0] = np.mean(df[df['closest'] == i]['x'])
centers[i][1] = np.mean(df[df['closest'] == i]['y'])
return center
iter = 1
mean1 = [1,0]
mean2 = [0,1.5]
Sigma1 = [[0.9,0.4],[0.4,0.9] ]
Sigma2 = [[0.9,0.4],[0.4,0.9] ]
#c=[[10,10],[-10,-10]] #centers for Part 2 of Question 1
c=[[10,10],[-10,-10],[10,-10],[-10,10]] #centers for Part 3 of Question 1
#k=2 # cluster for Part 2 of Question 1
k=4 # cluster for Part 3 of Question 1
colmap = {1: 'r', 2: 'g', 3: 'b',4:'y',5:'pink',6:'brown'}
x, y = np.random.multivariate_normal(mean1, Sigma1, 500).T
p, q = np.random.multivariate_normal(mean2, Sigma2, 500).T
x=np.concatenate((x, p))
y=np.concatenate((y, q))
d = {
'x': x,
'y': y
}
df=pd.DataFrame(d)
centers = {
i+1: [c[i][0],c[i][1]]
for i in range(len(c))
}
df = center_assignment(df, centers)
dx=1
dy=1
while iter<=10000 or (dx>=0.001 and dy>= 0.001):
closest_centers = df['closest'].copy(deep=True)
old_centers = copy.deepcopy(centers)
for i in old_centers.keys():
old_x = old_centers[i][0]
old_y = old_centers[i][1]
dx = (centers[i][0] - old_centers[i][0])
dy = (centers[i][1] - old_centers[i][1])
centers = update_center(centers)
df = center_assignment(df, centers)
iter=iter+1
if closest_centers.equals(df['closest']):
break
fig = plt.figure(figsize=(10, 10))
plt.scatter(df['x'], df['y'], color=df['color'], alpha=0.5, edgecolor='k',marker="x")
for i in centers.keys():
plt.scatter(*centers[i], color='black')
plt.xlim(-3, 5)
plt.ylim(-3, 5)
print(centers)
print(iter)
print(df)
plt.show()
| [
"[email protected]"
] | |
69e42ad3252722a86f3f6a0fc45ac5e791d360f7 | 2c4a278c9ee6f5ed3eb272e4117d2b5c01b94d61 | /homework/homework4/homework4.py | 322ea8a3237cef2967a83611427d7ecc589aca2c | [] | no_license | FimnOrde/Python_study_120191080618 | fc3fee00532d1f603da48428c05f54aac9e038f5 | 70fdeaacd10d28fa368ce99b26da87f608c0eadf | refs/heads/master | 2023-05-06T10:53:52.812151 | 2021-06-01T14:19:11 | 2021-06-01T14:19:11 | 343,657,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,148 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# file:homework4.py
# author:74049
# datetime:2021/4/6 15:14
# software: PyCharm
'''
Check a student's account name and base64-encoded password against account.txt.
'''
# import the modules you need
import base64
try:
with open("D:\\PythonTest\\Test\\homework4\\homework3\\account.txt", "r", encoding="utf-8") as file:
midlis = file.readlines()
flag = 0
flag1 = 0
lis = []
for i in midlis:
info = i.split(" ")
lis.append(info)
    name = input("Enter the student name to log in: ")
num = 0
for i in lis:
if name == i[0]:
            account = input("Enter the account: ")
flag = 1
break
num = num +1
else:
print("不存在此用户")
if flag == 1:
if account == lis[num][1]:
            password = input("Enter the password: ")
flag1 = 1
else:
print("账号输入错误")
if flag1 == 1:
        # The stored field is the repr of a bytes object ("b'...'" plus a
        # trailing newline), so strip the first two and last two characters
        # before base64-decoding the real password.
        word = base64.b64decode(lis[num][2][2:-2]).decode()
        if password == word:
            print("Login successful")
        else:
            print("Wrong password, login failed")
except Exception:
print("程序出错") | [
"[email protected]"
] | |
aa342583e7f64224e167db39abc398760268e22e | 187ec84de1e03e2fe1e154dcb128b5886b4d0547 | /chapter_05/exercises/05_alien_colors_3.py | bba30d284c7891d8e409c681d5c751e6804d47bc | [] | no_license | xerifeazeitona/PCC_Basics | fcbc1b8d5bc06e82794cd9ff0061e6ff1a38a64e | 81195f17e7466c416f97acbf7046d8084829f77b | refs/heads/main | 2023-03-01T07:50:02.317941 | 2021-01-27T21:08:28 | 2021-01-27T21:08:28 | 330,748,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,052 | py | # 5-5. Alien Colors #3
# Turn your if-else chain from Exercise 5-4 into an if-elif-else chain.
# If the alien is green, print a message that the player earned 5
# points.
alien_color = 'green'
if alien_color == 'green':
print('You just earned 5 points!')
elif alien_color == 'yellow':
print('You just earned 10 points!')
else:
print('You just earned 15 points!')
# If the alien is yellow, print a message that the player earned 10
# points.
alien_color = 'yellow'
if alien_color == 'green':
print('You just earned 5 points!')
elif alien_color == 'yellow':
print('You just earned 10 points!')
else:
print('You just earned 15 points!')
# If the alien is red, print a message that the player earned 15 points.
alien_color = 'red'
if alien_color == 'green':
print('You just earned 5 points!')
elif alien_color == 'yellow':
print('You just earned 10 points!')
else:
print('You just earned 15 points!')
# Write three versions of this program, making sure each message is
# printed for the appropriate color alien. | [
"[email protected]"
] | |
b6da5a066cbcb33172a12090e0a0c20860343965 | 1d2d18752af8fcd52e9721f55cf0301dea0d854d | /BTrees/btree.py | 8682c4fdcfee8c1cb6325070b9340b67a9e9f8a4 | [] | no_license | aazambrano2/CS3-Labs | ed74329de7483a084b4d5b2ebf60c241928caff9 | 7b8e8348c804f34104969407b441801935c16413 | refs/heads/master | 2023-01-02T06:34:08.335588 | 2020-10-15T22:13:32 | 2020-10-15T22:13:32 | 238,854,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,707 | py | # Code to implement a B-tree
# Programmed by Olac Fuentes and Diego Aguirre
import matplotlib.pyplot as plt
class BTreeNode:
# Constructor
def __init__(self, data, child=[], is_leaf=True, max_items=5):
self.data = data
self.child = child
self.is_leaf = is_leaf
self.max_items = max_items # max_items must be odd and greater or equal to 3
def is_full(self):
return len(self.data) >= self.max_items
class BTree:
# Constructor
def __init__(self, max_items=5):
self.max_items = max_items # Maximum number of keys allowed in a node
self.root = BTreeNode(data=[],max_items=max_items)
def find_child(self, k, node=None):
# Determines value of c, such that k must be in subtree node.child[c], if k is in the BTree
if node is None:
node = self.root
for i in range(len(node.data)):
if k < node.data[i]:
return i
return len(node.data)
def insert_internal(self, i, node=None):
if node is None:
node = self.root
# node cannot be Full
if node.is_leaf:
self.insert_leaf(i, node)
else:
k = self.find_child(i, node)
if node.child[k].is_full():
m, l, r = self.split(node.child[k])
node.data.insert(k, m)
node.child[k] = l
node.child.insert(k + 1, r)
k = self.find_child(i, node)
self.insert_internal(i, node.child[k])
def split(self, node=None):
if node is None:
node = self.root
# print('Splitting')
# PrintNode(T)
mid = node.max_items // 2
if node.is_leaf:
left_child = BTreeNode(node.data[:mid], max_items=node.max_items)
right_child = BTreeNode(node.data[mid + 1:], max_items=node.max_items)
else:
left_child = BTreeNode(node.data[:mid], node.child[:mid + 1], node.is_leaf, max_items=node.max_items)
right_child = BTreeNode(node.data[mid + 1:], node.child[mid + 1:], node.is_leaf, max_items=node.max_items)
return node.data[mid], left_child, right_child
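    # split() hands back the median key plus the left and right halves; the
    # caller pushes the median into the parent, which keeps every node at or
    # below max_items keys.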
def insert_leaf(self, i, node=None):
if node is None:
node = self.root
node.data.append(i)
node.data.sort()
def leaves(self, node=None):
if node is None:
node = self.root
# Returns the leaves in a b-tree
if node.is_leaf:
return [node.data]
s = []
for c in node.child:
s = s + self.leaves(c)
return s
def insert(self, i, node=None):
if node is None:
node = self.root
if not node.is_full():
self.insert_internal(i, node)
else:
m, l, r = self.split(node)
node.data = [m]
node.child = [l, r]
node.is_leaf = False
k = self.find_child(i, node)
self.insert_internal(i, node.child[k])
def height(self, node=None):
if node is None:
node = self.root
if node.is_leaf:
return 0
return 1 + self.height(node.child[0])
def find(self, k, node=None):
if node is None:
node = self.root
# Returns node where k is, or None if k is not in the tree
if k in node.data:
return node
if node.is_leaf:
return None
return self.find(k, node.child[self.find_child(k, node)])
def _set_x(self, dx, node=None):
if node is None:
node = self.root
# Finds x-coordinate to display each node in the tree
if node.is_leaf:
return
else:
for c in node.child:
self._set_x(dx, c)
d = (dx[node.child[0].data[0]] + dx[node.child[-1].data[0]] + 10 * len(node.child[-1].data)) / 2
dx[node.data[0]] = d - 10 * len(node.data) / 2
def _draw_btree(self, dx, y, y_inc, fs, ax, node=None):
if node is None:
node = self.root
# Function to display b-tree to the screen
# It works fine for trees with up to about 70 data
xs = dx[node.data[0]]
if node.is_leaf:
for itm in node.data:
ax.plot([xs, xs + 10, xs + 10, xs, xs], [y, y, y - 10, y - 10, y], linewidth=1, color='k')
ax.text(xs + 5, y - 5, str(itm), ha="center", va="center", fontsize=fs)
xs += 10
else:
for i in range(len(node.data)):
xc = dx[node.child[i].data[0]] + 5 * len(node.child[i].data)
ax.plot([xs, xs + 10, xs + 10, xs, xs], [y, y, y - 10, y - 10, y], linewidth=1, color='k')
ax.text(xs + 5, y - 5, str(node.data[i]), ha="center", va="center", fontsize=fs)
ax.plot([xs, xc], [y - 10, y - y_inc], linewidth=1, color='k')
self._draw_btree(dx, y - y_inc, y_inc, fs, ax, node.child[i])
xs += 10
xc = dx[node.child[-1].data[0]] + 5 * len(node.child[-1].data)
ax.plot([xs, xc], [y - 10, y - y_inc], linewidth=1, color='k')
self._draw_btree(dx, y - y_inc, y_inc, fs, ax, node.child[-1])
def draw(self):
# Find x-coordinates of leaves
ll = self.leaves()
dx = {}
d = 0
for l in ll:
dx[l[0]] = d
d += 10 * (len(l) + 1)
# Find x-coordinates of internal nodes
self._set_x(dx)
# plt.close('all')
fig, ax = plt.subplots()
self._draw_btree(dx, 0, 30, 12, ax)
ax.set_aspect(1.0)
ax.axis('off')
plt.show()
| [
"[email protected]"
] | |
bc7864fb55d1e465b06cece970a179dbcab5a044 | b25943e43fd6af97e9c3263454a19076622b849f | /core/migrations/0006_auto_20201210_1720.py | 1c3f65a91fcab874851dd5c4a03c6968c3b137ab | [] | no_license | jacksellers/cebarco | 26f7d3eb9074d0efa4fea77a94dbfb0e9cd129c9 | 34d37d4df154e534126df0dedb5784e1f25b1b95 | refs/heads/main | 2023-08-21T13:14:29.903816 | 2021-09-14T05:53:31 | 2021-09-14T05:53:31 | 311,674,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | # Generated by Django 3.1.4 on 2020-12-10 14:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0005_auto_20201210_1717'),
]
operations = [
migrations.AlterField(
model_name='executive',
name='rank',
field=models.IntegerField(help_text="\n Their relative position on the 'About' page (i.e. 1, 2, 3, ...)\n ", unique=True),
),
]
| [
"[email protected]"
] | |
71ac3b38241ab179de7aa4edc58a6750b7cb02a3 | 4ddc6604f0c8160c7637d036b835faf974d48556 | /nova/policies/networks.py | a4d065f47d0a291902d07878202cf7f44eb9cdf1 | [
"Apache-2.0"
] | permissive | tjjh89017/nova | a8513a806f24ca0d1c60495fd1f192b7d402b05d | 49b85bd2e9c77c6e0bd8141b38cd49efa5c06dc2 | refs/heads/master | 2021-01-21T10:16:18.970238 | 2017-05-18T10:35:32 | 2017-05-18T10:35:32 | 91,682,422 | 1 | 0 | null | 2017-05-18T10:50:38 | 2017-05-18T10:50:38 | null | UTF-8 | Python | false | false | 1,072 | py | # Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
BASE_POLICY_NAME = 'os_compute_api:os-networks'
POLICY_ROOT = 'os_compute_api:os-networks:%s'
networks_policies = [
policy.RuleDefault(
name=BASE_POLICY_NAME,
check_str=base.RULE_ADMIN_API),
policy.RuleDefault(
name=POLICY_ROOT % 'view',
check_str=base.RULE_ADMIN_OR_OWNER),
]
def list_rules():
return networks_policies
| [
"[email protected]"
] | |
26b37aef691158811a1419dac6cabceaeda69787 | 0faf46329fba9705f1ee818634c68315edfce308 | /Sudoko/My solution/solution_test.py | 7af786478a75a6b8757db2d216152f9d16c0b2f6 | [] | no_license | AdepojuJeremy/AI | 5c7f0f71827f5c1b87266fc490065332d89549dc | bd23e90a247bb2aebe91152710b50fd013ad84c5 | refs/heads/master | 2021-12-13T00:50:05.443680 | 2017-03-09T10:16:45 | 2017-03-09T10:16:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,724 | py | import solution
import unittest
class TestNakedTwins(unittest.TestCase):
before_naked_twins_1 = {'I6': '4', 'H9': '3', 'I2': '6', 'E8': '1', 'H3': '5', 'H7': '8', 'I7': '1', 'I4': '8',
'H5': '6', 'F9': '7', 'G7': '6', 'G6': '3', 'G5': '2', 'E1': '8', 'G3': '1', 'G2': '8',
'G1': '7', 'I1': '23', 'C8': '5', 'I3': '23', 'E5': '347', 'I5': '5', 'C9': '1', 'G9': '5',
'G8': '4', 'A1': '1', 'A3': '4', 'A2': '237', 'A5': '9', 'A4': '2357', 'A7': '27',
'A6': '257', 'C3': '8', 'C2': '237', 'C1': '23', 'E6': '579', 'C7': '9', 'C6': '6',
'C5': '37', 'C4': '4', 'I9': '9', 'D8': '8', 'I8': '7', 'E4': '6', 'D9': '6', 'H8': '2',
'F6': '125', 'A9': '8', 'G4': '9', 'A8': '6', 'E7': '345', 'E3': '379', 'F1': '6',
'F2': '4', 'F3': '23', 'F4': '1235', 'F5': '8', 'E2': '37', 'F7': '35', 'F8': '9',
'D2': '1', 'H1': '4', 'H6': '17', 'H2': '9', 'H4': '17', 'D3': '2379', 'B4': '27',
'B5': '1', 'B6': '8', 'B7': '27', 'E9': '2', 'B1': '9', 'B2': '5', 'B3': '6', 'D6': '279',
'D7': '34', 'D4': '237', 'D5': '347', 'B8': '3', 'B9': '4', 'D1': '5'}
possible_solutions_1 = [
{'G7': '6', 'G6': '3', 'G5': '2', 'G4': '9', 'G3': '1', 'G2': '8', 'G1': '7', 'G9': '5', 'G8': '4', 'C9': '1',
'C8': '5', 'C3': '8', 'C2': '237', 'C1': '23', 'C7': '9', 'C6': '6', 'C5': '37', 'A4': '2357', 'A9': '8',
'A8': '6', 'F1': '6', 'F2': '4', 'F3': '23', 'F4': '1235', 'F5': '8', 'F6': '125', 'F7': '35', 'F8': '9',
'F9': '7', 'B4': '27', 'B5': '1', 'B6': '8', 'B7': '27', 'E9': '2', 'B1': '9', 'B2': '5', 'B3': '6', 'C4': '4',
'B8': '3', 'B9': '4', 'I9': '9', 'I8': '7', 'I1': '23', 'I3': '23', 'I2': '6', 'I5': '5', 'I4': '8', 'I7': '1',
'I6': '4', 'A1': '1', 'A3': '4', 'A2': '237', 'A5': '9', 'E8': '1', 'A7': '27', 'A6': '257', 'E5': '347',
'E4': '6', 'E7': '345', 'E6': '579', 'E1': '8', 'E3': '79', 'E2': '37', 'H8': '2', 'H9': '3', 'H2': '9',
'H3': '5', 'H1': '4', 'H6': '17', 'H7': '8', 'H4': '17', 'H5': '6', 'D8': '8', 'D9': '6', 'D6': '279',
'D7': '34', 'D4': '237', 'D5': '347', 'D2': '1', 'D3': '79', 'D1': '5'},
{'I6': '4', 'H9': '3', 'I2': '6', 'E8': '1', 'H3': '5', 'H7': '8', 'I7': '1', 'I4': '8', 'H5': '6', 'F9': '7',
'G7': '6', 'G6': '3', 'G5': '2', 'E1': '8', 'G3': '1', 'G2': '8', 'G1': '7', 'I1': '23', 'C8': '5', 'I3': '23',
'E5': '347', 'I5': '5', 'C9': '1', 'G9': '5', 'G8': '4', 'A1': '1', 'A3': '4', 'A2': '237', 'A5': '9',
'A4': '2357', 'A7': '27', 'A6': '257', 'C3': '8', 'C2': '237', 'C1': '23', 'E6': '579', 'C7': '9', 'C6': '6',
'C5': '37', 'C4': '4', 'I9': '9', 'D8': '8', 'I8': '7', 'E4': '6', 'D9': '6', 'H8': '2', 'F6': '125',
'A9': '8', 'G4': '9', 'A8': '6', 'E7': '345', 'E3': '79', 'F1': '6', 'F2': '4', 'F3': '23', 'F4': '1235',
'F5': '8', 'E2': '3', 'F7': '35', 'F8': '9', 'D2': '1', 'H1': '4', 'H6': '17', 'H2': '9', 'H4': '17',
'D3': '79', 'B4': '27', 'B5': '1', 'B6': '8', 'B7': '27', 'E9': '2', 'B1': '9', 'B2': '5', 'B3': '6',
'D6': '279', 'D7': '34', 'D4': '237', 'D5': '347', 'B8': '3', 'B9': '4', 'D1': '5'}
]
before_naked_twins_2 = {'A1': '23', 'A2': '4', 'A3': '7', 'A4': '6', 'A5': '8', 'A6': '5', 'A7': '23', 'A8': '9',
'A9': '1', 'B1': '6', 'B2': '9', 'B3': '8', 'B4': '4', 'B5': '37', 'B6': '1', 'B7': '237',
'B8': '5', 'B9': '237', 'C1': '23', 'C2': '5', 'C3': '1', 'C4': '23', 'C5': '379',
'C6': '2379', 'C7': '8', 'C8': '6', 'C9': '4', 'D1': '8', 'D2': '17', 'D3': '9',
'D4': '1235', 'D5': '6', 'D6': '237', 'D7': '4', 'D8': '27', 'D9': '2357', 'E1': '5',
'E2': '6', 'E3': '2', 'E4': '8', 'E5': '347', 'E6': '347', 'E7': '37', 'E8': '1', 'E9': '9',
'F1': '4', 'F2': '17', 'F3': '3', 'F4': '125', 'F5': '579', 'F6': '279', 'F7': '6',
'F8': '8', 'F9': '257', 'G1': '1', 'G2': '8', 'G3': '6', 'G4': '35', 'G5': '345',
'G6': '34', 'G7': '9', 'G8': '27', 'G9': '27', 'H1': '7', 'H2': '2', 'H3': '4', 'H4': '9',
'H5': '1', 'H6': '8', 'H7': '5', 'H8': '3', 'H9': '6', 'I1': '9', 'I2': '3', 'I3': '5',
'I4': '7', 'I5': '2', 'I6': '6', 'I7': '1', 'I8': '4', 'I9': '8'}
possible_solutions_2 = [
{'A1': '23', 'A2': '4', 'A3': '7', 'A4': '6', 'A5': '8', 'A6': '5', 'A7': '23', 'A8': '9', 'A9': '1', 'B1': '6',
'B2': '9', 'B3': '8', 'B4': '4', 'B5': '37', 'B6': '1', 'B7': '237', 'B8': '5', 'B9': '237', 'C1': '23',
'C2': '5', 'C3': '1', 'C4': '23', 'C5': '79', 'C6': '79', 'C7': '8', 'C8': '6', 'C9': '4', 'D1': '8',
'D2': '17', 'D3': '9', 'D4': '1235', 'D5': '6', 'D6': '237', 'D7': '4', 'D8': '27', 'D9': '2357', 'E1': '5',
'E2': '6', 'E3': '2', 'E4': '8', 'E5': '347', 'E6': '347', 'E7': '37', 'E8': '1', 'E9': '9', 'F1': '4',
'F2': '17', 'F3': '3', 'F4': '125', 'F5': '579', 'F6': '279', 'F7': '6', 'F8': '8', 'F9': '257', 'G1': '1',
'G2': '8', 'G3': '6', 'G4': '35', 'G5': '345', 'G6': '34', 'G7': '9', 'G8': '27', 'G9': '27', 'H1': '7',
'H2': '2', 'H3': '4', 'H4': '9', 'H5': '1', 'H6': '8', 'H7': '5', 'H8': '3', 'H9': '6', 'I1': '9', 'I2': '3',
'I3': '5', 'I4': '7', 'I5': '2', 'I6': '6', 'I7': '1', 'I8': '4', 'I9': '8'},
{'A1': '23', 'A2': '4', 'A3': '7', 'A4': '6', 'A5': '8', 'A6': '5', 'A7': '23', 'A8': '9', 'A9': '1', 'B1': '6',
'B2': '9', 'B3': '8', 'B4': '4', 'B5': '3', 'B6': '1', 'B7': '237', 'B8': '5', 'B9': '237', 'C1': '23',
'C2': '5', 'C3': '1', 'C4': '23', 'C5': '79', 'C6': '79', 'C7': '8', 'C8': '6', 'C9': '4', 'D1': '8',
'D2': '17', 'D3': '9', 'D4': '1235', 'D5': '6', 'D6': '237', 'D7': '4', 'D8': '27', 'D9': '2357', 'E1': '5',
'E2': '6', 'E3': '2', 'E4': '8', 'E5': '347', 'E6': '347', 'E7': '37', 'E8': '1', 'E9': '9', 'F1': '4',
'F2': '17', 'F3': '3', 'F4': '125', 'F5': '579', 'F6': '279', 'F7': '6', 'F8': '8', 'F9': '257', 'G1': '1',
'G2': '8', 'G3': '6', 'G4': '35', 'G5': '345', 'G6': '34', 'G7': '9', 'G8': '27', 'G9': '27', 'H1': '7',
'H2': '2', 'H3': '4', 'H4': '9', 'H5': '1', 'H6': '8', 'H7': '5', 'H8': '3', 'H9': '6', 'I1': '9', 'I2': '3',
'I3': '5', 'I4': '7', 'I5': '2', 'I6': '6', 'I7': '1', 'I8': '4', 'I9': '8'}
]
def test_naked_twins(self):
# solution.display(self.before_naked_twins_1)
solution.display(self.possible_solutions_1[1])
self.assertTrue(solution.naked_twins(self.before_naked_twins_1) in self.possible_solutions_1,
"Your naked_twins function produced an unexpected board.")
def test_naked_twins2(self):
self.assertTrue(solution.naked_twins(self.before_naked_twins_2) in self.possible_solutions_2,
"Your naked_twins function produced an unexpected board.")
class TestDiagonalSudoku(unittest.TestCase):
diagonal_grid = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
solved_diag_sudoku = {'G7': '8', 'G6': '9', 'G5': '7', 'G4': '3', 'G3': '2', 'G2': '4', 'G1': '6', 'G9': '5',
'G8': '1', 'C9': '6', 'C8': '7', 'C3': '1', 'C2': '9', 'C1': '4', 'C7': '5', 'C6': '3',
'C5': '2', 'C4': '8', 'E5': '9', 'E4': '1', 'F1': '1', 'F2': '2', 'F3': '9', 'F4': '6',
'F5': '5', 'F6': '7', 'F7': '4', 'F8': '3', 'F9': '8', 'B4': '7', 'B5': '1', 'B6': '6',
'B7': '2', 'B1': '8', 'B2': '5', 'B3': '3', 'B8': '4', 'B9': '9', 'I9': '3', 'I8': '2',
'I1': '7', 'I3': '8', 'I2': '1', 'I5': '6', 'I4': '5', 'I7': '9', 'I6': '4', 'A1': '2',
'A3': '7', 'A2': '6', 'E9': '7', 'A4': '9', 'A7': '3', 'A6': '5', 'A9': '1', 'A8': '8',
'E7': '6', 'E6': '2', 'E1': '3', 'E3': '4', 'E2': '8', 'E8': '5', 'A5': '4', 'H8': '6',
'H9': '4', 'H2': '3', 'H3': '5', 'H1': '9', 'H6': '1', 'H7': '7', 'H4': '2', 'H5': '8',
'D8': '9', 'D9': '2', 'D6': '8', 'D7': '1', 'D4': '4', 'D5': '3', 'D2': '7', 'D3': '6',
'D1': '5'}
def test_solve(self):
self.assertEqual(solution.solve(self.diagonal_grid), self.solved_diag_sudoku)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
79acbaa2ac8fd0edb8c526d1179dfe06f152007e | 92714474b4390b8fa5d01a3df2e2ae610750e310 | /twill/other_packages/_mechanize_dist/_urllib2.py | 2bb68167b63e81b84553f5db4461f1698af787d5 | [
"MIT"
] | permissive | quokkaproject/twill | e7d95053de05b15114c758f09e3e56d1ad1e2f4a | 1fa51eb66c020b5d277a01063b2293c9e28f9cd0 | refs/heads/master | 2016-08-04T08:33:24.898947 | 2015-08-23T16:53:39 | 2015-08-23T16:53:39 | 35,701,946 | 1 | 1 | null | 2015-05-15T23:36:57 | 2015-05-15T23:36:57 | null | UTF-8 | Python | false | false | 1,382 | py | from urllib.error import URLError, HTTPError
# ...and from mechanize
from ._opener import OpenerDirector, \
SeekableResponseOpener, \
build_opener, install_opener, urlopen
from ._auth import \
HTTPPasswordMgr, \
HTTPPasswordMgrWithDefaultRealm, \
AbstractBasicAuthHandler, \
AbstractDigestAuthHandler, \
HTTPProxyPasswordMgr, \
ProxyHandler, \
ProxyBasicAuthHandler, \
ProxyDigestAuthHandler, \
HTTPBasicAuthHandler, \
HTTPDigestAuthHandler, \
HTTPSClientCertMgr
from ._request import \
Request
from ._http import \
RobotExclusionError
from urllib.request import BaseHandler, UnknownHandler, FTPHandler, CacheFTPHandler, FileHandler
# ...and from mechanize
from ._http import \
HTTPHandler, \
HTTPDefaultErrorHandler, \
HTTPRedirectHandler, \
HTTPEquivProcessor, \
HTTPCookieProcessor, \
HTTPRefererProcessor, \
HTTPRefreshProcessor, \
HTTPErrorProcessor, \
HTTPRobotRulesProcessor
from ._upgrade import \
HTTPRequestUpgradeProcessor, \
ResponseUpgradeProcessor
from ._debug import \
HTTPResponseDebugProcessor, \
HTTPRedirectDebugProcessor
from ._seek import \
SeekableProcessor
# crap ATM
## from _gzip import \
## HTTPGzipProcessor
import http.client as httplib
if hasattr(httplib, 'HTTPS'):
    from ._http import HTTPSHandler
del httplib
| [
"[email protected]"
] | |
f3af2ff83f2df45440a0273e53a8f1211089070c | 966444daf21c85de32822d0fcae649891de5ae18 | /core/market_data.py | 422f7e2efc01cb06d2be5b6e7fe98f9d673ce384 | [] | no_license | ianrerb/quant | 0bce5f18222caa2849b070776dce7108f2ad197e | 962aeefe2c60d59df2f5ec005f4e24bd1fb7ea55 | refs/heads/master | 2023-07-19T10:54:27.555039 | 2019-08-27T13:06:38 | 2019-08-27T13:06:38 | 194,100,056 | 1 | 0 | null | 2023-07-06T21:32:26 | 2019-06-27T13:24:54 | Python | UTF-8 | Python | false | false | 1,187 | py | import pandas as pd
MARKET_DATA_FILE = "quant_case_data/market_data.csv"
MIN_STOCKS = 100
COL_RENAME = {
"Date": "date",
"Ticker": "ticker",
"Total Return": "total_return",
"Market Cap": "market_cap",
"Daily Volume": "daily_volume",
}
class MarketData:
def __init__(self):
self._dataset = None
def load_data(self, reload=False):
        if (self._dataset is not None) and (not reload):
return
data = (
pd.read_csv(MARKET_DATA_FILE)
.rename(columns=COL_RENAME)
.set_index(["date", "ticker"])
.to_xarray()
)
data["date"] = pd.PeriodIndex(data["date"].values, freq="B")
        valid_dates = data.market_cap.count("ticker") > MIN_STOCKS
self._dataset = data.where(valid_dates).dropna("date", how="all")
@property
def total_return(self):
self.load_data()
return self._dataset.total_return.to_pandas()
@property
def market_cap(self):
self.load_data()
return self._dataset.market_cap.to_pandas()
@property
def daily_volume(self):
self.load_data()
return self._dataset.daily_volume.to_pandas()
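

# Minimal usage sketch (illustrative, not part of the original module; it
# assumes MARKET_DATA_FILE exists on disk with the columns renamed above):
if __name__ == "__main__":
    md = MarketData()
    print(md.total_return.tail())              # daily total returns, dates x tickers
    print(md.market_cap.iloc[-1].nlargest(5))  # five largest caps on the last date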
| [
"[email protected]"
] | |
ccb0ef815e13a42b110a0ee2e91d2ca924477203 | 105373a184a141e602f6840952737ff4abfd3026 | /code/src/demo.py | 61084192224fc63b2ca92db35367122a1ffc8c07 | [] | no_license | votranbaohieu/do-an-tot-nghiep | 2d8fc90d365055732916fbc6391d191027e6fcb3 | 4e4fe4ad3b456fa166cd68bfeb163e9325c5dc47 | refs/heads/master | 2022-10-31T15:31:36.561979 | 2020-06-19T15:44:57 | 2020-06-19T15:44:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,164 | py | import os
import pandas as pd
import json
import numpy as np
from pyvi import ViTokenizer
from bs4 import BeautifulSoup
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_TRAIN_PATH = os.path.join(DIR_PATH, 'data/train/data.xlsx')
DATA_TRAIN_JSON = os.path.join(DIR_PATH, 'data/train/data.json')
STOP_WORDS = os.path.join(DIR_PATH, 'stopwords.txt')
SPECIAL_CHARACTER = '0123456789%@$.,=+-!;/()*"&^:#|\n\t\''
# Read And Write File
def write_excel_to_json(pathExcel, pathJson):
df = pd.read_excel(pathExcel)
df.to_json(pathJson, orient="records")
def read_json(pathJson):
with open(pathJson, encoding="utf-8") as f:
s = json.load(f)
return s
def read_stopwords(pathStopWords):
with open(pathStopWords, 'r') as f:
stopwords = set([w.strip().replace(' ', '_') for w in f.readlines()])
return stopwords
# Preprocess
def strip_html_tags(text):
return BeautifulSoup(text, 'html.parser').get_text()
def remove_special_characters(text):
    # keep only letters, digits and whitespace
    return re.sub(r'[^a-zA-Z0-9\s]', '', text)
def segmentation(text):
return ViTokenizer.tokenize(text)
def remove_stopwords(text, stopwords=frozenset()):
    tokens = segmentation(text)
    tokens = [x.strip(SPECIAL_CHARACTER).lower() for x in tokens.split()]
    return [t for t in tokens if t and t not in stopwords]


def normalize_corpus(corpus, html_stripping=True, text_lower_case=True,
                     special_char_removal=True, stopword_removal=True,
                     stopwords=frozenset()):
    docs = [strip_html_tags(d) if html_stripping else d for d in corpus]
    docs = [d.lower() if text_lower_case else d for d in docs]
    docs = [remove_special_characters(d) if special_char_removal else d for d in docs]
    return [' '.join(remove_stopwords(d, stopwords)) for d in docs] if stopword_removal else docs
class NLP(object):
def __init__(self, text = None, stopwords = {}):
self.text = text
self.stopwords = stopwords
def strip_html_tags(self):
self.text = BeautifulSoup(self.text, 'html.parser').get_text()
return self
def segmentation(self):
self.text = ViTokenizer.tokenize(self.text)
return self
def split_words(self):
try:
            r = [x.strip(SPECIAL_CHARACTER).lower() for x in self.text.split()]
return [i for i in r if i]
except TypeError:
return []
    def remove_stopwords(self, is_lower_case=True):
        words = self.split_words() if is_lower_case else self.text.split()
        self.text = ' '.join(w for w in words if w not in self.stopwords)
        return self
def get_words_feature(self):
split_words = self.split_words()
return [word for word in split_words if word not in self.stopwords]
class DocPreprocess(object):
def __init__(self, data):
self.data = data
# def to_lower():
# for row in self.data:
def total(self):
newData = []
for i, row in enumerate(self.data):
self.data[i]['content'] = NLP(row['content']).split_words()
if self.data[i] not in newData:
newData.append(self.data[i])
return newData
def get_data_and_label(self):
pass
def main():
# write_excel_to_json(DATA_TRAIN_PATH, DATA_TRAIN_JSON)
data_train = read_json(DATA_TRAIN_JSON)
stopwords = read_stopwords(STOP_WORDS)
newData = DocPreprocess(data_train).total()
print(np.array(newData))
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
6456afdcfb72444d01ad09e4f851c86cb9b4ddef | d3cabb25e9af022fa3ca7818668a3267c16f31ed | /queroMeiaWebapp/settings.py | 1501cd1e7a333c3b286235d56186badea80dcd3e | [] | no_license | fafaschiavo/mobileQueroMeiaWebapp | 6e8df6bdb17ad82b0d1c43a8d78f71e4fd4dccb4 | 83584cf81f7a28b36fa9a699986aaf111d4b3eb5 | refs/heads/master | 2021-01-09T20:52:37.544906 | 2016-07-11T16:31:49 | 2016-07-11T16:31:49 | 58,693,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,420 | py | """
Django settings for queroMeiaWebapp project.
Generated by 'django-admin startproject' using Django 1.10.dev20160307181939.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_gaqx%)0dc8=hd4m5!_v5a4sn)egl1#k21_kqs0*mxz571!zyq'
#Paypal Information
# EMAIL_PAYPAL_ACCOUNT = '[email protected]'
# EMAIL_PAYPAL_ACCOUNT = '[email protected]'
EMAIL_PAYPAL_ACCOUNT = '[email protected]'
PRODUCT_ID_1 = 3
PRODUCT_ID_2 = 4
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# PAYPAL_TEST = True
MANDRILL_API_KEY = "PQsvG3uAlMUoboU2fQoGHg"
EMAIL_BACKEND = "djrill.mail.backends.djrill.DjrillBackend"
DEFAULT_FROM_EMAIL = '[email protected]'
MANDRILL_API_URL = "https://mandrillapp.com/api/1.0"
# Application definition
INSTALLED_APPS = [
'paypal.standard.ipn',
'djrill',
'cinema.apps.CinemaConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'queroMeiaWebapp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'queroMeiaWebapp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'quero_meia',
'USER': 'root',
'PASSWORD': 'root',
'HOST': '127.0.0.1', # Or an IP Address that your DB is hosted on
'PORT': '3306',
}
}
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
# 'NAME': 'quero_meia', # Or path to database file if using sqlite3.
# 'USER': 'fafaschiavo', # Not used with sqlite3.
# 'PASSWORD': '310308Fah!', # Not used with sqlite3.
# 'HOST': 'mysql.queromeia.com', # Set to empty string for localhost. Not used with sqlite3.
# 'PORT': '', # Set to empty string for default. Not used with sqlite3.
# }
# }
# Password validation
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
55dc50f6b021443e36db8ccc7f89987fc5070e30 | 5cb522222e9a4b63d6266254aad9410465f7f701 | /data_generator.py | b0863e82bbc2066ec7caaa175b96a264cb1147b6 | [] | no_license | luuuucy/LIRD | 9a923c7c1e7a034b86fa5233ff1b60b06a16746e | 5946d51b248b2eca814ae9c72758d3fd6f15df4f | refs/heads/main | 2023-04-25T10:30:10.563445 | 2021-04-30T01:43:37 | 2021-04-30T01:43:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,759 | py | import pandas as pd
import random
class DataGenerator(object):
def __init__(self, datapath, itempath):
'''
Load data from the DB MovieLens
List the users and the items
List all the users historic
'''
self.data = self.load_datas(datapath, itempath)
self.users = self.data['userId'].unique() # list of all users
self.items = self.data['itemId'].unique() # list of all items
self.histo = self.gen_histo()
self.train = []
self.test = []
def load_datas(self, datapath, itempath):
'''
Load the data and merge the name of each movie.
A row corresponds to a rate given by a user to a movie.
Parameters
----------
datapath : string
path to the data 100k MovieLens
contains usersId;itemId;rating
itempath : string
path to the data 100k MovieLens
contains itemId;itemName
Returns
-------
result : DataFrame
Contains all the ratings
'''
data = pd.read_csv(datapath, sep='\t',
names=['userId', 'itemId', 'rating', 'timestamp'])
movie_titles = pd.read_csv(itempath, sep='|', names=['itemId', 'itemName'],
usecols=range(2), encoding='latin-1')
return data.merge(movie_titles, on='itemId', how='left')
def gen_histo(self):
'''
Group all rates given by users and store them from older to most recent.
Returns
-------
result : List(DataFrame)
List of the historic for each user
'''
historic_users = []
for i, u in enumerate(self.users):
temp = self.data[self.data['userId'] == u]
temp = temp.sort_values('timestamp').reset_index()
temp.drop('index', axis=1, inplace=True)
historic_users.append(temp)
return historic_users
def sample_histo(self, user_histo, action_ratio=0.8, max_samp_by_user=5, max_state=100, max_action=50, nb_states=[],
nb_actions=[]):
'''
For a given historic, make one or multiple sampling.
        If no optional argument is given for nb_states and nb_actions, the sampling
        is random and each sample can have a different size for actions and states.
        To normalize the sampling, give the lists of numbers of states and actions
to be sampled.
Parameters
----------
user_histo : DataFrame
historic of user
delimiter : string, optional
delimiter for the csv
        action_ratio : float, optional
            ratio of the historic from which 'state' movies are selected
        max_samp_by_user: int, optional
            Max number of samples to make per user
        max_state : int, optional
            Max number of movies to take for the 'state' column
        max_action : int, optional
            Max number of movies to take for the 'action' column
nb_states : array(int), optional
Numbers of movies to be taken for each sample made on user's historic
nb_actions : array(int), optional
Numbers of rating to be taken for each sample made on user's historic
Returns
-------
states : List(String)
All the states sampled, format of a sample: itemId&rating
actions : List(String)
All the actions sampled, format of a sample: itemId&rating
Notes
-----
States must be before(timestamp<) the actions.
        If given, the size of nb_states is the number of samples per user;
        the sizes of nb_states and nb_actions must be equal.
'''
n = len(user_histo)
sep = int(action_ratio * n)
nb_sample = random.randint(1, max_samp_by_user)
if not nb_states:
nb_states = [min(random.randint(1, sep), max_state) for i in range(nb_sample)]
if not nb_actions:
nb_actions = [min(random.randint(1, n - sep), max_action) for i in range(nb_sample)]
assert len(nb_states) == len(nb_actions), 'Given array must have the same size'
states = []
actions = []
# SELECT SAMPLES IN HISTO
for i in range(len(nb_states)):
sample_states = user_histo.iloc[0:sep].sample(nb_states[i])
sample_actions = user_histo.iloc[-(n - sep):].sample(nb_actions[i])
sample_state = []
sample_action = []
for j in range(nb_states[i]):
row = sample_states.iloc[j]
# FORMAT STATE
state = str(row.loc['itemId']) + '&' + str(row.loc['rating'])
sample_state.append(state)
for j in range(nb_actions[i]):
row = sample_actions.iloc[j]
# FORMAT ACTION
action = str(row.loc['itemId']) + '&' + str(row.loc['rating'])
sample_action.append(action)
states.append(sample_state)
actions.append(sample_action)
return states, actions
def gen_train_test(self, test_ratio, seed=None):
'''
Shuffle the historic of users and separate it in a train and a test set.
Store the ids for each set.
An user can't be in both set.
Parameters
----------
test_ratio : float
            Fraction of shuffled users assigned to the train set (the rest form the test set)
seed : float
Seed on the shuffle
'''
n = len(self.histo)
if seed is not None:
random.Random(seed).shuffle(self.histo)
else:
random.shuffle(self.histo)
self.train = self.histo[:int((test_ratio * n))]
self.test = self.histo[int((test_ratio * n)):]
self.user_train = [h.iloc[0, 0] for h in self.train]
self.user_test = [h.iloc[0, 0] for h in self.test]
def write_csv(self, filename, histo_to_write, delimiter=';', action_ratio=0.8, max_samp_by_user=5, max_state=100,
max_action=50, nb_states=[], nb_actions=[]):
'''
From a given historic, create a csv file with the format:
columns : state;action_reward;n_state
rows : itemid&rating1 | itemid&rating2 | ... ; itemid&rating3 | ... | itemid&rating4; itemid&rating1 | itemid&rating2 | itemid&rating3 | ... | item&rating4
at filename location.
Parameters
----------
filename : string
path to the file to be produced
histo_to_write : List(DataFrame)
List of the historic for each user
delimiter : string, optional
delimiter for the csv
        action_ratio : float, optional
            ratio of the historic from which 'state' movies are selected
        max_samp_by_user: int, optional
            Max number of samples to make per user
        max_state : int, optional
            Max number of movies to take for the 'state' column
        max_action : int, optional
            Max number of movies to take for the 'action' column
nb_states : array(int), optional
Numbers of movies to be taken for each sample made on user's historic
nb_actions : array(int), optional
Numbers of rating to be taken for each sample made on user's historic
Notes
-----
        If given, the size of nb_states is the number of samples per user;
        the sizes of nb_states and nb_actions must be equal.
'''
with open(filename, mode='w') as file:
f_writer = csv.writer(file, delimiter=delimiter)
f_writer.writerow(['state', 'action_reward', 'n_state'])
for user_histo in histo_to_write:
states, actions = self.sample_histo(user_histo, action_ratio, max_samp_by_user, max_state, max_action,
nb_states, nb_actions)
for i in range(len(states)):
# FORMAT STATE
state_str = '|'.join(states[i])
# FORMAT ACTION
action_str = '|'.join(actions[i])
# FORMAT N_STATE
n_state_str = state_str + '|' + action_str
f_writer.writerow([state_str, action_str, n_state_str])
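

# Minimal usage sketch (illustrative; assumes the MovieLens 100k files
# "u.data" and "u.item" are in the working directory):
if __name__ == "__main__":
    dg = DataGenerator("u.data", "u.item")
    dg.gen_train_test(test_ratio=0.8, seed=42)  # 80% of shuffled users -> train
    dg.write_csv("train.csv", dg.train, nb_states=[5], nb_actions=[3])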
| [
"norio-kosaka"
] | norio-kosaka |
6a03c1e5902c68fd96c4f242ec0ea67c24a7a41c | 41aaf6e897b2f84ba2036b7f372e6213572d0917 | /snake.py | 2f628f8e0a060f60b6c5101d7f9addb8bb9ea130 | [] | no_license | Saadcode/snake | b6957ab6717c5397a1619e1279da8697decfa0cc | bae2880963510f5db884476d36d849f300bc1a96 | refs/heads/master | 2022-04-21T02:44:30.612402 | 2020-04-13T15:36:52 | 2020-04-13T15:36:52 | 255,370,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,944 | py | import pygame
import sys
import random
pygame.init()
FPS = 15
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
WINDOW_WIDTH = 600
WINDOW_HEIGHT = 600
VELOCITY = 10
SNAKE_WIDTH = 15
APPLE_SIZE = 20
TOP_WIDTH = 40
small_font = pygame.font.SysFont('forte', 25)
medium_font = pygame.font.SysFont('showcard gothic', 50, True)
large_font = pygame.font.SysFont('chiller', 60, True, True)
clock = pygame.time.Clock()
canvas = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
pygame.display.set_caption('Snake Game')
snake_img = pygame.image.load('snake2.png')
apple_img = pygame.image.load('apple2.png')
tail_img = pygame.image.load('tail1.png')
apple_img_rect = apple_img.get_rect()
def start_game():
canvas.fill(BLACK)
start_font1 = large_font.render("Welcome to snake game", True, GREEN)
start_font2 = medium_font.render("Play Game", True, RED, YELLOW)
start_font4 = medium_font.render("Quit", True, RED, YELLOW)
start_font2_rect = start_font2.get_rect()
start_font4_rect = start_font4.get_rect()
start_font2_rect.center = (200, 100)
start_font4_rect.center = (230, 160)
canvas.blit(start_font1, (10, 20))
canvas.blit(start_font2, start_font2_rect)
canvas.blit(start_font4, start_font4_rect)
pygame.display.update()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.MOUSEBUTTONDOWN:
x, y = event.pos
if x > start_font2_rect.left and x < start_font2_rect.right:
if y > start_font2_rect.top and y < start_font2_rect.bottom:
gameloop()
if x > start_font4_rect.left and x < start_font4_rect.right:
if y > start_font4_rect.top and y < start_font4_rect.bottom:
pygame.quit()
sys.exit()
pygame.display.update()
def gameover():
#canvas.fill(BLACK)
font_gameover1 = large_font.render('GAME OVER', True, GREEN)
font_gameover2 = medium_font.render("Play Again", True, RED, YELLOW)
font_gameover3 = medium_font.render("Quit", True, RED, YELLOW)
font_gameover1_rect = font_gameover1.get_rect()
font_gameover2_rect = font_gameover2.get_rect()
font_gameover3_rect = font_gameover3.get_rect()
font_gameover1_rect.center = (WINDOW_WIDTH/2, WINDOW_HEIGHT/2 - 100)
font_gameover2_rect.center = (WINDOW_WIDTH / 2 + 150, WINDOW_HEIGHT / 2 + 20)
font_gameover3_rect.center = (WINDOW_WIDTH / 2 + 150, WINDOW_HEIGHT / 2 + 70)
canvas.blit(font_gameover1, font_gameover1_rect)
canvas.blit(font_gameover2, font_gameover2_rect)
canvas.blit(font_gameover3, font_gameover3_rect)
pygame.display.update()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.MOUSEBUTTONDOWN:
x, y = event.pos
if x > font_gameover2_rect.left and x < font_gameover2_rect.right:
if y > font_gameover2_rect.top and y < font_gameover2_rect.bottom:
gameloop()
if x > font_gameover3_rect.left and x < font_gameover3_rect.right:
if y > font_gameover3_rect.top and y < font_gameover3_rect.bottom:
pygame.quit()
sys.exit()
pygame.display.update()
def snake(snakelist, direction):
if direction == 'right':
head = pygame.transform.rotate(snake_img, 270)
tail = pygame.transform.rotate(tail_img, 270)
if direction == 'left':
head = pygame.transform.rotate(snake_img, 90)
tail = pygame.transform.rotate(tail_img, 90)
if direction == 'up':
head = pygame.transform.rotate(snake_img, 0)
tail = pygame.transform.rotate(tail_img, 0)
if direction == 'down':
head = pygame.transform.rotate(snake_img, 180)
tail = pygame.transform.rotate(tail_img, 180)
canvas.blit(head, snakelist[-1])
canvas.blit(tail, snakelist[0])
for XnY in snakelist[1:-1]:
pygame.draw.rect(canvas, BLUE, (XnY[0], XnY[1], SNAKE_WIDTH, SNAKE_WIDTH))
def game_paused():
# canvas.fill(BLACK)
paused_font1 = large_font.render("Game Paused", True, RED)
paused_font_rect1 = paused_font1.get_rect()
paused_font_rect1.center = (WINDOW_WIDTH/2, WINDOW_HEIGHT/2)
canvas.blit(paused_font1, paused_font_rect1)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_q:
pygame.quit()
sys.exit()
if event.type == pygame.MOUSEBUTTONDOWN:
pause_xy = event.pos
if pause_xy[0] > (WINDOW_WIDTH - 50) and pause_xy[0] < WINDOW_WIDTH:
if pause_xy[1] > 0 and pause_xy[1] < 50:
return
pygame.display.update()
def gameloop():
while True:
LEAD_X = 0
LEAD_Y = 100
direction = 'right'
score = small_font.render("Score:0", True, YELLOW)
APPLE_X = random.randrange(0, WINDOW_WIDTH - 10, 10)
APPLE_Y = random.randrange(TOP_WIDTH, WINDOW_HEIGHT - 10, 10)
snakelist = []
snakelength = 3
pause_font = medium_font.render('II', True, RED)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
if direction == 'right':
pass
else:
direction = 'left'
if event.key == pygame.K_RIGHT:
if direction == 'left':
pass
else:
direction = 'right'
if event.key == pygame.K_UP:
if direction == 'down':
pass
else:
direction = 'up'
if event.key == pygame.K_DOWN:
if direction == 'up':
pass
else:
direction = 'down'
if event.type == pygame.MOUSEBUTTONDOWN:
pause_xy = event.pos
if pause_xy[0] > (WINDOW_WIDTH - 50) and pause_xy[0] < WINDOW_WIDTH:
if pause_xy[1] > 0 and pause_xy[1] < 50:
game_paused()
if direction == 'up':
LEAD_Y -= VELOCITY
if LEAD_Y < TOP_WIDTH:
gameover()
if direction == 'down':
LEAD_Y += VELOCITY
if LEAD_Y > WINDOW_HEIGHT - SNAKE_WIDTH:
gameover()
if direction == 'right':
LEAD_X += VELOCITY
if LEAD_X > WINDOW_WIDTH - SNAKE_WIDTH:
gameover()
if direction == 'left':
LEAD_X -= VELOCITY
if LEAD_X < 0:
gameover()
snakehead = []
snakehead.append(LEAD_X)
snakehead.append(LEAD_Y)
snakelist.append(snakehead)
snake_head_rect = pygame.Rect(LEAD_X, LEAD_Y, SNAKE_WIDTH, SNAKE_WIDTH)
apple_rect = pygame.Rect(APPLE_X, APPLE_Y, APPLE_SIZE, APPLE_SIZE)
if len(snakelist) > snakelength:
del snakelist[0]
for point in snakelist[:-1]:
if point == snakehead:
gameover()
canvas.fill(BLACK)
snake(snakelist, direction)
if snake_head_rect.colliderect(apple_rect):
APPLE_X = random.randrange(0, WINDOW_WIDTH - 10, 10)
APPLE_Y = random.randrange(TOP_WIDTH, WINDOW_HEIGHT - 10, 10)
snakelength += 1
score = small_font.render("Score:" + str(snakelength - 3), True, YELLOW)
canvas.blit(score, (20, 10))
pygame.draw.line(canvas, GREEN, (0, TOP_WIDTH), (WINDOW_WIDTH, TOP_WIDTH))
pygame.draw.line(canvas, YELLOW, (WINDOW_WIDTH - 60, 0), (WINDOW_WIDTH - 60, TOP_WIDTH))
pygame.draw.rect(canvas, YELLOW, (WINDOW_WIDTH - 60, 0, 60, TOP_WIDTH))
canvas.blit(apple_img, (APPLE_X, APPLE_Y))
pygame.display.update()
clock.tick(FPS)
start_game()
| [
"[email protected]"
] | |
0b95068832384dd3ad74756b653d201da6d211bf | fd9c17e51ea6c4a3bd57ca1124eed8b9cb869a2e | /myshop1/payment/apps.py | c3d50177c0d56fc728462b44b44d8b6459616485 | [] | no_license | ankurpython/e-commerce_application- | 3eb05c231fa989976d3ffe1cca931775258f211f | 8efebb73dd394d729e6826aedd36662cb45d114c | refs/heads/master | 2020-07-13T05:23:34.667148 | 2019-08-28T18:54:37 | 2019-08-28T18:54:37 | 205,002,634 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | from django.apps import AppConfig
class PaymentConfig(AppConfig):
name = 'payment'
verbose_name = 'Payment'
def ready(self):
# import signal handlers
import payment.signals
| [
"[email protected]"
] | |
30c836cb77e3af15c95d574f4dffbd05f972c98a | fee21a0de0a7e04d4cea385b9403fa9ba3109fc7 | /耿梦宇-2220172376.py | d85cb611dd895cd0b0c817262627f4f5fdb50750 | [
"MIT"
] | permissive | veritastry/trainee | 2e9123fe0dfb87e4dacf8de3eb9c53d5ff68281b | eb9f4be00e80fddd0ab3d3e6ea9a20c55f5bcab8 | refs/heads/master | 2023-02-17T20:44:39.660480 | 2021-01-18T14:29:33 | 2021-01-18T14:29:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,129 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 10 21:02:04 2019
@author: Administrator
"""
# 1. Load the data
import pandas as pd
data=pd.read_csv('./4s.csv',index_col=u'纳税人编号')
# 2. Exploratory data analysis
import matplotlib.pyplot as plt  # plotting
import matplotlib as mpl
fig,axes=plt.subplots(1,2)  # create the figure
fig.set_size_inches(20,6)  # set the figure size
ax0,ax1=axes.flat  # flat is an iterator over the axes array
mpl.rcParams['font.sans-serif']=[u'simHei']
mpl.rcParams['axes.unicode_minus']=False
data[u'销售类型'].value_counts().plot(kind='barh',ax=ax0,title=u'Sales type distribution')
data[u'销售模式'].value_counts().plot(kind='barh',ax=ax1,title=u'Sales mode distribution')
data.describe().T  # summary statistics of the variables
plt.show()  # show the plots
# 3. Data preprocessing
data[u'输出']=pd.Categorical(data[u'输出']).codes
data[u'销售类型']=pd.Categorical(data[u'销售类型']).codes
data[u'销售模式']=pd.Categorical(data[u'销售模式']).codes
# Train/test split
from sklearn.model_selection import train_test_split
data=data.as_matrix()
train_x,test_x,train_y,test_y=train_test_split(data[:,:14],data[:,14],test_size=0.2,random_state=1)
# Build the LM neural network model
from keras.models import Sequential  # the sequential model constructor
from keras.layers.core import Dense,Activation
net_file='net.model'
net=Sequential()  # build the network
net.add(Dense(input_dim=14,output_dim=10))
net.add(Activation('relu'))
net.add(Dense(input_dim=10,output_dim=1))
net.add(Activation('sigmoid'))
net.compile(loss='binary_crossentropy',optimizer='adam')
net.fit(train_x,train_y,nb_epoch=1000,batch_size=10)  # train with batches of 10 samples
net.save_weights(net_file)  # save the model
predict_result=net.predict_classes(train_x).reshape(len(train_x))  # predicted labels
from cm_plot import cm_plot
cm_plot(train_y,predict_result).show()  # show the confusion matrix
# Build the decision tree model
from sklearn.tree import DecisionTreeClassifier
from sklearn.externals import joblib
tree_file = "tree.pkl"
tree = DecisionTreeClassifier(criterion = "entropy",max_depth =3)
tree.fit(train_x,train_y)
joblib.dump(tree,tree_file)
cm_plot(train_y,tree.predict(train_x)).show()
# Model evaluation
# Plot the ROC curve of the LM neural network model
from sklearn.metrics import roc_curve  # ROC curve function
predict_result = net.predict(test_x).reshape(len(test_x))  # predictions
fpr, tpr, thresholds = roc_curve(test_y, predict_result, pos_label=1)
plt.plot(fpr, tpr, linewidth=2, label='ROC of LM')  # draw the ROC curve
plt.xlabel('False Positive Rate')  # axis labels
plt.ylabel('True Positive Rate')
plt.xlim(0, 1.05)  # set the axis ranges
plt.ylim(0, 1.05)
plt.legend(loc=4)  # set the legend position
plt.show()  # display the plot
# Plot the ROC curve of the decision tree model
fpr, tpr, thresholds = roc_curve(test_y, tree.predict_proba(test_x)[:,1], pos_label=1)
plt.plot(fpr, tpr, linewidth=2, label='ROC of CHAR')  # draw the ROC curve
plt.xlabel('False Positive Rate')  # axis labels
plt.ylabel('True Positive Rate')
plt.xlim(0, 1.05)  # set the axis ranges
plt.ylim(0, 1.05)
plt.legend(loc=4)  # set the legend position
plt.show()  # display the plot
| [
"[email protected]"
] | |
cf1c95226b738e88e5ece8b394896f8d6b81bf09 | d806dd4a6791382813d2136283a602207fb4b43c | /sirius/blueprints/api/remote_service/tula/passive/hospitalization/xform.py | e1eb90b2defe6e898e8d65d353d0acc7a8ea2d35 | [] | no_license | MarsStirner/sirius | 5bbf2a03dafb7248db481e13aff63ff989fabbc2 | 8839460726cca080ca8549bacd3a498e519c8f96 | refs/heads/master | 2021-03-24T12:09:14.673193 | 2017-06-06T16:28:53 | 2017-06-06T16:28:53 | 96,042,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | #! coding:utf-8
"""
@author: BARS Group
@date: 13.10.2016
"""
from sirius.lib.xform import XForm
from sirius.blueprints.api.remote_service.tula.entities import TulaEntityCode
from sirius.blueprints.api.remote_service.tula.passive.hospitalization.schemas import \
HospitalizationSchema
from sirius.models.system import SystemCode
class HospitalizationTulaXForm(HospitalizationSchema, XForm):
remote_system_code = SystemCode.TULA
entity_code = TulaEntityCode.MEASURE_HOSPITALIZATION
| [
"[email protected]"
] | |
1258d1606b051cacf5126e91e913a29adab58b12 | 1dcc10454332be45f7165784d59b2d152d72a1c3 | /blog/migrations/0003_auto_20210119_2239.py | 662dc81979570efc1d091f9ab950fcf3dd9d6613 | [] | no_license | thabothibos/simple-django-blog-app | 72098c50baf68d58da4ce8cb1a80db6ccbf76dc4 | bc395e401251c19064f1d0e28c12f177efc700bb | refs/heads/main | 2023-03-05T05:29:04.054133 | 2021-02-09T16:46:36 | 2021-02-09T16:46:36 | 337,537,931 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | # Generated by Django 3.1.5 on 2021-01-19 22:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_post_post_date'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
],
),
migrations.AddField(
model_name='post',
name='category',
field=models.CharField(default='coding', max_length=255),
),
]
| [
"[email protected]"
] | |
cb9ae56a87e36096a294f2e4055d0886d4c602d5 | e52ca5e3806755857f4c4412f2d47cfadfd10e46 | /Star's Python Workspace/Web App/Distributed/CodEX/search/views.py | a263d74ad7830d60319e36fb382d66646695baab | [] | no_license | clmorgan111/Final-Year-Project | 0b96593d605bc1e3fd69e02e006cae271eed0f6b | dfdfb5755ef1da29c816c412e3f0d8a7a49f8b5d | refs/heads/master | 2020-04-15T13:03:07.732902 | 2018-07-20T12:06:25 | 2018-07-20T12:06:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,742 | py | # coding:utf-8
# @author: Star
# @time: 10-03-2018
import json
import hashlib
import redis
import time
from CodEX.config import configs
from search.supportings.network import Client
from search.supportings.communicator import CommunicationServer
from search.supportings.network import Server
import search.supportings.FCIConverter as fci
from django.views.decorators.csrf import csrf_exempt
from search.supportings.java_ast.java_AST import JavaAST
from django.shortcuts import render
from django.http import HttpResponse
from search.supportings.LSI.LSI_TFIDF import LSI_TFIDF
from search.supportings.LSI.LSI_NLP import LSI_TFIDF as NLP_LSI_TFIDF
import CodEX.config as config
from search.supportings.FrontEndInterface import FrontEndInterface
from search.supportings.AST.ASTSearching import ASTSearching
def index(request):
return render(request, 'index.html')
def task(message, shared):
print(message)
def search(request):
q = request.GET['q']
p = int(request.GET['p'])
timestamp = time.time()
client = Client("as", "137.43.92.9", 9609,
{'operate_type': 1, 'query': q, 'page': p, 'timestamp': str(timestamp)})
client.send_message()
server = CommunicationServer()
message = server.receive_message(socket_name=str(timestamp))
result = message['result']
# tfidf = LSI_TFIDF()
# result = tfidf.getResult(query=q, page=p)
pages = []
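    # result is (total number of hits, [(filename, matching line indices), ...])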
f = result[1]
total_p = (result[0] / configs['others']['page_num']) + 1
t_p = int(total_p)
p_p = max(p - 5, 1)
n_p = min(p + 5, total_p)
while total_p > 0:
pages.append(0)
total_p -= 1
files = []
for f_f in f:
f_name = f_f[0]
temp = fci.to_fciObject(config.configs['paths']['FCI_path'] + "/lsi/" + f_name)
m_l = ''
for t_f_f in f_f[1]:
m_l += str(t_f_f + 1)
m_l += ','
m_l = m_l[0:len(m_l) - 1]
fei = FrontEndInterface(temp, m_l)
files.append(fei)
return render(request, 'search-result.html',
{'results': files, 'q': q, 'p': p, 'pages': pages, 'p_p': p_p, 'n_p': n_p, 'pre': p - 1,
'next': p + 1, 't_p': t_p})
def init(request):
return HttpResponse("init successfully")
def plagiarize(request):
return render(request, 'snippet.html', {})
def nlsindex(request):
return render(request, 'nls.html', {})
def nls_result(request):
q = request.GET['q']
p = int(request.GET['p'])
# tfidf = NLP_LSI_TFIDF()
# result = tfidf.getResult(query=q, page=p)
timestamp = time.time()
client = Client("as", "137.43.92.9", 9609,
{'operate_type': 2, 'query': q, 'page': p, 'timestamp': str(timestamp)})
client.send_message()
server = CommunicationServer()
message = server.receive_message(socket_name=str(timestamp))
result = message['result']
pages = []
f = result[1]
total_p = (result[0] / 10) + 1
t_p = int(total_p)
p_p = max(p - 5, 1)
n_p = min(p + 5, total_p)
while total_p > 0:
pages.append(0)
total_p -= 1
files = []
for f_f in f:
f_name = f_f[0]
temp = fci.to_fciObject(config.configs['paths']['FCI_path'] + "/so/" + f_name)
m_l = ''
for t_f_f in f_f[1]:
m_l += str(t_f_f + 1)
m_l += ','
m_l = m_l[0:len(m_l) - 1]
fei = FrontEndInterface(temp, m_l)
files.append(fei)
return render(request, 'nlp-result.html',
{'results': files, 'q': q, 'p': p, 'pages': pages, 'p_p': p_p, 'n_p': n_p, 'pre': p - 1,
'next': p + 1, 't_p': t_p})
@csrf_exempt
def plagiarizeResult(request):
snippet = request.POST['snippet']
page = int(request.POST['p'])
operate_type = request.POST['l']
timestamp = time.time()
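    # Cache the raw snippet in Redis under an md5(timestamp + snippet) key so
    # that snippet_detail() can recover the original query text later.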
m = hashlib.md5()
m.update((str(timestamp)+snippet).encode("utf8"))
ts = m.hexdigest()
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
r.set(ts, snippet, ex=3000)
operate_type = int(operate_type)
# timestamp = time.time()
# client = Client("yeats.ucd.ie", "10.141.131.14", 9609,
# {'operate_type': operate_type, 'query': snippet, 'page': page, 'timestamp': timestamp})
# client.send_message()
# server = CommunicationServer()
# message = server.receive_message(socket_name=str(timestamp))
# result = message['result']
ast = None
language = ''
if operate_type == 3:
# ast = ASTSearching()
ast = 3
language = 'python'
else:
# ast = JavaAST()
ast = 4
language = 'java'
timestamp = time.time()
client = Client("as", "137.43.92.9", 9609,
{'operate_type': ast, 'query': snippet, 'page': page, 'timestamp': str(timestamp)})
client.send_message()
server = CommunicationServer()
message = server.receive_message(socket_name=str(timestamp))
result = message['result']
# result = ast.getResults(snippet, page)
if result == 0:
return render(request, 'snippet-result.html',
{'snippet': snippet, })
else:
result = result.to_dict()
is_global = False
plagiarize_list = []
document_list = []
component_document = []
global_similarity = 0
if result != None:
total_num = result['numOfResults']
total_page = (total_num / config.configs['others']['page_num']) + 1
matching_blocks = result['matchingBlocks']
global_similarity = result['globalSimilarity']
matching_lines = result['matchingLines']
blockWeights = result['blockWeights']
if global_similarity != None and global_similarity > 0:
is_global = True
cd = result['componentDocuments']
component_document = []
for c in cd:
qml = str(matching_blocks[c][0]) + '-' + str(matching_blocks[c][1])
ml=''
for mls in matching_lines[c]:
ml += str(mls[2]) + '-' + str(mls[3]) + ','
print(qml)
fobj = fci.to_fciObject(config.configs['paths']['FCI_path'] + "/" + language + "/" + c)
fei = FrontEndInterface(fobj, ml)
fei.set_query_match_lines(qml)
print(fei.get_query_match_lines(),'==========')
component_document.append(fei)
for t in result['plagiarismList']:
ml = ''
qml = ''
for mls in matching_lines[t]:
qml += str(mls[0]) + '-' + str(mls[1]) + ','
ml += str(mls[2]) + '-' + str(mls[3]) + ','
fobj = fci.to_fciObject(config.configs['paths']['FCI_path'] + "/" + language + "/" + t)
fei = FrontEndInterface(fobj, ml)
fei.set_query_match_lines(qml)
plagiarize_list.append(fei)
for t in result['documentList']:
ml = ''
qml = ''
for mls in matching_lines[t]:
qml += str(mls[0]) + '-' + str(mls[1]) + ','
ml += str(mls[2]) + '-' + str(mls[3]) + ','
fobj = fci.to_fciObject(config.configs['paths']['FCI_path'] + "/" + language + "/" + t)
fei = FrontEndInterface(fobj, ml)
fei.set_query_match_lines(qml)
document_list.append(fei)
if global_similarity != None:
global_similarity *= 100
global_similarity = '%.2f' % global_similarity
return render(request, 'snippet-result.html',
{'snippet': snippet, "is_global": is_global, 'component_documents': component_document,
"global_similarity": global_similarity, "plagiarize_list": plagiarize_list,
"document_list": document_list, "l": operate_type, 'ts': ts})
def snippet_detail(request):
id = request.GET['id']
ml = request.GET['ml']
qml = request.GET['qml']
timestamp = request.GET['ts']
l = int(request.GET['l'])
r = redis.Redis(host='localhost', port=6379, decode_responses=True)
q = r.get(timestamp)
language = ''
print(l)
if l == 3:
language = 'python'
else:
language = 'java'
m_l = ml
fci_obj = fci.to_fciObject(config.configs['paths']['FCI_path'] + "/" + language + "/" + id + '.json')
return render(request, 'snippet-detail.html',
{'detail': fci_obj, 'match_lines': m_l, 'query_match_lines': qml, 'query': q})
def detail(request):
id = request.GET['id']
ml = request.GET['ml']
m_l = ml
fci_obj = fci.to_fciObject(config.configs['paths']['FCI_path'] + "/lsi/" + id + '.json')
return render(request, 'detail.html', {'detail': fci_obj, 'match_lines': m_l})
| [
"[email protected]"
] | |
6c49714668856f4f031104e448854f10a72e4811 | 62d4c138577bbb862bb36ed2e0b29aa50715a673 | /problem7.py | c559f4cf8a7b28c66b8c40a7ccdbc3a5d2dfe80d | [] | no_license | paulkarayan/projecteuler | 06ff1d989b88d4bb4e0e2504f4cf68cf2911df3b | 5ce7e2ee9de399c701bddfdf6b1703a2fd4eeacc | refs/heads/master | 2021-01-22T00:51:49.018829 | 2018-10-19T14:59:39 | 2018-10-19T14:59:39 | 23,476,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,611 | py | #https://projecteuler.net/problem=7
import math
def findprimes(limit, sieve, plist, primelen): # sieve of eratosthenes w/ tricks
#print(limit, sieve, plist, "inputs")
if not plist:
sieve = [1] * int(math.sqrt(limit))
sieve[0] = sieve[1] = 0
start = 0
else:
start = len(sieve)
sieve = [1] * int(math.sqrt(limit) + len(sieve))
sieve[0] = sieve[1] = 0
p = []
# find primes less than limit
# create an array of all the values between 0 and limit
for pos in range(0, int(math.sqrt(limit))):
#print(pos)
if sieve[pos] == 1:
#it's prime so add it to list
p.append(pos)
# print(pos, "<--found prime")
x = 1
if len(p) >= primelen:
return(p, limit, sieve)
#set all the multiples of prime to 0
while x*pos < int(math.sqrt(limit)):
try:
sieve[(x*pos)] = 0
x += 1
#print(limit, x, pos, x*pos, "<---success")
except:
#print(limit, x, pos, x*pos, "<---fail")
break
#print("findprimes:", p)
return(p, limit, sieve)
primes = []
sieve = []
counter = 10
while len(primes) < 10001:
counter *= 10
primes, limit, sieve = findprimes(counter, sieve, primes, 10001)
print(counter, "<=resized", primes, len(primes))
print("\n\n")
print(primes, limit, len(primes), "<--outer loop")
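
# primes[10000] is the 10001st prime (zero-based index), i.e. the answer.
print("10001st prime:", primes[10000])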
| [
"[email protected]"
] | |
47480fc33fee1b1c7fe3c7862832d0468e611ad5 | 13c0f48030415ebd7b1a01ed2cf4003df3cc5293 | /colored_logs/models/color_config.py | 0dd8c6ee4524f47e4ba2ff3ef612a5c26b165ab5 | [
"MIT"
] | permissive | ravnicas/colored_logs | 7d17b0981f3fad49b01e6a6b92c4a2ca2d4eacb5 | 98182f8a2cc44d702300fe0a5388b42064f8d98d | refs/heads/master | 2022-07-23T13:19:14.059209 | 2020-05-04T18:52:57 | 2020-05-04T18:52:57 | 262,340,974 | 0 | 0 | null | 2020-05-08T14:11:30 | 2020-05-08T14:11:29 | null | UTF-8 | Python | false | false | 936 | py | from .color_pair import ColorPair
from .color import Color
class ColorConfig:
def __init__(
self,
        info: ColorPair = ColorPair(foreground=Color.fromHex('#B4AEA8')),
        success: ColorPair = ColorPair(foreground=Color.fromHex('#3EA966')),
        fail: ColorPair = ColorPair(foreground=Color.fromHex('#C8553D')),
        warning: ColorPair = ColorPair(foreground=Color.fromHex('#F28F3B')),
        error: ColorPair = ColorPair(foreground=Color.fromHex('#A22B24')),
        critical: ColorPair = ColorPair(background=Color.fromHex('#982720'), foreground=Color.fromHex('#F3F3F3')),
        process: ColorPair = ColorPair(foreground=Color.fromHex('#2BC4E9')),
        dim: ColorPair = ColorPair(foreground=Color.fromHex('#918B86'))
):
self.info = info
self.success = success
self.fail = fail
self.warning = warning
self.error = error
self.critical = critical
self.process = process
self.dim = dim | [
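
# Minimal usage sketch (illustrative, not part of the original module):
# override only the levels you care about; the rest keep their defaults.
#
#   config = ColorConfig(
#       error=ColorPair(foreground=Color.fromHex('#FF5555')),
#   )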
"[email protected]"
] | |
400aaefe3b7bf92ebc46bb5965825b39726c5367 | d96b966f368bdc441f8791bf860bbd4f6d0c2951 | /editor/sound_editor.py | d1ff534876e20bd7365262de6fc63a05150c94cd | [] | no_license | CrisHu/game | 5949d4999275d8877602e2706afffa7e6a0b7372 | e7ffc7808e0b7a4d7c459b7e148338d316764dae | refs/heads/master | 2020-04-02T05:27:18.468096 | 2018-10-23T03:24:46 | 2018-10-23T03:24:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,261 | py | import pyxel
from pyxel.constants import AUDIO_SOUND_COUNT
from pyxel.ui import NumberPicker, ScrollBar
from pyxel.ui.constants import WIDGET_FRAME_COLOR
from .constants import EDITOR_IMAGE_X, EDITOR_IMAGE_Y
from .editor import Editor
class SoundEditor(Editor):
def __init__(self, parent):
super().__init__(parent)
self._sound_picker = NumberPicker(self, 45, 17, 0, AUDIO_SOUND_COUNT - 1, 0)
self._speed_picker = NumberPicker(self, 105, 17, 1, 99, 0)
self._scroll_var = ScrollBar(
self, 222, 24, 125, ScrollBar.VERTICAL, 100, 10, 0, with_shadow=False
)
self.add_event_handler("draw", self.__on_draw)
def __on_draw(self):
self.draw_frame(11, 16, 218, 157)
pyxel.text(23, 18, "SOUND", 6)
pyxel.text(83, 18, "SPEED", 6)
pyxel.blt(12, 25, 3, EDITOR_IMAGE_X, EDITOR_IMAGE_Y + 8, 19, 123)
for i in range(4):
pyxel.blt(
31 + i * 48, 25, 3, EDITOR_IMAGE_X + 19, EDITOR_IMAGE_Y + 8, 48, 147
)
pyxel.line(222, 149, 222, 171, WIDGET_FRAME_COLOR)
pyxel.text(17, 150, "TON", 6)
pyxel.text(17, 158, "VOL", 6)
pyxel.text(17, 166, "EFX", 6)
self.draw_not_implemented_message()
| [
"[email protected]"
] | |
22c4d7f96a6349a7d19d0b2069f885a37474aa47 | 010279e2ba272d09e9d2c4e903722e5faba2cf7a | /contrib/python/plotly/py2/plotly/validators/sankey/textfont/__init__.py | 7a16a4ec501428eb068d80f780f31eca40f57f29 | [
"MIT",
"Apache-2.0"
] | permissive | catboost/catboost | 854c1a1f439a96f1ae6b48e16644be20aa04dba2 | f5042e35b945aded77b23470ead62d7eacefde92 | refs/heads/master | 2023-09-01T12:14:14.174108 | 2023-09-01T10:01:01 | 2023-09-01T10:22:12 | 97,556,265 | 8,012 | 1,425 | Apache-2.0 | 2023-09-11T03:32:32 | 2017-07-18T05:29:04 | Python | UTF-8 | Python | false | false | 1,471 | py | import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="size", parent_name="sankey.textfont", **kwargs):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 1),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="family", parent_name="sankey.textfont", **kwargs):
super(FamilyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
no_blank=kwargs.pop("no_blank", True),
role=kwargs.pop("role", "style"),
strict=kwargs.pop("strict", True),
**kwargs
)
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(self, plotly_name="color", parent_name="sankey.textfont", **kwargs):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
| [
"[email protected]"
] | |
600fadd785148f4a7f668ac8599d5e3b6e2e67de | 3024235542ce7379b22e9a848316968e7abb51eb | /src/ExtGraph.py | 38974616adaa57d59e5daf5ad756d2a335202498 | [
"CC-BY-3.0"
] | permissive | harshakokel/JA-Walk-ER | 667efec9463e4ce34d7a8e6a021a8bcea6861a5e | d15adba5d9390bee43d0bca630ece0d0c73c522b | refs/heads/master | 2020-04-27T19:44:23.844569 | 2019-11-12T19:58:37 | 2019-11-12T19:58:37 | 174,631,519 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 17,407 | py | # coding=utf8
'''
Copyright (R) 2015 Vincent.H <[email protected]>
Published under Apache 2.0 License (http://www.apache.org/licenses/LICENSE-2.0.html).
-------------------------------------------------------------------------------------
This module define the extended functions of graph data.
The core class base on the PyDot project. (https://github.com/erocarrera/pydot)
'''
from __builtin__ import dict
import pydot
import wx, sys
import ExtParser
from DEUtils import to_unicode, add_double_quote,\
remove_double_quote
import tempfile
import DEUtils
import jawalker
TEMP_IMG_FILE = tempfile.gettempdir()+'/de_tempimg'
TEMPLATE_DOT = DEUtils.resource_path('GraphTemplate.dot')
INIT_SCRIPT = '''
digraph G {
rankdir=LR;
node [fontname="serif"];
edge [fontname="serif"];
}
'''
INIT_SCRIPT_SUBGRAPH = '''
graph G {
node [comment="subgraph node wildcard"];
edge [comment="subgraph edge wildcard"];
}
'''
class ExtGraph(pydot.Dot):
__bitmap = None
__historys = []
__history_point = -1
    # order by type index: [Entity, Relation, Attribute, Attribute (double border)]
node_shape = [jawalker.BuildDictionariesFromDOT.ENTITY_SHAPE, jawalker.BuildDictionariesFromDOT.RELATION_SHAPE, jawalker.BuildDictionariesFromDOT.ATTRIBUTE_SHAPE, jawalker.BuildDictionariesFromDOT.ATTRIBUTE_SHAPE]
# Order [ u"None", u"Important", u"Target"]
node_color = [None, jawalker.BuildDictionariesFromDOT.IMPORTANT_COLOR, jawalker.BuildDictionariesFromDOT.TARGET_COLOR]
def __init__(self, graph_name='G', obj_dict=None, template_file=None):
pydot.Dot.__init__(self, graph_name=graph_name, obj_dict=obj_dict)
# If create empty new graph...
if (obj_dict is None):
### Some default setting.
try:
if template_file is None:
g = ExtParser.parse_file(TEMPLATE_DOT)
else:
g = ExtParser.parse_file(template_file)
except:
g = ExtParser.parse_string(INIT_SCRIPT)
self.obj_dict = g.obj_dict
# If create graph from parsing program...
else:
# Now check if wildcard nodes existed in every graph and subgraph.
self.__check_wildcard_existed()
### Important!!! To make "pydot.Graph.toString()" working correct.
self.set_parent_graph(self)
### -------------------------------------------------------------
self.refresh_bitmap()
return
def __check_wildcard_existed(self, root_graph=None):
'''Check if wildcard nodes existed in root_graph and all subgraph.'''
if root_graph is None:
root_graph = self
for n_name in ['node', 'edge']:
node = self.EG_get_node_by_name(n_name, root_graph)
if node is None:
n = pydot.Node(n_name)
n.set_comment('Wildcard node added automatic in EG.')
root_graph.add_node(n)
### Anyway, push the wildcard node to the front of all other nodes.
if n_name == 'node':
root_graph.obj_dict['nodes']['node'][0]['sequence'] = 0.5
else:
root_graph.obj_dict['nodes']['edge'][0]['sequence'] = 0
### -------------------------------------------------------------
sgs = root_graph.get_subgraphs()
for sg in sgs:
self.__check_wildcard_existed(sg)
return
def create_empty_subgraph(self, name):
sg = pydot.Subgraph()
sg.set_name(name)
g = ExtParser.parse_string(INIT_SCRIPT_SUBGRAPH)
sg.obj_dict['nodes'] = g.obj_dict['nodes']
return sg
def get_bitmap(self):
"Get the graph image in wx.Bitmap format."
if self.__bitmap is None: ### Generate a image and load it now.
self.refresh_bitmap()
return self.__bitmap
def refresh_bitmap(self):
self.write(TEMP_IMG_FILE, self.prog, 'png')
self.__bitmap = wx.EmptyBitmap(0,0)
self.__bitmap.LoadFile(TEMP_IMG_FILE, wx.BITMAP_TYPE_PNG)
return
def EG_get_all_node_names(self, root_graph=None):
'''Get all node names in the graph, include nodes in all subgraph.'''
if root_graph is None:
root_graph = self
nodes = root_graph.get_nodes()
result = [ remove_double_quote(n.get_name()) for n in nodes ]
sgs = root_graph.get_subgraphs()
for sg in sgs:
result += self.EG_get_all_node_names(sg)
### Remove wildcard node.
try:
result.remove('node'); result.remove('edge')
except:
pass
return list(set(result))
def EG_get_all_edge_names(self, root_graph=None):
'''Get all edge names in the graph, include edges in all subgraph.'''
if root_graph is None:
root_graph = self
edges = root_graph.get_edges()
result = [ ( remove_double_quote( e.get_source() ), \
remove_double_quote( e.get_destination() ) ) \
for e in edges ]
sgs = root_graph.get_subgraphs()
for sg in sgs:
result += self.EG_get_all_edge_names(sg)
return list(set(result))
def EG_append_ER_node(self, nodename, root_graph=None, type=0, color=None ):
"Add node to 'root_graph' only by name."
uname = to_unicode(nodename.strip())
if root_graph is None:
root_graph = self
# Check unique.
n = self.EG_get_node_by_name(uname, root_graph=root_graph)
if not (n is None):
raise Exception('Unique error. The node with name "%s" was existed in the graph.' % uname)
uname = add_double_quote(uname)
attributes ={}
if not (color is None or color <= 0):
attributes["style"] = '\"filled\"'
attributes["fillcolor"] = self.node_color[color]
if color == 2:
attributes["fontcolor"]= jawalker.BuildDictionariesFromDOT.TARGET_FONT_COLOR
if type == 1:
attributes["orientation"] = u"45.0"
elif type == 3:
attributes["peripheries"] = u"2"
attributes["shape"] = self.node_shape[type]
n = pydot.Node(name =uname, **attributes)
root_graph.add_node(n)
self.__check_wildcard_existed()
self.refresh_bitmap()
return n
def EG_append_node(self, nodename, attr=None, root_graph=None):
"Add node to 'root_graph' only by name."
uname = to_unicode(nodename.strip())
if root_graph is None:
root_graph = self
# Check unique.
n = self.EG_get_node_by_name(uname, root_graph=root_graph)
if not(n is None):
raise Exception('Unique error. The node with name "%s" was existed in the graph.'%uname)
uname = add_double_quote(uname)
n = pydot.Node(uname,attr)
root_graph.add_node(n)
self.__check_wildcard_existed()
self.refresh_bitmap()
return n
def EG_append_edge(self, name_pair, root_graph=None):
"Add edge to 'root_graph' only by node-names of the edge."
nameA = to_unicode(name_pair[0].strip())
nameB = to_unicode(name_pair[1].strip())
if root_graph is None:
root_graph = self
### Check unique.
e = self.EG_get_edge_by_names((nameA, nameB), root_graph=root_graph)
if not(e is None):
raise Exception('Unique error. The edge with same names was existed in the graph.')
nameA = add_double_quote(nameA)
nameB = add_double_quote(nameB)
if root_graph.get_node(nameA)[0].get_shape() == jawalker.BuildDictionariesFromDOT.ENTITY_SHAPE and root_graph.get_node(nameB)[0].get_shape() == jawalker.BuildDictionariesFromDOT.ATTRIBUTE_SHAPE:
e = pydot.Edge(src=nameB, dst=nameA, color=jawalker.BuildDictionariesFromDOT.ATTRIBUTE_EDGE)
elif root_graph.get_node(nameB)[0].get_shape() == jawalker.BuildDictionariesFromDOT.ENTITY_SHAPE and root_graph.get_node(nameA)[0].get_shape() == jawalker.BuildDictionariesFromDOT.ATTRIBUTE_SHAPE:
e = pydot.Edge(src=nameA, dst=nameB, color=jawalker.BuildDictionariesFromDOT.ATTRIBUTE_EDGE)
else:
e = pydot.Edge(src=nameA, dst=nameB)
root_graph.add_edge(e)
self.__check_wildcard_existed()
self.refresh_bitmap()
return e
def EG_append_subgraph(self, graphname, root_graph=None):
"Add node to 'root_graph' only by name."
uname = to_unicode(graphname.strip())
if root_graph is None:
root_graph = self
# Check unique.
n = self.EG_get_subgraph_by_name(uname)
if not(n is None):
raise Exception('Unique error. The subgraph with name "%s" was existed in the graph.'%uname)
uname = add_double_quote(uname)
sg = self.create_empty_subgraph(uname)
root_graph.add_subgraph(sg)
self.refresh_bitmap()
return sg
def EG_get_node_by_name(self, name, root_graph=None):
'''Get node by name. Return None if not found.'''
if root_graph is None:
root_graph = self
n_name = remove_double_quote(name)
nodes = root_graph.get_nodes()
r = None
for n in nodes:
_name = remove_double_quote( n.get_name() )
if _name == n_name:
r = n
break
return r
def EG_get_edge_by_names(self, name_pair, root_graph=None):
'''Get edge by names of source and destination. Return None if not found.'''
if root_graph is None:
root_graph = self
nameA = remove_double_quote(name_pair[0])
nameB = remove_double_quote(name_pair[1])
edges = root_graph.get_edges()
r = None
for e in edges:
n = remove_double_quote( e.get_source() )
n1 = remove_double_quote( e.get_destination() )
if ((n == nameA )
and (n1 == nameB)):
r = e
break
return r
def EG_get_subgraph_by_name(self, name, root_graph=None):
'''Get node by name. Return None if not found.'''
if root_graph is None:
root_graph = self
sg_name = remove_double_quote(name)
sgs = root_graph.get_subgraphs()
r = None
for sg in sgs:
_name = remove_double_quote(sg.get_name())
if _name == sg_name:
r = sg
break
return r
def EG_remove_node(self, name, root_graph=None):
"Remove node from root_graph by name."
if root_graph is None:
root_graph = self
uname = add_double_quote( to_unicode(name) )
try:
del root_graph.obj_dict['nodes'][uname]
except:
pass
self.refresh_bitmap()
return
def EG_remove_edge(self, name_pair, root_graph=None):
"Remove edge from root_graph by end points name of the edge."
if root_graph is None:
root_graph = self
nameA = add_double_quote( to_unicode( name_pair[0]) )
nameB = add_double_quote( to_unicode( name_pair[1]) )
try:
del root_graph.obj_dict['edges'][(nameA, nameB)]
except:
pass
self.refresh_bitmap()
return
def EG_remove_subgraph(self, name, root_graph=None):
"Remove subgraph from root_graph by name."
if root_graph is None:
root_graph = self
sg_name = add_double_quote( to_unicode(name) )
try:
del root_graph.obj_dict['subgraphs'][sg_name]
except:
pass
self.refresh_bitmap()
return
def EG_to_modes(self, indent=0, root_graph=None):
"""Returns a string representation of the graph in dot language.
This version try to make string looking better than to_string().
"""
#TODO Add modes
dictionary = jawalker.BuildDictionariesFromDOT(self)
if dictionary.target is None:
return "No target node found"
target = dictionary.target
all_features = list(set(dictionary.relations).union(set(dictionary.attributes)) - set([target]))
features = dictionary.importants
networks = jawalker.Networks(target, features, dictionary)
all_paths = networks.paths_from_target_to_features()
networks.walkFeatures(all_paths, shortest=True)
bk = networks.all_modes
return '\n'.join(bk)
def EG_to_string(self, indent=0, root_graph=None):
"""Returns a string representation of the graph in dot language.
This version try to make string looking better than to_string().
"""
idt = ' '*(4 + indent)
graph = list()
if root_graph is None:
root_graph = self
if root_graph != root_graph.get_parent_graph():
graph.append(' '*indent)
if root_graph.obj_dict.get('strict', None) is not None:
if root_graph==root_graph.get_parent_graph() and root_graph.obj_dict['strict']:
graph.append('strict ')
if root_graph.obj_dict['name'] == '':
graph.append( '{\n' )
else:
graph.append( '%s %s {\n' % (root_graph.obj_dict['type'], root_graph.obj_dict['name']) )
for attr in root_graph.obj_dict['attributes'].keys():
if root_graph.obj_dict['attributes'].get(attr, None) is not None:
graph.append( idt+'%s=' % attr )
val = root_graph.obj_dict['attributes'].get(attr)
graph.append( pydot.quote_if_necessary(val) )
graph.append( ';\n' )
edges_done = set()
edge_obj_dicts = list()
for e in root_graph.obj_dict['edges'].values():
edge_obj_dicts.extend(e)
if edge_obj_dicts:
edge_src_set, edge_dst_set = zip( *[obj['points'] for obj in edge_obj_dicts] )
edge_src_set, edge_dst_set = set(edge_src_set), set(edge_dst_set)
else:
edge_src_set, edge_dst_set = set(), set()
node_obj_dicts = list()
for e in root_graph.obj_dict['nodes'].values():
node_obj_dicts.extend(e)
sgraph_obj_dicts = list()
for sg in root_graph.obj_dict['subgraphs'].values():
sgraph_obj_dicts.extend(sg)
obj_list = [ (obj['sequence'], obj) for obj in (edge_obj_dicts + node_obj_dicts + sgraph_obj_dicts) ]
obj_list.sort()
for _, obj in obj_list:
if obj['type'] == 'node':
node = pydot.Node(obj_dict=obj)
if root_graph.obj_dict.get('suppress_disconnected', False):
if (node.get_name() not in edge_src_set and
node.get_name() not in edge_dst_set):
continue
graph.append( DEUtils.smart_indent(node.to_string(), idt) + '\n' )
elif obj['type'] == 'edge':
edge = pydot.Edge(obj_dict=obj)
if root_graph.obj_dict.get('simplify', False) and edge in edges_done:
continue
graph.append( DEUtils.smart_indent(edge.to_string(), idt) + '\n' )
edges_done.add(edge)
else:
sgraph = pydot.Subgraph(obj_dict=obj)
sg_str = self.EG_to_string(indent+4, sgraph)
graph.append( sg_str+'\n' )
if root_graph != root_graph.get_parent_graph():
graph.append(' '*indent)
graph.append( '}\n' )
return ''.join(graph)
def undo_change(self, step=1):
"Roll back the change of the graph."
self.refresh_bitmap()
return
def redo_change(self, step=1):
"Redo change on the graph."
self.refresh_bitmap()
return
if __name__ == '__main__':
sd = ExtGraph()
s = u"中文"
s1 = s.encode('utf8')
sd.EG_append_node(s)
sd.EG_append_edge((s,s1))
    print(sd.to_string()) | [
"[email protected]"
] | |
a83c3362a529d970c8d74dc9a41e928ad7f6aa12 | 36764bbdbe3dd6bb12cd8eb78e4b8f889bd65af0 | /mysortmat.py | b231fa573b16f020be2aaa0e3b636ee9e073a985 | [] | no_license | tristaaa/lcproblems | 18e01da857c16f69d33727fd7dcc821c09149842 | 167a196a9c36f0eaf3d94b07919f4ed138cf4728 | refs/heads/master | 2020-05-21T14:38:14.920465 | 2020-02-23T01:49:23 | 2020-02-23T01:49:23 | 186,085,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,274 | py | class Solution(object):
def mySortMat(self, mat):
"""
sort the input matrix, size of n*n, and the output should be in this order
[[9,8,6],
[7,5,3],
[4,2,1]]
:type mat: List[List[int]]
:rtype: List[List[int]]
"""
n = len(mat)
arr = []
for i in range(n):
arr+=mat[i]
arr.sort(reverse=True)
# print(arr)
result=[[0]*n for i in range(n)]
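        # Fill the matrix one anti-diagonal at a time. fn = i*(i+1)//2 is the
        # triangular number counting the cells on anti-diagonals 0..i-1, i.e. the
        # offset in the sorted array where diagonal i starts; the mirrored diagonal
        # near the bottom-right corner is filled from the end of the array.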
for i in range(n):
fn=i*(i+1)//2
if i!=n-1:
for j in range(i+1):
result[j][i-j] = arr[fn+j]
result[n-1-j][n-1-i+j] = arr[n*n-1-fn-j]
else:
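                # The main anti-diagonal is shared by both corners, so fill it from
                # both ends toward the centre; for odd n the two writes meet at the
                # middle cell with the same value.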
for j in range(i//2+1):
result[j][i-j] = arr[fn+j]
result[n-1-j][n-1-i+j] = arr[n*n-1-fn-j]
return result
sol=Solution()
mat=[
[ 5, 1, 9, 11],
[ 2, 4, 8, 10],
[13, 3, 6, 7],
[15, 14, 12, 0]
]
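# Expected result for this 4x4 input:
# [[15,14,12,9],[13,11,8,5],[10,7,4,2],[6,3,1,0]]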
mat1=[
[ 5, 1, 9],
[ 2, 4, 8],
[13, 3, 6]
]
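# Expected result for this 3x3 input: [[13,9,6],[8,5,3],[4,2,1]]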
print("Given the input matrix: [")
for i in range(len(mat)):
print(mat[i])
print("]")
print("the sorted matrix is: [")
res=sol.mySortMat(mat)
for i in range(len(res)):
print(res[i])
print("]")
print("Given the input matrix: [")
for i in range(len(mat1)):
print(mat1[i])
print("]")
print("the sorted matrix is: [")
res=sol.mySortMat(mat1)
for i in range(len(res)):
print(res[i])
print("]")
| [
"[email protected]"
] | |
060e4478c838487312c46740f0239fd29248b21d | e820f47d64b236860170a6daa2438074ce65743b | /accounts/migrations/0002_order_product.py | eb0ef7d85be167a439dabd171000fe1544f1cc43 | [] | no_license | shubham7413/django-project4 | 91a5732292ce58ce5a22bbd1e4ac521b14b9befd | 04b90b6f426f416fe59c97a24011bf3b9036196a | refs/heads/master | 2023-06-02T14:33:30.634561 | 2021-06-18T19:38:18 | 2021-06-18T19:38:18 | 378,123,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,195 | py | # Generated by Django 3.2 on 2021-05-11 06:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_created', models.DateTimeField(auto_now_add=True, null=True)),
('status', models.CharField(max_length=200, null=True)),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, null=True)),
('price', models.FloatField(null=True)),
('category', models.CharField(max_length=200, null=True)),
('description', models.CharField(max_length=200, null=True)),
('date_created', models.DateTimeField(auto_now_add=True, null=True)),
],
),
]
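    # Auto-generated migration; apply with: python manage.py migrate accounts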
| [
"[email protected]"
] | |
9e7ecacbe29bae9c3f1c5c9fae36d189793ca0a3 | 83b1d3dd26a13631df76f3b68fcd14db5c04106e | /client.py | 5fe6c2dd73977e699d685138f6069dec21784006 | [] | no_license | ML273/flask_getting_started | b2b20a9b136eda927763b40fff734d86deae0151 | 6f23ef7fbfa10e40836d32943d4ab94463c571a6 | refs/heads/master | 2020-04-10T11:55:54.366143 | 2018-03-08T03:27:22 | 2018-03-08T03:27:22 | 124,272,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | import requests
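# Smoke test for the Flask service in this repo; assumes the server is already
# running at vcm-3502.vm.duke.edu:5000 (adjust the host if it runs elsewhere).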
r = requests.get("http://vcm-3502.vm.duke.edu:5000/")
print(r.text)
getname = requests.get("http://vcm-3502.vm.duke.edu:5000/name")
print(getname.json())
hello_name = requests.get("http://vcm-3502.vm.duke.edu:5000/hello/Marianne")
print(hello_name.json())
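# POST two points a and b; the /distance endpoint presumably returns the
# Euclidean distance between them (sqrt(41), about 6.40, for these inputs).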
distance = requests.post("http://vcm-3502.vm.duke.edu:5000/distance",json={"a": [0, 5], "b": [4, 0]})
print(distance.json())
| [
"[email protected]"
] |