blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
283
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
41
| license_type
stringclasses 2
values | repo_name
stringlengths 7
96
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 58
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 12.7k
662M
⌀ | star_events_count
int64 0
35.5k
| fork_events_count
int64 0
20.6k
| gha_license_id
stringclasses 11
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 43
values | src_encoding
stringclasses 9
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
5.88M
| extension
stringclasses 30
values | content
stringlengths 7
5.88M
| authors
sequencelengths 1
1
| author
stringlengths 0
73
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ea98b768baecabb185a4dec462e1d58d22c97517 | 405e8ba51ab7f177797a14bbbc619e033c5e9fcc | /metrics_utils.py | 5ffc4e8672d8127ea826b90bc19898102248d67d | [] | no_license | htfhxx/metrics_python | 6d801f49136c249979d5e6c1d7d3ed5ba1b3f710 | c360353668cfbff335418921b67c3224ba182309 | refs/heads/master | 2022-12-15T20:51:33.211776 | 2020-09-13T11:53:01 | 2020-09-13T11:53:01 | 295,139,113 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,220 | py |
def evaluate_2(y_true, y_pred):
    """Compute precision, recall and F1 for binary labels (positive class == 1).

    Args:
        y_true: iterable of gold labels (0 or 1).
        y_pred: iterable of predicted labels (0 or 1).

    Returns:
        (precision, recall, f1) as floats. All three are 0.0 when there are
        no true positives, which also avoids division by zero.
    """
    tp = sum(1 for a, b in zip(y_true, y_pred) if a == 1 and b == 1)
    fp = sum(1 for a, b in zip(y_true, y_pred) if a == 0 and b == 1)
    fn = sum(1 for a, b in zip(y_true, y_pred) if a == 1 and b == 0)
    if tp == 0:
        # BUG FIX: the original returned a bare 0.0 here while the success
        # path returns a 3-tuple; always return a tuple for a consistent
        # interface (matches evaluate_Multi's micro branch).
        return 0.0, 0.0, 0.0
    # tp > 0 guarantees both denominators below are non-zero.
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    f1 = 2 * (precision * recall) / (precision + recall)
    return precision, recall, f1
def _safe_prf(tp, fp, fn):
    """Return (precision, recall, f1) from raw counts, mapping 0/0 ratios to 0.0."""
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    if precision + recall:
        f1 = 2 * precision * recall / (precision + recall)
    else:
        f1 = 0.0
    return precision, recall, f1


def evaluate_N(y_true, y_pred, N, average=None):
    """Precision/recall/F1 for single-label multi-class data with classes 1..N.

    Each class is scored one-vs-rest and the per-class counts are combined
    according to ``average``.

    Args:
        y_true: gold class labels (ints in 1..N).
        y_pred: predicted class labels (ints in 1..N).
        N: number of classes.
        average: 'micro' (pool counts over classes), 'macro' (unweighted mean
            of per-class scores) or 'weighted' (mean weighted by class support
            in ``y_true``).

    Returns:
        (precision, recall, f1) as floats; any 0/0 ratio is reported as 0.0.

    Raises:
        ValueError: if ``average`` is not one of the supported modes.
    """
    tp_list = [0] * N
    fp_list = [0] * N
    fn_list = [0] * N
    for i in range(1, N + 1):
        # One-vs-rest view for class i.
        true_i = [1 if label == i else 0 for label in y_true]
        pred_i = [1 if label == i else 0 for label in y_pred]
        tp_list[i - 1] = sum(1 for a, b in zip(true_i, pred_i) if a == 1 and b == 1)
        fp_list[i - 1] = sum(1 for a, b in zip(true_i, pred_i) if a == 0 and b == 1)
        fn_list[i - 1] = sum(1 for a, b in zip(true_i, pred_i) if a == 1 and b == 0)
    if average == 'micro':
        # BUG FIX: the original divided by (tp + fp) before its tp == 0 check,
        # so all-negative predictions raised ZeroDivisionError.
        return _safe_prf(sum(tp_list), sum(fp_list), sum(fn_list))
    if average == 'macro':
        # BUG FIX: per-class 0/0 ratios now yield 0.0 instead of crashing.
        scores = [_safe_prf(tp_list[k], fp_list[k], fn_list[k]) for k in range(N)]
        return tuple(sum(col) / N for col in zip(*scores))
    if average == 'weighted':
        scores = [_safe_prf(tp_list[k], fp_list[k], fn_list[k]) for k in range(N)]
        support = [sum(1 for label in y_true if label == i) for i in range(1, N + 1)]
        assert sum(support) == len(y_true) == len(y_pred)
        weights = [s / len(y_true) for s in support]
        return tuple(sum(v * w for v, w in zip(col, weights)) for col in zip(*scores))
    # BUG FIX: the original printed a message and called exit(); raise instead
    # so callers can catch and handle the error.
    raise ValueError("average must be 'micro', 'macro' or 'weighted', got %r" % (average,))
def evaluate_Multi(y_true, y_pred, N, average=None):
    """Precision/recall/F1 for multi-label data.

    Each sample is an N-element 0/1 indicator vector (e.g. [1, 0, 1]); every
    label column is scored one-vs-rest and the counts are combined according
    to ``average``.

    Args:
        y_true: gold indicator vectors (sequences of length N).
        y_pred: predicted indicator vectors (sequences of length N).
        N: number of labels per sample.
        average: 'micro', 'macro' or 'weighted' (weighted by label support).

    Returns:
        (precision, recall, f1) as floats; any 0/0 ratio is reported as 0.0.

    Raises:
        ValueError: if ``average`` is not one of the supported modes.
    """
    tp_list = [0] * N
    fp_list = [0] * N
    fn_list = [0] * N
    for i in range(N):
        true_col = [1 if row[i] == 1 else 0 for row in y_true]
        pred_col = [1 if row[i] == 1 else 0 for row in y_pred]
        tp_list[i] = sum(1 for a, b in zip(true_col, pred_col) if a == 1 and b == 1)
        fp_list[i] = sum(1 for a, b in zip(true_col, pred_col) if a == 0 and b == 1)
        fn_list[i] = sum(1 for a, b in zip(true_col, pred_col) if a == 1 and b == 0)

    def per_label(k):
        # Precision/recall/F1 for one label, mapping 0/0 to 0.0.
        # BUG FIX: the original macro/weighted branches divided by
        # (tp + fp) / (tp + fn) unguarded and crashed on labels that were
        # never predicted / never present.
        denom_p = tp_list[k] + fp_list[k]
        denom_r = tp_list[k] + fn_list[k]
        p = tp_list[k] / denom_p if denom_p else 0.0
        r = tp_list[k] / denom_r if denom_r else 0.0
        f = 2 * p * r / (p + r) if p + r else 0.0
        return p, r, f

    if average == 'micro':
        tp, fp, fn = sum(tp_list), sum(fp_list), sum(fn_list)
        if tp == 0:
            return 0.0, 0.0, 0.0
        # tp > 0 guarantees non-zero denominators below.
        precision = tp / (tp + fp)
        recall = tp / (tp + fn)
        f1 = 2 * precision * recall / (precision + recall)
        return precision, recall, f1
    if average == 'macro':
        scores = [per_label(k) for k in range(N)]
        return tuple(sum(col) / N for col in zip(*scores))
    if average == 'weighted':
        scores = [per_label(k) for k in range(N)]
        support = [sum(1 for row in y_true if row[k] == 1) for k in range(N)]
        total = sum(support)
        if total == 0:
            # BUG FIX: no positive gold labels at all used to crash when
            # normalising the weights; report all-zero scores instead.
            return 0.0, 0.0, 0.0
        weights = [s / total for s in support]
        return tuple(sum(v * w for v, w in zip(col, weights)) for col in zip(*scores))
    # BUG FIX: the original printed a message and called exit(); raise instead
    # so callers can catch and handle the error.
    raise ValueError("average must be 'micro', 'macro' or 'weighted', got %r" % (average,))
def main():
    """Exercise the three evaluation helpers on small hand-made examples."""
    # Binary case.
    binary_gold = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
    binary_pred = [0, 0, 1, 1, 1, 0, 0, 1, 1, 1]
    print(evaluate_2(binary_gold, binary_pred))
    print('-' * 100)

    # Multi-class case (labels 1..3), all three averaging modes.
    multi_gold = [1, 1, 2, 2, 2, 3, 3, 3, 3, 3]
    multi_pred = [1, 2, 2, 2, 3, 1, 2, 3, 3, 3]
    for mode in ('micro', 'macro', 'weighted'):
        print(evaluate_N(multi_gold, multi_pred, 3, average=mode))
    print('-' * 100)

    # Multi-label case (3 indicator columns), all three averaging modes.
    label_gold = [[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 1, 0], [1, 0, 1]]
    label_pred = [[1, 0, 0], [1, 0, 0], [1, 1, 1], [1, 0, 0], [0, 1, 1]]
    for mode in ('micro', 'macro', 'weighted'):
        print(evaluate_Multi(label_gold, label_pred, 3, average=mode))


if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
112ff750125e7c5fb1438b5dcf7bcdaef7265187 | a992a2bcea0e40a9cc3d947466a7eaafaf337280 | /SocketPython/server.py | 0e78bac2702be3473a62e6ef8bef95fec62d64df | [] | no_license | rodrigoferrazazevedo/dojo-python | 6bcfb4bf27435b5e60fa1c992840e319fe2dbbb3 | b3aebfae8b0dae2c2b898431ed747dd9236706e3 | refs/heads/master | 2022-12-24T10:25:10.692833 | 2022-06-22T02:12:18 | 2022-06-22T02:12:18 | 107,460,652 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | import socket
server = socket.socket()
server.bind( ('localhost',8000))
server.listen(5)
while True:
conn, addr = server.accept()
print "Connection established"
print addr
msg = conn.recv(1024)
print msg
conn.send("Connection established")
conn.close() | [
"[email protected]"
] | |
095d3023a70bd06fac2923e3635efb9a4e247285 | 91e56cf5312223af9bef83538c4ccd50afa1ca27 | /p024_LexicographicPermutations.py | b0136f3d7a929a64a7c07f3d98b8f9326cccab32 | [] | no_license | fletcherw/projecteuler | 18dedac2bd90186f7ea9320ff45892cdc838f082 | 6bc3094f816511ca10984ed58b6fb8e99594327a | refs/heads/master | 2021-05-16T03:06:14.495177 | 2016-12-17T23:25:39 | 2016-12-17T23:25:39 | 18,919,808 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | import math
digits = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
digitsout = ""
index = 999999 #the index of the permutation you're trying to calculate. Note that this is 0 indexed, so the 10th permutation would be 9, etc. etc.
for n in range(len(digits) - 1, -1, -1):
fact = math.factorial(n)
digitsout += str(digits.pop(int(math.floor(index/fact))))
index = index % fact
print digitsout | [
"[email protected]"
] | |
4a6c735916616d81dda3aa9ae347e5ab8ee1ef2c | 4b2d75ba65d2a6838a0cbc70cdda0ae49bd4ae49 | /testdata/api_endpoint.py | 4a5691c2fcbfc737d822dd14c0db0fa80015d454 | [] | no_license | mimqupspy/python_rest | 32360d051f266f379165543aea9f7a7bf7252188 | 707ac2494752fd6772081f7afddfc7e9102ad12a | refs/heads/main | 2023-08-29T12:41:39.738241 | 2021-10-12T04:44:29 | 2021-10-12T04:44:29 | 415,355,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 724 | py | # api endpoints
list_user = "https://reqres.in/api/users?page=2"
single_user = "https://reqres.in/api/users/2"
single_user_not_found = "https://reqres.in/api/users/23"
list_resource = "https://reqres.in/api/unknown"
single_resource = "https://reqres.in/api/unknown/2"
single_resource_not_found = "https://reqres.in/api/unknown/23"
post_create_user_url = "https://reqres.in/api/users"
patch_user_url = "https://reqres.in/api/users/2"
delete_user_url = "https://reqres.in/api/users/2"
# Register Sucessfull, Unsucessfull
register_url = "https://reqres.in/api/register"
# Login Sucessfull, unsucessfull
login_url = "https://reqres.in/api/login"
# response delayed
get_delayed_url = "https://reqres.in/api/users?delay=3"
| [
"[email protected]"
] | |
a3531277a9a252270e45aa6e6fef912242833302 | 1588092ae5dc099be61cea76900ca5b0f01b8739 | /exam_2020/order.py | 031c694e024b5fdd873722a99f7fb446c0612724 | [] | no_license | isakss/Forritun1_Python | e13fa19d90c7fba64dea3ef28853313e6a836ed8 | 9d9590126fbd80f1a5a8a440240fead03154ce91 | refs/heads/master | 2023-02-15T16:07:30.325561 | 2021-01-12T09:07:40 | 2021-01-12T09:07:40 | 292,535,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | class Order(object):
def __init__(self, item_str="", price=0.0):
self.__item = item_str
self.__price = price
def item(self):
return self.__item
def price(self):
return self.__price
def __gt__(self, other):
return self.__price > other.__price
def __add__(self, other):
result = Order()
result.__item = self.__item + "+" + other.__item
result.__price = self.__price + other.__price
return result
def __str__(self):
return "Item: {}, price: {}".format(self.__item, self.__price)
| [
"[email protected]"
] | |
98bb88d68bcbb6950b2ada14bd90d249dbc4e630 | 9ac9e863da8831703e1d0322a0fe9d279c550fbf | /auctions/migrations/0002_category_listing.py | 8c128798bf571a1448f073efd337bfe8e9352792 | [] | no_license | eldocbrown/cs50w-project2-commerce | b247da79ff2563f59781c7bc13d1efabae0c5a29 | 48988ac8f510c15fcf41d4c13010efe5509b3aa4 | refs/heads/master | 2023-01-04T00:24:34.454410 | 2020-10-26T03:27:08 | 2020-10-26T03:27:08 | 305,499,319 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,304 | py | # Generated by Django 3.1.2 on 2020-10-20 18:06
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('auctions', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
],
),
migrations.CreateModel(
name='Listing',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('description', models.TextField()),
('startingPrice', models.DecimalField(decimal_places=2, max_digits=10)),
('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='catListings', to='auctions.category')),
('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='usrListings', to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
71dfbd91d20000eec465c1c8fe2f2198fd8b1e9c | 38d568d4a230aa7b8ace0957847e9b681223564f | /getSingleZoneCloudMain.py | 399eff7450c8b4e45d3d065528d6bf9cd8da3506 | [] | no_license | lifeisjustagame/Hello-World | 03aa19c0fb8d77663439fb0de73780bbd8d9eeee | 43e51d79cffdb90a89b3bf2f219b5e8c2bb1e52a | refs/heads/master | 2021-01-23T06:54:26.110225 | 2017-04-06T07:44:21 | 2017-04-06T07:44:21 | 86,407,323 | 0 | 0 | null | 2017-03-28T03:01:34 | 2017-03-28T02:45:52 | null | UTF-8 | Python | false | false | 399 | py |
if __name__ == '__main__':
start = time.time()
LOG.warning('PROGRAM START AT: %s' % TIME)
try:
mc = MainClass()
mc.task_main()
except Exception, e:
LOG.error(e)
raise
except (SystemExit, keyboardInterrupt), e:
error_str = 'Program killed by user, reason: %s' %e
LOG.error(error_str)
sys.exit()
finally:
end = 'use: %s' % (time.time() - start)
LOG.info(end)
| [
"[email protected]"
] | |
6215b44a8f43d5aaf175a030c7c142759e405f83 | 747c76c186567d0fc1af2774f6bd732ce6b11198 | /catkin_auefinals/build/auefinals/turtlebot3/turtlebot3_slam/catkin_generated/pkg.installspace.context.pc.py | 8e5ea4bc2c9b2df68a1eb7a58dbe208672968976 | [] | no_license | abhibhagwat/TurtleBot3_obstaclecourse | 5bf9374b3f99f377f22f04304c65f10c0d5df0fd | 4e213042e5da385d5086175de1dae3e345be4880 | refs/heads/master | 2023-03-12T09:59:50.178892 | 2021-03-04T08:46:20 | 2021-03-04T08:46:20 | 344,267,482 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;sensor_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "turtlebot3_slam"
PROJECT_SPACE_DIR = "/home/abhibhagwat/catkin_auefinals/install"
PROJECT_VERSION = "1.2.1"
| [
"[email protected]"
] | |
cf0a8120c529928cdef0b3a7a81c713a31381216 | 72512fb3e6c3e5b38f55a5385411ed6a9873aa05 | /examples/single_calculations/test_multiple_force_eval.py | 333c996e81ad331042290003dde1ea948be3a9de | [
"MIT"
] | permissive | borellim/aiida-cp2k | e8750e5e514b98c34b6a604fe46d3c473f359d35 | ea5cd54bd8419fefd24967f2c2569031cc28a592 | refs/heads/master | 2020-12-19T17:03:05.545681 | 2019-10-19T07:03:37 | 2019-10-19T07:03:37 | 235,795,066 | 0 | 0 | MIT | 2020-01-23T12:56:45 | 2020-01-23T12:56:44 | null | UTF-8 | Python | false | false | 7,658 | py | # -*- coding: utf-8 -*-
# pylint: disable=invalid-name
###############################################################################
# Copyright (c), The AiiDA-CP2K authors. #
# SPDX-License-Identifier: MIT #
# AiiDA-CP2K is hosted on GitHub at https://github.com/aiidateam/aiida-cp2k #
# For further information on the license, see the LICENSE.txt file. #
###############################################################################
"""Run DFT calculation with multiple force eval sections"""
from __future__ import print_function
from __future__ import absolute_import
import sys
import ase
import click
from aiida.engine import run
from aiida.orm import (Code, Dict, StructureData)
from aiida.common import NotExistent
from aiida.plugins import CalculationFactory
Cp2kCalculation = CalculationFactory('cp2k')
@click.command('cli')
@click.argument('codelabel')
def main(codelabel):
    """Run DFT calculation with multiple force eval sections"""
    # Resolve the AiiDA Code node (the configured CP2K executable) by label;
    # exit with status 1 if it has not been set up in the database.
    try:
        code = Code.get_from_string(codelabel)
    except NotExistent:
        print("The code '{}' does not exist".format(codelabel))
        sys.exit(1)
    print("Testing CP2K ENERGY on H2O dimer (Mixed: DFT+MM)...")
    # structure: a water dimer (atoms ordered O,H,H,O,H,H) in a periodic
    # 5 x 5 x 5 Angstrom cubic cell.
    pos = [[0.934, 2.445, 1.844], [1.882, 2.227, 1.982], [0.81, 3.165, 2.479], [3.59, 2.048, 2.436],
           [4.352, 2.339, 1.906], [3.953, 1.304, 2.946]]
    atoms = ase.Atoms(symbols='OH2OH2', pbc=True, cell=[5.0, 5.0, 5.0])
    atoms.set_positions(pos)
    structure = StructureData(ase=atoms)
    # parameters: CP2K input rendered from a nested dict. Three FORCE_EVAL
    # sections are defined:
    #   1. MIXED     -- combines the energies of force evals 2 and 3 (E1+E2),
    #                   mapping atoms 1-3 / 4-6 to fragments 1 / 2;
    #   2. FIST      -- classical MM force field for the same system;
    #   3. Quickstep -- DFT (LDA) description of the same system.
    parameters = Dict(
        dict={
            'MULTIPLE_FORCE_EVALS': {
                'FORCE_EVAL_ORDER': '2 3',
                'MULTIPLE_SUBSYS': 'T',
            },
            'FORCE_EVAL': [
                {
                    'METHOD': 'MIXED',
                    'MIXED': {
                        'MIXING_TYPE': 'GENMIX',
                        'GENERIC': {
                            'ERROR_LIMIT': 1.0E-10,
                            'MIXING_FUNCTION': 'E1+E2',
                            'VARIABLES': 'E1 E2',
                        },
                        'MAPPING': {
                            'FORCE_EVAL_MIXED': {
                                'FRAGMENT': [
                                    {
                                        '_': 1,
                                        '1': '3'
                                    },
                                    {
                                        '_': 2,
                                        '4': '6'
                                    },
                                ],
                            },
                            'FORCE_EVAL': [{
                                '_': 1,
                                'DEFINE_FRAGMENTS': '1 2',
                            }, {
                                '_': 2,
                                'DEFINE_FRAGMENTS': '1 2',
                            }],
                        }
                    },
                },
                {
                    'METHOD': 'FIST',
                    'MM': {
                        'FORCEFIELD': {
                            'SPLINE': {
                                'EPS_SPLINE': 1.30E-5,
                                'EMAX_SPLINE': 0.8,
                            },
                            'CHARGE': [
                                {
                                    'ATOM': 'H',
                                    'CHARGE': 0.0,
                                },
                                {
                                    'ATOM': 'O',
                                    'CHARGE': 0.0,
                                },
                            ],
                            'BOND': {
                                'ATOMS': 'H O',
                                'K': 0.0,
                                'R0': 2.0,
                            },
                            'BEND': {
                                'ATOMS': 'H O H',
                                'K': 0.0,
                                'THETA0': 2.0,
                            },
                            'NONBONDED': {
                                'LENNARD-JONES': [
                                    {
                                        'ATOMS': 'H H',
                                        'EPSILON': 0.2,
                                        'SIGMA': 2.4,
                                    },
                                    {
                                        'ATOMS': 'H O',
                                        'EPSILON': 0.4,
                                        'SIGMA': 3.0,
                                    },
                                    {
                                        'ATOMS': 'O O',
                                        'EPSILON': 0.8,
                                        'SIGMA': 3.6,
                                    },
                                ]
                            },
                        },
                        'POISSON': {
                            'EWALD': {
                                'EWALD_TYPE': 'none',
                            }
                        }
                    },
                    'SUBSYS': {
                        'TOPOLOGY': {
                            'CONNECTIVITY': 'GENERATE',
                            'GENERATE': {
                                'CREATE_MOLECULES': True,
                            }
                        }
                    }
                },
                {
                    'METHOD': 'Quickstep',
                    'DFT': {
                        'BASIS_SET_FILE_NAME': 'BASIS_MOLOPT',
                        'QS': {
                            'EPS_DEFAULT': 1.0e-12,
                            'WF_INTERPOLATION': 'ps',
                            'EXTRAPOLATION_ORDER': 3,
                        },
                        'MGRID': {
                            'NGRIDS': 4,
                            'CUTOFF': 280,
                            'REL_CUTOFF': 30,
                        },
                        'XC': {
                            'XC_FUNCTIONAL': {
                                '_': 'LDA',
                            },
                        },
                        'POISSON': {
                            'PERIODIC': 'none',
                            'PSOLVER': 'MT',
                        },
                    },
                    'SUBSYS': {
                        'KIND': [
                            {
                                '_': 'O',
                                'BASIS_SET': 'DZVP-MOLOPT-SR-GTH',
                                'POTENTIAL': 'GTH-LDA-q6'
                            },
                            {
                                '_': 'H',
                                'BASIS_SET': 'DZVP-MOLOPT-SR-GTH',
                                'POTENTIAL': 'GTH-LDA-q1'
                            },
                        ],
                    },
                },
            ]
        })
    # Scheduler options: a single MPI rank on one machine, 3-minute wall time.
    options = {
        "resources": {
            "num_machines": 1,
            "num_mpiprocs_per_machine": 1,
        },
        "max_wallclock_seconds": 1 * 3 * 60,
    }
    inputs = {'structure': structure, 'parameters': parameters, 'code': code, 'metadata': {'options': options,}}
    print("Submitted calculation...")
    # Blocking run of the calculation through the AiiDA engine.
    run(Cp2kCalculation, **inputs)


if __name__ == '__main__':
    main()  # pylint: disable=no-value-for-parameter
| [
"[email protected]"
] | |
575cc3c1e2ae6bca3bd38b5117cba86f4090d1a7 | 24b1bec9f2e2cb0ae1f7dd75527a2a1a01e08429 | /py/neural_network.py | 958d7c089c05fb3f4a18a2d601633bdb4edf7f2c | [] | no_license | lwierzb1/mgr-colorful-image-colorization | c370409f6690bb72c7c5651104a470373a427fe4 | fe60cc06ce406d0ced75db36cb3755634216c019 | refs/heads/main | 2023-02-22T19:31:40.577761 | 2021-01-25T21:41:57 | 2021-01-25T21:41:57 | 332,080,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,576 | py | #!/usr/bin/env python
import cv2
import numpy as np
from config_reader import ConfigReader
__author__ = "Lukasz Wierzbicki"
__version__ = "1.0.0"
__maintainer__ = "Lukasz Wierzbicki"
__email__ = "[email protected]"
class NeuralNetwork:
    """
    CNN neural network used to colorize grayscale image.
    [https://arxiv.org/pdf/1603.08511]
    ...
    Attributes
    ----------
    __PROTO_FILE
        file with cnn description. Describe the structure of neural network
    __WEIGHTS_FILE
        file that defines the internal parameters of cnn layers.
    __QUANTIZED_LAB_SPACE
        file with quantized lab space.
    __INPUT_WIDTH
        cnn input width.
    __INPUT_HEIGHT
        cnn input height.
    __neural_network
        instance of cnn neural network
    Methods
    -------
    populate(blob_matrix)
        sets input (blob_matrix) of __neural_network instance.
    predict_ab_space()
        predicts the ab space based on the provided input with the method populate()
    """

    def __init__(self):
        """Load paths and network geometry from config, then build the Caffe net."""
        config_reader = ConfigReader()
        self.__PROTO_FILE = config_reader.get_string_property('ProtoFile')
        self.__WEIGHTS_FILE = config_reader.get_string_property('WeightsFile')
        quantized_lab_space_path = config_reader.get_string_property('QuantizedLabSpace')
        # Reshape the stored cluster centers to (2, 313, 1, 1) so they can be
        # used directly as a 1x1 convolution kernel (313 ab-space bins).
        self.__QUANTIZED_LAB_SPACE = np.load(quantized_lab_space_path).transpose().reshape(2, 313, 1, 1)
        self.__INPUT_WIDTH = config_reader.get_int_property('Width')
        self.__INPUT_HEIGHT = config_reader.get_int_property('Height')
        self.__neural_network = cv2.dnn.readNetFromCaffe(self.__PROTO_FILE, self.__WEIGHTS_FILE)
        self.__populate_network_layers_with_quantized_lab_space()

    def __populate_network_layers_with_quantized_lab_space(self):
        """Install the quantized ab cluster centers into the net's output layers."""
        # populate cluster centers as 1x1 convolution kernel. Based on 'colorization_deploy_v2.prototxt'
        class8 = self.__neural_network.getLayerId("class8_ab")
        conv8 = self.__neural_network.getLayerId("conv8_313_rh")
        self.__neural_network.getLayer(class8).blobs = [self.__QUANTIZED_LAB_SPACE.astype("float32")]
        # 2.606 is the softmax temperature rescaling constant used by the
        # reference colorization model -- NOTE(review): taken on trust, verify
        # against the upstream deploy prototxt.
        self.__neural_network.getLayer(conv8).blobs = [np.full([1, 313], 2.606, dtype="float32")]

    def predict_ab_space(self):
        """Run a forward pass and return the predicted ab channels as (H, W, 2)."""
        result = self.__neural_network.forward()
        # Drop the batch dimension and move channels last: (2, H, W) -> (H, W, 2).
        return result[0, :, :, :].transpose((1, 2, 0))

    def populate(self, blob_matrix):
        """Set the network input blob (call before predict_ab_space)."""
        self.__neural_network.setInput(blob_matrix)

    def get_width(self):
        """Return the expected network input width (pixels)."""
        return self.__INPUT_WIDTH

    def get_height(self):
        """Return the expected network input height (pixels)."""
        return self.__INPUT_HEIGHT
| [
"[email protected]"
] | |
cdbbd4c40b880cbbffc579c8ac2750e95e75bb71 | 649bd422025e421d86025743eac324c9b882a2e8 | /exam/1_three-dimensional_atomic_system/dump/phasetrans/temp42_7000.py | 7498f9768395d5a4d3115ae4ba0a49d57c335108 | [] | no_license | scheuclu/atom_class | 36ddee1f6a5995872e858add151c5942c109847c | 0c9a8c63d9b38898c1869fe8983126cef17662cd | refs/heads/master | 2021-01-21T10:52:28.448221 | 2017-03-07T23:04:41 | 2017-03-07T23:04:41 | 83,489,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68,978 | py | ITEM: TIMESTEP
7000
ITEM: NUMBER OF ATOMS
2048
ITEM: BOX BOUNDS pp pp pp
7.2345940138227149e-01 4.6476540598614875e+01
7.2345940138227149e-01 4.6476540598614875e+01
7.2345940138227149e-01 4.6476540598614875e+01
ITEM: ATOMS id type xs ys zs
8 1 0.128552 0.0630246 0.0619357
35 1 0.0601396 0.125834 0.0579319
130 1 0.0620239 0.0611971 0.118196
165 1 0.121561 0.127058 0.122569
389 1 0.122801 7.85632e-05 0.373726
1423 1 0.440806 0.503865 0.436085
4 1 0.00145638 0.0660019 0.0574981
161 1 0.0018685 0.126967 0.120668
61 1 0.878707 0.12832 -0.000217188
509 1 0.873791 0.375808 0.370374
2 1 0.0606639 0.057574 -0.00141547
510 1 0.936259 0.439689 0.379218
12 1 0.246629 0.059001 0.0580648
39 1 0.183194 0.123133 0.0624847
43 1 0.311763 0.117562 0.0645861
134 1 0.189522 0.0648692 0.118709
138 1 0.309126 0.0546212 0.124301
169 1 0.251353 0.122474 0.125306
10 1 0.317942 0.0595849 0.000554554
1437 1 0.873931 0.498099 0.378169
143 1 0.43594 -0.00534059 0.185141
16 1 0.376082 0.059471 0.063345
47 1 0.440619 0.123451 0.058817
142 1 0.437309 0.0639194 0.122341
173 1 0.370194 0.120437 0.123993
177 1 0.503774 0.117399 0.122794
20 1 0.497881 0.0599086 0.0634801
1181 1 0.869593 0.497704 0.124042
24 1 0.622682 0.0627586 0.0613091
51 1 0.560709 0.125569 0.0578838
146 1 0.565034 0.0587568 0.126662
181 1 0.622477 0.122264 0.128479
1171 1 0.56661 0.497058 0.187624
1431 1 0.691599 0.500284 0.440763
28 1 0.74877 0.0616595 0.0627112
55 1 0.687452 0.125176 0.0597502
59 1 0.809676 0.125273 0.057304
150 1 0.692701 0.0637766 0.124013
154 1 0.80358 0.0654529 0.126697
185 1 0.745165 0.130014 0.124371
511 1 0.936169 0.372248 0.434261
1545 1 0.250192 0.497895 0.500736
512 1 0.876242 0.435194 0.439084
32 1 0.872674 0.0631448 0.0528145
63 1 0.938453 0.129104 0.058489
158 1 0.937772 0.0658637 0.120985
189 1 0.868243 0.123613 0.127033
399 1 0.438347 0.00121822 0.43826
40 1 0.123344 0.190071 0.0624893
67 1 0.061922 0.25435 0.0595683
72 1 0.133622 0.312737 0.0624685
162 1 0.0613279 0.188186 0.12354
194 1 0.0579134 0.316056 0.125646
197 1 0.125838 0.250344 0.1246
193 1 0.000868717 0.25327 0.125516
36 1 0.997218 0.19095 0.0645919
1435 1 0.814979 0.497984 0.435272
1281 1 1.00149 0.49997 0.250178
1429 1 0.627468 0.499332 0.386837
44 1 0.252754 0.187648 0.0625167
71 1 0.193338 0.246197 0.065619
75 1 0.31301 0.242155 0.0679734
76 1 0.252183 0.310784 0.0611341
166 1 0.188771 0.18211 0.123487
170 1 0.312798 0.190317 0.128483
198 1 0.192784 0.310755 0.121206
201 1 0.254954 0.251877 0.124522
202 1 0.314553 0.306996 0.124304
37 1 0.122934 0.127788 0.00153675
54 1 0.685453 0.191815 0.00013005
279 1 0.689414 0.00444724 0.314638
48 1 0.375741 0.18364 0.0641309
79 1 0.436483 0.245963 0.0594011
80 1 0.377986 0.309758 0.0622608
174 1 0.438238 0.183728 0.120081
205 1 0.379909 0.245674 0.124493
206 1 0.431713 0.315859 0.126658
52 1 0.502132 0.184773 0.0556531
1553 1 0.500981 0.497773 0.502565
209 1 0.494878 0.252142 0.120503
84 1 0.495935 0.311758 0.0608669
56 1 0.623663 0.183738 0.0663643
83 1 0.559196 0.244661 0.0641338
88 1 0.623248 0.308722 0.0632706
178 1 0.556537 0.183699 0.125315
210 1 0.558979 0.312292 0.120671
213 1 0.623702 0.252075 0.124608
60 1 0.751316 0.190625 0.0595086
87 1 0.6843 0.253515 0.0617974
91 1 0.809441 0.253359 0.0601402
92 1 0.751254 0.317498 0.0631962
182 1 0.682531 0.185316 0.122869
186 1 0.810428 0.189535 0.122964
214 1 0.686531 0.319619 0.122417
217 1 0.744962 0.25211 0.127427
218 1 0.811189 0.315369 0.12395
1169 1 0.500988 0.499828 0.123902
58 1 0.811208 0.189449 0.000213794
68 1 -0.0030746 0.310442 0.0629016
64 1 0.872593 0.188861 0.0671485
95 1 0.9296 0.251326 0.0692817
96 1 0.872728 0.311167 0.0643383
190 1 0.932789 0.187715 0.123904
221 1 0.871726 0.250878 0.126301
222 1 0.936077 0.314049 0.128202
53 1 0.626523 0.122997 0.00239837
481 1 -0.00233232 0.379308 0.371507
99 1 0.0698692 0.370332 0.0629588
104 1 0.132725 0.435108 0.0643846
226 1 0.0575469 0.436034 0.122689
229 1 0.124783 0.375395 0.127106
100 1 -0.00238494 0.442914 0.0633281
225 1 -0.00447408 0.373053 0.129089
135 1 0.186565 0.000400529 0.184849
62 1 0.934627 0.197249 0.0037252
1557 1 0.631374 0.499806 0.499968
281 1 0.753278 0.0020219 0.253043
103 1 0.193817 0.370276 0.0634747
107 1 0.316543 0.368993 0.0635862
108 1 0.254407 0.433809 0.0609888
230 1 0.192594 0.432589 0.121703
233 1 0.253361 0.370163 0.124159
234 1 0.310614 0.434772 0.122718
1303 1 0.68494 0.499832 0.313981
57 1 0.74889 0.124971 -0.00479295
111 1 0.434137 0.371804 0.0640754
112 1 0.379526 0.43471 0.0590255
237 1 0.372144 0.377514 0.125438
238 1 0.437321 0.434043 0.124914
241 1 0.498001 0.368891 0.121795
116 1 0.497304 0.436309 0.0607125
277 1 0.625793 0.000628152 0.249755
502 1 0.685319 0.440824 0.3764
503 1 0.685713 0.377063 0.438696
505 1 0.746164 0.371623 0.375539
411 1 0.816512 0.00157497 0.438578
6 1 0.183284 0.0638795 -0.00197856
115 1 0.557357 0.374096 0.0609541
120 1 0.624987 0.435852 0.062274
242 1 0.559459 0.43416 0.119013
245 1 0.619542 0.374346 0.122522
1305 1 0.748577 0.501699 0.252
119 1 0.686413 0.375393 0.0604713
123 1 0.813632 0.377248 0.06223
124 1 0.749588 0.43348 0.0602707
246 1 0.682314 0.430593 0.125754
249 1 0.750122 0.378295 0.128234
250 1 0.81313 0.433156 0.126269
22 1 0.685842 0.058761 -0.00122662
46 1 0.437046 0.188562 -0.00156703
127 1 0.937209 0.378517 0.0599529
128 1 0.872609 0.441509 0.061635
253 1 0.879914 0.378305 0.124592
254 1 0.936245 0.440139 0.125389
1409 1 0.00116811 0.498174 0.37569
506 1 0.813886 0.435297 0.375083
507 1 0.81274 0.37594 0.433099
508 1 0.75235 0.435584 0.43348
136 1 0.130429 0.0633251 0.18346
163 1 0.0642844 0.12429 0.185431
258 1 0.0626707 0.0626388 0.247322
264 1 0.122467 0.0627412 0.306706
291 1 0.0673849 0.128648 0.305536
293 1 0.126393 0.123899 0.24655
289 1 0.998768 0.12333 0.251277
140 1 0.249732 0.0599185 0.189703
167 1 0.191698 0.123987 0.189693
171 1 0.313517 0.12353 0.189509
262 1 0.183233 0.0589461 0.246267
266 1 0.314834 0.0637815 0.249472
268 1 0.250061 0.0621561 0.311939
295 1 0.186613 0.126324 0.31215
297 1 0.246649 0.126435 0.255237
299 1 0.314374 0.119777 0.31392
497 1 0.503502 0.374056 0.372282
144 1 0.379654 0.0579709 0.1919
175 1 0.436289 0.121758 0.18678
270 1 0.441094 0.0637015 0.252388
272 1 0.376906 0.0686017 0.317739
301 1 0.377446 0.119876 0.253432
303 1 0.440069 0.128176 0.316893
276 1 0.497579 0.0637862 0.313563
499 1 0.560995 0.377338 0.439845
148 1 0.50105 0.0618843 0.1883
305 1 0.499461 0.130923 0.255709
152 1 0.627963 0.0644428 0.192913
179 1 0.564038 0.127673 0.187498
274 1 0.559456 0.0743504 0.250611
280 1 0.626151 0.0650221 0.319268
307 1 0.559634 0.133156 0.312291
309 1 0.623981 0.129246 0.255201
504 1 0.626374 0.436987 0.438067
156 1 0.749008 0.0601101 0.189288
183 1 0.685492 0.129157 0.190995
187 1 0.809609 0.124112 0.186662
278 1 0.689394 0.0642617 0.257972
282 1 0.8052 0.0663072 0.250196
284 1 0.751253 0.0658281 0.315226
311 1 0.688663 0.13078 0.317425
313 1 0.751228 0.128691 0.251998
315 1 0.813729 0.131315 0.313511
626 1 0.56668 0.43788 0.498761
415 1 0.934626 -0.00282726 0.43551
498 1 0.560171 0.43641 0.378076
501 1 0.626643 0.377951 0.377553
260 1 1.00084 0.064715 0.310442
132 1 1.00368 0.0634479 0.184217
160 1 0.868685 0.0661395 0.188977
191 1 0.935907 0.126375 0.181639
286 1 0.932168 0.0640314 0.251438
288 1 0.866742 0.0625634 0.310761
317 1 0.873402 0.127423 0.243852
319 1 0.927993 0.131207 0.307613
3 1 0.0581158 -0.00190146 0.0619134
1177 1 0.751377 0.494014 0.125438
168 1 0.131123 0.184282 0.182038
195 1 0.0619186 0.251811 0.183098
200 1 0.12648 0.314769 0.182843
290 1 0.0682699 0.193436 0.243153
296 1 0.125708 0.197806 0.306016
322 1 0.0627085 0.323108 0.244657
323 1 0.0611777 0.252611 0.309632
325 1 0.124 0.258409 0.246446
328 1 0.123175 0.315897 0.311894
321 1 0.00261834 0.252948 0.246918
292 1 1.0036 0.189879 0.313072
172 1 0.252467 0.187329 0.189602
199 1 0.190551 0.247671 0.18132
203 1 0.314936 0.251209 0.187796
204 1 0.253621 0.313072 0.190657
294 1 0.189229 0.192158 0.247511
298 1 0.310573 0.184121 0.250842
300 1 0.250667 0.187152 0.31468
326 1 0.190392 0.315595 0.245044
327 1 0.187356 0.249585 0.314412
329 1 0.254179 0.247527 0.254073
330 1 0.310352 0.313613 0.252407
331 1 0.314816 0.250537 0.316564
332 1 0.246705 0.314151 0.312047
176 1 0.374258 0.185664 0.191951
207 1 0.435957 0.246665 0.186958
208 1 0.374461 0.313458 0.19019
302 1 0.432612 0.188601 0.25042
304 1 0.374425 0.182642 0.316039
333 1 0.369893 0.245776 0.249567
334 1 0.438595 0.308344 0.248895
335 1 0.433205 0.250308 0.311236
336 1 0.374491 0.313021 0.315189
212 1 0.501217 0.308275 0.184892
180 1 0.499941 0.18775 0.189727
308 1 0.499901 0.198606 0.311885
337 1 0.504367 0.254397 0.250366
340 1 0.497735 0.308694 0.311539
184 1 0.620302 0.185567 0.18841
211 1 0.560284 0.24965 0.182716
216 1 0.624408 0.314685 0.186641
306 1 0.564943 0.187647 0.252178
312 1 0.626893 0.192262 0.317594
338 1 0.565502 0.3151 0.247957
339 1 0.56419 0.256323 0.318893
341 1 0.625277 0.247575 0.249511
344 1 0.626406 0.316473 0.319034
188 1 0.745194 0.188593 0.188862
215 1 0.685737 0.251487 0.188383
219 1 0.81075 0.25178 0.189911
220 1 0.747784 0.3166 0.184695
310 1 0.684747 0.187144 0.256345
314 1 0.81079 0.188943 0.248082
316 1 0.749085 0.191064 0.313833
342 1 0.682474 0.314296 0.247686
343 1 0.688598 0.251542 0.314451
345 1 0.749651 0.250412 0.254062
346 1 0.810808 0.310493 0.248272
347 1 0.810164 0.251835 0.310499
348 1 0.744454 0.312693 0.310947
164 1 1.00328 0.184529 0.184832
324 1 1.00033 0.318132 0.309838
196 1 0.998697 0.313816 0.187448
192 1 0.872146 0.188689 0.183754
223 1 0.940535 0.250959 0.194518
224 1 0.877115 0.312882 0.190439
318 1 0.942039 0.184671 0.246444
320 1 0.875249 0.19185 0.312871
349 1 0.872674 0.252178 0.249771
350 1 0.93938 0.312645 0.253081
351 1 0.942492 0.250996 0.309285
352 1 0.878816 0.311622 0.311129
141 1 0.374978 3.92766e-05 0.122342
495 1 0.431885 0.3731 0.440483
227 1 0.0610198 0.374336 0.184984
232 1 0.126975 0.436617 0.18747
354 1 0.062513 0.437497 0.246811
355 1 0.0567612 0.38038 0.31601
357 1 0.12795 0.374827 0.242928
360 1 0.123694 0.438657 0.31007
356 1 0.992778 0.438884 0.310326
1283 1 0.0658573 0.501334 0.308944
273 1 0.506477 0.00427065 0.246629
494 1 0.441121 0.431742 0.378643
275 1 0.564042 0.00585184 0.308686
231 1 0.190993 0.374897 0.183494
235 1 0.311153 0.37696 0.185077
236 1 0.251098 0.434731 0.190255
358 1 0.188591 0.437991 0.247593
359 1 0.186018 0.37192 0.310038
361 1 0.252116 0.377792 0.251082
362 1 0.319875 0.438181 0.248636
363 1 0.314823 0.373693 0.313649
364 1 0.247823 0.43407 0.315161
493 1 0.373908 0.371581 0.377474
239 1 0.43849 0.369392 0.189544
240 1 0.377445 0.4337 0.183779
365 1 0.376758 0.37242 0.250733
366 1 0.433668 0.435656 0.248918
367 1 0.436666 0.374441 0.311694
368 1 0.376764 0.434919 0.310584
372 1 0.495942 0.434176 0.309584
244 1 0.50491 0.433899 0.187062
1439 1 0.942286 0.503028 0.43994
496 1 0.370026 0.438406 0.436723
287 1 0.936656 0.00398446 0.311895
369 1 0.500541 0.370402 0.247066
243 1 0.560679 0.371185 0.183311
248 1 0.624468 0.435352 0.186798
370 1 0.561684 0.432782 0.251939
371 1 0.562493 0.374042 0.313589
373 1 0.620893 0.376883 0.252521
376 1 0.624071 0.436007 0.314989
30 1 0.936737 0.0623645 0.000342756
129 1 0.00269962 0.000235982 0.121504
500 1 0.50152 0.437805 0.442839
247 1 0.68836 0.377992 0.190801
251 1 0.811435 0.374581 0.18448
252 1 0.753016 0.437142 0.193963
374 1 0.683446 0.436022 0.252517
375 1 0.683907 0.37497 0.314856
377 1 0.745761 0.373462 0.25147
378 1 0.81527 0.437948 0.252875
379 1 0.812838 0.37478 0.309425
380 1 0.747196 0.434409 0.311165
487 1 0.187698 0.37479 0.439347
1179 1 0.811322 0.499754 0.190289
1307 1 0.810746 0.505592 0.312752
263 1 0.183463 -0.00171005 0.312065
153 1 0.749312 0.00464983 0.125944
353 1 -0.00250144 0.379131 0.24883
228 1 -0.000391314 0.437336 0.1879
255 1 0.934083 0.377709 0.191298
256 1 0.871654 0.435546 0.19066
381 1 0.873295 0.37573 0.248809
382 1 0.935019 0.440722 0.248507
383 1 0.931914 0.380095 0.308505
384 1 0.874068 0.441369 0.315725
492 1 0.254135 0.439013 0.43624
1153 1 0.000331684 0.498357 0.124853
21 1 0.620344 0.000766715 -0.00146579
386 1 0.0662106 0.0684488 0.367382
392 1 0.126811 0.0619125 0.433824
419 1 0.0557541 0.124532 0.432022
421 1 0.12483 0.127573 0.369494
45 1 0.378547 0.124434 0.00133342
417 1 0.993974 0.123545 0.369929
1055 1 0.940165 0.504438 0.0616815
390 1 0.186223 0.0581618 0.371363
394 1 0.313466 0.0616664 0.375598
396 1 0.249939 0.0590612 0.437839
423 1 0.182974 0.122141 0.435049
425 1 0.250685 0.12319 0.376158
427 1 0.305581 0.119276 0.439374
491 1 0.311575 0.375249 0.437182
27 1 0.808756 0.00399316 0.0608145
486 1 0.188179 0.437942 0.371363
398 1 0.44097 0.0645954 0.376025
400 1 0.377356 0.0581341 0.434185
429 1 0.374444 0.130744 0.382271
431 1 0.443105 0.126526 0.434328
404 1 0.500676 0.0635706 0.440855
433 1 0.502201 0.12964 0.376101
1167 1 0.442553 0.497192 0.188607
617 1 0.250893 0.379023 0.499613
489 1 0.251782 0.374845 0.376108
1043 1 0.55871 0.497263 0.0568525
402 1 0.557668 0.066272 0.375141
408 1 0.626015 0.06475 0.434459
435 1 0.561246 0.124332 0.441317
437 1 0.62323 0.127912 0.376177
613 1 0.123415 0.374029 0.497484
159 1 0.934087 0.00619878 0.187281
562 1 0.564433 0.190248 0.499584
406 1 0.689208 0.06333 0.377409
410 1 0.812277 0.0631291 0.369976
412 1 0.749065 0.0633817 0.437206
439 1 0.686039 0.122799 0.444428
441 1 0.754034 0.123997 0.371065
443 1 0.809814 0.131784 0.432745
147 1 0.562587 0.000727164 0.181478
388 1 -0.00667217 0.0631736 0.434461
155 1 0.808683 -0.00127362 0.185638
414 1 0.93414 0.0650846 0.365536
416 1 0.875739 0.0701361 0.431391
445 1 0.875632 0.129289 0.369882
447 1 0.938803 0.12923 0.438796
1183 1 0.931201 0.49678 0.189292
1291 1 0.313711 0.498161 0.310527
418 1 0.06194 0.187106 0.376438
424 1 0.123853 0.186693 0.437777
450 1 0.0594206 0.317453 0.371678
451 1 0.0646468 0.248481 0.431968
453 1 0.123632 0.251669 0.367846
456 1 0.127553 0.313207 0.434796
452 1 0.994775 0.312803 0.435383
449 1 1.00268 0.253946 0.372113
391 1 0.185279 -0.00283091 0.433333
422 1 0.187366 0.189477 0.373049
426 1 0.309395 0.185385 0.381033
428 1 0.248359 0.182331 0.440471
454 1 0.186034 0.319678 0.371947
455 1 0.18237 0.249334 0.440373
457 1 0.246698 0.253643 0.373445
458 1 0.313812 0.314132 0.375606
459 1 0.307098 0.24968 0.436605
460 1 0.247253 0.312552 0.439602
1301 1 0.623926 0.499971 0.253137
546 1 0.0569969 0.190214 0.494085
1311 1 0.935712 0.502896 0.317711
430 1 0.439075 0.190808 0.373474
432 1 0.37508 0.192366 0.439258
461 1 0.378866 0.253051 0.377676
462 1 0.438708 0.316425 0.375089
463 1 0.441406 0.249369 0.437825
464 1 0.376712 0.312293 0.441147
436 1 0.498526 0.189639 0.442379
468 1 0.498721 0.317384 0.438036
465 1 0.501899 0.250892 0.376591
434 1 0.566026 0.190338 0.377444
440 1 0.626881 0.187723 0.444149
466 1 0.563432 0.311309 0.381162
467 1 0.559754 0.247968 0.438728
469 1 0.63032 0.248573 0.382204
472 1 0.620388 0.313928 0.441874
271 1 0.439033 -0.00106042 0.318222
473 1 0.753229 0.250453 0.37312
438 1 0.689598 0.1874 0.38137
442 1 0.809045 0.186526 0.371863
474 1 0.815001 0.313861 0.369948
444 1 0.753241 0.189608 0.440551
475 1 0.811177 0.247323 0.436242
476 1 0.751959 0.314415 0.437781
470 1 0.684813 0.315197 0.380985
471 1 0.685407 0.251678 0.446317
420 1 0.997909 0.189232 0.435278
479 1 0.939295 0.252653 0.434769
478 1 0.938104 0.314663 0.369391
477 1 0.873111 0.254639 0.37348
446 1 0.938719 0.19003 0.371371
448 1 0.873682 0.189067 0.431966
480 1 0.876965 0.312689 0.437697
483 1 0.0569922 0.371938 0.438989
484 1 -0.00134206 0.435792 0.440835
485 1 0.122074 0.372907 0.377218
488 1 0.126285 0.436041 0.434475
1549 1 0.381121 0.49942 0.498838
490 1 0.313301 0.436939 0.376498
482 1 0.064173 0.439183 0.372623
157 1 0.868307 0.0069701 0.126431
1433 1 0.753198 0.501021 0.372849
269 1 0.376982 0.00081037 0.251643
285 1 0.870084 0.000646785 0.245479
1419 1 0.316831 0.500513 0.435092
1049 1 0.749367 0.499365 -8.06914e-05
1421 1 0.376461 0.495069 0.373614
1295 1 0.439645 0.498139 0.315578
15 1 0.438754 0.00303228 0.065206
1297 1 0.507068 0.495652 0.248928
1175 1 0.683844 0.495873 0.185368
403 1 0.566975 0.00452225 0.440572
395 1 0.3123 -0.000585075 0.439513
610 1 0.0609063 0.437441 0.498717
593 1 0.497808 0.250396 0.499175
614 1 0.187232 0.440176 0.500463
9 1 0.245108 0.00202299 -0.00365606
1287 1 0.188464 0.500764 0.311751
139 1 0.313057 0.00157957 0.190137
1031 1 0.191881 0.494702 0.0663062
1035 1 0.311949 0.501131 0.0634568
1161 1 0.254855 0.496256 0.122763
29 1 0.870721 -0.000234753 0.000450807
413 1 0.875065 -0.000707176 0.373339
1285 1 0.129312 0.496968 0.250386
133 1 0.122837 0.00111593 0.1228
1045 1 0.621874 0.500224 -0.000807479
283 1 0.812789 -0.00129986 0.312839
1299 1 0.565038 0.497543 0.318349
131 1 0.0663741 0.00684952 0.184368
586 1 0.308124 0.31267 0.496695
514 1 0.0661114 0.0592995 0.497838
126 1 0.936719 0.43723 -0.000539188
618 1 0.315415 0.444037 0.500092
530 1 0.558248 0.0639541 0.503641
573 1 0.880604 0.126956 0.499553
17 1 0.500793 0.000173482 2.98394e-06
525 1 0.375497 -0.00123303 0.497529
1053 1 0.876359 0.502476 0.00249757
605 1 0.871831 0.247236 0.496698
125 1 0.872662 0.373497 -0.00103968
558 1 0.433649 0.195764 0.50476
1025 1 0.999089 0.501939 0.00247765
578 1 0.0633985 0.30943 0.500574
526 1 0.436419 0.0653712 0.500581
1 1 1.00082 -0.00223193 -0.00420077
561 1 0.49684 0.123359 0.500739
101 1 0.133281 0.379761 0.0047027
109 1 0.373151 0.373592 0.00135549
1561 1 0.748435 0.504914 0.502589
566 1 0.688869 0.18698 0.499233
117 1 0.622944 0.368307 0.00276264
622 1 0.436778 0.436051 0.498559
14 1 0.433657 0.0579452 -0.00298516
625 1 0.500982 0.375712 0.500169
634 1 0.810848 0.444363 0.499293
638 1 0.936604 0.436084 0.501887
518 1 0.182865 0.0582678 0.50336
49 1 0.496425 0.125832 -0.00902918
74 1 0.311832 0.311072 0.000372142
38 1 0.189616 0.191698 0.00335144
1041 1 0.496416 0.497982 -0.00655761
34 1 0.0634845 0.187676 -0.00239472
521 1 0.249008 -0.00660895 0.500631
520 1 0.126301 0.0622904 0.562213
547 1 0.0637536 0.122087 0.559946
642 1 0.0597411 0.0575779 0.62446
677 1 0.128431 0.124784 0.622987
516 1 0.000300178 0.063029 0.559377
907 1 0.311435 -0.00124977 0.939829
513 1 -0.00163344 -0.000334918 0.498526
581 1 0.123125 0.248136 0.502039
1943 1 0.687658 0.490706 0.931849
524 1 0.244747 0.0617972 0.562151
551 1 0.189595 0.126865 0.562738
555 1 0.308629 0.128502 0.552369
646 1 0.187072 0.0671946 0.624397
650 1 0.31117 0.0652671 0.620275
681 1 0.249288 0.126762 0.619581
909 1 0.373105 -8.40608e-05 0.870463
527 1 0.437495 0.00415031 0.560414
641 1 -0.00426936 -0.00101645 0.623458
528 1 0.372371 0.0663369 0.564949
559 1 0.434526 0.124524 0.562057
654 1 0.434501 0.0624754 0.627552
685 1 0.375361 0.123588 0.630649
532 1 0.498787 0.0609038 0.566987
601 1 0.753871 0.246728 0.501564
1951 1 0.938844 0.501539 0.937489
689 1 0.491391 0.13202 0.627178
536 1 0.625442 0.0583403 0.564089
563 1 0.559668 0.126134 0.571048
658 1 0.560138 0.0636245 0.628744
693 1 0.623675 0.122774 0.625306
1021 1 0.87753 0.374464 0.869492
669 1 0.879409 0.00673035 0.625305
540 1 0.750762 0.0658896 0.56392
567 1 0.692765 0.127084 0.56069
571 1 0.813331 0.123383 0.565094
662 1 0.686999 0.0633361 0.624982
666 1 0.813392 0.0613319 0.627102
697 1 0.753659 0.121914 0.62917
522 1 0.315828 0.0614469 0.50067
515 1 0.0614112 -0.00214769 0.5607
1022 1 0.943811 0.434816 0.878991
570 1 0.814508 0.187234 0.50202
519 1 0.181519 -0.00444905 0.559169
673 1 -0.0025037 0.125391 0.625122
544 1 0.880289 0.0630278 0.562581
575 1 0.933886 0.126384 0.56237
670 1 0.938856 0.0646148 0.621931
701 1 0.872952 0.130417 0.626798
911 1 0.434057 -0.00551882 0.936008
645 1 0.126139 -6.22041e-06 0.622581
569 1 0.755751 0.126882 0.499361
1929 1 0.25134 0.500876 0.869416
787 1 0.562019 0.00108128 0.808233
552 1 0.120141 0.185039 0.559269
579 1 0.0605794 0.247573 0.561744
584 1 0.127518 0.313878 0.562735
674 1 0.063762 0.184231 0.627348
706 1 0.0645617 0.311034 0.627859
709 1 0.125825 0.248681 0.626123
580 1 0.00375345 0.309811 0.566473
705 1 0.00471667 0.248082 0.626877
647 1 0.185766 0.000853534 0.687077
543 1 0.934572 -0.00209961 0.562554
556 1 0.247262 0.186428 0.554287
583 1 0.185095 0.246007 0.561617
587 1 0.310663 0.250313 0.559886
588 1 0.248719 0.314738 0.560482
678 1 0.191963 0.184628 0.630398
682 1 0.311257 0.196608 0.624712
710 1 0.187606 0.310189 0.625493
713 1 0.248311 0.250239 0.622945
714 1 0.306411 0.315873 0.623844
667 1 0.816034 -8.07411e-05 0.683572
779 1 0.314674 -0.000742022 0.809021
565 1 0.623402 0.120924 0.509187
560 1 0.372191 0.187739 0.562663
591 1 0.436149 0.256897 0.569148
592 1 0.371225 0.312626 0.561652
686 1 0.434821 0.190923 0.622524
717 1 0.373253 0.252869 0.62533
718 1 0.441138 0.311267 0.628673
564 1 0.498065 0.186202 0.560351
721 1 0.504203 0.243046 0.625252
1949 1 0.880026 0.496081 0.876367
899 1 0.0596275 -0.00265895 0.933129
606 1 0.939622 0.310775 0.501205
596 1 0.504397 0.313089 0.56426
568 1 0.628052 0.18754 0.56002
595 1 0.564288 0.249971 0.561845
600 1 0.627396 0.312996 0.56288
690 1 0.563334 0.187674 0.625169
722 1 0.563894 0.309739 0.621208
725 1 0.628234 0.251519 0.623003
538 1 0.810274 0.0665569 0.497744
1947 1 0.818948 0.496339 0.933981
917 1 0.619495 -0.00758049 0.869356
534 1 0.687672 0.0639608 0.50471
553 1 0.242712 0.124325 0.496831
572 1 0.75623 0.186571 0.563757
599 1 0.689184 0.254521 0.560172
603 1 0.812323 0.254262 0.567951
604 1 0.751509 0.31413 0.563518
694 1 0.687257 0.194308 0.624667
698 1 0.814787 0.188258 0.626842
726 1 0.690383 0.31539 0.622634
729 1 0.751656 0.249156 0.627397
730 1 0.816775 0.317287 0.625228
781 1 0.37196 -0.000487247 0.75122
1555 1 0.566193 0.499576 0.566721
548 1 1.00355 0.184696 0.560827
576 1 0.873346 0.189107 0.556639
607 1 0.935946 0.249639 0.559766
608 1 0.875126 0.317565 0.563922
702 1 0.942737 0.191188 0.623407
733 1 0.876628 0.250058 0.623399
734 1 0.93926 0.312204 0.623448
1691 1 0.813175 0.495108 0.685383
1927 1 0.193531 0.500823 0.936511
1921 1 1.00142 0.498997 0.876526
1023 1 0.942281 0.373286 0.943807
611 1 0.0622056 0.373226 0.561208
616 1 0.127972 0.435877 0.558709
738 1 0.0643782 0.435046 0.623199
741 1 0.126444 0.377903 0.626166
612 1 1.00071 0.43367 0.559662
737 1 1.00461 0.374136 0.627225
1665 1 0.999982 0.504268 0.62481
1813 1 0.623152 0.502683 0.757126
615 1 0.184781 0.375875 0.560492
619 1 0.315107 0.381726 0.559613
620 1 0.24927 0.438362 0.56388
742 1 0.18533 0.435363 0.623237
745 1 0.247463 0.375231 0.62415
746 1 0.314461 0.438759 0.627755
1024 1 0.878779 0.435075 0.936839
557 1 0.374978 0.128469 0.496047
623 1 0.440265 0.374161 0.566352
624 1 0.37689 0.439938 0.563171
749 1 0.373267 0.373671 0.626459
750 1 0.436179 0.437938 0.627138
753 1 0.50213 0.377951 0.623058
628 1 0.495838 0.440374 0.562026
602 1 0.810549 0.310486 0.500802
609 1 0.000101122 0.37223 0.501596
793 1 0.751631 0.00468845 0.750142
1014 1 0.689711 0.433178 0.872506
1803 1 0.313972 0.503967 0.810229
627 1 0.561524 0.376224 0.56169
632 1 0.624465 0.438702 0.562261
754 1 0.56103 0.437785 0.626842
757 1 0.625462 0.378826 0.624365
1015 1 0.683745 0.369765 0.940963
1695 1 0.942558 0.499325 0.687239
651 1 0.310134 0.000469171 0.687503
1017 1 0.752565 0.370502 0.871178
1689 1 0.752204 0.498967 0.627151
1563 1 0.812794 0.495945 0.562941
631 1 0.690756 0.380956 0.559491
635 1 0.81278 0.378319 0.560965
636 1 0.752246 0.441226 0.561838
758 1 0.688141 0.434503 0.63175
761 1 0.74688 0.374752 0.624112
762 1 0.813417 0.435558 0.624606
1817 1 0.753055 0.495136 0.754604
65 1 0.00200859 0.250385 1.00042
639 1 0.940187 0.37743 0.562002
640 1 0.871539 0.431237 0.555974
765 1 0.877642 0.379393 0.620556
766 1 0.941883 0.437871 0.624064
1018 1 0.809939 0.436945 0.879502
102 1 0.196933 0.434943 1.00391
1019 1 0.8094 0.372837 0.936057
590 1 0.436266 0.31096 0.506536
1793 1 0.00398661 0.500974 0.752631
1020 1 0.746994 0.431292 0.937424
791 1 0.688489 -0.00277478 0.81048
648 1 0.121043 0.0583957 0.686025
675 1 0.0684466 0.12349 0.685356
770 1 0.063205 0.0587692 0.747939
776 1 0.11747 0.0659846 0.813527
803 1 0.0606525 0.127154 0.809193
805 1 0.123429 0.122972 0.748939
644 1 0.00016984 0.0627566 0.685988
652 1 0.245703 0.0641332 0.686682
679 1 0.185201 0.124277 0.691537
683 1 0.311525 0.125601 0.686411
774 1 0.186385 0.0600975 0.752037
778 1 0.308117 0.0603319 0.749428
780 1 0.251825 0.0585692 0.811563
807 1 0.190514 0.118517 0.813226
809 1 0.251877 0.118137 0.747862
811 1 0.316811 0.122962 0.810243
656 1 0.372819 0.0618856 0.685787
687 1 0.435908 0.12572 0.689402
782 1 0.437846 0.0634155 0.748734
784 1 0.375794 0.0601246 0.812172
813 1 0.370368 0.121438 0.749003
815 1 0.436191 0.127933 0.81464
817 1 0.499408 0.128584 0.75207
788 1 0.500078 0.0618522 0.813826
1551 1 0.436938 0.501767 0.563826
660 1 0.498956 0.0653694 0.687908
664 1 0.628042 0.060037 0.689149
691 1 0.561481 0.122729 0.68379
786 1 0.559025 0.0693465 0.754228
792 1 0.621361 0.0597747 0.811365
819 1 0.565278 0.127886 0.812955
821 1 0.623156 0.126837 0.744307
1567 1 0.941386 0.49989 0.562061
66 1 0.0660371 0.313985 0.999076
668 1 0.749737 0.0610649 0.690401
695 1 0.683013 0.125866 0.686436
699 1 0.817169 0.1315 0.689272
790 1 0.683204 0.0622763 0.751394
794 1 0.812258 0.0627159 0.752424
796 1 0.748102 0.0602179 0.81532
823 1 0.689909 0.123188 0.817338
825 1 0.748606 0.125376 0.750518
827 1 0.80876 0.125655 0.817942
1821 1 0.874825 0.495872 0.750699
69 1 0.125249 0.248129 0.999698
86 1 0.685142 0.314141 1.00111
122 1 0.810043 0.433249 0.997818
772 1 1.00059 0.0594568 0.81091
801 1 1.00431 0.12983 0.746355
672 1 0.876569 0.0629857 0.690109
703 1 0.9387 0.122611 0.696418
798 1 0.940537 0.0564948 0.750826
800 1 0.876128 0.0585347 0.808556
829 1 0.870163 0.127308 0.753362
831 1 0.939635 0.125853 0.809723
1805 1 0.376562 0.498733 0.748155
1687 1 0.68367 0.500508 0.688349
680 1 0.12669 0.1856 0.688286
707 1 0.0633253 0.25204 0.687688
712 1 0.126177 0.314266 0.691734
802 1 0.0664631 0.187561 0.745994
808 1 0.122924 0.186519 0.808223
834 1 0.0619484 0.312386 0.747285
835 1 0.065297 0.251352 0.815673
837 1 0.125487 0.245806 0.753187
840 1 0.124664 0.314769 0.81313
708 1 0.99977 0.31157 0.686638
804 1 1.00139 0.189776 0.811906
833 1 0.00138796 0.244494 0.745417
684 1 0.251274 0.18382 0.68715
711 1 0.184333 0.250916 0.692773
715 1 0.305842 0.25309 0.68687
716 1 0.244893 0.314483 0.686449
806 1 0.188705 0.185398 0.748699
810 1 0.311688 0.184688 0.747696
812 1 0.25195 0.187161 0.807969
838 1 0.186702 0.312494 0.751026
839 1 0.186741 0.248435 0.812802
841 1 0.251791 0.250015 0.748527
842 1 0.313746 0.311862 0.74877
843 1 0.316652 0.251032 0.810324
844 1 0.246749 0.309719 0.813681
688 1 0.372296 0.18701 0.687647
719 1 0.438833 0.250695 0.689864
720 1 0.375556 0.314452 0.685963
814 1 0.434657 0.184714 0.751711
816 1 0.372406 0.188964 0.809005
845 1 0.374838 0.251208 0.745043
846 1 0.434971 0.313388 0.749753
847 1 0.435982 0.24786 0.814117
848 1 0.374752 0.313901 0.815865
849 1 0.503129 0.249545 0.749249
724 1 0.500913 0.307833 0.684878
852 1 0.496462 0.309394 0.813795
820 1 0.499022 0.187023 0.81994
692 1 0.496368 0.18828 0.688687
696 1 0.623857 0.18751 0.682375
723 1 0.566801 0.250268 0.688464
728 1 0.624837 0.314039 0.685603
818 1 0.564782 0.186901 0.745163
824 1 0.623079 0.190147 0.81495
850 1 0.563185 0.314082 0.746421
851 1 0.563701 0.247312 0.807662
853 1 0.621871 0.250029 0.747968
856 1 0.623759 0.312894 0.80908
700 1 0.751874 0.185875 0.689576
727 1 0.686644 0.253752 0.686105
731 1 0.817292 0.253832 0.689158
732 1 0.753364 0.316069 0.685942
822 1 0.686188 0.186782 0.750925
826 1 0.814761 0.186579 0.753176
828 1 0.748174 0.186009 0.81581
854 1 0.687887 0.31412 0.746905
855 1 0.687655 0.254763 0.808153
857 1 0.750548 0.247 0.75398
858 1 0.819442 0.312286 0.753793
859 1 0.814024 0.250077 0.814526
860 1 0.752366 0.312122 0.807777
676 1 1.00697 0.183799 0.689788
836 1 1.00579 0.315527 0.808823
704 1 0.884745 0.188548 0.689552
735 1 0.942048 0.25044 0.685493
736 1 0.876494 0.311842 0.683221
830 1 0.93726 0.184917 0.753346
832 1 0.877538 0.185339 0.812909
861 1 0.884386 0.250874 0.751769
862 1 0.943427 0.312663 0.749472
863 1 0.940148 0.250085 0.815218
864 1 0.878583 0.313626 0.813606
739 1 0.0622092 0.373534 0.688091
744 1 0.124357 0.438936 0.68881
866 1 0.0650629 0.436417 0.75478
867 1 0.0701104 0.379289 0.816857
869 1 0.126206 0.377829 0.749614
872 1 0.129499 0.445246 0.81258
868 1 1.00061 0.442453 0.816866
1671 1 0.188744 0.503331 0.689532
743 1 0.187158 0.378057 0.685469
747 1 0.313026 0.375264 0.690381
748 1 0.250494 0.439247 0.688676
870 1 0.186415 0.442522 0.747624
871 1 0.187845 0.378197 0.811114
873 1 0.250426 0.374033 0.746647
874 1 0.305004 0.441481 0.750714
875 1 0.311699 0.372381 0.810329
876 1 0.250113 0.438763 0.807056
1012 1 0.498439 0.437637 0.935513
751 1 0.437973 0.376309 0.689113
752 1 0.372933 0.436553 0.69199
877 1 0.377224 0.374173 0.750503
878 1 0.439105 0.437632 0.751027
879 1 0.438175 0.37547 0.81251
880 1 0.374 0.435036 0.811447
881 1 0.498929 0.371236 0.751713
1009 1 0.497567 0.379988 0.874053
884 1 0.501037 0.439479 0.813179
756 1 0.498519 0.435553 0.689372
755 1 0.562164 0.373563 0.6853
760 1 0.626924 0.434824 0.687532
882 1 0.563475 0.435785 0.750447
883 1 0.561982 0.372679 0.814654
885 1 0.623516 0.372877 0.747323
888 1 0.620134 0.438167 0.813877
1010 1 0.560817 0.433473 0.876421
93 1 0.868443 0.252651 0.998477
1016 1 0.617944 0.438089 0.93688
759 1 0.689522 0.374028 0.691348
763 1 0.819722 0.376308 0.688517
764 1 0.753774 0.433904 0.685774
886 1 0.688874 0.444246 0.750171
887 1 0.683944 0.379166 0.806586
889 1 0.749103 0.376316 0.750122
890 1 0.81143 0.436833 0.750431
891 1 0.816325 0.376516 0.81578
892 1 0.751739 0.435666 0.811919
865 1 0.00356826 0.376961 0.752409
740 1 0.00682604 0.438569 0.68975
767 1 0.938433 0.374441 0.685172
768 1 0.879194 0.439719 0.690243
893 1 0.883159 0.377043 0.749671
894 1 0.942622 0.440634 0.750491
895 1 0.941523 0.379622 0.811505
896 1 0.877823 0.438165 0.811263
1011 1 0.56077 0.373741 0.935524
898 1 0.0609698 0.0615097 0.875382
904 1 0.120447 0.0581093 0.942375
931 1 0.0636595 0.124825 0.939188
933 1 0.123957 0.123088 0.875454
900 1 0.000386377 0.0590057 0.937003
929 1 0.00271309 0.125172 0.876828
902 1 0.187206 0.061552 0.878412
906 1 0.312871 0.0541039 0.874158
908 1 0.249489 0.0652397 0.939658
935 1 0.188785 0.125157 0.933246
937 1 0.261406 0.118855 0.873699
939 1 0.316576 0.120855 0.937873
910 1 0.436552 0.0570089 0.872924
912 1 0.374345 0.0552077 0.934071
941 1 0.375888 0.117649 0.876264
943 1 0.438097 0.125375 0.934396
945 1 0.499955 0.124359 0.872807
995 1 0.0685415 0.377886 0.944965
633 1 0.752546 0.374752 0.503503
997 1 0.131813 0.379726 0.875133
1013 1 0.628702 0.371961 0.87699
916 1 0.501656 0.0568814 0.936075
914 1 0.563876 0.0614942 0.872777
920 1 0.62427 0.0585178 0.934028
947 1 0.565645 0.126302 0.936471
949 1 0.623355 0.122167 0.871268
82 1 0.560916 0.312546 0.999533
1667 1 0.0600651 0.500149 0.689391
994 1 0.0627763 0.436605 0.874542
1008 1 0.372654 0.440828 0.94024
50 1 0.564523 0.186358 0.996303
996 1 1.00125 0.43778 0.940209
918 1 0.686881 0.0608146 0.875695
922 1 0.812824 0.0620989 0.880622
924 1 0.743826 0.0616869 0.935371
951 1 0.68006 0.123081 0.938462
953 1 0.74698 0.12228 0.877994
955 1 0.818305 0.130552 0.937804
795 1 0.806589 -0.00104722 0.813652
1007 1 0.434619 0.375897 0.93433
926 1 0.937618 0.0651643 0.873001
928 1 0.879134 0.0602827 0.935861
957 1 0.876136 0.124457 0.867919
959 1 0.933601 0.129011 0.942167
960 1 0.876117 0.189599 0.934769
930 1 0.0654225 0.192273 0.873897
936 1 0.127544 0.185882 0.939264
962 1 0.0676279 0.31782 0.881195
963 1 0.0673427 0.248322 0.935481
965 1 0.12535 0.24935 0.8765
968 1 0.131094 0.319385 0.935034
932 1 0.000722945 0.18932 0.932045
961 1 0.00405908 0.257291 0.870709
964 1 1.00093 0.315465 0.941237
993 1 1.00394 0.379577 0.872604
1004 1 0.254338 0.44225 0.933187
934 1 0.191397 0.184664 0.866218
938 1 0.314213 0.186622 0.872318
940 1 0.250644 0.182045 0.935574
966 1 0.188468 0.312967 0.876532
967 1 0.189554 0.245098 0.931564
969 1 0.251171 0.248454 0.872866
970 1 0.307875 0.312742 0.871282
971 1 0.310955 0.247725 0.936203
972 1 0.252218 0.313019 0.93388
1005 1 0.369061 0.377314 0.875801
998 1 0.188793 0.441007 0.877128
942 1 0.43498 0.187172 0.874811
944 1 0.375273 0.187141 0.933489
973 1 0.371727 0.251523 0.875585
974 1 0.435227 0.313417 0.877166
975 1 0.434344 0.249781 0.933501
976 1 0.373777 0.313785 0.935506
980 1 0.496952 0.313709 0.939395
948 1 0.501578 0.190916 0.936855
977 1 0.502456 0.249591 0.870085
1003 1 0.31368 0.375996 0.939798
1002 1 0.314949 0.439872 0.868943
946 1 0.560274 0.187733 0.874198
952 1 0.626441 0.194318 0.934311
978 1 0.564867 0.310884 0.874651
979 1 0.565392 0.251714 0.927466
981 1 0.625241 0.251134 0.872851
984 1 0.624693 0.312486 0.933649
1006 1 0.429391 0.437771 0.87494
1001 1 0.250399 0.374313 0.874991
1000 1 0.129054 0.437194 0.942808
988 1 0.748066 0.313303 0.934697
987 1 0.810337 0.254823 0.936739
986 1 0.818498 0.31344 0.870634
985 1 0.749409 0.252409 0.876624
983 1 0.690082 0.252305 0.933825
982 1 0.687305 0.314678 0.871239
950 1 0.687563 0.186777 0.875912
954 1 0.810699 0.192221 0.878795
956 1 0.7482 0.186216 0.93898
999 1 0.191952 0.381417 0.942198
990 1 0.941229 0.319355 0.878351
991 1 0.94042 0.249364 0.933077
958 1 0.941957 0.186134 0.872053
989 1 0.87638 0.249539 0.874672
992 1 0.875025 0.315512 0.935102
771 1 0.0631879 0.000258648 0.812841
661 1 0.622953 0.000149141 0.630295
1807 1 0.434668 0.499424 0.812502
1811 1 0.560257 0.501111 0.814438
925 1 0.872534 0.00104581 0.873342
897 1 0.00178828 -0.00420825 0.87658
643 1 0.0557384 0.00117441 0.68985
1941 1 0.623209 0.497903 0.870316
1677 1 0.378172 0.501107 0.631196
915 1 0.569654 -0.0019318 0.936163
637 1 0.876363 0.369817 0.500739
799 1 0.936832 -0.00502061 0.814754
923 1 0.809612 0.00155874 0.938856
90 1 0.808423 0.317483 1.00115
1559 1 0.688949 0.501999 0.557994
81 1 0.502191 0.249465 0.999055
1799 1 0.188658 0.503169 0.811006
769 1 0.996272 -0.00292914 0.75051
98 1 0.0691753 0.439219 1.00298
1933 1 0.375847 0.503282 0.874504
927 1 0.934333 -0.00324277 0.937027
671 1 0.939331 0.00281526 0.686074
1925 1 0.127275 0.502585 0.878281
1669 1 0.121566 0.501075 0.627358
1693 1 0.87703 0.497016 0.620874
919 1 0.686309 -0.000328338 0.934096
1937 1 0.49023 0.498245 0.873764
913 1 0.507089 -0.00674374 0.874027
78 1 0.43491 0.310474 0.994507
77 1 0.371041 0.249063 1.00079
33 1 -0.00165645 0.133325 1.00004
110 1 0.435972 0.438737 0.99627
113 1 0.494602 0.377584 1
94 1 0.936303 0.307356 0.999617
41 1 0.251755 0.124592 1.00533
118 1 0.683616 0.436094 0.998945
97 1 0.00560698 0.378789 1.00806
106 1 0.312882 0.438717 1
70 1 0.189944 0.304892 0.999267
26 1 0.805038 0.0647419 0.996867
621 1 0.376948 0.377028 0.503238
18 1 0.559888 0.0599769 0.998551
598 1 0.681122 0.317578 0.501327
89 1 0.747523 0.252204 1.00062
42 1 0.316564 0.182952 1.00244
85 1 0.622137 0.255068 0.997024
630 1 0.695816 0.439623 0.498825
73 1 0.253373 0.247813 0.998789
585 1 0.243574 0.244869 0.498151
114 1 0.561095 0.43401 1.00093
629 1 0.627539 0.378052 0.502852
121 1 0.747578 0.375263 0.99736
105 1 0.253632 0.374592 0.998494
545 1 0.00224336 0.120531 0.499839
554 1 0.310558 0.191986 0.499791
589 1 0.368939 0.251449 0.497076
577 1 1.00296 0.252804 0.501549
597 1 0.62344 0.249805 0.500152
574 1 0.940322 0.191154 0.498889
549 1 0.122273 0.122462 0.498432
582 1 0.190704 0.312002 0.50105
594 1 0.557103 0.311726 0.498403
550 1 0.181297 0.186305 0.496279
542 1 0.93355 0.0589477 0.501781
537 1 0.756154 0.0039511 0.502387
1032 1 0.129447 0.564322 0.0679548
1059 1 0.065999 0.626819 0.064
1154 1 0.0638823 0.559838 0.129117
1189 1 0.12774 0.627736 0.127726
1028 1 0.00184071 0.564103 0.0669725
1185 1 -0.00300657 0.623766 0.132565
1122 1 0.0568578 0.939076 -0.00352094
1036 1 0.247358 0.563132 0.0622339
1063 1 0.185701 0.633572 0.0612891
1067 1 0.307523 0.622013 0.0648938
1158 1 0.192615 0.557573 0.124547
1162 1 0.313206 0.560507 0.12716
1193 1 0.249082 0.626718 0.122836
1411 1 0.0642662 0.494766 0.435891
1137 1 0.495695 0.87408 0.00136876
1138 1 0.562007 0.936185 0.00246663
11 1 0.312865 0.998492 0.0580585
1415 1 0.188445 0.494647 0.436692
1040 1 0.375836 0.562654 0.0657666
1071 1 0.437671 0.627839 0.0610853
1166 1 0.439196 0.562685 0.127524
1197 1 0.378144 0.620969 0.120022
1044 1 0.498533 0.566671 0.0601403
1201 1 0.502375 0.62264 0.122422
387 1 0.0617609 1.00075 0.433522
149 1 0.627658 1.00479 0.126844
1048 1 0.627123 0.559681 0.0571268
1075 1 0.560693 0.621334 0.0593583
1170 1 0.562692 0.563241 0.122342
1205 1 0.627603 0.62232 0.122191
1052 1 0.752065 0.563119 0.0576874
1079 1 0.684753 0.628129 0.0630683
1083 1 0.810035 0.628497 0.0625762
1174 1 0.691674 0.563976 0.128284
1178 1 0.808608 0.558432 0.124335
1209 1 0.747243 0.623346 0.123094
1056 1 0.871603 0.564614 0.06195
1087 1 0.936127 0.622234 0.070371
1182 1 0.939459 0.556904 0.127333
1213 1 0.866679 0.629124 0.123817
13 1 0.375001 0.998116 0.00145028
259 1 0.0617089 1.0039 0.311156
1064 1 0.123668 0.689696 0.0661475
1091 1 0.0605113 0.747805 0.0638264
1096 1 0.123586 0.811312 0.0577846
1186 1 0.0615333 0.684045 0.129313
1218 1 0.0612658 0.805914 0.128762
1221 1 0.123651 0.748079 0.126999
1060 1 1.00241 0.683239 0.0669203
1092 1 1.00335 0.806189 0.0681945
1082 1 0.808654 0.691063 0.00570046
409 1 0.753507 0.999067 0.376979
137 1 0.245964 1.0013 0.128529
1068 1 0.246454 0.688429 0.0651924
1095 1 0.184921 0.752953 0.0634163
1099 1 0.310777 0.747783 0.0649566
1100 1 0.249842 0.811737 0.0632794
1190 1 0.186622 0.689196 0.129986
1194 1 0.307283 0.683845 0.127434
1222 1 0.187791 0.806994 0.129375
1225 1 0.252474 0.749581 0.127371
1226 1 0.309681 0.81007 0.120234
1101 1 0.371576 0.744219 -0.00363087
1293 1 0.378664 0.496974 0.246086
265 1 0.247646 0.996727 0.249661
1072 1 0.369296 0.690693 0.0619638
1103 1 0.433613 0.754171 0.0581964
1104 1 0.371264 0.80984 0.0589139
1198 1 0.440144 0.688379 0.12279
1229 1 0.374687 0.746731 0.120167
1230 1 0.436119 0.815155 0.124276
1076 1 0.502079 0.685228 0.0593723
1108 1 0.500755 0.813179 0.0634006
1233 1 0.499304 0.752537 0.125264
1080 1 0.621357 0.686178 0.0637758
1107 1 0.559168 0.752617 0.0645046
1112 1 0.626793 0.810939 0.0684798
1202 1 0.55926 0.686201 0.123938
1234 1 0.564318 0.805598 0.128323
1237 1 0.625215 0.74691 0.128546
1537 1 0.00274122 0.500286 0.499164
393 1 0.251358 0.998638 0.371931
1047 1 0.689216 0.498782 0.0665484
1051 1 0.809238 0.496261 0.0564394
1084 1 0.743047 0.685321 0.0674463
1111 1 0.684792 0.746714 0.0678823
1115 1 0.809065 0.757232 0.0670527
1116 1 0.745065 0.81423 0.0567814
1206 1 0.684954 0.685768 0.128552
1210 1 0.80764 0.697231 0.120688
1238 1 0.688746 0.819188 0.125594
1241 1 0.745725 0.756149 0.125884
1242 1 0.812065 0.811817 0.125665
1106 1 0.56675 0.813522 0.00640689
1217 1 0.997848 0.742219 0.126993
1427 1 0.561194 0.499119 0.438817
1088 1 0.873145 0.690694 0.0640473
1119 1 0.937519 0.745661 0.0669316
1120 1 0.876238 0.808928 0.0605601
1214 1 0.934427 0.684117 0.127624
1245 1 0.878051 0.755956 0.125821
1246 1 0.943115 0.810458 0.125687
1098 1 0.309169 0.811021 -0.00131428
1123 1 0.0579373 0.869975 0.0618113
1128 1 0.113657 0.936044 0.0618047
1250 1 0.0648534 0.938921 0.126193
1253 1 0.124408 0.868991 0.123931
1173 1 0.619513 0.499267 0.123325
1249 1 0.999987 0.87313 0.12364
1159 1 0.193651 0.494553 0.184425
1155 1 0.0640484 0.496249 0.183193
1127 1 0.185348 0.876413 0.0609179
1131 1 0.314633 0.87663 0.063058
1132 1 0.253834 0.934252 0.0645236
1254 1 0.178287 0.938467 0.121771
1257 1 0.250128 0.876505 0.124719
1258 1 0.317783 0.938107 0.127037
401 1 0.506645 0.998148 0.380388
1577 1 0.25055 0.617234 0.498654
1105 1 0.508277 0.750279 -0.000585374
1536 1 0.875048 0.937378 0.432266
151 1 0.690986 1.00547 0.190727
1535 1 0.94019 0.874544 0.439765
1135 1 0.438441 0.871127 0.0615206
1136 1 0.376324 0.937716 0.0621543
1261 1 0.372431 0.869888 0.12177
1262 1 0.435393 0.938659 0.124905
1140 1 0.495048 0.935859 0.0664102
145 1 0.499576 0.99803 0.123386
1534 1 0.941382 0.939124 0.376772
1089 1 0.999522 0.749082 0.00761306
1265 1 0.496639 0.875521 0.126324
1139 1 0.559927 0.878274 0.0649687
1144 1 0.628963 0.939348 0.0593487
1266 1 0.566951 0.940735 0.123855
1269 1 0.630127 0.879887 0.124949
1542 1 0.188165 0.559374 0.496681
1533 1 0.871457 0.877662 0.372772
1143 1 0.69543 0.880059 0.0648538
1147 1 0.81296 0.871707 0.0636934
1148 1 0.754757 0.940969 0.0682134
1270 1 0.691778 0.939808 0.125388
1273 1 0.756782 0.87571 0.129919
1274 1 0.814875 0.936912 0.126764
1538 1 0.065502 0.56284 0.503035
1585 1 0.501206 0.627062 0.503062
1124 1 1.00253 0.93757 0.0629476
1151 1 0.938267 0.876952 0.0599486
1152 1 0.873696 0.932712 0.0563983
1277 1 0.873313 0.873037 0.12424
1278 1 0.934328 0.939965 0.124381
1505 1 0.998236 0.876582 0.373352
31 1 0.940728 0.997651 0.0627973
1289 1 0.253961 0.49215 0.251077
1134 1 0.436736 0.935434 0.00108923
1130 1 0.310389 0.937918 -0.000371015
1160 1 0.126102 0.55731 0.183333
1187 1 0.0616289 0.623805 0.193741
1282 1 0.0627952 0.559055 0.246778
1288 1 0.128266 0.562475 0.318863
1315 1 0.067002 0.620112 0.317738
1317 1 0.12865 0.616831 0.246088
1532 1 0.7459 0.940293 0.439789
7 1 0.190957 0.995196 0.0612206
1164 1 0.254693 0.560065 0.183935
1191 1 0.188451 0.619426 0.182868
1195 1 0.316795 0.622385 0.190532
1286 1 0.184161 0.555544 0.249397
1290 1 0.315888 0.5546 0.246638
1292 1 0.250128 0.558142 0.304946
1319 1 0.193831 0.619631 0.311499
1321 1 0.248279 0.619908 0.248559
1323 1 0.312458 0.614506 0.307686
1531 1 0.807644 0.878728 0.43354
1168 1 0.375935 0.56493 0.184865
1199 1 0.435925 0.622812 0.18377
1294 1 0.436437 0.560536 0.247765
1296 1 0.377725 0.557376 0.310427
1325 1 0.378765 0.620165 0.251528
1327 1 0.434371 0.626753 0.313945
1300 1 0.502022 0.567076 0.311005
1329 1 0.496665 0.620482 0.248171
1172 1 0.504268 0.554552 0.192208
1176 1 0.623497 0.560414 0.188936
1203 1 0.561815 0.621642 0.184865
1298 1 0.564878 0.565347 0.250857
1304 1 0.624547 0.560472 0.312018
1331 1 0.571353 0.620591 0.313301
1333 1 0.621637 0.627023 0.248765
1180 1 0.750223 0.560981 0.188907
1207 1 0.686732 0.631335 0.196063
1211 1 0.807162 0.627987 0.188304
1302 1 0.685908 0.559729 0.249207
1306 1 0.81333 0.561511 0.249241
1308 1 0.74785 0.55916 0.311064
1335 1 0.682686 0.623901 0.314958
1337 1 0.7479 0.622689 0.247766
1339 1 0.807263 0.621769 0.312551
1530 1 0.815519 0.93791 0.371811
1529 1 0.744429 0.874888 0.377365
1284 1 1.00325 0.560132 0.313919
1313 1 0.99444 0.619386 0.247625
1156 1 0.997862 0.556929 0.186536
1184 1 0.870073 0.561268 0.187015
1215 1 0.932409 0.624627 0.185824
1310 1 0.937144 0.55467 0.253493
1312 1 0.867744 0.561356 0.313158
1341 1 0.869964 0.624458 0.250174
1343 1 0.935687 0.62515 0.312486
1192 1 0.124319 0.68462 0.195546
1219 1 0.0611993 0.746689 0.193226
1224 1 0.123311 0.80656 0.190648
1314 1 0.0602744 0.68823 0.256714
1320 1 0.121298 0.681717 0.317325
1346 1 0.0603237 0.810089 0.248326
1347 1 0.0574385 0.74881 0.316868
1349 1 0.116419 0.747347 0.251498
1352 1 0.117493 0.813202 0.311719
1188 1 0.994317 0.68335 0.194437
1196 1 0.255397 0.682745 0.190157
1223 1 0.191332 0.753857 0.191537
1227 1 0.316263 0.744404 0.182329
1228 1 0.251226 0.817824 0.188002
1318 1 0.188203 0.686135 0.251245
1322 1 0.311831 0.686582 0.256861
1324 1 0.252606 0.682484 0.314437
1350 1 0.186078 0.813149 0.251171
1351 1 0.184771 0.749112 0.310438
1353 1 0.25257 0.74883 0.251256
1354 1 0.312481 0.810752 0.255281
1355 1 0.314234 0.748812 0.313507
1356 1 0.248734 0.813998 0.310918
1200 1 0.374697 0.682923 0.179267
1231 1 0.432552 0.751237 0.184516
1232 1 0.373883 0.810593 0.190891
1326 1 0.436553 0.688016 0.245245
1328 1 0.373936 0.686144 0.312932
1357 1 0.369405 0.750514 0.250051
1358 1 0.435435 0.819907 0.250203
1359 1 0.43957 0.747358 0.311792
1360 1 0.37884 0.813227 0.317067
1332 1 0.499735 0.685709 0.311179
1236 1 0.496699 0.813909 0.18815
1361 1 0.49925 0.752695 0.243971
1204 1 0.498963 0.688948 0.187917
1364 1 0.507286 0.807512 0.310867
1208 1 0.625603 0.68742 0.186852
1235 1 0.567012 0.741454 0.18801
1240 1 0.625887 0.817558 0.186774
1330 1 0.559222 0.681315 0.251794
1336 1 0.626335 0.688338 0.314945
1362 1 0.568381 0.805431 0.246666
1363 1 0.568148 0.747584 0.311156
1365 1 0.628372 0.745542 0.250498
1368 1 0.623919 0.810968 0.313899
1212 1 0.750964 0.689023 0.183238
1239 1 0.689738 0.747947 0.181266
1243 1 0.813361 0.753195 0.184423
1244 1 0.753384 0.811166 0.188671
1334 1 0.690376 0.686501 0.252511
1338 1 0.809832 0.693499 0.246861
1340 1 0.747506 0.685297 0.315755
1366 1 0.688863 0.809906 0.244947
1367 1 0.687346 0.74916 0.318959
1369 1 0.746833 0.745273 0.249756
1370 1 0.809847 0.810257 0.254713
1371 1 0.813035 0.75098 0.314072
1372 1 0.745088 0.811869 0.312869
1345 1 -0.00171173 0.74735 0.253822
1316 1 -0.00204793 0.680046 0.31296
1220 1 0.997541 0.805535 0.188902
1348 1 0.000347573 0.811331 0.308845
1216 1 0.870122 0.685188 0.189318
1247 1 0.935158 0.751788 0.187414
1248 1 0.875802 0.815454 0.185604
1342 1 0.930317 0.688627 0.251715
1344 1 0.86458 0.684699 0.313957
1373 1 0.873267 0.753055 0.246676
1374 1 0.936566 0.811708 0.251651
1375 1 0.932638 0.74658 0.311733
1376 1 0.870324 0.811682 0.316764
1527 1 0.684956 0.874413 0.43845
1649 1 0.500266 0.876218 0.503793
1251 1 0.0596337 0.881204 0.190585
1256 1 0.125533 0.936617 0.186781
1378 1 0.0602787 0.936733 0.249866
1379 1 0.0619643 0.87677 0.310825
1381 1 0.128514 0.870631 0.252717
1384 1 0.123104 0.937454 0.31408
1142 1 0.688242 0.943279 -0.00430617
19 1 0.564948 1.00108 0.0625408
1255 1 0.188193 0.874447 0.183769
1259 1 0.312233 0.872397 0.186621
1260 1 0.250139 0.934915 0.183279
1382 1 0.186265 0.937355 0.250154
1383 1 0.187695 0.878919 0.312222
1385 1 0.246977 0.874594 0.249211
1386 1 0.3083 0.935437 0.250487
1387 1 0.313978 0.874054 0.312282
1388 1 0.25045 0.932903 0.311418
1425 1 0.497616 0.505532 0.372742
1578 1 0.320517 0.690088 0.500429
267 1 0.314801 1.0004 0.30963
1565 1 0.877383 0.500338 0.497095
1027 1 0.0636521 0.501528 0.0633553
1263 1 0.436487 0.879462 0.188967
1264 1 0.37378 0.937284 0.192624
1389 1 0.374658 0.876513 0.253277
1390 1 0.439902 0.942582 0.253061
1391 1 0.440308 0.876136 0.309845
1392 1 0.378832 0.934785 0.312458
1393 1 0.501311 0.878068 0.250363
1268 1 0.50057 0.937092 0.187358
1121 1 0.000931298 0.876775 -0.000444487
1526 1 0.685774 0.939056 0.374488
1396 1 0.506081 0.938747 0.31376
1267 1 0.560467 0.87086 0.188108
1272 1 0.628189 0.942257 0.187773
1394 1 0.563297 0.936517 0.253209
1395 1 0.56591 0.871063 0.313836
1397 1 0.622549 0.874006 0.250888
1400 1 0.626937 0.936152 0.306482
1524 1 0.504095 0.938557 0.439916
1521 1 0.503583 0.872555 0.377931
1271 1 0.683921 0.882536 0.18677
1275 1 0.813579 0.872706 0.186269
1276 1 0.753689 0.938944 0.189248
1398 1 0.69271 0.938314 0.250709
1399 1 0.688244 0.875862 0.314422
1401 1 0.748666 0.873279 0.245147
1402 1 0.809533 0.937687 0.250261
1403 1 0.810485 0.879447 0.311076
1404 1 0.749929 0.936819 0.312378
1377 1 0.998229 0.868202 0.246924
1380 1 0.998769 0.944859 0.313537
1252 1 0.996026 0.943444 0.184854
1279 1 0.939775 0.875221 0.185382
1280 1 0.874878 0.93962 0.192931
1405 1 0.87272 0.874657 0.246657
1406 1 0.937086 0.940356 0.249485
1407 1 0.940817 0.874847 0.307201
1408 1 0.876614 0.935037 0.309486
1525 1 0.62693 0.873325 0.380199
1410 1 0.0645601 0.560975 0.377657
1416 1 0.123823 0.558842 0.442744
1443 1 0.0635709 0.62838 0.436311
1445 1 0.126159 0.622809 0.380208
1528 1 0.6258 0.945438 0.439142
385 1 -0.000680605 1.00753 0.375186
1414 1 0.194005 0.559137 0.376235
1418 1 0.314431 0.559106 0.370721
1420 1 0.253536 0.55804 0.437865
1447 1 0.191181 0.619439 0.437357
1449 1 0.248675 0.624266 0.376787
1451 1 0.311855 0.619832 0.437469
1645 1 0.380315 0.875416 0.500985
1417 1 0.251869 0.500601 0.371844
1422 1 0.434192 0.566885 0.376307
1424 1 0.373821 0.559184 0.439906
1453 1 0.367867 0.622561 0.373549
1455 1 0.437766 0.630434 0.441122
1428 1 0.499676 0.567707 0.444763
1457 1 0.505572 0.620497 0.374826
1641 1 0.24709 0.877911 0.501195
1309 1 0.872251 0.497569 0.251179
1522 1 0.569118 0.935721 0.378685
1426 1 0.562405 0.558817 0.379512
1432 1 0.624465 0.564353 0.445163
1459 1 0.561808 0.621251 0.440574
1461 1 0.626663 0.618346 0.383709
1566 1 0.939501 0.562537 0.499474
1523 1 0.565538 0.876709 0.438157
257 1 -0.00153054 1.00149 0.246902
1430 1 0.68494 0.558457 0.380119
1434 1 0.817147 0.56225 0.376574
1436 1 0.753579 0.564423 0.435929
1463 1 0.68955 0.621136 0.438966
1465 1 0.746646 0.618687 0.374636
1467 1 0.815276 0.622406 0.434169
1412 1 1.00151 0.56062 0.43659
1441 1 1.00086 0.624247 0.377991
1438 1 0.937741 0.56248 0.376085
1440 1 0.876753 0.562304 0.441087
1469 1 0.874524 0.622354 0.376173
1471 1 0.938048 0.625026 0.437387
1039 1 0.434953 0.49796 0.0619928
1558 1 0.689146 0.569193 0.505054
1442 1 0.0613448 0.689095 0.378236
1448 1 0.127679 0.684015 0.438593
1474 1 0.0619438 0.809021 0.376606
1475 1 0.06491 0.745193 0.436822
1477 1 0.124914 0.749643 0.37164
1480 1 0.129076 0.812442 0.437572
1589 1 0.6232 0.629576 0.503599
1550 1 0.44053 0.568371 0.502282
1446 1 0.188227 0.684199 0.371806
1450 1 0.307469 0.690968 0.37656
1452 1 0.250593 0.690687 0.442655
1478 1 0.189877 0.816978 0.372272
1479 1 0.188878 0.752188 0.437476
1481 1 0.248725 0.75104 0.374027
1482 1 0.311486 0.81487 0.373052
1483 1 0.312466 0.750173 0.438185
1484 1 0.251418 0.812381 0.442054
407 1 0.688793 1.00338 0.437579
5 1 0.128202 0.998861 0.00527175
1519 1 0.437286 0.872613 0.432885
1520 1 0.379077 0.935246 0.431521
1454 1 0.440806 0.686829 0.376899
1456 1 0.376788 0.68987 0.43371
1485 1 0.378974 0.750195 0.375201
1486 1 0.439672 0.813312 0.368887
1487 1 0.436297 0.754861 0.438439
1488 1 0.370794 0.812649 0.440496
1460 1 0.494308 0.691536 0.44553
1518 1 0.445184 0.937741 0.375389
1492 1 0.502697 0.813982 0.442039
1489 1 0.502637 0.753648 0.377926
1491 1 0.561929 0.749732 0.437765
1493 1 0.623656 0.745901 0.379897
1458 1 0.564386 0.684176 0.381251
1464 1 0.625446 0.683829 0.44346
1496 1 0.6262 0.815004 0.442584
1490 1 0.564581 0.810939 0.377853
1495 1 0.685901 0.753305 0.441093
1497 1 0.744769 0.752087 0.378547
1498 1 0.80651 0.814722 0.376882
1499 1 0.809532 0.750574 0.437487
1500 1 0.747672 0.811507 0.438746
1494 1 0.68701 0.810803 0.37746
1468 1 0.748458 0.684994 0.434757
1466 1 0.806011 0.687297 0.372401
1462 1 0.686108 0.685306 0.379704
1546 1 0.314491 0.554289 0.498041
261 1 0.126306 0.993448 0.245125
1473 1 0.997465 0.739704 0.374786
1476 1 -0.00209529 0.809441 0.4334
1444 1 0.00130467 0.687016 0.440269
1470 1 0.935126 0.681205 0.377165
1501 1 0.87306 0.745096 0.372396
1503 1 0.930531 0.754356 0.433891
1472 1 0.868859 0.687697 0.438036
1504 1 0.871419 0.815089 0.431671
1502 1 0.940629 0.81356 0.372717
1517 1 0.379293 0.875828 0.373803
397 1 0.374158 0.998187 0.372268
1508 1 0.00220164 0.936949 0.437895
1509 1 0.11544 0.874937 0.375653
1506 1 0.0575626 0.935292 0.373989
1507 1 0.0607239 0.870375 0.435649
1512 1 0.121771 0.932397 0.437666
1614 1 0.439896 0.813154 0.498865
1157 1 0.125848 0.49379 0.123785
1515 1 0.313101 0.872877 0.438802
1165 1 0.377949 0.496537 0.127674
1516 1 0.24878 0.935245 0.434197
1586 1 0.559486 0.688992 0.502592
1514 1 0.314256 0.938003 0.374854
1513 1 0.251454 0.875638 0.371058
1510 1 0.18372 0.935901 0.375796
1511 1 0.188791 0.87382 0.433454
1621 1 0.623913 0.749788 0.502039
1569 1 1.00663 0.627446 0.497558
405 1 0.62405 0.996209 0.373036
1541 1 0.123932 0.496277 0.503142
1413 1 0.127822 0.498446 0.373081
529 1 0.497253 1.00346 0.497979
23 1 0.685591 1.00287 0.0607515
1163 1 0.315524 0.499005 0.187079
1573 1 0.12772 0.623099 0.499646
1642 1 0.314607 0.93604 0.497585
1646 1 0.439438 0.939413 0.49591
1602 1 0.0642046 0.803644 0.494233
1625 1 0.751892 0.753175 0.49965
1653 1 0.628623 0.878066 0.503309
1593 1 0.756251 0.627023 0.498618
1594 1 0.804427 0.688925 0.491615
1598 1 0.937688 0.687348 0.499568
1618 1 0.564214 0.814061 0.500653
25 1 0.748045 0.997944 -0.000893659
1554 1 0.562791 0.563282 0.502278
1066 1 0.311601 0.685443 0.00358461
1601 1 -0.00155632 0.749701 0.493638
1141 1 0.626821 0.870343 -0.00167437
1038 1 0.432873 0.561138 0.00277681
1658 1 0.81004 0.937559 0.496847
1606 1 0.185462 0.812422 0.493559
1613 1 0.37493 0.75436 0.503365
1581 1 0.375191 0.621435 0.499298
1094 1 0.190503 0.810796 0.00348395
1605 1 0.126915 0.745867 0.498692
1145 1 0.748883 0.873709 -0.000404696
1630 1 0.933621 0.809562 0.495843
1133 1 0.370208 0.873378 -0.00333735
1657 1 0.746851 0.87284 0.497704
1113 1 0.753165 0.749935 0.00339678
1102 1 0.434825 0.808964 -0.00652327
1070 1 0.44369 0.690203 -0.00219257
1046 1 0.687189 0.566405 -0.00365099
1074 1 0.563923 0.688823 0.00304393
541 1 0.875814 0.998728 0.499912
1073 1 0.498899 0.626645 0.000501698
1058 1 0.0654355 0.689991 0.00233811
1544 1 0.128267 0.557403 0.56296
1571 1 0.0647708 0.627006 0.562034
1666 1 0.0632546 0.566131 0.62109
1701 1 0.123763 0.624237 0.625193
1570 1 0.0635621 0.691062 0.501707
1697 1 1.00022 0.626657 0.621903
2020 1 0.00111242 0.93416 0.934626
1629 1 0.871764 0.753973 0.499508
1931 1 0.311613 0.505599 0.938439
1548 1 0.248756 0.5575 0.56539
1575 1 0.18652 0.623414 0.559859
1579 1 0.313261 0.622004 0.562715
1670 1 0.183381 0.561781 0.624348
1674 1 0.317303 0.563302 0.625214
1705 1 0.250133 0.625633 0.626532
653 1 0.369985 0.994037 0.621647
2048 1 0.873826 0.937941 0.941499
1033 1 0.250352 0.503705 1.00317
1552 1 0.37735 0.56284 0.559121
1583 1 0.439484 0.627604 0.564187
1678 1 0.440191 0.560179 0.629382
1709 1 0.37418 0.62789 0.625082
1556 1 0.497106 0.564433 0.563956
1574 1 0.185975 0.687048 0.498914
2047 1 0.941492 0.875606 0.935807
1683 1 0.566198 0.495454 0.691795
663 1 0.688589 0.99922 0.690688
901 1 0.122427 0.996111 0.8746
1539 1 0.0636449 0.49923 0.564726
1713 1 0.494158 0.629252 0.629501
1560 1 0.623646 0.563719 0.564265
1587 1 0.559159 0.626575 0.567512
1682 1 0.561992 0.561164 0.63118
1717 1 0.622122 0.624541 0.623652
1543 1 0.186324 0.497754 0.568693
2046 1 0.937915 0.93615 0.878747
1564 1 0.751703 0.564966 0.565339
1591 1 0.688081 0.625924 0.567393
1595 1 0.812524 0.622883 0.567664
1686 1 0.687735 0.562401 0.62913
1690 1 0.8107 0.560581 0.628074
1721 1 0.748882 0.621875 0.626774
789 1 0.62481 0.993454 0.750602
1662 1 0.935356 0.939786 0.495991
1685 1 0.627914 0.495355 0.627807
1679 1 0.439105 0.498285 0.68837
1540 1 1.00064 0.562935 0.557946
1568 1 0.873184 0.557642 0.564558
1599 1 0.941323 0.624183 0.561641
1694 1 0.941273 0.565594 0.6242
1725 1 0.881206 0.626583 0.628153
1795 1 0.0707284 0.506592 0.810313
2045 1 0.879993 0.873367 0.873813
783 1 0.433794 0.999041 0.813123
657 1 0.501665 1.00294 0.627803
1576 1 0.12413 0.686464 0.562323
1603 1 0.0620242 0.747406 0.560956
1608 1 0.12743 0.81191 0.55552
1698 1 0.0640901 0.690112 0.626264
1730 1 0.0607429 0.810556 0.61798
1733 1 0.125828 0.750866 0.626737
1150 1 0.939342 0.941724 0.99939
1057 1 1.00263 0.622405 1.00202
523 1 0.307814 1.00171 0.560076
1580 1 0.254982 0.689713 0.564061
1607 1 0.185649 0.749571 0.565857
1611 1 0.315654 0.752761 0.562865
1612 1 0.249339 0.813328 0.557007
1702 1 0.189049 0.690819 0.628358
1706 1 0.310552 0.693173 0.627405
1734 1 0.190257 0.817613 0.620359
1737 1 0.251863 0.751977 0.624066
1738 1 0.311017 0.823536 0.623071
1935 1 0.429215 0.506066 0.936547
1584 1 0.372768 0.692006 0.567188
1615 1 0.434667 0.751371 0.562891
1616 1 0.375799 0.817364 0.56796
1710 1 0.437381 0.691062 0.623682
1741 1 0.376969 0.753531 0.625763
1742 1 0.437531 0.813738 0.625048
1620 1 0.503863 0.812149 0.563391
1588 1 0.498972 0.684805 0.565802
1634 1 0.0568619 0.937156 0.499749
1745 1 0.500764 0.74869 0.621847
1592 1 0.622676 0.686876 0.561059
1619 1 0.565332 0.754401 0.562108
1624 1 0.623776 0.813767 0.559977
1714 1 0.563498 0.689224 0.626289
1746 1 0.562211 0.813932 0.621747
1749 1 0.630992 0.746431 0.620598
797 1 0.872334 0.995697 0.746016
1797 1 0.126027 0.505319 0.747877
1626 1 0.812084 0.815561 0.499215
517 1 0.122024 0.997309 0.494579
1596 1 0.751867 0.68513 0.560607
1623 1 0.688688 0.744577 0.559662
1627 1 0.814692 0.741527 0.565024
1628 1 0.752294 0.814808 0.560357
1718 1 0.691427 0.691181 0.625475
1722 1 0.812927 0.685457 0.630786
1750 1 0.686203 0.809766 0.619882
1753 1 0.751609 0.751838 0.623601
1754 1 0.809685 0.814484 0.626529
1675 1 0.314068 0.501167 0.691822
1923 1 0.0641867 0.499931 0.937523
1086 1 0.938028 0.685088 1.00399
1114 1 0.814085 0.812619 0.998566
1604 1 0.000766879 0.808042 0.555862
1729 1 0.99931 0.750243 0.621992
1572 1 0.999567 0.689635 0.563801
1600 1 0.878781 0.684301 0.563663
1631 1 0.937131 0.751425 0.559956
1632 1 0.869328 0.80902 0.567703
1726 1 0.940029 0.69376 0.620957
1757 1 0.876301 0.746548 0.624817
1758 1 0.937136 0.812806 0.622941
1681 1 0.501542 0.500583 0.628065
1635 1 0.0591774 0.870762 0.5602
1640 1 0.119684 0.936389 0.562442
1762 1 0.0595234 0.930512 0.628851
1765 1 0.124688 0.87314 0.622061
1819 1 0.818065 0.497584 0.815387
1639 1 0.187322 0.880105 0.561622
1643 1 0.31508 0.879081 0.559159
1644 1 0.247067 0.936043 0.559836
1766 1 0.182612 0.93785 0.626513
1769 1 0.245014 0.875203 0.623647
1770 1 0.306942 0.935897 0.621289
1647 1 0.44094 0.87529 0.565986
1648 1 0.378489 0.941886 0.557966
1773 1 0.373657 0.876911 0.628972
1774 1 0.438338 0.942692 0.626403
1652 1 0.50219 0.942826 0.565665
2044 1 0.752845 0.930396 0.937235
2043 1 0.8153 0.871907 0.939484
1777 1 0.503445 0.87585 0.627756
1651 1 0.556605 0.874723 0.562324
1656 1 0.618355 0.940472 0.565631
1778 1 0.559743 0.940383 0.630015
1781 1 0.62485 0.87695 0.62185
2042 1 0.817581 0.934008 0.874239
775 1 0.184183 0.995274 0.815038
2041 1 0.755083 0.872502 0.870203
921 1 0.753318 0.995086 0.878424
773 1 0.121045 0.998253 0.749215
1655 1 0.68948 0.876492 0.568242
1659 1 0.813011 0.875639 0.561105
1660 1 0.752678 0.936191 0.558843
1782 1 0.69133 0.940241 0.630532
1785 1 0.75634 0.879863 0.624484
1786 1 0.813135 0.940993 0.618432
1061 1 0.126841 0.628537 1.00293
1809 1 0.500953 0.500894 0.751555
1761 1 -0.0036927 0.876751 0.625008
1636 1 -0.00520314 0.936623 0.558063
1663 1 0.933199 0.876217 0.560128
1664 1 0.875133 0.936529 0.560846
1789 1 0.871458 0.873088 0.623752
1790 1 0.935729 0.935134 0.621615
1026 1 0.0611657 0.557841 1.00253
1801 1 0.252979 0.507725 0.750177
1672 1 0.122986 0.565119 0.684335
1699 1 0.0648323 0.627566 0.681649
1794 1 0.0620733 0.567082 0.750237
1800 1 0.127841 0.568566 0.810627
1827 1 0.0642382 0.630401 0.813336
1829 1 0.123843 0.630029 0.750878
1668 1 -0.0028643 0.560193 0.690268
2039 1 0.691408 0.871519 0.934363
1825 1 -0.000511279 0.626152 0.753659
1085 1 0.870826 0.624861 0.999185
1823 1 0.937065 0.502152 0.814921
659 1 0.560328 0.999568 0.688695
2038 1 0.694599 0.935543 0.878476
1676 1 0.249587 0.563498 0.685817
1703 1 0.183013 0.628899 0.686887
1707 1 0.309378 0.623341 0.690449
1798 1 0.184945 0.569185 0.747311
1802 1 0.314556 0.562149 0.749336
1804 1 0.243855 0.566259 0.813631
1831 1 0.186066 0.625445 0.815553
1833 1 0.252579 0.629979 0.751357
1835 1 0.310318 0.626785 0.81357
1633 1 -0.00120256 0.870745 0.501141
1815 1 0.691852 0.497308 0.815306
649 1 0.25158 1.00032 0.623572
1680 1 0.374472 0.56378 0.690574
1711 1 0.431949 0.62908 0.68796
1806 1 0.434648 0.565497 0.749906
1808 1 0.375005 0.562407 0.809611
1837 1 0.370382 0.629309 0.743347
1839 1 0.439759 0.6271 0.810485
1050 1 0.810704 0.556488 0.997536
2036 1 0.4989 0.937825 0.942668
1812 1 0.494266 0.565287 0.808873
1841 1 0.495414 0.627931 0.747062
1684 1 0.499157 0.565543 0.688681
1688 1 0.628544 0.567243 0.692071
1715 1 0.566291 0.625608 0.693315
1810 1 0.561237 0.564924 0.749902
1816 1 0.62182 0.56581 0.809693
1843 1 0.561843 0.631268 0.805981
1845 1 0.629004 0.629128 0.753953
1673 1 0.248252 0.495746 0.628653
2035 1 0.5619 0.872711 0.944071
777 1 0.251758 0.999479 0.750274
1692 1 0.74751 0.558706 0.693507
1719 1 0.691554 0.624935 0.686495
1723 1 0.817225 0.626587 0.68767
1814 1 0.693001 0.559049 0.756688
1818 1 0.812023 0.559996 0.747456
1820 1 0.753969 0.559971 0.814184
1847 1 0.688207 0.626411 0.814552
1849 1 0.752422 0.629087 0.747186
1851 1 0.811112 0.622278 0.809422
2034 1 0.565833 0.933188 0.885204
2037 1 0.629721 0.877048 0.879863
1661 1 0.87271 0.876355 0.49852
1796 1 1.00158 0.558625 0.813887
1696 1 0.878167 0.561214 0.686643
1727 1 0.936147 0.62736 0.690463
1822 1 0.941084 0.559634 0.749096
1824 1 0.867854 0.562438 0.809962
1853 1 0.875574 0.622484 0.747989
1855 1 0.939053 0.622764 0.815566
535 1 0.685448 1.00217 0.566666
1704 1 0.124396 0.690414 0.68725
1731 1 0.0600697 0.749649 0.68578
1736 1 0.126513 0.812626 0.683227
1826 1 0.0623773 0.687825 0.755068
1832 1 0.130106 0.689385 0.813438
1858 1 0.0677303 0.816495 0.750695
1859 1 0.0611663 0.749792 0.809624
1861 1 0.122603 0.752826 0.752373
1864 1 0.124502 0.808826 0.815651
1860 1 1.00252 0.8158 0.813279
1828 1 0.996849 0.692801 0.813453
1700 1 0.998833 0.690905 0.689003
1708 1 0.25048 0.694062 0.685282
1735 1 0.190446 0.753661 0.689717
1739 1 0.313428 0.754332 0.686338
1740 1 0.250142 0.816176 0.681768
1830 1 0.189411 0.687926 0.751892
1834 1 0.311952 0.693336 0.746938
1836 1 0.249294 0.682764 0.812493
1862 1 0.184259 0.814287 0.746255
1863 1 0.190015 0.755817 0.815966
1865 1 0.251193 0.752783 0.751304
1866 1 0.316564 0.8139 0.747659
1867 1 0.30942 0.749374 0.811283
1868 1 0.251889 0.811246 0.814408
1712 1 0.373345 0.690677 0.68393
1743 1 0.437318 0.751061 0.689745
1744 1 0.374457 0.809272 0.684953
1838 1 0.439049 0.688612 0.753365
1840 1 0.369471 0.68344 0.811557
1869 1 0.378663 0.748179 0.747492
1870 1 0.437783 0.811686 0.750833
1871 1 0.439247 0.750337 0.811928
1872 1 0.375879 0.808438 0.813521
1876 1 0.503619 0.815498 0.813329
1873 1 0.50518 0.751908 0.745108
1716 1 0.50326 0.689422 0.687743
1748 1 0.502954 0.809048 0.682524
1844 1 0.500556 0.687111 0.807391
1720 1 0.625915 0.682626 0.690077
1747 1 0.564988 0.753487 0.686867
1752 1 0.621753 0.81201 0.683374
1842 1 0.566945 0.691178 0.752208
1848 1 0.62797 0.687449 0.812749
1874 1 0.560369 0.815862 0.752785
1875 1 0.565863 0.756191 0.813189
1877 1 0.628229 0.752978 0.752509
1880 1 0.631122 0.817981 0.816291
1724 1 0.750903 0.6903 0.686085
1751 1 0.686228 0.754142 0.690421
1755 1 0.815978 0.753082 0.689727
1756 1 0.74718 0.814677 0.686836
1846 1 0.693976 0.686951 0.744782
1850 1 0.810528 0.684409 0.747285
1852 1 0.753223 0.687661 0.807949
1878 1 0.688063 0.814195 0.751328
1879 1 0.691321 0.753094 0.815087
1881 1 0.752654 0.747793 0.746822
1882 1 0.808653 0.814382 0.751287
1883 1 0.814301 0.74941 0.813512
1884 1 0.749385 0.810999 0.809484
1857 1 0.999601 0.748262 0.747681
1732 1 -0.00318708 0.813371 0.687474
1728 1 0.877512 0.691617 0.685992
1759 1 0.938074 0.754163 0.68973
1760 1 0.879165 0.811651 0.682614
1854 1 0.935843 0.684759 0.750626
1856 1 0.876907 0.682739 0.81607
1885 1 0.87413 0.752772 0.747487
1886 1 0.940993 0.812529 0.753799
1887 1 0.938133 0.752142 0.809856
1888 1 0.877761 0.808628 0.815433
1763 1 0.0628944 0.873781 0.686998
1768 1 0.117575 0.937796 0.68779
1890 1 0.0605841 0.93845 0.753026
1891 1 0.0640167 0.877062 0.811483
1893 1 0.124848 0.873237 0.753692
1896 1 0.127006 0.93539 0.810665
2040 1 0.628887 0.933626 0.941318
1889 1 1.00024 0.87532 0.745928
1764 1 -0.000103747 0.938615 0.687104
2033 1 0.498396 0.874364 0.871117
1547 1 0.321582 0.500636 0.565975
655 1 0.436207 0.999784 0.68721
1767 1 0.181771 0.87969 0.690067
1771 1 0.31341 0.876048 0.690904
1772 1 0.245481 0.938478 0.686387
1894 1 0.18421 0.93812 0.748346
1895 1 0.186219 0.870628 0.810948
1897 1 0.245573 0.873049 0.753412
1898 1 0.310171 0.931073 0.753036
1899 1 0.314073 0.87004 0.81217
1900 1 0.246803 0.932517 0.818979
785 1 0.501089 1.00464 0.753179
1030 1 0.185404 0.562545 1.00731
1775 1 0.440276 0.874133 0.690021
1776 1 0.374997 0.940386 0.686857
1901 1 0.376813 0.876326 0.745108
1902 1 0.433225 0.940169 0.751725
1903 1 0.435073 0.872907 0.808125
1904 1 0.373655 0.929475 0.811432
1780 1 0.500356 0.938291 0.688801
1908 1 0.494566 0.936206 0.810972
1945 1 0.751935 0.499134 0.875538
1081 1 0.749128 0.631106 0.997922
539 1 0.816087 1.00502 0.563176
1905 1 0.496074 0.873225 0.751004
1779 1 0.563432 0.875571 0.688027
1784 1 0.620456 0.936734 0.689661
1906 1 0.559667 0.937363 0.753651
1907 1 0.562155 0.880138 0.811494
1909 1 0.626213 0.87071 0.74883
1912 1 0.630837 0.935446 0.808255
1034 1 0.314626 0.56283 1.00367
1984 1 0.874641 0.686618 0.933599
1065 1 0.249816 0.626391 1.005
2013 1 0.875156 0.749388 0.87537
1783 1 0.683939 0.876963 0.683571
1787 1 0.814954 0.877421 0.68627
1788 1 0.748532 0.941491 0.689834
1910 1 0.686667 0.937685 0.74927
1911 1 0.690886 0.875884 0.814634
1913 1 0.748312 0.878654 0.74866
1914 1 0.813973 0.938407 0.75095
1915 1 0.816965 0.870161 0.814139
1916 1 0.75021 0.934754 0.812865
1892 1 0.00252703 0.938438 0.815303
1791 1 0.937998 0.874117 0.686094
1792 1 0.878128 0.937876 0.678219
1917 1 0.877463 0.871518 0.745239
1918 1 0.938077 0.937826 0.747695
1919 1 0.941929 0.877104 0.812181
1920 1 0.875774 0.934451 0.811622
2021 1 0.123944 0.871713 0.870539
1922 1 0.064199 0.564956 0.872877
1928 1 0.129366 0.562738 0.946117
1955 1 0.0628347 0.628637 0.944884
1957 1 0.124884 0.626399 0.879115
1953 1 0.00128432 0.627183 0.880253
1924 1 0.998543 0.561405 0.939576
1029 1 0.128022 0.500294 1.00134
2024 1 0.120867 0.935207 0.941832
2018 1 0.0678895 0.935873 0.87097
2019 1 0.0610592 0.874221 0.934794
1077 1 0.626599 0.62601 0.992381
905 1 0.249899 1.0008 0.871605
1926 1 0.188912 0.55981 0.878483
1930 1 0.308037 0.564358 0.870549
1932 1 0.247292 0.570683 0.943528
1959 1 0.181448 0.629739 0.939671
1961 1 0.244746 0.630171 0.877192
1963 1 0.311764 0.625354 0.935874
2029 1 0.373302 0.87578 0.877701
1934 1 0.43802 0.564622 0.871491
1936 1 0.370005 0.565163 0.936471
1965 1 0.376781 0.627414 0.871162
1967 1 0.440388 0.623486 0.934166
1940 1 0.49665 0.559087 0.935391
2025 1 0.244412 0.872582 0.874489
2030 1 0.436532 0.933609 0.87335
1969 1 0.503305 0.626638 0.873505
1938 1 0.557087 0.563022 0.871677
1944 1 0.627476 0.556477 0.934269
1971 1 0.558459 0.626418 0.936964
1973 1 0.620045 0.618432 0.874172
2032 1 0.371751 0.937978 0.935522
2014 1 0.938708 0.812106 0.873908
665 1 0.749583 1.00009 0.62481
1093 1 0.124498 0.74677 1.0032
1942 1 0.684831 0.561273 0.869178
1946 1 0.81286 0.561762 0.881876
1948 1 0.750441 0.561438 0.939637
1975 1 0.691802 0.627215 0.934346
1977 1 0.750429 0.626403 0.874075
1979 1 0.813983 0.627098 0.932803
531 1 0.561981 1.00584 0.565599
2031 1 0.442845 0.872362 0.935502
2026 1 0.311842 0.937102 0.874798
1980 1 0.750784 0.690392 0.938442
1978 1 0.811078 0.684079 0.874314
1950 1 0.934113 0.564598 0.876965
1952 1 0.874314 0.564543 0.938344
1981 1 0.873476 0.621788 0.875196
1983 1 0.93329 0.626549 0.936912
1062 1 0.185898 0.688543 1.00012
2017 1 0.000789519 0.872979 0.871064
1954 1 0.0654754 0.694454 0.869947
1960 1 0.120537 0.690411 0.939805
1986 1 0.0569147 0.810834 0.875663
1987 1 0.0600832 0.750739 0.937046
1989 1 0.125468 0.749713 0.875837
1992 1 0.126337 0.811834 0.942028
1988 1 0.999589 0.813748 0.939663
1985 1 0.996221 0.751877 0.876919
1956 1 1.00093 0.690338 0.939271
2015 1 0.941556 0.748165 0.937499
1958 1 0.185959 0.688736 0.876814
1962 1 0.31002 0.687976 0.87634
1964 1 0.250237 0.691955 0.940272
1990 1 0.185864 0.814056 0.876935
1991 1 0.186907 0.753193 0.939531
1993 1 0.253965 0.749537 0.873255
1994 1 0.312438 0.812927 0.876549
1995 1 0.309757 0.749601 0.936156
1996 1 0.248014 0.807961 0.938691
1939 1 0.561724 0.497994 0.928553
2016 1 0.879926 0.810229 0.934355
2027 1 0.311423 0.873622 0.938075
2000 1 0.371102 0.811004 0.939887
1999 1 0.438348 0.750557 0.936143
1998 1 0.436384 0.810181 0.874797
1997 1 0.367492 0.744998 0.874093
1968 1 0.373845 0.683995 0.935353
1966 1 0.440277 0.687646 0.868235
2001 1 0.507421 0.752297 0.872178
2023 1 0.187132 0.869992 0.938829
2022 1 0.181335 0.936902 0.88102
1982 1 0.9386 0.686962 0.8742
2004 1 0.50317 0.814585 0.933672
1972 1 0.500345 0.690162 0.931307
1970 1 0.568562 0.693663 0.870272
1976 1 0.626077 0.689623 0.93889
2002 1 0.568997 0.815 0.873522
2003 1 0.564644 0.750749 0.939156
2005 1 0.625919 0.75247 0.877993
2008 1 0.623737 0.813233 0.938647
1078 1 0.691098 0.688778 1.00386
2028 1 0.246837 0.939782 0.934915
2007 1 0.68301 0.74681 0.940434
2006 1 0.693655 0.808747 0.875041
2011 1 0.814019 0.748575 0.940721
2010 1 0.810555 0.810902 0.870424
2009 1 0.75164 0.745386 0.879836
2012 1 0.751059 0.807891 0.938505
1974 1 0.690773 0.68671 0.879135
903 1 0.184022 1.0005 0.940226
1110 1 0.684367 0.809874 0.998653
1126 1 0.187926 0.934981 0.999344
1146 1 0.808634 0.936296 0.999228
1069 1 0.380232 0.625572 0.99928
1090 1 0.0650927 0.811223 0.999598
1042 1 0.559075 0.561875 0.996322
1054 1 0.937724 0.563778 0.999069
1149 1 0.875377 0.865551 0.996475
1118 1 0.936208 0.809426 1.00374
1129 1 0.250485 0.871142 0.998272
1109 1 0.625134 0.748055 1.00497
533 1 0.626158 1.00544 0.499128
1654 1 0.691841 0.940983 0.502253
1125 1 0.125046 0.875711 0.99938
1650 1 0.560688 0.941457 0.502554
1117 1 0.877896 0.748421 0.998781
1097 1 0.248137 0.745956 1.0039
1609 1 0.247081 0.750711 0.502024
1617 1 0.503393 0.750585 0.506692
1622 1 0.683955 0.815722 0.502367
1597 1 0.87367 0.619843 0.504828
1637 1 0.125716 0.873128 0.503309
1638 1 0.181913 0.939225 0.49496
1582 1 0.434857 0.688173 0.507096
1562 1 0.807792 0.562784 0.506123
1590 1 0.691694 0.684579 0.501257
1037 1 0.372563 0.496608 1.00207
1610 1 0.313542 0.817933 0.506286
| [
"[email protected]"
] | |
d7b8f8c2292943e3ede00ea4959fa4e0b8ea7ca8 | fb4e9ddb628ae19634eb1d51f02fa33d093ca5d1 | /tensorflow-master/tensorflow/contrib/learn/python/learn/estimators/head.py | 952cdeb5ec1bc8907c36f26c32eb274254e15d8d | [
"Apache-2.0"
] | permissive | zhentaowang/machine-learning | 68189bbc9bd052cecf068fb5fc7e88c04ec24e34 | 1dbba7bbe7f5c8c1449c312fb7e0c008581b90be | refs/heads/master | 2021-06-14T14:08:51.889455 | 2017-03-20T02:10:43 | 2017-03-20T02:10:43 | 63,773,867 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67,051 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstractions for the head(s) of a model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import inspect
import six
from tensorflow.contrib import framework as framework_lib
from tensorflow.contrib import layers as layers_lib
# TODO(ptucker): Use tf.losses and tf.metrics.
from tensorflow.contrib import losses as losses_lib
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.learn.python.learn.estimators.metric_key import MetricKey as mkey
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.summary import summary
from tensorflow.python.training import training
# TODO(zakaria): add functions that creates a head and returns ModelOpFn
def _regression_head(label_name=None,
                     weight_column_name=None,
                     label_dimension=1,
                     enable_centered_bias=False,
                     head_name=None):
  """Creates a `_Head` for linear (least-squares) regression.

  The returned head uses mean squared error as the training loss and the
  identity link function, i.e. logits are used directly as predictions.

  Args:
    label_name: String, name of the key in the label dict. Can be `None` if
      the label is a tensor (single headed models).
    weight_column_name: A string defining feature column name representing
      weights. It is used to down weight or boost examples during training.
      It will be multiplied by the loss of the example.
    label_dimension: Number of regression labels per example. This is the
      size of the last dimension of the labels `Tensor` (typically, this has
      shape `[batch_size, label_dimension]`).
    enable_centered_bias: A bool. If True, estimator will learn a centered
      bias variable for each class. Rest of the model structure learns the
      residual after centered bias.
    head_name: name of the head. If provided, predictions, summary and
      metrics keys will be suffixed by `"/" + head_name` and the default
      variable scope will be `head_name`.

  Returns:
    An instance of _Head
  """
  # Collect the construction arguments once, then build the head; the loss
  # and link function are what distinguish this head from other regressions.
  head_kwargs = {
      "label_name": label_name,
      "weight_column_name": weight_column_name,
      "label_dimension": label_dimension,
      "enable_centered_bias": enable_centered_bias,
      "head_name": head_name,
      "loss_fn": _mean_squared_loss,
      "link_fn": array_ops.identity,
  }
  return _RegressionHead(**head_kwargs)
def _poisson_regression_head(label_name=None,
                             weight_column_name=None,
                             label_dimension=1,
                             enable_centered_bias=False,
                             head_name=None):
  """Creates a _Head for Poisson regression.

  Unlike `_regression_head`, this head trains with the Poisson log-loss
  (`_poisson_loss`) and applies an exponential link function, so logits are
  interpreted as log-rates and predictions are non-negative expected counts.

  Args:
    label_name: String, name of the key in label dict. Can be null if label
      is a tensor (single headed models).
    weight_column_name: A string defining feature column name representing
      weights. It is used to down weight or boost examples during training. It
      will be multiplied by the loss of the example.
    label_dimension: Number of regression labels per example. This is the size
      of the last dimension of the labels `Tensor` (typically, this has shape
      `[batch_size, label_dimension]`).
    enable_centered_bias: A bool. If True, estimator will learn a centered
      bias variable for each class. Rest of the model structure learns the
      residual after centered bias.
    head_name: name of the head. If provided, predictions, summary and metrics
      keys will be suffixed by `"/" + head_name` and the default variable scope
      will be `head_name`.

  Returns:
    An instance of _Head
  """
  return _RegressionHead(
      label_name=label_name,
      weight_column_name=weight_column_name,
      label_dimension=label_dimension,
      enable_centered_bias=enable_centered_bias,
      head_name=head_name,
      # Poisson log-loss + exp link distinguish this head from the plain
      # least-squares regression head above.
      loss_fn=_poisson_loss,
      link_fn=math_ops.exp)
# TODO(zakaria): Add logistic_regression_head
def _multi_class_head(n_classes,
                      label_name=None,
                      weight_column_name=None,
                      enable_centered_bias=False,
                      head_name=None,
                      thresholds=None,
                      metric_class_ids=None,
                      loss_fn=None):
  """Creates a `_Head` for single-label multi class classification.

  For `n_classes > 2` the returned head trains with softmax cross entropy
  loss; for `n_classes == 2` a binary logistic head is returned instead.

  Args:
    n_classes: Integer, number of classes, must be >= 2.
    label_name: String key into the label dict, or `None` when the label is a
      plain tensor (single-headed models).
    weight_column_name: Name of the feature column holding per-example
      weights, used to down-weight or boost examples during training; the
      example loss is multiplied by this weight.
    enable_centered_bias: If `True`, a centered-bias variable is learned per
      class and the rest of the model fits the residual.
    head_name: Optional head name; predictions, summary and metric keys are
      suffixed with `"/" + head_name`, which is also the default variable
      scope.
    thresholds: Thresholds for eval metrics; defaults to `[.5]`.
    metric_class_ids: Class IDs to report per-class metrics for; each must be
      in `[0, n_classes)`. Invalid when `n_classes` is 2.
    loss_fn: Optional `(labels, logits, weights) -> weighted scalar loss`
      function (`weights` should be optional); see `tf.losses`.

  Returns:
    An instance of `_Head` (`_BinaryLogisticHead` or `_MultiClassHead`).

  Raises:
    ValueError: If `n_classes` is < 2, or `metric_class_ids` is provided when
      `n_classes` is 2, or `loss_fn` does not have the expected signature.
  """
  if (n_classes is None) or (n_classes < 2):
    raise ValueError("n_classes must be > 1 for classification: %s." %
                     n_classes)
  wrapped_loss_fn = None
  if loss_fn:
    _verify_loss_fn_args(loss_fn)
    wrapped_loss_fn = _wrap_custom_loss_fn(loss_fn)
  # Arguments common to both head flavors.
  shared_kwargs = dict(
      label_name=label_name,
      weight_column_name=weight_column_name,
      enable_centered_bias=enable_centered_bias,
      head_name=head_name,
      thresholds=thresholds,
      loss_fn=wrapped_loss_fn)
  if n_classes == 2:
    if metric_class_ids:
      raise ValueError("metric_class_ids invalid for n_classes==2.")
    return _BinaryLogisticHead(**shared_kwargs)
  return _MultiClassHead(
      n_classes=n_classes, metric_class_ids=metric_class_ids, **shared_kwargs)
def _binary_svm_head(
    label_name=None,
    weight_column_name=None,
    enable_centered_bias=False,
    head_name=None,
    thresholds=None,):
  """Creates a `_Head` for binary classification trained with an SVM.

  The returned head uses binary hinge loss.

  Args:
    label_name: String key into the label dict, or `None` when the label is a
      plain tensor (single-headed models).
    weight_column_name: Name of the feature column holding per-example
      weights, used to down-weight or boost examples during training; the
      example loss is multiplied by this weight.
    enable_centered_bias: If `True`, a centered-bias variable is learned per
      class and the rest of the model fits the residual.
    head_name: Optional head name; predictions, summary and metric keys are
      suffixed with `"/" + head_name`, which is also the default variable
      scope.
    thresholds: Thresholds for eval metrics; defaults to `[.5]`.

  Returns:
    An instance of `_Head`.
  """
  svm_head = _BinarySvmHead(
      label_name=label_name,
      weight_column_name=weight_column_name,
      enable_centered_bias=enable_centered_bias,
      head_name=head_name,
      thresholds=thresholds)
  return svm_head
def _multi_label_head(n_classes,
                      label_name=None,
                      weight_column_name=None,
                      enable_centered_bias=False,
                      head_name=None,
                      thresholds=None,
                      metric_class_ids=None,
                      loss_fn=None):
  """Creates a _Head for multi label classification.

  The Head uses sigmoid cross entropy loss, i.e. each class is predicted
  independently and an example may belong to several classes at once.

  Args:
    n_classes: Integer, number of classes, must be >= 2
    label_name: String, name of the key in label dict. Can be null if label
      is a tensor (single headed models).
    weight_column_name: A string defining feature column name representing
      weights. It is used to down weight or boost examples during training. It
      will be multiplied by the loss of the example.
    enable_centered_bias: A bool. If True, estimator will learn a centered
      bias variable for each class. Rest of the model structure learns the
      residual after centered bias.
    head_name: name of the head. If provided, predictions, summary and metrics
      keys will be suffixed by `"/" + head_name` and the default variable scope
      will be `head_name`.
    thresholds: thresholds for eval metrics, defaults to [.5]
    metric_class_ids: List of class IDs for which we should report per-class
      metrics. Must all be in the range `[0, n_classes)`.
    loss_fn: Optional function that takes (`labels`, `logits`, `weights`) as
      parameter and returns a weighted scalar loss. `weights` should be
      optional. See `tf.losses`

  Returns:
    An instance of _MultiLabelHead.

  Raises:
    ValueError: If n_classes is None or < 2.
    ValueError: If loss_fn does not have expected signature.
  """
  # Reject `None` explicitly (consistent with `_multi_class_head`): on
  # Python 3, `None < 2` would raise a TypeError instead of the intended
  # ValueError.
  if (n_classes is None) or (n_classes < 2):
    raise ValueError("n_classes must be > 1 for classification.")
  wrapped_loss_fn = None
  if loss_fn:
    _verify_loss_fn_args(loss_fn)
    wrapped_loss_fn = _wrap_custom_loss_fn(loss_fn)
  return _MultiLabelHead(
      n_classes=n_classes,
      label_name=label_name,
      weight_column_name=weight_column_name,
      enable_centered_bias=enable_centered_bias,
      head_name=head_name,
      thresholds=thresholds,
      metric_class_ids=metric_class_ids,
      loss_fn=wrapped_loss_fn)
def _multi_head(heads, loss_weights=None):
  """Creates a `_Head` combining several heads that share logits/hidden layer.

  Args:
    heads: list of _Head objects.
    loss_weights: optional list of weights, one per head, used to combine the
      per-head losses. All losses are weighted equally if not provided.

  Returns:
    A _Head instance that combines multiple heads.

  Raises:
    ValueError: if heads and loss_weights have different size.
  """
  if loss_weights and len(loss_weights) != len(heads):
    raise ValueError("heads and loss_weights must have same size")

  def _combine_losses(losses):
    """Sums the per-head losses, scaled by `loss_weights` when given."""
    if not loss_weights:
      return math_ops.add_n(losses)
    if len(losses) != len(loss_weights):
      raise ValueError("losses and loss_weights must have same size")
    scaled = [math_ops.multiply(loss, weight)
              for loss, weight in zip(losses, loss_weights)]
    return math_ops.add_n(scaled)

  return _MultiHead(heads, loss_combiner=_combine_losses)
def no_op_train_fn(loss):
  """Returns a no-op `Operation`; pass as `train_op_fn` to skip optimization.

  Args:
    loss: Loss `Tensor`; ignored (deleted only to satisfy the `train_op_fn`
      signature).

  Returns:
    A no-op `Operation`.
  """
  del loss
  return control_flow_ops.no_op()
# TODO(zakaria): Make the classes public once we are ready for users to subclass
# them. See b/34751732
class _Head(object):
  """Interface for the head/top of a model.

  Given logits or output of a hidden layer, a Head knows how to compute
  predictions, loss, default metric and export signature.
  """
  # Python 2-style abstract base class declaration; subclasses must implement
  # the abstract members below.
  __metaclass__ = abc.ABCMeta

  @abc.abstractproperty
  def logits_dimension(self):
    """Size of the last dimension of the logits `Tensor`.

    Typically, logits is of shape `[batch_size, logits_dimension]`.

    Returns:
      Number of logits values per example.
    """
    raise NotImplementedError("Calling an abstract method.")

  @abc.abstractmethod
  def create_model_fn_ops(self,
                          features,
                          mode,
                          labels=None,
                          train_op_fn=None,
                          logits=None,
                          logits_input=None,
                          scope=None):
    """Returns ops for a model_fn.

    Exactly one of `logits` and `logits_input` must be provided.
    All args must be passed via name.

    Args:
      features: Input `dict` of `Tensor` objects.
      mode: Estimator's `ModeKeys`.
      labels: Labels `Tensor`, or `dict` of same.
      train_op_fn: Function that takes a scalar loss and returns an op to
        optimize with the loss. Must not be `None` in TRAIN mode. If you want
        to optimize loss yourself you can pass `no_op_train_fn`.
      logits: logits `Tensor`, or `dict` of same, to be used for the head.
      logits_input: `Tensor` from which to build logits.
      scope: Optional scope for `variable_scope`.

    Returns:
      `ModelFnOps`.

    Raises:
      ValueError: if `mode` is not recognized, or neither or both of `logits`
        and `logits_input` is provided.
    """
    raise NotImplementedError("Calling an abstract method.")
class _SingleHead(_Head):
  """Interface for a single head/top of a model."""
  # Python 2-style abstract base class declaration.
  __metaclass__ = abc.ABCMeta

  def __init__(
      self, problem_type, logits_dimension, label_name=None,
      weight_column_name=None, head_name=None):
    """Validates and stores configuration common to all single heads.

    Args:
      problem_type: A `constants.ProblemType` value.
      logits_dimension: Size of the last dimension of the logits `Tensor`;
        must be >= 1.
      label_name: String, name of the key in label dict. Can be `None` if
        label is a tensor (single headed models).
      weight_column_name: A string defining feature column name representing
        example weights.
      head_name: Name of the head, used to suffix prediction, summary and
        metric keys.

    Raises:
      ValueError: if `problem_type` is `None`, or `logits_dimension` is
        `None` or < 1.
    """
    if problem_type is None:
      raise ValueError("Invalid problem_type %s." % problem_type)
    if logits_dimension is None or logits_dimension < 1:
      raise ValueError("Invalid logits_dimension %s." % logits_dimension)
    self._problem_type = problem_type
    self._logits_dimension = logits_dimension
    self._label_name = label_name
    self._weight_column_name = weight_column_name
    self._head_name = head_name

  @property
  def logits_dimension(self):
    """Size of the last dimension of the logits `Tensor`."""
    return self._logits_dimension

  @property
  def label_name(self):
    """Label key in a label dict, or `None` for tensor labels."""
    return self._label_name

  @property
  def weight_column_name(self):
    """Name of the feature column holding example weights, or `None`."""
    return self._weight_column_name

  @property
  def head_name(self):
    """Name used to suffix prediction/summary/metric keys, or `None`."""
    return self._head_name

  def _create_output_alternatives(self, predictions):
    """Creates output alternative for the Head.

    Args:
      predictions: a dict of {tensor_name: Tensor}, where 'tensor_name' is a
        symbolic name for an output Tensor possibly but not necessarily taken
        from `PredictionKey`, and 'Tensor' is the corresponding output Tensor
        itself.

    Returns:
      `dict` of {submodel_name: (problem_type, {tensor_name: Tensor})}, where
      'submodel_name' is a submodel identifier that should be consistent across
      the pipeline (here likely taken from the head_name),
      'problem_type' is a `ProblemType`,
      'tensor_name' is a symbolic name for an output Tensor possibly but not
      necessarily taken from `PredictionKey`, and
      'Tensor' is the corresponding output Tensor itself.
    """
    return {self._head_name: (self._problem_type, predictions)}
# TODO(zakaria): use contrib losses.
def _mean_squared_loss(labels, logits, weights=None):
  """Per-example squared-error loss, weighted via `_compute_weighted_loss`."""
  with ops.name_scope(None, "mean_squared_loss", (logits, labels)) as name:
    logits = ops.convert_to_tensor(logits)
    labels = ops.convert_to_tensor(labels)
    # Rank-1 tensors are reshaped to [batch_size, 1] so the subtraction below
    # cannot broadcast.
    if len(labels.get_shape()) == 1:
      labels = array_ops.expand_dims(labels, dim=(1,))
    # TODO(zakaria): make sure it does not recreate the broadcast bug.
    if len(logits.get_shape()) == 1:
      logits = array_ops.expand_dims(logits, dim=(1,))
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    error = logits - math_ops.to_float(labels)
    loss = math_ops.square(error, name=name)
    return _compute_weighted_loss(loss, weights)
def _poisson_loss(labels, logits, weights=None):
  """Per-example Poisson log-loss, weighted via `_compute_weighted_loss`."""
  with ops.name_scope(None, "_poisson_loss", (logits, labels)) as name:
    logits = ops.convert_to_tensor(logits)
    labels = ops.convert_to_tensor(labels)
    # Rank-1 tensors are reshaped to [batch_size, 1] so the loss op below
    # cannot broadcast labels against logits.
    if len(labels.get_shape()) == 1:
      labels = array_ops.expand_dims(labels, dim=(1,))
    # TODO(zakaria): make sure it does not recreate the broadcast bug.
    if len(logits.get_shape()) == 1:
      logits = array_ops.expand_dims(logits, dim=(1,))
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    loss = nn.log_poisson_loss(
        labels, logits, compute_full_loss=True, name=name)
    return _compute_weighted_loss(loss, weights)
def _logits(logits_input, logits, logits_dimension):
  """Validates logits args, building `logits` from `logits_input` if needed.

  Exactly one of `logits_input` and `logits` must be provided.

  Args:
    logits_input: `Tensor` input to `logits`.
    logits: `Tensor` output.
    logits_dimension: Integer, last dimension of `logits`. Used to create
      `logits` from `logits_input` when `logits` is `None`, and to validate
      `logits` otherwise.

  Returns:
    `logits` `Tensor`.

  Raises:
    ValueError: if `logits_dimension` is invalid, or if neither or both of
      `logits` and `logits_input` are supplied.
  """
  if logits_dimension is None or logits_dimension < 1:
    raise ValueError("Invalid logits_dimension %s." % logits_dimension)
  has_logits = logits is not None
  has_logits_input = logits_input is not None
  if has_logits and has_logits_input:
    raise ValueError("Both logits and logits_input supplied.")
  if not has_logits:
    if not has_logits_input:
      raise ValueError("Neither logits nor logits_input supplied.")
    # Build logits with a single linear layer on top of logits_input.
    return layers_lib.linear(logits_input, logits_dimension, scope="logits")
  logits = ops.convert_to_tensor(logits, name="logits")
  logits_shape_dims = logits.get_shape().dims
  if logits_shape_dims is not None:
    logits_shape_dims[-1].assert_is_compatible_with(logits_dimension)
  return logits
def _create_model_fn_ops(features,
                         mode,
                         transform_labels_fn,
                         loss_fn,
                         logits_to_predictions_fn,
                         metrics_fn,
                         create_output_alternatives_fn,
                         default_variable_scope_name,
                         labels=None,
                         train_op_fn=None,
                         logits=None,
                         logits_input=None,
                         logits_dimension=None,
                         head_name=None,
                         weight_column_name=None,
                         enable_centered_bias=False):
  """Returns a `ModelFnOps` object.

  Shared driver used by the concrete `_Head` implementations: inside a common
  variable scope it transforms labels, builds logits (plus an optional
  centered-bias term), predictions, loss, train op and eval metrics.

  Args:
    features: Input `dict` of `Tensor` objects.
    mode: Estimator's `ModeKeys`.
    transform_labels_fn: Function mapping raw `labels` to a labels tensor.
    loss_fn: Function taking `(labels, logits, weights)` and returning a
      `(loss, weighted_average_loss)` pair of tensors.
    logits_to_predictions_fn: Function mapping logits to a predictions dict.
    metrics_fn: Function building the eval metrics dict from
      `(weighted_average_loss, predictions, labels, weights)`.
    create_output_alternatives_fn: Function building output alternatives from
      the predictions dict.
    default_variable_scope_name: Scope name used when `head_name` is unset.
    labels: Labels `Tensor`, or `dict` of same, or `None`.
    train_op_fn: Function mapping a scalar loss to a training op; required in
      TRAIN mode.
    logits: Logits `Tensor`, or `None` if `logits_input` is given.
    logits_input: `Tensor` from which to build logits, or `None`.
    logits_dimension: Size of the last dimension of the logits `Tensor`.
    head_name: Name of the head; also the variable scope when provided.
    weight_column_name: Name of the feature column holding example weights.
    enable_centered_bias: If `True`, adds a learned centered-bias variable to
      the logits.

  Raises:
    ValueError: if `mode` is invalid, the `logits`/`logits_input` combination
      is invalid, or `train_op_fn` is `None` in TRAIN mode.
  """
  _check_mode_valid(mode)
  with variable_scope.variable_scope(
      None,
      default_name=head_name or default_variable_scope_name,
      values=(tuple(six.itervalues(features)) +
              (labels, logits, logits_input))):
    # Labels are only consumed outside INFER mode.
    if (mode != model_fn.ModeKeys.INFER) and (labels is not None):
      labels = transform_labels_fn(labels)
    else:
      labels = None
    logits = _logits(logits_input, logits, logits_dimension)
    centered_bias = None
    if enable_centered_bias:
      centered_bias = _centered_bias(logits_dimension, head_name)
      logits = nn.bias_add(logits, centered_bias)
    predictions = logits_to_predictions_fn(logits)
    loss = None
    train_op = None
    eval_metric_ops = None
    # Loss, train op and metrics are built only when labels are available.
    if (mode != model_fn.ModeKeys.INFER) and (labels is not None):
      weight_tensor = _weight_tensor(features, weight_column_name)
      loss, weighted_average_loss = loss_fn(labels, logits, weight_tensor)
      logging_ops.scalar_summary(
          _summary_key(head_name, mkey.LOSS), weighted_average_loss)
      if mode == model_fn.ModeKeys.TRAIN:
        if train_op_fn is None:
          raise ValueError("train_op_fn can not be None in TRAIN mode")
        train_op = _train_op(loss, labels, train_op_fn, centered_bias,
                             logits_dimension, loss_fn, weight_tensor)
      eval_metric_ops = metrics_fn(
          weighted_average_loss, predictions, labels, weight_tensor)
    return model_fn.ModelFnOps(
        mode=mode,
        predictions=predictions,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metric_ops,
        output_alternatives=create_output_alternatives_fn(predictions))
class _RegressionHead(_SingleHead):
  """_Head for regression with a generalized linear model."""

  def __init__(self,
               label_dimension,
               loss_fn,
               link_fn,
               label_name=None,
               weight_column_name=None,
               enable_centered_bias=False,
               head_name=None):
    """Head for regression.

    Args:
      label_dimension: Number of regression labels per example. This is the
        size of the last dimension of the labels `Tensor` (typically, this has
        shape `[batch_size, label_dimension]`).
      loss_fn: Loss function, takes logits and labels and returns loss.
      link_fn: Link function, takes a logits tensor and returns the output.
      label_name: String, name of the key in label dict. Can be null if label
        is a tensor (single headed models).
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training. It
        will be multiplied by the loss of the example.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      head_name: name of the head. Predictions, summary and metrics keys are
        suffixed by `"/" + head_name` and the default variable scope is
        `head_name`.
    """
    super(_RegressionHead, self).__init__(
        problem_type=constants.ProblemType.LINEAR_REGRESSION,
        logits_dimension=label_dimension,
        label_name=label_name,
        weight_column_name=weight_column_name,
        head_name=head_name)
    self._loss_fn = loss_fn
    self._link_fn = link_fn
    self._enable_centered_bias = enable_centered_bias

  def create_model_fn_ops(self,
                          features,
                          mode,
                          labels=None,
                          train_op_fn=None,
                          logits=None,
                          logits_input=None,
                          scope=None):
    """See `_Head`."""
    # Delegates to the shared driver, wiring in this head's label transform,
    # loss, prediction and metric functions.
    return _create_model_fn_ops(
        features=features,
        mode=mode,
        transform_labels_fn=self._transform_labels,
        loss_fn=self._loss_fn,
        logits_to_predictions_fn=self._logits_to_predictions,
        metrics_fn=self._metrics,
        create_output_alternatives_fn=self._create_output_alternatives,
        default_variable_scope_name="regression_head",
        labels=labels,
        train_op_fn=train_op_fn,
        logits=logits,
        logits_input=logits_input,
        logits_dimension=self.logits_dimension,
        head_name=self.head_name,
        weight_column_name=self.weight_column_name,
        enable_centered_bias=self._enable_centered_bias)

  def _transform_labels(self, labels):
    """Applies transformations to labels tensor."""
    labels_tensor = _to_labels_tensor(labels, self._label_name)
    _check_no_sparse_tensor(labels_tensor)
    return labels_tensor

  def _logits_to_predictions(self, logits):
    """Returns a dict of predictions.

    Args:
      logits: logits `Tensor` after applying possible centered bias.

    Returns:
      Dict of prediction `Tensor` keyed by `PredictionKey`.
    """
    key = prediction_key.PredictionKey.SCORES
    with ops.name_scope(None, "predictions", (logits,)):
      if self.logits_dimension == 1:
        # Single-dimension regression: drop the trailing size-1 axis so
        # scores have shape [batch_size].
        logits = array_ops.squeeze(logits, squeeze_dims=(1,), name=key)
      return {key: self._link_fn(logits)}

  def _metrics(self, eval_loss, predictions, labels, weights):
    """Returns a dict of metrics keyed by name."""
    del predictions, labels, weights  # Unused by this head.
    with ops.name_scope("metrics", values=[eval_loss]):
      return {
          _summary_key(self.head_name, mkey.LOSS):
              metrics_lib.streaming_mean(eval_loss)}
def _log_loss_with_two_classes(labels, logits, weights=None):
  """Sigmoid cross-entropy loss for binary labels."""
  with ops.name_scope(None, "log_loss_with_two_classes",
                      (logits, labels)) as name:
    logits = ops.convert_to_tensor(logits)
    labels = math_ops.to_float(labels)
    # TODO(ptucker): This will break for dynamic shapes.
    # sigmoid_cross_entropy_with_logits requires [batch_size, 1] labels, so
    # rank-1 labels get a trailing axis added.
    if len(labels.get_shape()) == 1:
      labels = array_ops.expand_dims(labels, dim=(1,))
    loss = nn.sigmoid_cross_entropy_with_logits(
        logits=logits, labels=labels, name=name)
    return _compute_weighted_loss(loss, weights)
def _one_class_to_two_class_logits(logits):
  """Pads single-class logits with a zero negative-class logit column."""
  negative_class_logits = array_ops.zeros_like(logits)
  return array_ops.concat((negative_class_logits, logits), 1)
class _BinaryLogisticHead(_SingleHead):
  """_Head for binary logistic classification."""

  def __init__(self,
               label_name=None,
               weight_column_name=None,
               enable_centered_bias=False,
               head_name=None,
               loss_fn=None,
               thresholds=None):
    """Base type for all single heads.

    Args:
      label_name: String, name of the key in label dict. Can be `None` if label
        is a tensor (single headed models).
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training. It
        will be multiplied by the loss of the example.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      head_name: name of the head. Predictions, summary, metrics keys are
        suffixed by `"/" + head_name` and the default variable scope is
        `head_name`.
      loss_fn: Loss function.
      thresholds: thresholds for eval.

    Raises:
      ValueError: if n_classes is invalid.
    """
    super(_BinaryLogisticHead, self).__init__(
        problem_type=constants.ProblemType.LOGISTIC_REGRESSION,
        logits_dimension=1,
        label_name=label_name,
        weight_column_name=weight_column_name,
        head_name=head_name)
    self._thresholds = thresholds if thresholds else (.5,)
    # Defaults to sigmoid cross entropy when no custom loss is supplied.
    self._loss_fn = loss_fn if loss_fn else _log_loss_with_two_classes
    self._enable_centered_bias = enable_centered_bias

  def create_model_fn_ops(self,
                          features,
                          mode,
                          labels=None,
                          train_op_fn=None,
                          logits=None,
                          logits_input=None,
                          scope=None):
    """See `_Head`."""
    # Delegates to the shared driver, wiring in this head's label transform,
    # loss, prediction and metric functions.
    return _create_model_fn_ops(
        features=features,
        mode=mode,
        transform_labels_fn=self._transform_labels,
        loss_fn=self._loss_fn,
        logits_to_predictions_fn=self._logits_to_predictions,
        metrics_fn=self._metrics,
        create_output_alternatives_fn=self._create_output_alternatives,
        default_variable_scope_name="binary_logistic_head",
        labels=labels,
        train_op_fn=train_op_fn,
        logits=logits,
        logits_input=logits_input,
        logits_dimension=self.logits_dimension,
        head_name=self.head_name,
        weight_column_name=self.weight_column_name,
        enable_centered_bias=self._enable_centered_bias)

  def _transform_labels(self, labels):
    """Applies transformations to labels tensor."""
    labels_tensor = _to_labels_tensor(labels, self._label_name)
    _check_no_sparse_tensor(labels_tensor)
    return labels_tensor

  def _logits_to_predictions(self, logits):
    """Returns a dict of predictions.

    Args:
      logits: logits `Output` after applying possible centered bias.

    Returns:
      Dict of prediction `Output` keyed by `PredictionKey`.
    """
    with ops.name_scope(None, "predictions", (logits,)):
      # Pad with a zero logit for the negative class so softmax/argmax see
      # two classes.
      two_class_logits = _one_class_to_two_class_logits(logits)
      return {
          prediction_key.PredictionKey.LOGITS:
              logits,
          prediction_key.PredictionKey.LOGISTIC:
              math_ops.sigmoid(
                  logits, name=prediction_key.PredictionKey.LOGISTIC),
          prediction_key.PredictionKey.PROBABILITIES:
              nn.softmax(
                  two_class_logits,
                  name=prediction_key.PredictionKey.PROBABILITIES),
          prediction_key.PredictionKey.CLASSES:
              math_ops.argmax(
                  two_class_logits,
                  1,
                  name=prediction_key.PredictionKey.CLASSES)
      }

  def _metrics(self, eval_loss, predictions, labels, weights):
    """Returns a dict of metrics keyed by name."""
    with ops.name_scope("metrics", values=(
        [eval_loss, labels, weights] + list(six.itervalues(predictions)))):
      classes = predictions[prediction_key.PredictionKey.CLASSES]
      logistic = predictions[prediction_key.PredictionKey.LOGISTIC]
      metrics = {_summary_key(self.head_name, mkey.LOSS):
                 metrics_lib.streaming_mean(eval_loss)}
      # TODO(b/29366811): This currently results in both an "accuracy" and an
      # "accuracy/threshold_0.500000_mean" metric for binary classification.
      metrics[_summary_key(self.head_name, mkey.ACCURACY)] = (
          metrics_lib.streaming_accuracy(classes, labels, weights))
      metrics[_summary_key(self.head_name, mkey.PREDICTION_MEAN)] = (
          _predictions_streaming_mean(logistic, weights))
      metrics[_summary_key(self.head_name, mkey.LABEL_MEAN)] = (
          _indicator_labels_streaming_mean(labels, weights))
      # Also include the streaming mean of the label as an accuracy baseline, as
      # a reminder to users.
      metrics[_summary_key(self.head_name, mkey.ACCURACY_BASELINE)] = (
          _indicator_labels_streaming_mean(labels, weights))
      metrics[_summary_key(self.head_name, mkey.AUC)] = (
          _streaming_auc(logistic, labels, weights))
      for threshold in self._thresholds:
        metrics[_summary_key(
            self.head_name, mkey.ACCURACY_MEAN % threshold)] = (
                _streaming_accuracy_at_threshold(logistic, labels, weights,
                                                 threshold))
        # Precision for positive examples.
        metrics[_summary_key(
            self.head_name, mkey.PRECISION_MEAN % threshold)] = (
                _streaming_precision_at_threshold(logistic, labels, weights,
                                                  threshold))
        # Recall for positive examples.
        metrics[_summary_key(
            self.head_name, mkey.RECALL_MEAN % threshold)] = (
                _streaming_recall_at_threshold(logistic, labels, weights,
                                               threshold))
      return metrics
def _softmax_cross_entropy_loss(labels, logits, weights=None):
  """Sparse softmax cross-entropy loss for integer class-ID labels."""
  with ops.name_scope(
      None, "softmax_cross_entropy_loss", (logits, labels,)) as name:
    labels = ops.convert_to_tensor(labels)
    # Classification requires integer class IDs.
    if not labels.dtype.is_integer:
      raise ValueError("Labels dtype should be integer "
                       "Instead got %s." % labels.dtype)
    # TODO(ptucker): This will break for dynamic shapes.
    # sparse_softmax_cross_entropy_with_logits requires [batch_size] labels,
    # so a trailing size-1 axis is squeezed away.
    if len(labels.get_shape()) == 2:
      labels = array_ops.squeeze(labels, squeeze_dims=(1,))
    loss = nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels, name=name)
    return _compute_weighted_loss(loss, weights)
class _MultiClassHead(_SingleHead):
  """_Head for classification."""

  def __init__(self,
               n_classes,
               label_name=None,
               weight_column_name=None,
               enable_centered_bias=False,
               head_name=None,
               loss_fn=None,
               thresholds=None,
               metric_class_ids=None):
    """_Head for classification.

    Args:
      n_classes: Number of classes, must be greater than 2 (for 2 classes, use
        `_BinaryLogisticHead`).
      label_name: String, name of the key in label dict. Can be null if label
        is a tensor (single headed models).
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training. It
        will be multiplied by the loss of the example.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      head_name: name of the head. If provided, predictions, summary, metrics
        keys will be suffixed by `"/" + head_name` and the default variable
        scope will be `head_name`.
      loss_fn: Loss function.
      thresholds: thresholds for eval.
      metric_class_ids: List of class IDs for which we should report per-class
        metrics. Must all be in the range `[0, n_classes)`.

    Raises:
      ValueError: if `n_classes` or `metric_class_ids` is invalid.
    """
    # NOTE(review): the n_classes check below runs after this super().__init__
    # call, so `n_classes=None` is first rejected by `_SingleHead` as an
    # invalid logits_dimension.
    super(_MultiClassHead, self).__init__(
        problem_type=constants.ProblemType.CLASSIFICATION,
        logits_dimension=n_classes,
        label_name=label_name,
        weight_column_name=weight_column_name,
        head_name=head_name)
    if (n_classes is None) or (n_classes <= 2):
      raise ValueError("n_classes must be > 2: %s." % n_classes)
    self._thresholds = thresholds if thresholds else (.5,)
    # Defaults to sparse softmax cross entropy when no custom loss is given.
    self._loss_fn = loss_fn if loss_fn else _softmax_cross_entropy_loss
    self._enable_centered_bias = enable_centered_bias
    self._metric_class_ids = tuple([] if metric_class_ids is None else
                                   metric_class_ids)
    for class_id in self._metric_class_ids:
      if (class_id < 0) or (class_id >= n_classes):
        raise ValueError("Class ID %s not in [0, %s)." % (class_id, n_classes))

  def create_model_fn_ops(self,
                          features,
                          mode,
                          labels=None,
                          train_op_fn=None,
                          logits=None,
                          logits_input=None,
                          scope=None):
    """See `_Head`."""
    # Delegates to the shared driver, wiring in this head's label transform,
    # loss, prediction and metric functions.
    return _create_model_fn_ops(
        features=features,
        mode=mode,
        transform_labels_fn=self._transform_labels,
        loss_fn=self._loss_fn,
        logits_to_predictions_fn=self._logits_to_predictions,
        metrics_fn=self._metrics,
        create_output_alternatives_fn=self._create_output_alternatives,
        default_variable_scope_name="multi_class_head",
        labels=labels,
        train_op_fn=train_op_fn,
        logits=logits,
        logits_input=logits_input,
        logits_dimension=self.logits_dimension,
        head_name=self.head_name,
        weight_column_name=self.weight_column_name,
        enable_centered_bias=self._enable_centered_bias)

  def _transform_labels(self, labels):
    """Applies transformations to labels tensor."""
    labels_tensor = _to_labels_tensor(labels, self._label_name)
    _check_no_sparse_tensor(labels_tensor)
    return labels_tensor

  def _logits_to_predictions(self, logits):
    """Returns a dict of predictions.

    Args:
      logits: logits `Tensor` after applying possible centered bias.

    Returns:
      Dict of prediction `Tensor` keyed by `PredictionKey`.
    """
    with ops.name_scope(None, "predictions", (logits,)):
      return {
          prediction_key.PredictionKey.LOGITS:
              logits,
          prediction_key.PredictionKey.PROBABILITIES:
              nn.softmax(
                  logits, name=prediction_key.PredictionKey.PROBABILITIES),
          prediction_key.PredictionKey.CLASSES:
              math_ops.argmax(
                  logits, 1, name=prediction_key.PredictionKey.CLASSES)
      }

  def _metrics(self, eval_loss, predictions, labels, weights):
    """Returns a dict of metrics keyed by name."""
    with ops.name_scope("metrics", values=(
        [eval_loss, labels, weights] + list(six.itervalues(predictions)))):
      classes = predictions[prediction_key.PredictionKey.CLASSES]
      probabilities = predictions[prediction_key.PredictionKey.PROBABILITIES]
      logits = predictions[prediction_key.PredictionKey.LOGITS]
      metrics = {_summary_key(self.head_name, mkey.LOSS):
                 metrics_lib.streaming_mean(eval_loss)}
      # TODO(b/29366811): This currently results in both an "accuracy" and an
      # "accuracy/threshold_0.500000_mean" metric for binary classification.
      metrics[_summary_key(self.head_name, mkey.ACCURACY)] = (
          metrics_lib.streaming_accuracy(classes, labels, weights))
      metrics[_summary_key(self.head_name, mkey.AUC)] = (
          _streaming_auc_with_class_id_label(
              probabilities, labels, weights, self.logits_dimension))
      for class_id in self._metric_class_ids:
        # TODO(ptucker): Add per-class accuracy, precision, recall.
        metrics[_summary_key(
            self.head_name, mkey.CLASS_PREDICTION_MEAN % class_id)] = (
                _class_predictions_streaming_mean(classes, weights, class_id))
        metrics[_summary_key(
            self.head_name, mkey.CLASS_LABEL_MEAN % class_id)] = (
                _class_labels_streaming_mean(labels, weights, class_id))
        metrics[_summary_key(
            self.head_name, mkey.CLASS_PROBABILITY_MEAN % class_id)] = (
                _predictions_streaming_mean(probabilities, weights, class_id))
        metrics[_summary_key(
            self.head_name, mkey.CLASS_LOGITS_MEAN % class_id)] = (
                _predictions_streaming_mean(logits, weights, class_id))
        metrics[_summary_key(self.head_name, mkey.CLASS_AUC % class_id)] = (
            _class_streaming_auc(probabilities, labels, weights, class_id,
                                 self.logits_dimension))
      return metrics
def _to_labels_tensor(labels, label_name):
  """Returns labels as a `Tensor` or `SparseTensor`.

  Args:
    labels: Label `Tensor`/`SparseTensor`, or a dict containing labels.
    label_name: Key to look up when `labels` is a dict.

  Returns:
    Label `Tensor` or `SparseTensor`.
  """
  if isinstance(labels, dict):
    labels = labels[label_name]
  return framework_lib.convert_to_tensor_or_sparse_tensor(labels)
def _check_no_sparse_tensor(x):
  """Raises ValueError if the given tensor is a `SparseTensor`."""
  if not isinstance(x, sparse_tensor.SparseTensor):
    return
  raise ValueError("SparseTensor is not supported.")
def _sparse_labels_to_indicator(labels, num_classes):
  """Converts `SparseTensor` labels to a dense indicator `Tensor`.

  Args:
    labels: Label `Tensor` or `SparseTensor`.
    num_classes: Number of classes.

  Returns:
    Dense label `Tensor`.

  Raises:
    ValueError: If labels is a `SparseTensor` and `num_classes` < 2.
  """
  if not isinstance(labels, sparse_tensor.SparseTensor):
    # Dense labels pass through unchanged.
    return labels
  if num_classes < 2:
    raise ValueError("Must set num_classes >= 2 when passing labels as a "
                     "SparseTensor.")
  indicator = sparse_ops.sparse_to_indicator(labels, num_classes)
  return math_ops.to_int64(indicator)
def _assert_labels_rank(labels):
  """Returns an `Assert` op checking `labels` has rank at most 2."""
  rank_ok = math_ops.less_equal(array_ops.rank(labels), 2)
  return control_flow_ops.Assert(
      rank_ok,
      ("labels shape should be either [batch_size, 1] or [batch_size]",))
class _BinarySvmHead(_SingleHead):
  """_Head for binary classification using SVMs."""

  def __init__(self, label_name, weight_column_name, enable_centered_bias,
               head_name, thresholds):
    """See `_binary_svm_head` for the meaning of the arguments."""

    def _loss_fn(labels, logits, weights=None):
      """Hinge loss; labels are reshaped to `[batch_size, 1]` first."""
      with ops.name_scope(None, "hinge_loss", (logits, labels)) as name:
        with ops.control_dependencies((_assert_labels_rank(labels),)):
          labels = array_ops.reshape(labels, shape=(-1, 1))
        loss = losses_lib.hinge_loss(logits=logits, labels=labels, scope=name)
        return _compute_weighted_loss(loss, weights)

    super(_BinarySvmHead, self).__init__(
        problem_type=constants.ProblemType.LOGISTIC_REGRESSION,
        logits_dimension=1,
        label_name=label_name,
        weight_column_name=weight_column_name,
        head_name=head_name)
    self._thresholds = thresholds if thresholds else (.5,)
    self._loss_fn = _loss_fn
    self._enable_centered_bias = enable_centered_bias

  def create_model_fn_ops(self,
                          features,
                          mode,
                          labels=None,
                          train_op_fn=None,
                          logits=None,
                          logits_input=None,
                          scope=None):
    """See `_Head`."""
    # Delegates to the shared driver, wiring in this head's label transform,
    # loss, prediction and metric functions.
    return _create_model_fn_ops(
        features=features,
        mode=mode,
        transform_labels_fn=self._transform_labels,
        loss_fn=self._loss_fn,
        logits_to_predictions_fn=self._logits_to_predictions,
        metrics_fn=self._metrics,
        create_output_alternatives_fn=self._create_output_alternatives,
        default_variable_scope_name="binary_svm_head",
        labels=labels,
        train_op_fn=train_op_fn,
        logits=logits,
        logits_input=logits_input,
        logits_dimension=self.logits_dimension,
        head_name=self.head_name,
        weight_column_name=self.weight_column_name,
        enable_centered_bias=self._enable_centered_bias)

  def _transform_labels(self, labels):
    """Applies transformations to labels tensor."""
    labels_tensor = _to_labels_tensor(labels, self._label_name)
    _check_no_sparse_tensor(labels_tensor)
    return labels_tensor

  def _logits_to_predictions(self, logits):
    """See `_MultiClassHead`."""
    with ops.name_scope(None, "predictions", (logits,)):
      return {
          prediction_key.PredictionKey.LOGITS:
              logits,
          prediction_key.PredictionKey.CLASSES:
              # Pad with a zero logit for the negative class so argmax sees
              # two classes.
              math_ops.argmax(
                  _one_class_to_two_class_logits(logits),
                  1,
                  name=prediction_key.PredictionKey.CLASSES)
      }

  def _metrics(self, eval_loss, predictions, labels, weights):
    """See `_MultiClassHead`."""
    with ops.name_scope("metrics", values=(
        [eval_loss, labels, weights] + list(six.itervalues(predictions)))):
      metrics = {_summary_key(self.head_name, mkey.LOSS):
                 metrics_lib.streaming_mean(eval_loss)}
      # TODO(b/29366811): This currently results in both an "accuracy" and an
      # "accuracy/threshold_0.500000_mean" metric for binary classification.
      classes = predictions[prediction_key.PredictionKey.CLASSES]
      metrics[_summary_key(self.head_name, mkey.ACCURACY)] = (
          metrics_lib.streaming_accuracy(classes, labels, weights))
      # TODO(sibyl-vie3Poto): add more metrics relevant for svms.
      return metrics
class _MultiLabelHead(_SingleHead):
  """_Head for multilabel classification (n independent binary targets)."""
  # TODO(zakaria): add signature and metric for multilabel.
  def __init__(self,
               n_classes,
               label_name,
               weight_column_name,
               enable_centered_bias,
               head_name,
               thresholds,
               metric_class_ids=None,
               loss_fn=None):
    super(_MultiLabelHead, self).__init__(
        problem_type=constants.ProblemType.CLASSIFICATION,
        logits_dimension=n_classes,
        label_name=label_name,
        weight_column_name=weight_column_name,
        head_name=head_name)
    # Default decision threshold is 0.5 when none are supplied.
    self._thresholds = thresholds if thresholds else (.5,)
    # Sigmoid cross-entropy treats every class as an independent binary task.
    self._loss_fn = loss_fn if loss_fn else _sigmoid_cross_entropy_loss
    self._enable_centered_bias = enable_centered_bias
    # Class ids for which extra per-class metrics are reported.
    self._metric_class_ids = tuple([] if metric_class_ids is None else
                                   metric_class_ids)
    for class_id in self._metric_class_ids:
      if (class_id < 0) or (class_id >= n_classes):
        raise ValueError("Class ID %s not in [0, %s)." % (class_id, n_classes))
  def create_model_fn_ops(self,
                          features,
                          mode,
                          labels=None,
                          train_op_fn=None,
                          logits=None,
                          logits_input=None,
                          scope=None):
    """See `_Head`."""
    return _create_model_fn_ops(
        features=features,
        mode=mode,
        transform_labels_fn=self._transform_labels,
        loss_fn=self._loss_fn,
        logits_to_predictions_fn=self._logits_to_predictions,
        metrics_fn=self._metrics,
        create_output_alternatives_fn=self._create_output_alternatives,
        default_variable_scope_name="multi_label_head",
        labels=labels,
        train_op_fn=train_op_fn,
        logits=logits,
        logits_input=logits_input,
        logits_dimension=self.logits_dimension,
        head_name=self.head_name,
        weight_column_name=self.weight_column_name,
        enable_centered_bias=self._enable_centered_bias)
  def _transform_labels(self, labels):
    """Applies transformations to labels tensor."""
    labels_tensor = _to_labels_tensor(labels, self._label_name)
    # Sparse class-id labels are densified into a multi-hot indicator matrix.
    labels_tensor = _sparse_labels_to_indicator(labels_tensor,
                                                self._logits_dimension)
    return labels_tensor
  def _logits_to_predictions(self, logits):
    """See `_MultiClassHead`."""
    with ops.name_scope(None, "predictions", (logits,)):
      return {
          prediction_key.PredictionKey.LOGITS:
              logits,
          prediction_key.PredictionKey.PROBABILITIES:
              math_ops.sigmoid(
                  logits, name=prediction_key.PredictionKey.PROBABILITIES),
          # Per-class hard decision: logit > 0 equals sigmoid(logit) > 0.5.
          prediction_key.PredictionKey.CLASSES:
              math_ops.to_int64(
                  math_ops.greater(logits, 0),
                  name=prediction_key.PredictionKey.CLASSES)
      }
  def _metrics(self, eval_loss, predictions, labels, weights):
    """Returns a dict of metrics keyed by name."""
    with ops.name_scope("metrics", values=(
        [eval_loss, labels, weights] + list(six.itervalues(predictions)))):
      classes = predictions[prediction_key.PredictionKey.CLASSES]
      probabilities = predictions[prediction_key.PredictionKey.PROBABILITIES]
      logits = predictions[prediction_key.PredictionKey.LOGITS]
      metrics = {_summary_key(self.head_name, mkey.LOSS):
                 metrics_lib.streaming_mean(eval_loss)}
      # TODO(b/29366811): This currently results in both an "accuracy" and an
      # "accuracy/threshold_0.500000_mean" metric for binary classification.
      metrics[_summary_key(self.head_name, mkey.ACCURACY)] = (
          metrics_lib.streaming_accuracy(classes, labels, weights))
      metrics[_summary_key(self.head_name, mkey.AUC)] = _streaming_auc(
          probabilities, labels, weights)
      # Optional per-class diagnostics for the configured class ids.
      for class_id in self._metric_class_ids:
        # TODO(ptucker): Add per-class accuracy, precision, recall.
        metrics[_summary_key(
            self.head_name, mkey.CLASS_PREDICTION_MEAN % class_id)] = (
                _predictions_streaming_mean(classes, weights, class_id))
        metrics[_summary_key(
            self.head_name, mkey.CLASS_LABEL_MEAN % class_id)] = (
                _indicator_labels_streaming_mean(labels, weights, class_id))
        metrics[_summary_key(
            self.head_name, mkey.CLASS_PROBABILITY_MEAN % class_id)] = (
                _predictions_streaming_mean(probabilities, weights, class_id))
        metrics[_summary_key(
            self.head_name, mkey.CLASS_LOGITS_MEAN % class_id)] = (
                _predictions_streaming_mean(logits, weights, class_id))
        metrics[_summary_key(self.head_name, mkey.CLASS_AUC % class_id)] = (
            _streaming_auc(probabilities, labels, weights, class_id))
      return metrics
class _MultiHead(_Head):
  """_Head to combine multiple _Head objects.

  All heads stem from the same logits/logit_input tensor.
  For training, combines the losses of the heads according to a function
  provided by the user.
  For eval, merges the metrics of all heads into one dict.
  For inference, updates keys of the prediction dict to a 2-tuple,
    (head_name, prediction_key).
  """
  def __init__(self, heads, loss_combiner):
    """_Head to combine multiple _Head objects.

    Args:
      heads: list of _Head objects.
      loss_combiner: function that takes a list of loss tensors for the heads
        and returns the final loss tensor for the multi head.

    Raises:
      ValueError: if any head does not have a name.
    """
    self._logits_dimension = 0
    for head in heads:
      # TODO(ptucker): Change this, and add head_name to MultiHead, to support
      # nested MultiHeads.
      if not isinstance(head, _SingleHead):
        raise ValueError("Members of MultiHead must be SingleHead.")
      if not head.head_name:
        raise ValueError("Members of MultiHead must have names.")
      # Total logits width is the sum of the member heads' widths.
      self._logits_dimension += head.logits_dimension
    self._heads = heads
    self._loss_combiner = loss_combiner
  @property
  def logits_dimension(self):
    # Combined width of the concatenated logits expected by this multi head.
    return self._logits_dimension
  def create_model_fn_ops(self,
                          features,
                          mode,
                          labels=None,
                          train_op_fn=None,
                          logits=None,
                          logits_input=None,
                          scope=None):
    """See `_Head.create_model_fn_ops`.

    Args:
      features: Input `dict` of `Tensor` objects.
      mode: Estimator's `ModeKeys`.
      labels: Labels `Tensor`, or `dict` of same.
      train_op_fn: Function that takes a scalar loss and returns an op to
          optimize with the loss.
      logits: Concatenated logits for all heads or a dict of head name to logits
          tensor. If concatenated logits, it should have (batchsize, x) shape
          where x is the sum of `logits_dimension` of all the heads,
          i.e., same as `logits_dimension` of this class. create_model_fn_ops
          will split the logits tensor and pass logits of proper size to each
          head. This is useful if we want to be agnostic about whether you
          creating a single versus multihead. logits can also be a dict for
          convenience where you are creating the head specific logits explicitly
          and don't want to concatenate them yourself.
      logits_input: tensor to build logits from.
      scope: Optional scope for variable_scope. If provided, will be passed to
          all heads. Most users will want to set this to `None`, so each head
          constructs a separate variable_scope according to its `head_name`.

    Returns:
      `ModelFnOps`.

    Raises:
      ValueError: if `mode` is not recognized, or neither or both of `logits`
          and `logits_input` is provided.
    """
    _check_mode_valid(mode)
    all_model_fn_ops = []
    if logits is None:
      # Use logits_input: each member head builds its own logits from it.
      for head in self._heads:
        all_model_fn_ops.append(
            head.create_model_fn_ops(
                features=features,
                mode=mode,
                labels=labels,
                # Member heads must not create train ops; training is combined
                # below in _combine_train.
                train_op_fn=no_op_train_fn,
                logits_input=logits_input,
                scope=scope))
    else:
      head_logits_pairs = []
      if isinstance(logits, dict):
        # NOTE(review): this re-initialization of head_logits_pairs is
        # redundant — it was already bound to [] just above.
        head_logits_pairs = []
        for head in self._heads:
          head_logits_pairs.append((head, logits[head.head_name]))
      else:
        # Split logits for each head.
        head_logits_pairs = zip(self._heads, self._split_logits(logits))
      for head, head_logits in head_logits_pairs:
        all_model_fn_ops.append(
            head.create_model_fn_ops(
                features=features,
                mode=mode,
                labels=labels,
                train_op_fn=no_op_train_fn,
                logits=head_logits,
                scope=scope))
    if mode == model_fn.ModeKeys.TRAIN:
      if train_op_fn is None:
        raise ValueError("train_op_fn can not be None in TRAIN mode.")
      return self._combine_train(all_model_fn_ops, train_op_fn)
    if mode == model_fn.ModeKeys.INFER:
      return self._combine_infer(all_model_fn_ops)
    if mode == model_fn.ModeKeys.EVAL:
      return self._combine_eval(all_model_fn_ops)
    raise ValueError("mode=%s unrecognized" % str(mode))
  def _split_logits(self, logits):
    """Splits logits for heads.

    Args:
      logits: the logits tensor.

    Returns:
      A list of logits for the individual heads.
    """
    all_logits = []
    begin = 0
    # Slice consecutive column ranges, one range per head, in head order.
    for head in self._heads:
      current_logits_size = head.logits_dimension
      current_logits = array_ops.slice(logits, [0, begin],
                                       [-1, current_logits_size])
      all_logits.append(current_logits)
      begin += current_logits_size
    return all_logits
  def _combine_train(self, all_model_fn_ops, train_op_fn):
    """Combines list of ModelFnOps for training.

    Args:
      all_model_fn_ops: list of ModelFnOps for the individual heads.
      train_op_fn: Function to create train op. See `create_model_fn_ops`
          documentation for more details.

    Returns:
      ModelFnOps that combines all the heads.
    """
    losses = []
    additional_train_ops = []
    for m in all_model_fn_ops:
      losses.append(m.loss)
      # Per-head train ops (e.g. centered-bias steps) are grouped with the
      # combined train op below.
      additional_train_ops.append(m.train_op)
    loss = self._loss_combiner(losses)
    train_op = train_op_fn(loss)
    train_op = control_flow_ops.group(train_op, *additional_train_ops)
    return model_fn.ModelFnOps(
        mode=model_fn.ModeKeys.TRAIN,
        loss=loss,
        train_op=train_op)
  def _combine_infer(self, all_model_fn_ops):
    """Combines list of ModelFnOps for inference.

    Args:
      all_model_fn_ops: list of ModelFnOps for the individual heads.

    Returns:
      ModelFnOps that combines all the heads.
    """
    predictions = {}
    output_alternatives = {}
    for head, m in zip(self._heads, all_model_fn_ops):
      head_name = head.head_name
      output_alternatives[head_name] = m.output_alternatives[head_name]
      # Prediction keys are namespaced as (head_name, prediction_key) tuples.
      for k, v in m.predictions.items():
        predictions[(head_name, k)] = v
    return model_fn.ModelFnOps(
        mode=model_fn.ModeKeys.INFER,
        predictions=predictions,
        output_alternatives=output_alternatives)
  def _combine_eval(self, all_model_fn_ops):
    """Combines list of ModelFnOps for eval.

    Args:
      all_model_fn_ops: list of ModelFnOps for the individual heads.

    Returns:
      ModelFnOps that combines all the heads.
    """
    predictions = {}
    metrics = {}
    losses = []
    for head, m in zip(self._heads, all_model_fn_ops):
      losses.append(m.loss)
      head_name = head.head_name
      for k, v in m.predictions.items():
        predictions[(head_name, k)] = v
      for k, v in m.eval_metric_ops.items():
        # NOTE(review): metric keys are NOT re-namespaced by head here (the
        # commented line shows the suffixed alternative); if two heads emit
        # the same metric key, the later head silently overwrites the earlier.
        # metrics["%s/%s" % (k, head_name)] = v
        metrics[k] = v
    loss = self._loss_combiner(losses)
    return model_fn.ModelFnOps(
        mode=model_fn.ModeKeys.EVAL,
        predictions=predictions,
        loss=loss,
        eval_metric_ops=metrics)
def _weight_tensor(features, weight_column_name):
  """Looks up the example-weight column in `features` as a float `Tensor`.

  Returns `None` when no weight column name is configured.
  """
  if weight_column_name:
    with ops.name_scope(None, "weight_tensor",
                        tuple(six.itervalues(features))):
      return math_ops.to_float(features[weight_column_name])
  return None
# TODO(zakaria): This function is needed for backward compatibility and should
# be removed when we migrate to core.
def _compute_weighted_loss(loss_unweighted, weight, name="loss"):
  """Returns a tuple of (loss_train, loss_report).

  loss is used for gradient descent while weighted_average_loss is used for
  summaries to be backward compatible.

  loss is different from the loss reported on the tensorboard as we
  should respect the example weights when computing the gradient.

    L = sum_{i} w_{i} * l_{i} / B

  where B is the number of examples in the batch, l_{i}, w_{i} are individual
  losses, and example weight.

  Args:
    loss_unweighted: Unweighted loss
    weight: Weight tensor
    name: Optional name

  Returns:
    A tuple of losses. First one for training and the second one for reporting.
  """
  with ops.name_scope(name, values=(loss_unweighted, weight)) as name_scope:
    if weight is None:
      # No weights: both the training and reported loss are the plain mean.
      loss = math_ops.reduce_mean(loss_unweighted, name=name_scope)
      return loss, loss
    with ops.name_scope(None, "weighted_loss",
                        (loss_unweighted, weight)) as name:
      # Flatten both tensors so per-example losses line up with weights.
      weighted_loss = math_ops.multiply(
          array_ops.reshape(loss_unweighted, shape=(-1,)),
          array_ops.reshape(weight, shape=(-1,)), name=name)
    # TODO(ptucker): This might be wrong if weights are broadcast to loss shape.
    # We should use tf.losses here.
    weighted_loss_mean = math_ops.reduce_mean(weighted_loss, name=name_scope)
    # Reported loss divides by the total weight instead of the batch size.
    weighted_loss_normalized = math_ops.div(
        math_ops.reduce_sum(weighted_loss),
        math_ops.to_float(math_ops.reduce_sum(weight)),
        name="weighted_average_loss")
    return weighted_loss_mean, weighted_loss_normalized
def _wrap_custom_loss_fn(loss_fn):
def _wrapper(labels, logits, weights=None):
if weights is None:
loss = loss_fn(labels, logits)
else:
loss = loss_fn(labels, logits, weights)
return loss, loss
return _wrapper
def _check_mode_valid(mode):
  """Raises ValueError if the given mode is invalid."""
  valid_modes = (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.INFER,
                 model_fn.ModeKeys.EVAL)
  if mode not in valid_modes:
    raise ValueError("mode=%s unrecognized." % str(mode))
def _get_arguments(func):
"""Returns a spec of given func."""
if hasattr(func, "__code__"):
# Regular function.
return inspect.getargspec(func)
elif hasattr(func, "__call__"):
# Callable object.
return _get_arguments(func.__call__)
elif hasattr(func, "func"):
# Partial function.
return _get_arguments(func.func)
def _verify_loss_fn_args(loss_fn):
  """Checks that `loss_fn` accepts `labels`, `logits` and `weights` args."""
  arg_names = _get_arguments(loss_fn).args
  missing = [name for name in ("labels", "logits", "weights")
             if name not in arg_names]
  if missing:
    raise ValueError("Argument %s not found in loss_fn." % missing[0])
def _centered_bias(logits_dimension, head_name=None):
  """Creates and returns a zero-initialized centered-bias `Variable`.

  Args:
    logits_dimension: Last dimension of `logits`. Must be >= 1.
    head_name: Optional name of the head.

  Returns:
    Centered bias `Variable` of shape `(logits_dimension,)`.

  Raises:
    ValueError: if `logits_dimension` is invalid.
  """
  if (logits_dimension is None) or (logits_dimension < 1):
    raise ValueError("Invalid logits_dimension %s." % logits_dimension)
  # Do not create a variable with variable_scope.get_variable, because that may
  # create a PartitionedVariable, which does not support indexing, so
  # summary.scalar will not work.
  centered_bias = variables.Variable(
      name="centered_bias_weight",
      initial_value=array_ops.zeros(shape=(logits_dimension,)),
      trainable=True)
  # One scalar summary per bias component so training can be monitored.
  for dim in range(logits_dimension):
    if head_name:
      summary.scalar("centered_bias/bias_%d/%s" % (dim, head_name),
                     centered_bias[dim])
    else:
      summary.scalar("centered_bias/bias_%d" % dim, centered_bias[dim])
  return centered_bias
def _centered_bias_step(centered_bias, logits_dimension, labels,
                        loss_fn, weights):
  """Creates and returns training op for centered bias."""
  if (logits_dimension is None) or (logits_dimension < 1):
    raise ValueError("Invalid logits_dimension %s." % logits_dimension)
  with ops.name_scope(None, "centered_bias_step", (labels,)) as name:
    batch_size = array_ops.shape(labels)[0]
    # Broadcast the bias vector to one (batch_size, logits_dimension) row
    # per example so loss_fn sees properly shaped "logits".
    logits = array_ops.reshape(
        array_ops.tile(centered_bias, (batch_size,)),
        (batch_size, logits_dimension))
    with ops.name_scope(None, "centered_bias", (labels, logits)):
      centered_bias_loss = math_ops.reduce_mean(
          loss_fn(labels, logits, weights), name="training_loss")
  # Learn the centered bias with its own optimizer. 0.1 is a conservative lr
  # for a single variable.
  return training.AdagradOptimizer(0.1).minimize(
      centered_bias_loss, var_list=(centered_bias,), name=name)
def _summary_key(head_name, val):
return "%s/%s" % (val, head_name) if head_name else val
def _train_op(loss, labels, train_op_fn, centered_bias, logits_dimension,
              loss_fn, weights):
  """Returns op for the training step."""
  if centered_bias is not None:
    # The centered bias trains with its own Adagrad step, grouped below so
    # one session run advances both.
    centered_bias_step = _centered_bias_step(centered_bias, logits_dimension,
                                             labels, loss_fn, weights)
  else:
    centered_bias_step = None
  with ops.name_scope(None, "train_op", (loss, labels)):
    train_op = train_op_fn(loss)
    if centered_bias_step is not None:
      train_op = control_flow_ops.group(train_op, centered_bias_step)
    return train_op
def _sigmoid_cross_entropy_loss(labels, logits, weights=None):
  """Per-class sigmoid cross-entropy; returns (training_loss, report_loss)."""
  with ops.name_scope(None, "sigmoid_cross_entropy_loss",
                      (logits, labels)) as name:
    # sigmoid_cross_entropy_with_logits requires [batch_size, n_classes] labels.
    loss = nn.sigmoid_cross_entropy_with_logits(
        labels=math_ops.to_float(labels), logits=logits, name=name)
    return _compute_weighted_loss(loss, weights)
def _float_weights_or_none(weights):
  """Casts `weights` to float, passing `None` through unchanged."""
  if weights is not None:
    with ops.name_scope(None, "float_weights", (weights,)) as name:
      return math_ops.to_float(weights, name=name)
  return None
def _indicator_labels_streaming_mean(labels, weights=None, class_id=None):
  """Streaming mean of indicator `labels`, optionally for one class column."""
  labels = ops.convert_to_tensor(labels)
  if class_id is not None:
    labels = labels[:, class_id]
  return metrics_lib.streaming_mean(labels, weights=weights)
def _predictions_streaming_mean(predictions,
                                weights=None,
                                class_id=None):
  """Streaming mean of `predictions`, optionally for one class column."""
  predictions = ops.convert_to_tensor(predictions)
  if weights is not None:
    weights = ops.convert_to_tensor(weights)
  if class_id is not None:
    predictions = predictions[:, class_id]
  return metrics_lib.streaming_mean(predictions, weights=weights)
# TODO(ptucker): Add support for SparseTensor labels.
def _class_id_labels_to_indicator(labels, num_classes):
  """Converts class-id `labels` to a dense one-hot indicator matrix."""
  if (num_classes is None) or (num_classes < 2):
    raise ValueError("Invalid num_classes %s." % num_classes)
  with ops.control_dependencies((_assert_labels_rank(labels),)):
    labels = array_ops.reshape(labels, (-1,))
  return array_ops.one_hot(labels, depth=num_classes, axis=-1)
def _class_predictions_streaming_mean(predictions, weights, class_id):
  """Streaming fraction of `predictions` equal to `class_id`."""
  return metrics_lib.streaming_mean(
      array_ops.where(
          math_ops.equal(
              math_ops.to_int32(class_id), math_ops.to_int32(predictions)),
          array_ops.ones_like(predictions),
          array_ops.zeros_like(predictions)),
      weights=weights)
def _class_labels_streaming_mean(labels, weights, class_id):
  """Streaming fraction of `labels` equal to `class_id`."""
  return metrics_lib.streaming_mean(
      array_ops.where(
          math_ops.equal(
              math_ops.to_int32(class_id), math_ops.to_int32(labels)),
          array_ops.ones_like(labels), array_ops.zeros_like(labels)),
      weights=weights)
def _class_streaming_auc(predictions, labels, weights, class_id,
                         num_classes):
  """Streaming AUC for one class; class-id `labels` become indicators."""
  indicator_labels = _class_id_labels_to_indicator(
      labels, num_classes=num_classes)
  return _streaming_auc(predictions, indicator_labels, weights, class_id)
def _streaming_auc_with_class_id_label(predictions, labels, weights,
                                       num_classes):
  """Streaming AUC over all classes; class-id `labels` become indicators."""
  indicator_labels = _class_id_labels_to_indicator(
      labels, num_classes=num_classes)
  return _streaming_auc(predictions, indicator_labels, weights)
def _streaming_auc(predictions, labels, weights=None, class_id=None):
  """Streaming AUC of `predictions` against boolean-cast indicator `labels`."""
  predictions = ops.convert_to_tensor(predictions)
  labels = ops.convert_to_tensor(labels)
  if class_id is not None:
    # Restrict both tensors to the requested class column.
    predictions = predictions[:, class_id]
    labels = labels[:, class_id]
  return metrics_lib.streaming_auc(
      predictions,
      math_ops.cast(labels, dtypes.bool),
      weights=_float_weights_or_none(weights))
def _assert_class_id(class_id, num_classes=None):
  """Raises ValueError unless `class_id` is valid (in `[0, num_classes)`)."""
  if (class_id is None) or (class_id < 0):
    raise ValueError("Invalid class_id %s." % class_id)
  if num_classes is not None:
    if num_classes < 2:
      raise ValueError("Invalid num_classes %s." % num_classes)
    if class_id >= num_classes:
      raise ValueError("Invalid class_id %s." % class_id)
def _streaming_accuracy_at_threshold(predictions, labels, weights, threshold):
  """Streaming accuracy of `predictions >= threshold` against `labels`."""
  threshold_predictions = math_ops.to_float(
      math_ops.greater_equal(predictions, threshold))
  return metrics_lib.streaming_accuracy(
      predictions=threshold_predictions, labels=labels, weights=weights)
def _streaming_precision_at_threshold(predictions, labels, weights, threshold):
  """Streaming precision of `predictions` at a single `threshold`."""
  precision_tensor, update_op = metrics_lib.streaming_precision_at_thresholds(
      predictions, labels=labels, thresholds=(threshold,),
      weights=_float_weights_or_none(weights))
  # Squeeze away the length-1 thresholds dimension.
  return array_ops.squeeze(precision_tensor), array_ops.squeeze(update_op)
def _streaming_recall_at_threshold(predictions, labels, weights, threshold):
  """Streaming recall of `predictions` at a single `threshold`.

  Thin wrapper over `streaming_recall_at_thresholds` that squeezes away the
  length-1 thresholds dimension of both the value and the update op.
  """
  # Renamed from the copy-pasted `precision_tensor`: this op computes recall.
  recall_tensor, update_op = metrics_lib.streaming_recall_at_thresholds(
      predictions, labels=labels, thresholds=(threshold,),
      weights=_float_weights_or_none(weights))
  return array_ops.squeeze(recall_tensor), array_ops.squeeze(update_op)
| [
"[email protected]"
] | |
3f06029ceb9f9184313732f15f1334894534e149 | 18375af374e91e721fb16e5415bc4fc7540e5ced | /currency_exchange/views.py | cf8a2101498baf62f88fd5a950b8ec6f7712a9a2 | [] | no_license | youssefelmasry/tahweela_app_demo | 64d802df33ad6361a714a3119b3380b9afe98e4e | ee55b23e601f5e6580e9f051f6da89acab37d3a1 | refs/heads/master | 2023-02-25T23:05:08.853738 | 2021-01-29T17:55:33 | 2021-01-29T17:55:33 | 334,214,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | from rest_framework.generics import ListAPIView
from rest_framework.views import APIView
from currency_exchange.serializers import RateExchangeSerializer
from rest_framework.response import Response
from currency_exchange.models import BaseCurrencies
class BaseCurrenciesListView(APIView):
    """Lists the `currency` values of all stored base currencies."""
    def get(self, request):
        # Flat values_list yields the bare currency strings.
        codes = BaseCurrencies.objects.values_list('currency', flat=True)
        return Response({"currencies": codes})
class RateExchangeView(ListAPIView):
    """Returns exchange-rate data for the base currency given via `?base=`."""
    serializer_class = RateExchangeSerializer
    def list(self, request):
        # Validate the requested base (missing -> None), let the serializer
        # fetch/persist the rates, then echo the serialized payload back.
        payload = {"base": request.query_params.get('base')}
        serializer = self.get_serializer(data=payload)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data)
| [
"[email protected]"
] | |
f0202c853270176392aa90efe01b29e5549434cc | 80ef89008f34f3e52d3045db2ef158e9ad09832a | /homework/tf_regression.py | 60fc3016ff551928bf8cf1548a2f35d3ed9675e5 | [] | no_license | phantomamoeba/ast383 | 7836a24f28d4a2e0a110b04a72080f4e9bbfb1d5 | c68bb8c053d4c4dec6093c4a0349539423db7e2a | refs/heads/master | 2021-09-13T09:13:34.283002 | 2018-04-27T16:31:47 | 2018-04-27T16:31:47 | 118,023,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,646 | py | import os
import numpy as np
import tensorflow as tf
__all__ = ['linear_regression']
def linear_regression(x, y, gamma_w, gamma_b, minibatch_size, learning_rate,
                      training_epochs, architecture, method):
    """Fits y ~= x @ w.T + b by L2-regularized least squares (TF1 graph mode).

    Args:
        x: input array of shape (nb_obs, x_dim).
        y: target array of shape (nb_obs, y_dim).
        gamma_w: L2 regularization scale for the weights.
        gamma_b: L2 regularization scale for the biases.
        minibatch_size: number of rows sampled (without replacement) per step.
        learning_rate: optimizer step size.
        training_epochs: number of minibatch updates to run.
        architecture: 'neural' (tf.layers.dense) or 'explicit' (matmul + bias).
        method: 'sgd' or 'adam'.

    Returns:
        (w_fit, b_fit): fitted weights of shape (y_dim, x_dim) and biases of
        shape (y_dim,), extracted from the session after training.
    """
    assert x.shape[0] == y.shape[0]
    assert architecture in ['neural', 'explicit']
    assert method in ['sgd', 'adam']
    x_dim = x.shape[1]
    y_dim = y.shape[1]
    #placeholder ... something that will later be used as an input to the graph
    #shape[None,x_dim] ... # of rows not specified ... will later supply one row or several or all
    # with SGD normally feed in some subset
    x_tf = tf.placeholder(dtype=tf.float32, shape=[None, x_dim])
    y_tf = tf.placeholder(dtype=tf.float32, shape=[None, y_dim])
    regularizer = tf.contrib.layers.l2_regularizer
    initializer = tf.contrib.layers.xavier_initializer() #random initial guesses for w and b ... but with small dispersion
    #random initial guesses for w and b ... but with small dispersion
    # initializer = tf.glorot_uniform_initializer()
    if architecture == 'neural':
        with tf.variable_scope('neural') as scope:
            #dense ... every input is connected to outputs? ... fully connected or dense layer
            #same, mathematically, as matrix multiplication (where each edge is an element in the output matrix)
            y_net = tf.layers.dense(inputs=x_tf, units=y_dim,
                                    kernel_initializer=initializer,
                                    bias_initializer=initializer,
                                    name='dense') #here 'dense' is just the name or label
            #convolution layers are NOT dense ... have some constraint that some edges must be the same
            # and there is a finite extent s|t as nodes get farther apart, they are NOT connected
            #TRAINABLE ... eg. fitable like w and b
            def apply_regularization(param, gamma):
                # Collect the trainable variables whose name contains `param`
                # ('kernel' or 'bias') and L2-penalize them with scale `gamma`.
                reg_vars = [var for var in
                            tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                              scope=scope.name)
                            if param in var.name]
                return tf.contrib.layers.apply_regularization(regularizer=regularizer(scale=gamma),
                                                              weights_list=reg_vars)
            obj = tf.nn.l2_loss(y_net - y_tf)
            obj += apply_regularization('kernel', gamma_w)
            obj += apply_regularization('bias', gamma_b)
            # The dense kernel is (x_dim, y_dim); transpose to (y_dim, x_dim).
            def w_extractor(sess): return sess.graph.get_tensor_by_name('neural/dense/kernel:0').eval().T
            def b_extractor(sess): return sess.graph.get_tensor_by_name('neural/dense/bias:0').eval()
    elif architecture == 'explicit':
        with tf.variable_scope('explicit'):
            def create_variable(name, shape, gamma):
                return tf.get_variable(name=name,
                                       shape=shape,
                                       dtype=tf.float32,
                                       initializer=initializer,
                                       regularizer=regularizer(scale=gamma))
            w = create_variable(name='weights', shape=[y_dim, x_dim], gamma=gamma_w)
            b = create_variable(name='biases', shape=[y_dim], gamma=gamma_b)
            #this is NOT an assignment ... more of a declaration ... is a part of the graph
            y_exp = tf.matmul(x_tf, tf.transpose(w)) + b
            #the objective
            obj = tf.nn.l2_loss(y_exp - y_tf) + sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
            def w_extractor(sess): return sess.run(w)
            def b_extractor(sess): return sess.run(b)
    else:
        # Unreachable: the architecture assert above restricts the choices.
        obj = w_extractor = b_extractor = None
    optimizer = {'sgd': tf.train.GradientDescentOptimizer, 'adam': tf.train.AdamOptimizer}
    optimizer = optimizer[method]
    optimizer = optimizer(learning_rate).minimize(obj)
    init_op = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init_op)
        #each epoch is fitting one mini-batch
        for epoch in range(training_epochs):
            def minibatch_dict():
                count = x.shape[0]
                #select a random mini-batch
                minibatch = np.random.choice(count, size=minibatch_size, replace=False)
                return {x_tf: x[minibatch], y_tf: y[minibatch]}
            #each of these is different (called the function twice) but might have some overlap
            train_dict = minibatch_dict()
            test_dict = minibatch_dict()
            sess.run(optimizer, feed_dict=train_dict)
            def is_power2():
                # True for epoch 0 and every exact power of two.
                return not epoch & (epoch - 1)
            #feed_dictionary is how to feed data in tf into placeholders
            #keys are placeholders
            #values are minibatches (matrices)
            if is_power2():
                #see if objective is decreasing
                obj_val = sess.run(obj, feed_dict=test_dict)
                print('epoch {} obj = {}'.format(epoch, obj_val))
        w_fit = w_extractor(sess)
        b_fit = b_extractor(sess)
    return w_fit, b_fit
def main(argv):
    """Builds a synthetic linear dataset, fits it, and reports recovery error."""
    assert (len(argv) == 1)
    x_dim = 4
    y_dim = 8
    nb_obs = 128
    # Ground-truth parameters that the fit should recover.
    w_true = np.random.randn(y_dim, x_dim)
    b_true = np.random.randn(y_dim)
    x = np.random.randn(nb_obs, x_dim)
    y = x @ w_true.T + b_true #broadcasting (add vector to every row), @ is matrix mult. (python 3.5+)
    #want to recover w_true and b_true
    #gamma_w and _b are regularizers (could have different values)
    w_fit, b_fit = linear_regression(x, y, gamma_w=1e-4, gamma_b=1e-4,
                                     minibatch_size=16,
                                     learning_rate=1e-1,
                                     training_epochs=1000,
                                     architecture='explicit',
                                     method='adam')
    #tf can also calculate (don't have to use numpy)
    def error(a_fit, a_true):
        # Largest element-wise relative error, symmetrized over fit and truth.
        return np.max(np.absolute(a_fit - a_true) /
                      (0.5 * (np.absolute(a_fit) + np.absolute(a_true))))
    b_error = error(b_fit, b_true)
    w_error = error(w_fit, w_true)
    print('maximum relative error in b = {}'.format(b_error))
    print('maximum relative error in w = {}'.format(w_error))
    #produces the graph (need to install TensorBoard to view this)
    with tf.Session() as sess:
        _ = tf.summary.FileWriter(os.getcwd(), sess.graph)
if __name__ == '__main__':
    # Enable INFO logging, then let tf.app.run parse flags and invoke main().
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.app.run(main=main)
| [
"[email protected]"
] | |
b5aaf890a1d7e6c6702ed2586719bf9dd36d1ebb | fd9ad90be33b4875a954fe93d62a485e1a1abdfa | /category/migrations/0002_alter_category_slug.py | dd16eed4e8f0a212df21c7d42e6b33299445d7e5 | [] | no_license | Elbek-Dalaboyev/greatkart-django | e25beba3e7e688fbcd8367b8f24e05069bf31d03 | 9ff1f1c37891efe4d5fc426e96d634eedf8b3ce2 | refs/heads/main | 2023-04-27T00:37:10.112281 | 2021-06-09T08:59:26 | 2021-06-09T08:59:26 | 373,591,851 | 1 | 1 | null | 2021-06-09T08:59:27 | 2021-06-03T17:36:04 | JavaScript | UTF-8 | Python | false | false | 390 | py | # Generated by Django 3.2.3 on 2021-05-26 15:06
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: alters Category.slug to a SlugField with
    # max_length=100 and a database-level unique constraint.
    dependencies = [
        ('category', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='category',
            name='slug',
            field=models.SlugField(max_length=100, unique=True),
        ),
    ]
| [
"[email protected]"
] | |
bd559fbd2c336eb6d115472c556cf088f0332d40 | daad090dd655bec07a9a6d54ade3e5ed7045f68a | /hw5/pi.py | 2d9e4d5c546bac80db15c4d9248f20fd04f75110 | [] | no_license | lukemvu/cs450 | 3482cac68cf4d080048fd015861318665985717e | e02a924052886302eb12519702cba980d0fa283b | refs/heads/master | 2023-03-22T15:34:11.487946 | 2021-03-16T17:22:24 | 2021-03-16T17:22:24 | 348,432,607 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,117 | py | # -*- coding: utf-8 -*-
# Copyright 2015 Sameer Suhas Marathe
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
#
# Module description
# ==================
# Implements the 'Unbounded Spigot Algorithm for the Digits of Pi' by
# Jeremy Gibbons. The paper describing this algorithm can be found at the
# following URL:
# http://www.cs.ox.ac.uk/jeremy.gibbons/publications/spigot.pdf
#
# This module implementes the alogrithm outlined in section 5 of the paper
# based on the expression for Pi derived from Leibniz series.
def __comp(a, b):
(q,r,s,t) = a
(u,v,w,x) = b
return (q*u+r*w, q*v+r*x, s*u+t*w, s*v+t*x)
def __extr(a, x):
(q,r,s,t) = a
return (q*x + r, s*x + t)
def __prod(a, n):
    """Left-composes the digit-extraction matrix for digit `n` with state `a`."""
    extraction = (10, -10 * n, 0, 1)
    return __comp(extraction, a)
def __safe(b, n):
    """True when digit `n` is certain: the floor at x=4 matches x=3's digit."""
    num, den = __extr(b, 4)
    return n == num // den
def __cons(z, z1):
    """Absorbs the next series term `z1` into the accumulated state `z`."""
    return __comp(z, z1)
def __next(z):
    """Predicts the next digit by evaluating the state at x=3."""
    num, den = __extr(z, 3)
    return num // den
def __lfts(k):
return (k, 4*k+2, 0, 2*k+1)
def piGenLeibniz():
    """A generator function that yields the digits of Pi
    """
    k = 1
    # Start from the identity transformation.
    z = (1,0,0,1)
    while True:
        lft = __lfts(k)
        # Candidate next digit from the current state.
        n = int(__next(z))
        if __safe(z,n):
            # Digit is certain: emit it and shift it out of the state.
            z = __prod(z,n)
            yield n
        else:
            # Not yet certain: absorb one more series term and retry.
            z = __cons(z,lft)
            k += 1
def getPiLeibniz(n):
    """Returns a list containing the first `n` digits of Pi.

    Non-positive `n` yields an empty list (range() is empty then, so the
    former explicit `n > 0` guard was redundant).  try/finally guarantees
    the generator is closed even if digit production raises.
    """
    mypi = piGenLeibniz()
    try:
        return [next(mypi) for _ in range(n)]
    finally:
        mypi.close()
| [
"[email protected]"
] | |
fda3a1030b55d55ed9191c6305d8643fcbda2aa8 | f8f78541796303975df16e97b38d0903ba2e363e | /Lesson6.13_Pandas.py | 35defc36b6d0a1ff794618e7a64ae895e74c9846 | [] | no_license | cb1118/dataAnalyst | 19537dab4b65dd068c6d626e1e2fda03eee6f914 | 65f54ef03c8a78586e7c64dd10f84ce65c467b70 | refs/heads/master | 2020-03-23T22:19:16.351054 | 2018-08-27T03:40:15 | 2018-08-27T03:40:15 | 142,168,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,322 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 1 18:59:47 2018
@author: CB1118
"""
import pandas as pd
# Country names aligned index-for-index with the two value lists below.
# (Not referenced by the visible code; kept for cross-checking the data.)
countries = ['Albania', 'Algeria', 'Andorra', 'Angola', 'Antigua and Barbuda',
             'Argentina', 'Armenia', 'Australia', 'Austria', 'Azerbaijan',
             'Bahamas', 'Bahrain', 'Bangladesh', 'Barbados', 'Belarus',
             'Belgium', 'Belize', 'Benin', 'Bhutan', 'Bolivia']
life_expectancy_values = [74.7, 75. , 83.4, 57.6, 74.6, 75.4, 72.3, 81.5, 80.2,
                          70.3, 72.1, 76.4, 68.1, 75.2, 69.8, 79.4, 70.8, 62.7,
                          67.3, 70.6]
gdp_values = [ 1681.61390973, 2155.48523109, 21495.80508273, 562.98768478,
              13495.1274663 , 9388.68852258, 1424.19056199, 24765.54890176,
              27036.48733192, 1945.63754911, 21721.61840978, 13373.21993972,
              483.97086804, 9783.98417323, 2253.46411147, 25034.66692293,
              3680.91642923, 366.04496652, 1175.92638695, 1132.21387981]
# Life expectancy and gdp data in 2007 for 20 countries
life_expectancy = pd.Series(life_expectancy_values)
gdp = pd.Series(gdp_values)
# Change False to True for each block of code to see what it does
# Accessing elements and slicing
if False:
    print(life_expectancy[0])
    print(gdp[3:6])
# Looping
if False:
    for country_life_expectancy in life_expectancy:
        print('Examining life expectancy {}'.format(country_life_expectancy))
# Pandas functions
if False:
    print(life_expectancy.mean())
    print(life_expectancy.std())
    print(gdp.max())
    print(gdp.sum())
# Vectorized operations and index arrays
if False:
    a = pd.Series([1, 2, 3, 4])
    b = pd.Series([1, 2, 1, 2])
    print(a + b)
    print(a * 2)
    print(a >= 3)
    print(a[a >= 3])
def variable_correlation(variable1, variable2):
'''
Fill in this function to calculate the number of data points for which
the directions of variable1 and variable2 relative to the mean are the
same, and the number of data points for which they are different.
Direction here means whether each value is above or below its mean.
You can classify cases where the value is equal to the mean for one or
both variables however you like.
Each argument will be a Pandas series.
For example, if the inputs were pd.Series([1, 2, 3, 4]) and
pd.Series([4, 5, 6, 7]), then the output would be (4, 0).
This is because 1 and 4 are both below their means, 2 and 5 are both
below, 3 and 6 are both above, and 4 and 7 are both above.
On the other hand, if the inputs were pd.Series([1, 2, 3, 4]) and
pd.Series([7, 6, 5, 4]), then the output would be (0, 4).
This is because 1 is below its mean but 7 is above its mean, and
so on.
'''
both_above = (variable1 > variable1.mean()) & \
(variable2 > variable2.mean())
both_below = (variable1 < variable1.mean()) & \
(variable2 < variable2.mean())
same_direction = both_above | both_below
num_same_direction = same_direction.sum()
num_diff_direction = len(variable1) - num_same_direction
return (num_same_direction, num_diff_direction)
print(variable_correlation(life_expectancy, gdp))
print(life_expectancy.describe())
print(gdp.describe()) | [
"[email protected]"
] | |
3ee0b16a3fa268a4f70f364d9ab5bc3b4fed2794 | 6630694f401f6f475dd81bb01ff9368db844ccff | /configs/repvgg/repvgg-B2_8xb32_in1k.py | b9a7d4ca5570518f0c4d0b81951e0e97c46606f9 | [
"Apache-2.0"
] | permissive | open-mmlab/mmpretrain | 98a4d6b3bb747efc3d50decebf84fc3ffa41076a | d2ccc44a2c8e5d49bb26187aff42f2abc90aee28 | refs/heads/main | 2023-08-30T19:11:24.771498 | 2023-08-23T02:45:18 | 2023-08-23T02:45:18 | 278,415,292 | 652 | 186 | Apache-2.0 | 2023-09-08T08:01:40 | 2020-07-09T16:25:04 | Python | UTF-8 | Python | false | false | 106 | py | _base_ = './repvgg-A0_8xb32_in1k.py'
model = dict(backbone=dict(arch='B2'), head=dict(in_channels=2560))
| [
"[email protected]"
] | |
270526ead40fed7395ab36a2f0e5538850c9fcd5 | 43575c1324dc0760958a110d7f056bce88422a03 | /listing/arrayqueue.py | fe91f1bc669035d14da292523bec642ed81d0941 | [] | no_license | nicolas4d/Data-Structures-and-Algorithms-Using-Python | 1ffd74d26f09de2057bdc53998a56e56ed77c1de | a879ce6fd4033867783ee487d57d459b029eb5f8 | refs/heads/master | 2020-09-24T12:48:30.726766 | 2019-12-31T03:15:44 | 2019-12-31T03:15:44 | 225,761,970 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,219 | py | # Implementation of the Queue ADT using a circular array.
from array import Array
class Queue :
# Creates an empty queue.
def __init__( self, maxSize ) :
self._count = 0
self._front = 0
self._back = maxSize - 1
self._qArray = Array( maxSize )
# Returns True if the queue is empty.
def isEmpty( self ) :
return self._count == 0
# Returns True if the queue is full.
def isFull( self ) :
return self._count == len(self._qArray)
# Returns the number of items in the queue.
def __len__( self ) :
return self._count
# Adds the given item to the queue.
def enqueue( self, item ):
assert not self.isFull(), "Cannot enqueue to a full queue."
maxSize = len(self._qArray)
self._back = (self._back + 1) % maxSize
self._qArray[self._back] = item
self._count += 1
# Removes and returns the first item in the queue.
def dequeue( self ):
assert not self.isEmpty(), "Cannot dequeue from an empty queue."
item = self._qArray[ self._front ]
maxSize = len(self._qArray)
self._front = (self._front + 1) % maxSize
self._count -= 1
return item
| [
"[email protected]"
] | |
1ced79828f6469f7bab445f3f426e7431206e4f3 | ca5da7e25c0f4f96572dedf8831cb0f111490c28 | /OK/Python/HackerRank/Practice/Loops.py | 169f88823be1d26e6d51117e2eb78eb8634b3b7e | [] | no_license | 317702550/THC | fecc44aa953f9f6b98cb7e1856ef12537f629393 | f9fe9ddb2e55c5beab3eba8102915a7690524659 | refs/heads/master | 2020-07-05T08:53:11.087930 | 2019-12-10T15:54:35 | 2019-12-10T15:54:35 | 202,597,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | #!/usr/bin/python3
# Maximiliano Proaño Bernal
# 03/12/19
# Python 3.7.3
# Ciclos
if __name__ == '__main__':
n = int(input())
i = 0
while i != n:
print(i**2)
i = i + 1
| [
"[email protected]"
] | |
331391003523c28eedd1e4d6b841574bbed2b7ce | d3eba05768f13fb77037f094c8cd5bee0a2ce920 | /python/pyclaw/geotools/most2geoclaw.py | 110907c350aefc6585d894e4ed02d90d25a31179 | [
"BSD-3-Clause"
] | permissive | geoflows/geoclaw-4.x | cb262ffcc30b483e4e1bf0ba480f43408cb1b6fd | c8879d25405017b38392aa3b1ea422ff3e3604ea | refs/heads/master | 2023-07-10T06:39:42.774585 | 2021-02-23T20:15:36 | 2021-02-23T20:15:36 | 6,094,587 | 1 | 2 | BSD-3-Clause | 2023-06-28T16:50:43 | 2012-10-05T18:18:31 | Fortran | UTF-8 | Python | false | false | 3,350 | py | """
Module for converting MOST data and output to GeoClaw format.
"""
import os, glob, re
def most2tt3(fname):
"""
Converts MOST topo file to tt3 format.
"""
f = open(fname).readlines()
mn = f[0].split()
ncols = int(mn[0])
nrows = int(mn[1])
xll = float(f[1])
dx = float(f[2]) - xll
xll = xll - 360.
yll = float(f[nrows+ncols])
dy = float(f[nrows+ncols-1]) - yll
if abs(dx-dy) > 1.e-6:
print '*** WARNING: dx = ',dx,' dy = ',dy
cellsize = dx
fname2 = os.path.splitext(fname)[0] + '.asc'
f2 = open(fname2,'w')
f2.write('%s ncols\n%s nrows\n%s xll\n%s yll\n%s cellsize\n99999 nodata_value\n' \
% (ncols,nrows,xll,yll,cellsize))
f2.writelines(f[nrows+ncols+1:])
f2.close()
print "Created ",fname2
def most2fortt(fnameprefix):
"""
Converts MOST output files to fort.t files.
"""
files = glob.glob(r'%s*' % fnameprefix)
files.sort()
s = r"%s(?P<hours>[0-9]*)h(?P<minutes>[0-9]*)m(?P<seconds>[0-9]*)s" \
% fnameprefix
regexp = re.compile(s)
frameno = 1
for fname in files:
result = regexp.search(fname)
try:
hours = result.group("hours")
minutes = result.group("minutes")
seconds = result.group("seconds")
except:
print "*** Cannot parse fname: ",fname
raise
t = int(hours)*3600. + int(minutes)*60. + int(seconds)
fortname = "fort.t" + str(frameno).zfill(4)
f = open(fortname, 'w')
f.write("%18.8e time\n" % t)
f.write("%5i meqn\n" % 1)
f.write("%5i ngrids\n" % 1)
f.write("%5i ndim\n" % 0)
f.write("%5i maux\n" % 2)
f.close()
print "Created %s from %s at time t = %s" % (fortname, fname, t)
frameno = frameno + 1
def most2fortq(fnameprefix):
"""
Converts MOST output files to fort.q files.
"""
files = glob.glob(r'%s*' % fnameprefix)
files.sort()
frameno = 1
for fname in files:
f = open(fname).readlines()
mn = f[0].split()
ncols = int(mn[0])
nrows = int(mn[1])
xll = float(f[1])
dx = float(f[2]) - xll
xll = xll - 360.
yll = float(f[nrows+ncols])
dy = float(f[nrows+ncols-1]) - yll
if abs(dx-dy) > 1.e-6:
print '*** WARNING: dx = ',dx,' dy = ',dy
cellsize = dx
fortname = 'fort.q' + str(frameno).zfill(4)
f2 = open(fortname,'w')
f2.write("%5i grid_number\n" % 1)
f2.write("%5i AMR_level\n" % 1)
f2.write("%5i mx\n" % ncols)
f2.write("%5i my\n" % nrows)
f2.write("%5i xlow\n" % xll)
f2.write("%5i ylow\n" % yll)
f2.write("%5i dx\n" % dx)
f2.write("%5i dy\n" % dy)
f2.write("\n")
for k in range(len(f)-1, nrows+ncols, -1):
for s in f[k].split():
z = float(s)
f2.write("%18.8e\n" % z)
f2.close()
print "Created %s from %s" % (fortname,fname)
frameno += 1
if __name__=='__main__':
import sys
most2tt3(sys.argv[1])
| [
"[email protected]"
] | |
2c78b769a6a36326247be3501afa66070e7edcc9 | 4a4e0cc6a1fb19404e9139b77e3c1cdeb7352ee6 | /自动写诗2/write_poem.py | b86a9968067863ab8ff3af02946713525b8ad660 | [] | no_license | zouhuigang/tensorflow | 11193b4f38b24e5635bb155c81afc9e564d1adae | f9d9b4bbcb53e5eeb192740be8470dda31a34af5 | refs/heads/master | 2021-08-28T12:01:47.718920 | 2017-12-12T06:04:32 | 2017-12-12T06:04:32 | 113,291,005 | 10 | 5 | null | null | null | null | UTF-8 | Python | false | false | 6,342 | py | import json
import os, sys,time
import logging
import math
import numpy as np
import tensorflow as tf
from char_rnn_model import CharRNNLM,SampleType
from config_poem import config_sample
from word2vec_helper import Word2Vec
from rhyme_helper import RhymeWords
class WritePoem():
def __init__(self,args):
self.args = args
logging.basicConfig(stream=sys.stdout,
format='%(asctime)s %(levelname)s:%(message)s',
level=logging.INFO, datefmt='%I:%M:%S')
with open(os.path.join(self.args.model_dir, 'result.json'), 'r') as f:
result = json.load(f)
params = result['params']
best_model = result['best_model']
best_valid_ppl = result['best_valid_ppl']
if 'encoding' in result:
self.args.encoding = result['encoding']
else:
self.args.encoding = 'utf-8'
base_path = args.data_dir
w2v_file = os.path.join(base_path, "vectors_poem.bin")
self.w2v = Word2Vec(w2v_file)
RhymeWords.read_rhyme_words(os.path.join(base_path,'rhyme_words.txt'))
if args.seed >= 0:
np.random.seed(args.seed)
logging.info('best_model: %s\n', best_model)
self.sess = tf.Session()
w2v_vocab_size = len(self.w2v.model.vocab)
with tf.name_scope('evaluation'):
self.model = CharRNNLM(is_training=False,w2v_model = self.w2v.model,vocab_size=w2v_vocab_size, infer=True, **params)
saver = tf.train.Saver(name='model_saver')
saver.restore(self.sess, best_model)
def free_verse(self):
'''
自由诗
Returns:
'''
sample = self.model.sample_seq(self.sess, 40, '[',sample_type= SampleType.weighted_sample)
if not sample:
return 'err occar!'
print('free_verse:',sample)
idx_end = sample.find(']')
parts = sample.split('。')
if len(parts) > 1:
two_sentence_len = len(parts[0]) + len(parts[1])
if idx_end < 0 or two_sentence_len < idx_end:
return sample[1:two_sentence_len + 2]
return sample[1:idx_end]
@staticmethod
def assemble(sample):
if sample:
parts = sample.split('。')
if len(parts) > 1:
return '{}。{}。'.format(parts[0][1:],parts[1][:len(parts[0])])
return ''
def rhyme_verse(self):
'''
押韵诗
Returns:
'''
gen_len = 20
sample = self.model.sample_seq(self.sess, gen_len, start_text='[',sample_type= SampleType.weighted_sample)
if not sample:
return 'err occar!'
print('rhyme_verse:',sample)
parts = sample.split('。')
if len(parts) > 0:
start = parts[0] + '。'
rhyme_ref_word = start[-2]
rhyme_seq = len(start) - 3
sample = self.model.sample_seq(self.sess, gen_len , start,
sample_type= SampleType.weighted_sample,rhyme_ref =rhyme_ref_word,rhyme_idx = rhyme_seq )
print(sample)
return WritePoem.assemble(sample)
return sample[1:]
def hide_words(self,given_text):
'''
藏字诗
Args:
given_text:
Returns:
'''
if(not given_text):
return self.rhyme_verse()
givens = ['','']
split_len = math.ceil(len(given_text)/2)
givens[0] = given_text[:split_len]
givens[1] = given_text[split_len:]
gen_len = 20
sample = self.model.sample_seq(self.sess, gen_len, start_text='[',sample_type= SampleType.select_given,given=givens[0])
if not sample:
return 'err occar!'
print('rhyme_verse:',sample)
parts = sample.split('。')
if len(parts) > 0:
start = parts[0] + '。'
rhyme_ref_word = start[-2]
rhyme_seq = len(start) - 3
# gen_len = len(start) - 1
sample = self.model.sample_seq(self.sess, gen_len , start,
sample_type= SampleType.select_given,given=givens[1],rhyme_ref =rhyme_ref_word,rhyme_idx = rhyme_seq )
print(sample)
return WritePoem.assemble(sample)
return sample[1:]
def cangtou(self,given_text):
'''
藏头诗
Returns:
'''
if(not given_text):
return self.rhyme_verse()
start = ''
rhyme_ref_word = ''
rhyme_seq = 0
# for i,word in enumerate(given_text):
for i in range(4):
word = ''
if i < len(given_text):
word = given_text[i]
if i == 0:
start = '[' + word
else:
start += word
before_idx = len(start)
if(i != 3):
sample = self.model.sample_seq(self.sess, self.args.length, start,
sample_type= SampleType.weighted_sample )
else:
if not word:
rhyme_seq += 1
sample = self.model.sample_seq(self.sess, self.args.length, start,
sample_type= SampleType.max_prob,rhyme_ref =rhyme_ref_word,rhyme_idx = rhyme_seq )
print('Sampled text is:\n\n%s' % sample)
sample = sample[before_idx:]
idx1 = sample.find(',')
idx2 = sample.find('。')
min_idx = min(idx1,idx2)
if min_idx == -1:
if idx1 > -1 :
min_idx = idx1
else: min_idx =idx2
if min_idx > 0:
# last_sample.append(sample[:min_idx + 1])
start ='{}{}'.format(start, sample[:min_idx + 1])
if i == 1:
rhyme_seq = min_idx - 1
rhyme_ref_word = sample[rhyme_seq]
print('last_sample text is:\n\n%s' % start)
return WritePoem.assemble(start)
def start_model():
now = int(time.time())
args = config_sample('--model_dir output_poem --length 16 --seed {}'.format(now))
writer = WritePoem(args)
return writer
if __name__ == '__main__':
writer = start_model()
| [
"[email protected]"
] | |
888b5fcafa91cb7b2ebdcb7e5c1c67b2ae52449b | ead27e8ab8433d7c15bf81f51c18029cee6d39ac | /neutron_diagnose/__init__.py | df9c144b676c044d7b1feeb4f6a93063a65e5059 | [
"Apache-2.0"
] | permissive | zhuzhichaoTM/neutron-diagnose | 7b42c1679cc648474c06919a6c16243dcf080d1c | 223d0d5edaf77a8264ce06d68adaa1b329bb9f1b | refs/heads/master | 2020-05-15T08:41:44.551121 | 2017-04-13T06:55:43 | 2017-04-13T06:55:43 | 182,163,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
__all__ = ['__version__']
import pbr.version
version_info = pbr.version.VersionInfo('neutron-diagnose')
try:
__version__ = version_info.version_string()
except AttributeError:
__version__ = None
| [
"[email protected]"
] | |
2a363433acaaec2e11b27accea62497c51739654 | 305369594545fca818bbb8e0254f97a3ec50ccfd | /32c09a6e325db19533e2e272caed35fd.py | d09d0f72a2fbc88aec3ddb22bb95fb0c3d343f56 | [] | no_license | jkitchin/ACS-2016-data-sharing | c1950cf31f3bdb380e41427febb1888fb394a3ff | c23fc008d24aa637d2864d02533de2bf6bac7de7 | refs/heads/master | 2021-01-10T09:49:40.550918 | 2016-03-15T02:05:45 | 2016-03-15T02:05:45 | 53,690,308 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 46 | py | return [x for x in data if x[1] == 'anatase']
| [
"[email protected]"
] | |
6333b09aba873b5e01d30fc103a78cbe1fe4e540 | fa2d32a77ebd558ffe54eea67504308fa1ac2d2c | /learn.strings_lists_tuples_sets/convertstring2list.py | 49f94079f5c63f303f9fb141816fcc83b042643f | [] | no_license | prpllrhd/morePYTHON | 329362c18a447f913fa1f73c06fd0451b1a79009 | 739f64eb5462de532a8f1788ebe7631cb155f565 | refs/heads/master | 2021-06-05T19:59:43.276455 | 2019-01-20T19:53:34 | 2019-01-20T19:53:34 | 14,663,754 | 0 | 1 | null | 2015-07-08T06:12:08 | 2013-11-24T15:38:51 | Python | UTF-8 | Python | false | false | 87 | py | #!/usr/bin/env python3
a = "sameer rakhee yuvi avni aai"
b = list(a.split())
print (b)
| [
"[email protected]"
] | |
2d21df815a992618fea1b1c56a61c64f357a3169 | cb75140ebed613adea538bad2b0de29c67db9213 | /mundo3dicionario4.py | 9fa3434ba392346299146c757d3c24be598229f0 | [] | no_license | AndsuLucas/exercicios-python | bdbacda7144a38d0d4852a05b15dadac6c830a88 | b78eceb1f7c73a02dfc3cd7175e851fee19bf972 | refs/heads/master | 2020-04-07T01:09:28.691794 | 2018-11-17T17:15:06 | 2018-11-17T17:15:06 | 157,932,756 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,066 | py | from time import sleep
#quantas pessoas foram cadasradas,média de idade, quntas mulheres,pessoas acima da media
f = 0
funcao = str
contador = 0
cadastrototal = []
dc = dict()
soma = 0
am = 0
while True:
contador+=1
dc['nome'] = str(input('nome:'))
while True:
dc['sexo'] = str(input('sexo: [M/F]')).upper()[0]
if dc['sexo'] == "M" or dc['sexo'] == "F":
break
dc['idade'] = int(input('idade:'))
cadastrototal.append(dc.copy())
funcao = str(input('deseja continuar? [s/n]'))
if funcao == "n":
break
for c in (cadastrototal):
soma+= c['idade']
if c['sexo'] == "F":
f+=1
media =float( soma/contador)
sleep(1)
print(f'Foram cadastradas {contador} pessoas.')
sleep(1)
print(f'Média de idade: {media}.')
sleep(1)
print(f'Foram cadastradas {f} mulheres.')
sleep(1)
print('Pessoas acima da média>')
for c in (cadastrototal):
if c['idade']>media:
sleep(1)
print(f'{c["nome"]} com {c["idade"]} anos')
am+=1
print(f'Ao todo {am} pessoas acima da média.') | [
"[email protected]"
] | |
29bc3192a686ccae8db44c3e1d6e73ecf673b46d | 6dbfd83c4641d798934a7eb1c5f45757d6504ebe | /cloud/fs/redis/user.py | d1e12468a7051964f76203be6c7100fdfca9d109 | [] | no_license | xuanyuan1332/simple_cloud | 015e5eba38e82b3e0c78f44e535fd5bc0ff441dc | ffbbaa22816d79ae45c3475e25e352129a70057d | refs/heads/master | 2022-11-22T21:01:29.130241 | 2020-07-14T11:39:00 | 2020-07-14T11:39:00 | 280,647,428 | 0 | 0 | null | 2020-07-18T12:00:52 | 2020-07-18T12:00:52 | null | UTF-8 | Python | false | false | 663 | py | '''用户相关
'''
import json
from .base import BaseRedis
class UserRedis(BaseRedis):
'''用户操作reids
'''
user = '{username}'
sign_in = 'sign-in-{project_id}'
def get_sign_in(self, project_id):
sign_in = self.sign_in.format(project_id=project_id)
nums = self.redis.get(sign_in)
if nums:
nums = int(self.redis.get(sign_in).decode())
return nums if nums > 0 else 0
return 0
def get_user(self, username):
try:
res = self.redis.get(self.user.format(username=username))
return json.loads(res)
except Exception:
return {}
| [
"[email protected]"
] | |
ac5953399a647183382fd235afa3078fcf3f2cf8 | 7c9f28e371e8dfa9290c05a48a9d924484b4b18c | /1.py | e742ff9920b98902a4a6413dc4ff9b62916929d0 | [] | no_license | Pavithralakshmi/corekata | 1f9d963da44a6fdcdedaf2e39452545f6cc52e9b | 06d1c7bba25681ce12e2ab93ce461228afb6b984 | refs/heads/master | 2021-04-30T01:53:37.414318 | 2018-10-11T17:58:39 | 2018-10-11T17:58:39 | 121,491,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63 | py | s1=input("eter anything")
s2=input("enter somthing")
print(s2)
| [
"[email protected]"
] | |
1df2674dd840f1c05f994f93ddba2e65067ae23e | 3c639c452fac96b882fe2c26b47545079dd174ec | /RunEventNumberFilter/test/RunEventNumberFilter.py | 480e8087c369be7a4aee8a4529e23d65edb9612d | [] | no_license | cms-nd-user/RunEventNumberFilter | 84825ffdee1cd6aee5a695b21b66d84043859b41 | b47f4144a2e7a91baa726151319e090a45c5cd63 | refs/heads/master | 2021-01-21T13:08:39.958774 | 2013-08-20T16:15:36 | 2013-08-20T16:15:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,022 | py | # Auto generated configuration file
# using:
# Revision: 1.168.2.1
# Source: /cvs_server/repositories/CMSSW/CMSSW/Configuration/PyReleaseValidation/python/ConfigBuilder.py,v
# with command line options: SingleGammaPt35_cfi.py -s GEN,SIM,DIGI,DIGI2RAW,RAW2DIGI,RECO -n 2 --conditions FrontierConditions_GlobalTag,MC_36Y_V10::All --eventcontent RECOSIM --no_exec
import FWCore.ParameterSet.Config as cms
process = cms.Process('GetEvents')
#### Turn off printing every event ####
process.load('FWCore.MessageService.MessageLogger_cfi')
process.MessageLogger.cerr.FwkReport.reportEvery = cms.untracked.int32(1000)
# import of standard configurations
#process.load('Configuration.StandardSequences.Services_cff')
#process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
#process.load('Configuration.EventContent.EventContent_cff')
#process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
#process.GlobalTag.globaltag = 'MC_3XY_V26::All'
#process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(10) )
# Input source
process.source = cms.Source("PoolSource", fileNames = cms.untracked.vstring(
'file:/afs/crc.nd.edu/user/j/jslaunwh/RAW/8EE30C05-18E9-E211-A9A7-002618943810.root'
))
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
#Event Selection
process.selectEventsByRunEventNumber = cms.EDFilter("RunEventNumberFilter",
debug = cms.bool(False),
filename = cms.string('inputs.txt')
)
# Output definition
process.outputA = cms.OutputModule("PoolOutputModule",
#fastCloning = cms.untracked.bool(False),
fileName = cms.untracked.string('SkimRunEvent.root'),
SelectEvents = cms.untracked.PSet (
SelectEvents = cms.vstring('selectByRunEvent')
)
)
# Path and EndPath definitions
process.selectByRunEvent = cms.Path(process.selectEventsByRunEventNumber)
process.Aoutput = cms.EndPath(process.outputA)
| [
"[email protected]"
] | |
e1bfa27cf691a0b6ee3d8d8a074da682433cef02 | f20931826a557f0d884f8b46de259840c29b7428 | /meiduo_mall/meiduo_mall/utils/authenticate.py | 2079f8c659a70dbebbb5291a0c6b7c9cbcc4867e | [] | no_license | zy723/meiduo_project | 38ccecc2fa1d61f2eb848ebc572dd43d45a534c8 | f50a8105c63554b57419cb3494c3d323bb343f9c | refs/heads/master | 2022-12-15T02:34:42.578549 | 2020-05-20T16:56:27 | 2020-05-20T16:56:27 | 248,264,846 | 0 | 0 | null | 2022-12-12T20:28:41 | 2020-03-18T15:08:40 | TSQL | UTF-8 | Python | false | false | 2,024 | py | """
增加支持管理员用户登录账号
JWT扩展的登录视图,在收到用户名与密码时,也是调用Django的认证系统中提供的authenticate()来检查用户名与密码是否正确。
我们可以通过修改Django认证系统的认证后端(主要是authenticate方法)来支持登录账号既可以是用户名也可以是手机号。
修改Django认证系统的认证后端需要继承django.contrib.auth.backends.ModelBackend,并重写authenticate方法。
authenticate(self, request, username=None, password=None, **kwargs)方法的参数说明:
request 本次认证的请求对象
username 本次认证提供的用户账号
password 本次认证提供的密码
我们想要让管理员用户才能登录我们的admin后台,这时我们就要修改django原有的用户验证方法。
重写authenticate方法的思路:
根据username参数查找用户User对象,在查询条件中在加上is_staff=True的条件
若查找到User对象,调用User对象的check_password方法检查密码是否正确
"""
from django.contrib.auth.backends import ModelBackend
from users.models import User
class MeiduoModelBackend(ModelBackend):
def authenticate(self, request, username=None, password=None, **kwargs):
# 判断是否通过vue组件发送请求
if request is None:
try:
user = User.objects.get(username=username, is_staff=True)
except:
return None
# 检查密码
if user.check_password(password):
return user
else:
# 变量username的值,可以是用户名,也可以是手机号,需要判断,再查询
try:
user = User.objects.get(username=username)
except:
# 如果未查到数据,则返回None,用于后续判断
return None
# 判断密码
if user.check_password(password):
return user
else:
return None
| [
"[email protected]"
] | |
a582113ccd4d7aff4595ecaf83c5980d1e086ed2 | ece386649b39403cbb23e4c4071178e178311343 | /blog/admin.py | 46814e609e37e1a6f18bc9b715de4f5ff9a253f8 | [] | no_license | GadinganJayHarley06/my_first_blog | c743352178f570400aa2ba256c4bec9530eb0a76 | b2e5921475c161565bc214290eacd47a237041de | refs/heads/master | 2021-09-06T01:51:10.285943 | 2018-02-01T12:31:19 | 2018-02-01T12:31:19 | 119,833,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 212 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
# Register your models here.
from django.contrib import admin
from .models import Post
admin.site.register(Post)
| [
"[email protected]"
] | |
6618cc7ee42d5c64ce1da566bff7ac57a6f4fe95 | 86317245051f5256751ca3ac186eaffccf55af52 | /src/menu.py | 9e2bc6d87931c718dff91cfadeb55fabe57dd88a | [] | no_license | SaulCastel/LFP_PROY1 | aae34f98b2e784484fb284800515e6eb1d1857bf | f27d3d7c7c6c7ba13774b4dcaaa79370cfdf65f7 | refs/heads/main | 2023-03-31T01:33:19.910787 | 2021-03-22T07:48:05 | 2021-03-22T07:48:05 | 348,588,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | class Item:
def __init__(self, id:str, name:str, price:str,desc:str) -> None:
self.id = id
self.name = name
self.price = price
self.desc = desc
class Section:
def __init__(self, name:str) -> None:
self.name = name
self.items = []
def newItem(self,item:Item):
self.items.append(item)
class Menu:
def __init__(self, name:str) -> None:
self.name = name
self.sect = []
def newSect(self,sec:Section):
self.sect.append(sec)
def getItem(self,id:str):
for section in self.sect:
for item in section.items:
if id == item.id:
return item
| [
"[email protected]"
] | |
c446ccc3868b590d3af20686969206a7817c1464 | a3ff213f844fa494ba2ba06d26b4e7ba77fa7a58 | /django_patterns/management/__init__.py | b9b47da6d3b268a3cc8c8f9d7cdfcbbc09ce161d | [] | no_license | monetizeio/django-patterns | f757771c46859b9157b48d0f875a397239706702 | 9b5567cbc0d4ed58a9ff074bccc9026982daf307 | refs/heads/master | 2021-01-13T01:50:56.949546 | 2013-03-19T23:23:18 | 2013-03-19T23:23:18 | 5,908,449 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,044 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# === django_patterns.management ------------------------------------------===
# Copyright © 2011-2012, RokuSigma Inc. and contributors. See AUTHORS for more
# details.
#
# Some rights reserved.
#
# Redistribution and use in source and binary forms of the software as well as
# documentation, with or without modification, are permitted provided that the
# following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The names of the copyright holders or contributors may not be used to
# endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE AND
# DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ===----------------------------------------------------------------------===
# ===----------------------------------------------------------------------===
# End of File
# ===----------------------------------------------------------------------===
| [
"[email protected]"
] | |
ba287699cf45b5845e4ef5418bb4df38b2b91214 | fb265f7443acf7c72e3a7d95e2be5a268ecd8443 | /rcwa/test/fastRunner.py | 68958164b6b599d0dc72f99e7be7ab2fdd7527df | [
"MIT"
] | permissive | itsjmj/rcwa | 71f1bd6bfbff1bb2cc630b6b35a23d1f919c329d | a946c3819e5e52ad9c92a8a73c48360749b06196 | refs/heads/master | 2023-08-25T19:04:44.699749 | 2021-05-12T15:25:56 | 2021-05-12T15:25:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,190 | py | """
Run all the core unit tests, not the lengthy and major integration tests
"""
import context
import unittest
import sys
import testCrystal
import testHarmonics
import testLayer
import testMaterial
import testMatrices1x1Harmonics
import testMatrices3x3Harmonics
import testNetlistParser
import testSolver1x1Harmonics
import testSolver3x3Harmonics
import testSource
loader = unittest.TestLoader()
suite = unittest.TestSuite()
suite.addTests(loader.loadTestsFromModule(testCrystal))
suite.addTests(loader.loadTestsFromModule(testHarmonics))
suite.addTests(loader.loadTestsFromModule(testLayer))
suite.addTests(loader.loadTestsFromModule(testMaterial))
suite.addTests(loader.loadTestsFromModule(testSource))
suite.addTests(loader.loadTestsFromModule(testMatrices1x1Harmonics))
suite.addTests(loader.loadTestsFromModule(testMatrices3x3Harmonics))
suite.addTests(loader.loadTestsFromModule(testNetlistParser))
suite.addTests(loader.loadTestsFromModule(testSolver1x1Harmonics))
runner = unittest.TextTestRunner(verbosity=3)
result = runner.run(suite)
numberFailures = len(result.errors)
numberErrors= len(result.failures)
numberIssues = numberFailures + numberErrors
sys.exit(numberIssues)
| [
"[email protected]"
] | |
f17124384ece9a83dcb41df9bd0ab948cf7e0c16 | 8409cbba351dc3572949e933ea2e265c44452844 | /hsn_v1/densecrf.py | ddc2fd145b1d2ef2d2d89d3e17d3ee796e8775df | [
"MIT"
] | permissive | Kitatine/hsn_v1 | ffa239659745fc11083cbc61dd5d9e0c014c7a10 | 8cc524a7932deead6ff87fc5945522cf310b9fcd | refs/heads/master | 2022-09-06T19:03:09.757180 | 2020-06-01T16:26:46 | 2020-06-01T16:26:46 | 268,572,420 | 0 | 0 | MIT | 2020-06-01T16:20:33 | 2020-06-01T16:20:32 | null | UTF-8 | Python | false | false | 2,870 | py | import os
import numpy as np
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import unary_from_softmax
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
class DenseCRF:
"""Class for implementing a dense CRF"""
def __init__(self):
self.gauss_sxy = 3
self.gauss_compat = 30
self.bilat_sxy = 10
self.bilat_srgb = 20
self.bilat_compat = 50
self.n_infer = 5
def load_config(self, path):
"""Load dense CRF configurations from file"""
if os.path.exists(path):
config = np.load(path)
self.gauss_sxy, self.gauss_compat, self.bilat_sxy, self.bilat_srgb, self.bilat_compat, self.n_config = \
config[0]
else:
print('Warning: dense CRF config file ' + path + ' does not exist - using defaults')
def process(self, probs, images):
"""
Run dense CRF, given probability map and input image
Parameters
----------
probs : numpy 4D array
The class probability maps, in batch
images : numpy 4D array
The original input images, in batch
Returns
-------
maxconf_crf : numpy 3D array
The discrete class segmentation map from dense CRF, in batch
crf : numpy 4D array
The continuous class probability map from dense CRF, in batch
"""
# Set up variable sizes
num_input_images = probs.shape[0]
num_classes = probs.shape[1]
size = images.shape[1:3]
crf = np.zeros((num_input_images, num_classes, size[0], size[1]))
for iter_input_image in range(num_input_images):
pass_class_inds = np.where(np.sum(np.sum(probs[iter_input_image], axis=1), axis=1) > 0)
# Set up dense CRF 2D
d = dcrf.DenseCRF2D(size[1], size[0], len(pass_class_inds[0]))
cur_probs = probs[iter_input_image, pass_class_inds[0]]
# Unary energy
U = np.ascontiguousarray(unary_from_softmax(cur_probs))
d.setUnaryEnergy(U)
# Penalize small, isolated segments
# (sxy are PosXStd, PosYStd)
d.addPairwiseGaussian(sxy=self.gauss_sxy, compat=self.gauss_compat)
# Incorporate local colour-dependent features
# (sxy are Bi_X_Std and Bi_Y_Std,
# srgb are Bi_R_Std, Bi_G_Std, Bi_B_Std)
d.addPairwiseBilateral(sxy=self.bilat_sxy, srgb=self.bilat_srgb, rgbim=np.uint8(images[iter_input_image]),
compat=self.bilat_compat)
# Do inference
Q = d.inference(self.n_infer)
crf[iter_input_image, pass_class_inds] = np.array(Q).reshape((len(pass_class_inds[0]), size[0], size[1]))
maxconf_crf = np.argmax(crf, axis=1)
return maxconf_crf, crf | [
"[email protected]"
] | |
9a4a7f80301f045fa5e68ed1f5e0ef3af3e86480 | 00d4b8f0e974692af26da891a2ed0612d989abf0 | /script/program.py | 36fedeb83629aafae8369e4ff3dcb65972151391 | [
"MIT"
] | permissive | meteorolog90/PROGRAM | a8697d6781fb935b2b3545ef10bd2aad11e6d9df | 93ab22ef9c0cfb7cd0c833c08c5c7ed6318291ab | refs/heads/master | 2021-07-16T07:22:50.436225 | 2019-02-19T20:52:19 | 2019-02-19T20:52:19 | 147,817,658 | 0 | 1 | null | 2018-11-05T21:53:32 | 2018-09-07T12:00:53 | Python | UTF-8 | Python | false | false | 5,990 | py | #! /home/martin/anaconda3/bin/python3.6
import pandas as pd
import numpy as np
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from matplotlib.colors import BoundaryNorm
import matplotlib.pyplot as plt
from metpy.gridding.gridding_functions import interpolate, remove_nan_observations
def main ():
while True:
prompt = 'Izaberite opciju : \n 1.SREDNJE DNEVNE VREDNOSTI ZA TEMPERATURU'
prompt += '\n 2.SREDNJE MESEČNE VREDNOSTI ZA TEMPERATURU \n 3.KORDINATE TAČKE \n 4.IZLAZ \n >> '
s = input(prompt)
if not s: # Ako je string prazan, prekid
break
cmd = int(s)
if cmd == 4:
break
if cmd == 1:
dnevni_podaci()
if cmd == 2:
mesecni_podaci()
if cmd == 3:
kordinate_tacke()
def mesecni_podaci():
    """Sub-menu for monthly data: write to a CSV file or draw the field."""
    print ('Izaberite šta želite:')
    answer = input('\n 1.ZAPIS U csv.file \n 2.CRTANJE POLJA \n >>')
    selection = int(answer)
    if selection == 1:
        zapis_m()
    elif selection == 2:
        crtanje_m()
def dnevni_podaci():
    """Sub-menu for daily data: write to a CSV file or draw the field."""
    print ('Izaberite šta želite:')
    answer = input('\n 1.ZAPIS U csv.file \n 2.CRTANJE POLJA \n >>')
    selection = int(answer)
    if selection == 1:
        zapis_d()
    elif selection == 2:
        crtanje_d()
def kordinate_tacke():
    """Print the monthly mean temperature for one grid point.

    Reads the gridded monthly temperature series and the grid description,
    asks the user for a year/month and a (lon, lat) pair, then prints the
    matching row. All values are read interactively from stdin.
    """
    # Monthly temperatures, MultiIndex rows (year, month); grid metadata.
    s = pd.read_csv('/home/martin//Master_rad/CARPATGRID_TA_M.ser',sep ='\s+')
    d = pd.read_csv('/home/martin/Master_rad/PredtandfilaGrid.dat', sep ='\s+')
    y = int(input('Unesite godinu: ' ' '))
    m = int(input('Unesite mesec:' ' '))
    # One row of temperatures for the chosen year/month (one value per grid cell).
    x1 = s.loc[y,m]
    d1 = d.drop(['index'],axis=1)
    # NOTE(review): `a` is never used afterwards — dead assignment.
    a = d1.set_index(['lon','lat'])
    lon = d1['lon'].values
    lat = d1['lat'].values
    country = d1['country'].values
    altitude = d1['altitude'].values
    temp = x1.values
    # Build a DataFrame combining grid metadata with the month's temperatures.
    r = { 'lon': lon, 'lat':lat, 'country':country,'altitude':altitude, 'temp':temp}
    podaci = pd.DataFrame(r,columns=['lon','lat','temp','country','altitude'])
    indexi = podaci.set_index(['lon','lat'])
    # Look up one grid point by its coordinates and print its row.
    xx = float(input('Unesite longitudu u rasponu od 17.0-27.0:'))
    yy = float(input('Unesite latitudu u rasponu od 50.0-44.0:'))
    print (indexi.loc[xx,yy])
def zapis_m():
    """Write the monthly mean temperatures for one year/month to podaci.csv.

    Prompts for a year and month on stdin, selects that row from the gridded
    monthly series and dumps its string representation to ./podaci.csv.
    """
    data1 = pd.read_csv('/home/martin/Master_rad/CARPATGRID_TA_M.ser',sep ='\s+')
    y = int(input('Unesite godinu: '' '))
    m = int(input('Unesite mesec: '' '))
    x1 = data1.loc[y,m]
    # Context manager guarantees the file is closed even if write() raises
    # (the original open()/close() pair leaked the handle on error).
    with open('podaci.csv', 'w') as izlazna_dadoteka:
        izlazna_dadoteka.write(str(x1))
def crtanje_m():
    """Interpolate and plot the monthly mean temperature field.

    Prompts for a year/month, Barnes-interpolates the station values onto a
    regular grid and renders the field on an Albers equal-area map with
    cartopy/matplotlib. Blocks until the plot window is closed.
    """
    to_proj = ccrs.AlbersEqualArea(central_longitude=-1., central_latitude=10.)
    # Load grid-point coordinates.
    fname = '/home/martin/Master_rad/PredtandfilaGrid.dat'
    #col_names = ['index','lon','lat','country','altitude']  # only needed if the file had no header
    # Load temperatures.
    df = pd.read_fwf(fname,na_values='MM')
    lon = df['lon'].values
    lat = df['lat'].values
    # Project lon/lat into map coordinates (z component discarded).
    xp, yp, _ = to_proj.transform_points(ccrs.Geodetic(), lon, lat).T
    data1 = pd.read_csv('/home/martin/Master_rad/CARPATGRID_TA_M.ser',sep ='\s+')
    y = int(input('Unesite godinu: '' '))
    m = int(input('Unesite mesec: '' '))
    x1 = data1.loc[y,m]
    # Drop missing observations, then Barnes-interpolate onto a ~30 km grid.
    x_masked, y_masked, t = remove_nan_observations(xp, yp, x1.values)
    tempx, tempy, temp = interpolate(x_masked, y_masked, t, interp_type='barnes',
                                     minimum_neighbors=8, search_radius=150000, hres=30000)
    # Mask cells the interpolation could not fill so they stay blank on the map.
    temp = np.ma.masked_where(np.isnan(temp), temp)
    levels = list(range(-20, 20, 1))
    cmap = plt.get_cmap('viridis')
    norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
    fig = plt.figure(figsize=(20, 10))
    view = fig.add_subplot(1, 1, 1, projection=to_proj)
    # Map window over the Carpathian basin (lon 16.9-27.0, lat 44.5-49.5).
    view.set_extent([27.0, 16.9, 49.5, 44.5])
    view.add_feature(cfeature.STATES.with_scale('50m'))
    view.add_feature(cfeature.OCEAN)
    view.add_feature(cfeature.COASTLINE.with_scale('50m'))
    view.add_feature(cfeature.BORDERS, linestyle=':')
    mmb = view.pcolormesh(tempx, tempy, temp, cmap=cmap, norm=norm)
    fig.colorbar(mmb, shrink=.4, pad=0.02, boundaries=levels)
    view.set_title('Srednja temperatura')
    plt.show()
def zapis_d():
    """Write the daily mean temperatures for one date to podaci.csv.

    Prompts for year, month and day on stdin, selects that row from the
    gridded daily series and dumps its string representation to ./podaci.csv.
    """
    data1 = pd.read_csv('/home/martin/Master_rad/CARPATGRID_TA_D.ser',sep ='\s+')
    y = int(input('Unesite godinu: '' '))
    m = int(input('Unesite mesec: '' '))
    d = int(input('Unesite dan : '' '))
    x1 = data1.loc[y,m,d]
    # Context manager guarantees the file is closed even if write() raises
    # (the original open()/close() pair leaked the handle on error).
    with open('podaci.csv', 'w') as test:
        test.write(str(x1))
def crtanje_d():
    """Interpolate and plot the daily mean temperature field.

    Same pipeline as crtanje_m() but reads the daily series and additionally
    prompts for the day. Blocks until the plot window is closed.
    """
    to_proj = ccrs.AlbersEqualArea(central_longitude=-1., central_latitude=10.)
    fname = '/home/martin/Master_rad/PredtandfilaGrid.dat'
    df = pd.read_fwf(fname,na_values='MM')
    lon = df['lon'].values
    lat = df['lat'].values
    # Project lon/lat into map coordinates (z component discarded).
    xp, yp, _ = to_proj.transform_points(ccrs.Geodetic(), lon, lat).T
    data1 = pd.read_csv('/home/martin/Master_rad/CARPATGRID_TA_D.ser',sep ='\s+')
    y = int(input('Unesite godinu: '' '))
    m = int(input('Unesite mesec: '' '))
    d = int(input('Unesite dan : '' '))
    x1 = data1.loc[y,m,d]
    # Drop missing observations, then Barnes-interpolate onto a ~30 km grid.
    x_masked, y_masked, t = remove_nan_observations(xp, yp, x1.values)
    tempx, tempy, temp = interpolate(x_masked, y_masked, t, interp_type='barnes',
                                     minimum_neighbors=8, search_radius=150000, hres=30000)
    # Mask cells the interpolation could not fill so they stay blank on the map.
    temp = np.ma.masked_where(np.isnan(temp), temp)
    levels = list(range(-20, 20, 1))
    cmap = plt.get_cmap('viridis')
    norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
    fig = plt.figure(figsize=(20, 10))
    view = fig.add_subplot(1, 1, 1, projection=to_proj)
    # Map window over the Carpathian basin (lon 16.9-27.0, lat 44.5-49.5).
    view.set_extent([27.0, 16.9, 49.5, 44.5])
    view.add_feature(cfeature.STATES.with_scale('50m'))
    view.add_feature(cfeature.OCEAN)
    view.add_feature(cfeature.COASTLINE.with_scale('50m'))
    view.add_feature(cfeature.BORDERS, linestyle=':')
    mmb = view.pcolormesh(tempx, tempy, temp, cmap=cmap, norm=norm)
    fig.colorbar(mmb, shrink=.4, pad=0.02, boundaries=levels)
    view.set_title('Srednja temperatura')
    plt.show()
main()
| [
"[email protected]"
] | |
9e819a87ebc1032db2785b8533da9e1f29cd9fe5 | f62e4c46fb0f98879fb63977fa29631b02e3928c | /16 задание/РекурсФункцСТекст_005.py | 555d533ac95655abf5e3a748654d72868d9b32bc | [] | no_license | SeveralCamper/USE-2020-2021 | c34f4d7a2c3e0f51529141781f523b63242a835d | ac1122649f2fd431a91af5dda5662492e2565109 | refs/heads/master | 2023-09-03T13:36:05.822568 | 2021-10-27T12:54:10 | 2021-10-27T12:54:10 | 392,303,515 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | # Задание 16 № 9163
# Ниже на пяти языках программирования записан рекурсивный алгоритм F.
# Чему равна сумма всех чисел, напечатанных на экране при выполнении вызова F(1)?
count = 0  # accumulates every value that F prints
def F(n):
    """Print n, add it to the global count, and recurse while n < 4."""
    global count
    count += n
    print(n)
    if n < 4:
        F(n + 1)
        F(n + 3)
# F returns None, so this line prints "None <sum>"; the sum of all
# printed numbers — the quantity the exam task asks for — is `count`.
print(F(1), count)
# Answer: 25
"[email protected]"
] | |
38f7ec7f0bfb8a30bba8a7a34b7649c99e83b107 | f91e85aaad8cbe48c819a15fc2f7d728a1ab50a1 | /chapter21-case_study/pyimagesearch/utils/captchahelper.py | 2a7163debbb6e4c4e3cb9bbbac24500607c290f8 | [] | no_license | amuLee/StarterBundle | ac9c28cffc8494ac9957e014304bf2de8be5a6d1 | 861fe66d9bab55390c7faca5026769697fc51307 | refs/heads/master | 2020-05-23T14:05:18.849342 | 2019-05-15T09:42:34 | 2019-05-15T09:42:34 | 186,793,654 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 984 | py | # import the necessary packages
import imutils
import cv2
def preprocess(image, width, height):
    """Resize *image* to exactly (width, height), padding to keep aspect ratio.

    The image is first scaled along its larger dimension, then padded
    symmetrically with replicated border pixels, and finally resized once
    more to absorb any one-pixel rounding error from the integer padding.
    """
    h, w = image.shape[:2]
    # Scale along the dominant dimension so the image fits the target box.
    if w > h:
        resized = imutils.resize(image, width=width)
    else:
        resized = imutils.resize(image, height=height)
    # Symmetric padding to reach the target size; int() truncation may leave
    # the result one pixel short, which the final resize corrects.
    pad_w = int((width - resized.shape[1]) / 2.0)
    pad_h = int((height - resized.shape[0]) / 2.0)
    padded = cv2.copyMakeBorder(resized, pad_h, pad_h, pad_w, pad_w,
        cv2.BORDER_REPLICATE)
    return cv2.resize(padded, (width, height))
| [
"[email protected]"
] | |
baf15faab8930e47cb6f8e16e1a1e395a3a4b8b9 | 2283ba3286f90d8ed8213050e18a11f57f84e320 | /App/ToDoList/toDoAccess.py | 24f6e9213db251923f859095cd65dc57de0fdcea | [] | no_license | Wills-Personal-Projects/LifeOrganizer | ab508861e7ed699d3c51b95ea85015eac0fe3a80 | 8b2a84912a41e72597d6d6bdce36ad841b3c8fc4 | refs/heads/master | 2023-03-06T08:14:37.576149 | 2021-02-11T21:55:56 | 2021-02-11T21:55:56 | 330,888,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,839 | py | #!/usr/bin/python3
from ..Path.filePaths import getScreenPath, getToDoPath, getBaseToDoPath, getNamesPath
from ..Screen.screenAccess import getScreen,setScreen
def getToDoListDim(n):
    """Return the dimension header of to-do list *n* as a list of strings.

    The header is the whitespace-separated first line of the list's file.
    """
    # 'with' closes the file even on error; the original leaked the handle.
    with open(getToDoPath(n), "r") as tDListFile:
        dim = tDListFile.readline().rstrip().split()
    return dim
def getToDoList(n):
    """Read to-do list *n* and return its items, each as a list of characters.

    File layout: line 1 is the dimension header, line 2 the item count,
    followed by one item per line.
    """
    # 'with' closes the file even on error; the original leaked the handle.
    with open(getToDoPath(n), "r") as tDListFile:
        tDListFile.readline()  # skip the dimension header line (unused here)
        numTD = int(tDListFile.readline())
        # Each stored item becomes the list of its characters (the original
        # copied characters one by one; list() is equivalent).
        tDL = [list(tDListFile.readline().rstrip()) for _ in range(numTD)]
    return tDL
def setToDoList(n, t, add):
    """Rewrite to-do list *n* with the items in *t*.

    The stored item count is incremented when *add* is True and decremented
    otherwise, relative to the count previously stored in the file. The
    dimension header line is preserved unchanged.
    """
    # Read the header and the old count, closing the file before rewriting it.
    with open(getToDoPath(n), "r") as tDListFile:
        dim = tDListFile.readline().rstrip()
        numTD = int(tDListFile.readline())
    with open(getToDoPath(n), "w") as tDListFile:
        tDListFile.write(dim+"\n")
        tDListFile.write(str(numTD + 1 if add else numTD - 1)+"\n")
        # Each item is a list of characters; join it back into its line.
        for item in t:
            tDListFile.write(''.join(item)+"\n")
    return
def saveToDoList(dim, name):
    """Register *name* in the names index and create its to-do list file.

    The new file stores the first four entries of *dim* on line 1 and an
    initial item count of 0 on line 2.
    """
    # 'with' closes the files even on error; the original leaked the handles.
    with open(getNamesPath(), "a") as nameList:
        nameList.write(name+"\n")
    with open(getBaseToDoPath()+name+".txt", "w") as toDoFile:
        for i in range(4):
            toDoFile.write(str(dim[i])+" ")
        toDoFile.write("\n")
        toDoFile.write("0")
    return
def deleteToDoList( n ):
    """Remove *n* from the names index.

    Rewrites the names file without the matching line. The to-do list's own
    data file is not deleted by this function.
    """
    # Read everything first, then rewrite — 'with' guarantees closing.
    with open(getNamesPath(), "r") as nameList:
        names = nameList.readlines()
    with open(getNamesPath(), "w") as nameList:
        for name in names:
            if name.rstrip() != n:
                nameList.write(name)
    return
"[email protected]"
] | |
300d9ae15924aa6d8ac27e9aeaf6236d61811704 | 482f24b9eda8ca802ef6f04d9f3cf8c1a79a9af2 | /dags/02_pemex_proveedores_contratos.py | d91735162881a333b7d6e5e856cecce044ceb361 | [
"MIT"
] | permissive | erikriver/mixtli-etc | 773896c831290a7b74374bc89983c87a033baac3 | 0c8e991cb6d92f0a3c87f41f7688a6a327e2472d | refs/heads/master | 2020-03-07T12:53:11.506416 | 2018-04-05T12:59:44 | 2018-04-05T12:59:44 | 127,487,455 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,935 | py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Erik Rivera
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import airflow
from airflow.models import DAG
from airflow.operators.python_operator import PythonOperator
# Default task arguments inherited by every task in this DAG.
args = {
    'owner': 'airflow',
    'start_date': airflow.utils.dates.days_ago(2)
}
# Monthly ETL pipeline for PEMEX suppliers and contracts data.
dag = DAG(
    dag_id='02_pemex_proveedores_contratos', default_args=args,
    schedule_interval='@monthly')
def extraer_proveedores():
    """Download the suppliers spreadsheet from the public suppliers-and-
    contractors information portal and store it on the file system.

    Currently a stub: it only logs its invocation; the actual extraction
    is not implemented yet.
    """
    # TODO: store the downloaded file in S3 ("Guardar en S3")
    logging.info("extraer_proveedores")
def extraer_contratos():
    """Download the contracts spreadsheet from the public suppliers-and-
    contractors information portal and store it on the file system.

    Currently an unimplemented stub.
    """
    # TODO: store the downloaded file in S3 ("Guardar en S3")
    pass
def cargar_proveedores():
    """Load the downloaded suppliers files into the database.

    Currently an unimplemented stub.
    """
    pass
def cargar_contratos():
    """Load the downloaded contracts files into the database.

    Currently an unimplemented stub.
    """
    pass
# One PythonOperator per pipeline step; provide_context=True hands Airflow's
# runtime context (execution date, task instance, ...) to each callable.
e_proveedores = PythonOperator(
    task_id='extraer_proveedores',
    provide_context=True,
    python_callable=extraer_proveedores,
    dag=dag)
c_proveedores = PythonOperator(
    task_id='cargar_proveedores',
    provide_context=True,
    python_callable=cargar_proveedores,
    dag=dag)
e_contratos = PythonOperator(
    task_id='extraer_contratos',
    provide_context=True,
    python_callable=extraer_contratos,
    dag=dag)
c_contratos = PythonOperator(
    task_id='cargar_contratos',
    provide_context=True,
    python_callable=cargar_contratos,
    dag=dag)
# Strictly linear ordering: extract suppliers, load them, then extract
# and load contracts.
e_proveedores >> c_proveedores >> e_contratos >> c_contratos
| [
"[email protected]"
] | |
a387bd8541f6ae4169b7b9ac79fbcd17e4dbf771 | 84153ccd9d502d1e086000d66e58eaf756f52ed1 | /tweetme2-web/node_modules/watchpack-chokidar2/node_modules/fsevents/build/config.gypi | f499035769cb92132bf631e258809b1c8655ae7f | [
"MIT"
] | permissive | ezorfa/TweetMe | a86d6714595a1cd57054fadb62bd67d27448a995 | ff7843d79ba685265f32cab7fabb7f2336122dcc | refs/heads/master | 2022-12-14T14:57:37.897380 | 2020-08-23T21:52:08 | 2020-08-23T21:52:08 | 288,664,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,654 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt67l.dat",
"icu_default_data": "",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "67",
"is_debug": 0,
"llvm_version": "11.0",
"napi_build_version": "6",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 72,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"shlib_suffix": "72.dylib",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_lite_mode": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"v8_use_snapshot": 1,
"want_separate_host_toolset": 0,
"xcode_version": "11.0",
"nodedir": "/Users/mohammed/Library/Caches/node-gyp/12.18.3",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/bash",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"preid": "",
"fetch_retries": "2",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"audit_level": "low",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/mohammed/.npm-init.js",
"userconfig": "/Users/mohammed/.npmrc",
"cidr": "",
"node_version": "12.18.3",
"user": "",
"save": "true",
"auth_type": "legacy",
"editor": "vi",
"ignore_prepublish": "",
"script_shell": "",
"tag": "latest",
"before": "",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"save_exact": "true",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/mohammed/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"fund": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/6.14.6 node/v12.18.3 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/m6/y_cyz1b925jgkv6m34y67grr0000gn/T",
"unsafe_perm": "true",
"format_package_lock": "true",
"link": "",
"prefix": "/usr/local"
}
}
| [
""
] | |
3a2ad1e33b7dc2a198f28492c836efb94a98b834 | 3562fa51db47b1b1e97785191f0c04644d47c283 | /python/plat3/2152.py | 62489943de0aeb7ac35dc6bf78cf5e4aa950e1b3 | [] | no_license | seono/algorithm | c74181d564525e3a0214824c4a619c51cd52a042 | 78a252b29290eaa1ea076d76cd83e5dbbb7d8d89 | refs/heads/master | 2021-07-13T07:13:41.523888 | 2021-04-24T14:05:00 | 2021-04-24T14:05:00 | 244,609,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,697 | py | import sys
from collections import deque
sys.setrecursionlimit(100000)
input = sys.stdin.readline
N, M, S, T = map(int,input().split())
adj = [[] for _ in range(N+1)]
for i in range(M):
s,t = map(int,input().split())
adj[s].append(t)
cnt,SN = 0,0
dfsn = [0]*(N+1)
scc_arr = []
scc_num = [0]*(N+1)
finished = [False]*(N+1)
st = []
def scc(idx):
global cnt,SN
dfsn[idx] = cnt+1
cnt+=1
st.append(idx)
result = dfsn[idx]
for nx in adj[idx]:
if dfsn[nx]==0:result = min(result,scc(nx))
elif not finished[nx]: result = min(result, dfsn[nx])
if result == dfsn[idx]:
curSCC = []
while True:
t = st.pop()
curSCC.append(t)
finished[t]=True
scc_num[t]=SN
if t==idx:break
scc_arr.append(curSCC)
SN+=1
return result
for i in range(1,N+1):
if dfsn[i]==0:scc(i)
new_adj = [[] for _ in range(SN)]
indgree = [0]*SN
finished = [0]*SN
new_s,new_t = scc_num[S],scc_num[T]
for i,tmp in enumerate(scc_arr):
for n in tmp:
for nx in adj[n]:
if scc_num[nx]==i:continue
new_adj[i].append(scc_num[nx])
indgree[scc_num[nx]]+=1
def dfs():
can = [False]*SN
can[new_s]=True
finished[new_s]=len(scc_arr[new_s])
q = deque([])
for i in range(SN):
if not indgree[i]: q.append(i)
while q:
n = q.popleft()
for nx in new_adj[n]:
if can[n]:
finished[nx]=max(finished[nx],finished[n]+len(scc_arr[nx]))
can[nx]=True
indgree[nx]-=1
if indgree[nx]==0:
q.append(nx)
return finished[new_t]
print(dfs()) | [
"[email protected]"
] | |
ed2bd9596b3c087bafd28769728ffae53934a728 | e12e1e738d06dbbcdb7f3d051614e7aa493f795d | /mysite/config/settings.py | 41635b0505022517d3d2ca9886160f51f35e9721 | [] | no_license | suhjohn/ec2-deploy-mysite | 34c13e1ae3ff33ca14a6223ee8036432ea98d460 | fb3c33cb64ecfa673f16da0385942f76bde748a1 | refs/heads/master | 2021-07-19T17:57:53.701059 | 2017-10-27T05:33:27 | 2017-10-27T05:33:27 | 108,366,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,380 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded and committed to VCS; for any real
# deployment load it from an environment variable or secrets store.
SECRET_KEY = '8!yz3f*(+w^kkhls0sl3)lfngzupjo(rsydyr2(89ci7!av(_w'
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): must be set to False before serving real traffic.
DEBUG = True
# Hosts this site may serve; the leading dot matches any EC2 public
# hostname in the ap-northeast-2 (Seoul) region.
ALLOWED_HOSTS = [
    'localhost',
    '.ap-northeast-2.compute.amazonaws.com',
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# File-based SQLite — fine for development; switch to a server database
# (e.g. PostgreSQL) for production deployments.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_DIR = os.path.join(BASE_DIR, "static")
# Folders Django's staticfiles finders search for static assets
# (translated from the original Korean comment).
STATICFILES_DIRS = [
    STATIC_DIR,
]
# User-uploaded files are stored under MEDIA_ROOT and served at MEDIA_URL.
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = '/media/'
| [
"[email protected]"
] | |
9292b50897599760a3af3e865199ee693377afc0 | bca910213912e3f74e28b3da920695eabf9bab1c | /sat/list_articles_test.py | 7b6c3905d6be9e414f0bbc7044ab53b5d88b91b0 | [] | no_license | francescobenintende/hub-blog | e9f2850ad030ea3af7051d15a9ee186b770391ee | f8960f229c3806fc1c6989f3ab7b6192f46c982f | refs/heads/master | 2023-06-23T18:10:34.593326 | 2021-07-20T07:04:01 | 2021-07-20T07:04:01 | 387,029,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,639 | py | from hub_blog import app
class TestListArticles:
    """End-to-end tests for the /articles listing endpoint."""

    def _create_article(self, client, title, tags, user_id):
        """POST a new article, assert creation succeeded, and return its id.

        Factored out of the two tests below, which previously duplicated
        the creation payload and status assertion verbatim.
        """
        response = client.post('/articles', json={
            'title': title,
            'content': 'Lots of interesting content.',
            'tags': tags,
            'user_id': user_id,
        })
        assert response.status_code == 201
        return response.json['article_id']

    def test_list_articles_returns_articles_with_specified_tags(self):
        with app.test_client() as c:
            article_id_one = self._create_article(
                c, 'Article One', ['marketing'], 'fran')
            article_id_two = self._create_article(
                c, 'Article Two', ['learning', 'marketing'], 'mark')

            # Both articles carry the 'marketing' tag, so both must be listed.
            list_response = c.get('/articles', json={'tags': ['marketing']})

            assert list_response.status_code == 200
            assert len(list_response.json) == 2
            assert article_id_one in list_response.json
            assert article_id_two in list_response.json

    def test_list_articles_returns_articles_with_specified_keywords(self):
        with app.test_client() as c:
            article_id_one = self._create_article(
                c, 'This is the way', ['tech', 'finance'], 'fran')
            article_id_two = self._create_article(
                c, 'That is the way', ['finance', 'travelling'], 'mark')

            # Both titles contain all three keywords, so both must be listed.
            list_response = c.get('/articles', json={'keywords': ['is', 'the', 'way']})

            assert list_response.status_code == 200
            assert len(list_response.json) == 2
            assert article_id_one in list_response.json
            assert article_id_two in list_response.json
| [
"[email protected]"
] | |
ea8178226894384605a4056cb9263a40ae392b4f | eecd9cb2117aee5f8a41a38e07697524c5908003 | /concourse/client/api.py | 6364f9311bca6d38d5e26f4f165613ef1eb30a26 | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | adracus/cc-utils | 426608fb2c7184e5f68c9073da9e7f1535e025ce | dcd1ff544d8b18a391188903789d1cac929f50f9 | refs/heads/master | 2020-04-25T23:30:25.454654 | 2019-02-27T14:31:19 | 2019-02-27T14:31:19 | 173,148,221 | 0 | 0 | Apache-2.0 | 2019-02-28T16:36:09 | 2019-02-28T16:36:09 | null | UTF-8 | Python | false | false | 8,926 | py | # Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed
# under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import warnings
from abc import abstractmethod
from ensure import ensure_annotations
from urllib3.exceptions import InsecureRequestWarning
from .routes import (
ConcourseApiRoutesBase,
)
from .model import (
Build,
BuildPlan,
BuildEvents,
SetPipelineResult,
PipelineConfig,
ResourceVersion,
)
from model.concourse import (
ConcourseTeamCredentials,
)
from http_requests import AuthenticatedRequestBuilder
from util import not_empty
warnings.filterwarnings('ignore', 'Unverified HTTPS request is being made.*', InsecureRequestWarning)
def select_attr(name: str):
    """Return a callable that looks up *name* on a mapping via .get().

    Missing keys yield None, matching dict.get semantics.
    """
    def _select(mapping):
        return mapping.get(name)
    return _select
class ConcourseApiBase(object):
    '''
    Implements a subset of the Concourse REST API functionality.
    After creation, `login` ought to be invoked at least once to allow for the
    execution of requests requiring authorization.
    @param routes: route factory for the targeted Concourse endpoint
    @param request_builder: pre-configured HTTP request builder
    @param verify_ssl: whether or not certificate validation is to be done
    '''
    @ensure_annotations
    def __init__(
        self,
        routes: ConcourseApiRoutesBase,
        request_builder: AuthenticatedRequestBuilder,
        verify_ssl=False,
    ):
        self.routes = routes
        self.request_builder = request_builder
        self.verify_ssl = verify_ssl
    # NOTE(review): _put/_post use mutable default arguments (headers={}).
    # They are only passed through here, but the shared dict is a latent
    # hazard — consider headers=None with an in-body default.
    @ensure_annotations
    def _get(self, url: str):
        # All GETs expect a JSON body and return the parsed document.
        return self.request_builder.get(url, return_type='json')
    @ensure_annotations
    def _put(self, url: str, body: str, headers={}, use_auth_token=True):
        # NOTE(review): use_auth_token is accepted but never used here.
        return self.request_builder.put(url, body=body, headers=headers)
    @ensure_annotations
    def _post(self, url: str, body: str="", headers={}):
        return self.request_builder.post(url, body=body, headers=headers)
    @ensure_annotations
    def _delete(self, url: str):
        return self.request_builder.delete(url)
    @abstractmethod
    def login(self, team: str, username: str, passwd: str):
        # Version-specific subclasses implement the authentication flow.
        raise NotImplementedError
    @ensure_annotations
    def set_pipeline(self, name: str, pipeline_definition):
        '''Create or update pipeline `name`; returns a SetPipelineResult.'''
        # Concourse requires the previous config version header for updates;
        # None means the pipeline does not exist yet (creation).
        previous_version = self.pipeline_config_version(name)
        headers = {'x-concourse-config-version': previous_version}
        url = self.routes.pipeline_cfg(name)
        self._put(url, str(pipeline_definition), headers=headers)
        return SetPipelineResult.CREATED if previous_version is None else SetPipelineResult.UPDATED
    @ensure_annotations
    def delete_pipeline(self, name: str):
        '''Delete the pipeline with the given name.'''
        url = self.routes.pipeline(pipeline_name=name)
        self._delete(url)
    def pipelines(self):
        '''Return an iterator over the names of all pipelines.'''
        pipelines_url = self.routes.pipelines()
        response = self._get(pipelines_url)
        return map(select_attr('name'), response)
    def order_pipelines(self, pipeline_names):
        '''Set the display ordering of pipelines to the given name sequence.'''
        url = self.routes.order_pipelines()
        self._put(url, json.dumps(pipeline_names))
    @ensure_annotations
    def pipeline_cfg(self, pipeline_name: str):
        '''Fetch a pipeline's configuration as a PipelineConfig model.'''
        pipeline_cfg_url = self.routes.pipeline_cfg(pipeline_name)
        response = self._get(pipeline_cfg_url)
        not_empty(response)
        return PipelineConfig(response, concourse_api=self, name=pipeline_name)
    def pipeline_resources(self, pipeline_names):
        '''Yield the resources of each named pipeline (str or iterable).'''
        if isinstance(pipeline_names, str):
            pipeline_names = [pipeline_names]
        resources = map(lambda name: self.pipeline_cfg(pipeline_name=name).resources, pipeline_names)
        for resource_list in resources:
            yield from resource_list
    @ensure_annotations
    def pipeline_config_version(self, pipeline_name: str):
        '''Return the pipeline's config version header, or None if absent.'''
        pipeline_cfg_url = self.routes.pipeline_cfg(pipeline_name)
        response = self.request_builder.get(
            pipeline_cfg_url,
            return_type=None,
            check_http_code=False
        )
        if response.status_code == 404:
            return None # pipeline did not exist yet
        # ensure we did not receive an error other than 404
        self.request_builder._check_http_code(response, pipeline_cfg_url)
        return response.headers['X-Concourse-Config-Version']
    @ensure_annotations
    def unpause_pipeline(self, pipeline_name: str):
        '''Unpause the named pipeline (empty-body PUT).'''
        unpause_url = self.routes.unpause_pipeline(pipeline_name)
        self.request_builder.put(
            unpause_url,
            body=""
        )
    @ensure_annotations
    def expose_pipeline(self, pipeline_name: str):
        '''Make the named pipeline publicly visible (empty-body PUT).'''
        expose_url = self.routes.expose_pipeline(pipeline_name)
        self.request_builder.put(
            expose_url,
            body="",
        )
    @ensure_annotations
    def job_builds(self, pipeline_name: str, job_name: str):
        '''
        Returns a list of Build objects for the specified job.
        The list is sorted by the build number, newest build last
        '''
        builds_url = self.routes.job_builds(pipeline_name, job_name)
        response = self._get(builds_url)
        builds = [Build(build_dict, self) for build_dict in response]
        builds = sorted(builds, key=lambda b: b.id())
        return builds
    @ensure_annotations
    def job_build(self, pipeline_name: str, job_name: str, build_name: str):
        '''Fetch a single named build of the given job.'''
        build_url = self.routes.job_build(pipeline_name, job_name, build_name)
        response = self._get(build_url)
        return Build(response, self)
    @ensure_annotations
    def trigger_build(self, pipeline_name: str, job_name: str):
        '''Trigger a new build of the given job (POST to the builds URL).'''
        trigger_url = self.routes.job_builds(pipeline_name, job_name)
        self._post(trigger_url)
    @ensure_annotations
    def build_plan(self, build_id):
        '''Fetch the execution plan of a build as a BuildPlan model.'''
        build_plan_url = self.routes.build_plan(build_id)
        response = self._get(build_plan_url)
        return BuildPlan(response, self)
    @ensure_annotations
    def build_events(self, build_id):
        '''Open the server-sent-events stream of a build's events.'''
        build_plan_url = self.routes.build_events(build_id)
        # TODO: this request never seems to send an "EOF"
        # (probably to support streaming)
        # --> properly handle this special case
        response = self.request_builder.get(
            build_plan_url,
            return_type=None,
            stream=True # passed to sseclient
        )
        return BuildEvents(response, self)
    @ensure_annotations
    def trigger_resource_check(self, pipeline_name: str, resource_name: str):
        '''Ask Concourse to check the named resource for new versions.'''
        url = self.routes.resource_check(pipeline_name=pipeline_name, resource_name=resource_name)
        # Resource checks are triggered by a POST with an empty JSON-document as body against
        # the resource's check-url
        self._post(url, body='{}')
    @ensure_annotations
    def resource_versions(self, pipeline_name: str, resource_name: str):
        '''Return the known versions of a resource as ResourceVersion models.'''
        url = self.routes.resource_versions(pipeline_name=pipeline_name, resource_name=resource_name)
        response = self._get(url)
        return [ResourceVersion(raw=raw, concourse_api=None) for raw in response]
class ConcourseApiV4(ConcourseApiBase):
def login(self, username: str, passwd: str):
login_url = self.routes.login()
form_data = "grant_type=password&password=" + passwd + \
"&scope=openid+profile+email+federated%3Aid+groups&username=" + username
response = self._post(
url=login_url,
body=form_data,
headers={"content-type": "application/x-www-form-urlencoded"}
)
auth_token = response.json()['access_token']
self.request_builder = AuthenticatedRequestBuilder(
auth_token=auth_token,
verify_ssl=self.verify_ssl
)
return auth_token
def set_team(self, team_credentials: ConcourseTeamCredentials):
body = {}
body['auth'] = {
"users": [
"local:" + team_credentials.username()
]
}
if team_credentials.has_github_oauth_credentials():
body['auth'].update({
"groups": [
"github:" + team_credentials.github_auth_team()
]
})
team_url = self.routes.team_url(team_credentials.teamname())
self._put(team_url, json.dumps(body))
| [
"[email protected]"
] | |
c663cfef1a695d5be22587d9ff42d87025c79fdc | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /netex/models/destination_display_variant_ref.py | 3255e0537fdd4c971133e232146768cd1aa74573 | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 332 | py | from dataclasses import dataclass
from .destination_display_variant_ref_structure import DestinationDisplayVariantRefStructure
__NAMESPACE__ = "http://www.netex.org.uk/netex"
@dataclass
class DestinationDisplayVariantRef(DestinationDisplayVariantRefStructure):
class Meta:
namespace = "http://www.netex.org.uk/netex"
| [
"[email protected]"
] | |
0d038bc8a4bea84b0f0d51230a42d848d9d9ce14 | 5c1e5935129b4d0f5626f64bd62e3370b0553262 | /vendor/deadline/custom/plugins/GlobalJobPreLoad.py | d1287dd2131f2cf667340d1bd133e38875f5b207 | [
"MIT"
] | permissive | dangerstudios/OpenPype | c72f9d6d3a4a964697fa77712c116cfd2949b696 | f82ed5e127340e852a7517ff1e13e2524ee6e704 | refs/heads/main | 2023-04-21T00:25:02.364398 | 2021-04-13T22:10:36 | 2021-04-13T22:10:36 | 362,811,887 | 0 | 0 | null | 2021-04-29T12:35:54 | 2021-04-29T12:35:53 | null | UTF-8 | Python | false | false | 6,060 | py | # -*- coding: utf-8 -*-
import os
import tempfile
import time
import subprocess
import json
import platform
from Deadline.Scripting import RepositoryUtils, FileUtils
def inject_openpype_environment(deadlinePlugin):
job = deadlinePlugin.GetJob()
job = RepositoryUtils.GetJob(job.JobId, True) # invalidates cache
print("inject_openpype_environment start")
try:
exe_list = job.GetJobExtraInfoKeyValue("openpype_executables")
openpype_app = FileUtils.SearchFileList(exe_list)
if openpype_app == "":
raise RuntimeError(
"OpenPype executable was not found " +
"in the semicolon separated list \"" + exe_list + "\". " +
"The path to the render executable can be configured " +
"from the Plugin Configuration in the Deadline Monitor.")
# tempfile.TemporaryFile cannot be used because of locking
export_url = os.path.join(tempfile.gettempdir(),
time.strftime('%Y%m%d%H%M%S'),
'env.json') # add HHMMSS + delete later
print("export_url {}".format(export_url))
args = [
openpype_app,
'extractenvironments',
export_url
]
add_args = {}
add_args['project'] = \
job.GetJobEnvironmentKeyValue('AVALON_PROJECT')
add_args['asset'] = job.GetJobEnvironmentKeyValue('AVALON_ASSET')
add_args['task'] = job.GetJobEnvironmentKeyValue('AVALON_TASK')
add_args['app'] = job.GetJobEnvironmentKeyValue('AVALON_APP_NAME')
if all(add_args.values()):
for key, value in add_args.items():
args.append("--{}".format(key))
args.append(value)
else:
msg = "Required env vars: AVALON_PROJECT, AVALON_ASSET, " + \
"AVALON_TASK, AVALON_APP_NAME"
raise RuntimeError(msg)
print("args::{}".format(args))
exit_code = subprocess.call(args, shell=True)
if exit_code != 0:
raise RuntimeError("Publishing failed, check worker's log")
with open(export_url) as fp:
contents = json.load(fp)
for key, value in contents.items():
deadlinePlugin.SetEnvironmentVariable(key, value)
os.remove(export_url)
print("inject_openpype_environment end")
except Exception:
import traceback
print(traceback.format_exc())
print("inject_openpype_environment failed")
RepositoryUtils.FailJob(job)
raise
def pype_command_line(executable, arguments, workingDirectory):
"""Remap paths in comand line argument string.
Using Deadline rempper it will remap all path found in command-line.
Args:
executable (str): path to executable
arguments (str): arguments passed to executable
workingDirectory (str): working directory path
Returns:
Tuple(executable, arguments, workingDirectory)
"""
print("-" * 40)
print("executable: {}".format(executable))
print("arguments: {}".format(arguments))
print("workingDirectory: {}".format(workingDirectory))
print("-" * 40)
print("Remapping arguments ...")
arguments = RepositoryUtils.CheckPathMapping(arguments)
print("* {}".format(arguments))
print("-" * 40)
return executable, arguments, workingDirectory
def pype(deadlinePlugin):
"""Remaps `PYPE_METADATA_FILE` and `PYPE_PYTHON_EXE` environment vars.
`PYPE_METADATA_FILE` is used on farm to point to rendered data. This path
originates on platform from which this job was published. To be able to
publish on different platform, this path needs to be remapped.
`PYPE_PYTHON_EXE` can be used to specify custom location of python
interpreter to use for Pype. This is remappeda also if present even
though it probably doesn't make much sense.
Arguments:
deadlinePlugin: Deadline job plugin passed by Deadline
"""
job = deadlinePlugin.GetJob()
# PYPE should be here, not OPENPYPE - backward compatibility!!
pype_metadata = job.GetJobEnvironmentKeyValue("PYPE_METADATA_FILE")
pype_python = job.GetJobEnvironmentKeyValue("PYPE_PYTHON_EXE")
# test if it is pype publish job.
if pype_metadata:
pype_metadata = RepositoryUtils.CheckPathMapping(pype_metadata)
if platform.system().lower() == "linux":
pype_metadata = pype_metadata.replace("\\", "/")
print("- remapping PYPE_METADATA_FILE: {}".format(pype_metadata))
job.SetJobEnvironmentKeyValue("PYPE_METADATA_FILE", pype_metadata)
deadlinePlugin.SetProcessEnvironmentVariable(
"PYPE_METADATA_FILE", pype_metadata)
if pype_python:
pype_python = RepositoryUtils.CheckPathMapping(pype_python)
if platform.system().lower() == "linux":
pype_python = pype_python.replace("\\", "/")
print("- remapping PYPE_PYTHON_EXE: {}".format(pype_python))
job.SetJobEnvironmentKeyValue("PYPE_PYTHON_EXE", pype_python)
deadlinePlugin.SetProcessEnvironmentVariable(
"PYPE_PYTHON_EXE", pype_python)
deadlinePlugin.ModifyCommandLineCallback += pype_command_line
def __main__(deadlinePlugin):
job = deadlinePlugin.GetJob()
job = RepositoryUtils.GetJob(job.JobId, True) # invalidates cache
openpype_render_job = \
job.GetJobEnvironmentKeyValue('OPENPYPE_RENDER_JOB') or '0'
openpype_publish_job = \
job.GetJobEnvironmentKeyValue('OPENPYPE_PUBLISH_JOB') or '0'
if openpype_publish_job == '1' and openpype_render_job == '1':
raise RuntimeError("Misconfiguration. Job couldn't be both " +
"render and publish.")
if openpype_publish_job == '1':
print("Publish job, skipping inject.")
return
elif openpype_render_job == '1':
inject_openpype_environment(deadlinePlugin)
else:
pype(deadlinePlugin) # backward compatibility with Pype2
| [
"[email protected]"
] | |
5e70589d8b5f904dfb860f482ccaa3caf4c7ec29 | 8685c7b28fcaf26505ec4210ff69f30ed0884d80 | /tools/c7n_gcp/c7n_gcp/actions/cscc.py | 32b0c5015f39db0b3979318624ad86139f3e2225 | [
"Apache-2.0"
] | permissive | AnatoliiHromov/cloud-custodian | 9026d490b1c8aabcdab07c5d95a3e7bdda11ce7b | 54b48040c0de8a34cea4c48209ca7e395285465b | refs/heads/master | 2022-10-13T15:56:48.654462 | 2020-06-04T16:32:25 | 2020-06-04T16:32:25 | 269,630,235 | 2 | 0 | Apache-2.0 | 2020-06-05T12:35:17 | 2020-06-05T12:35:14 | null | UTF-8 | Python | false | false | 8,032 | py | # Copyright 2018-2019 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import hashlib
from urllib.parse import urlparse
from c7n.exceptions import PolicyExecutionError, PolicyValidationError
from c7n.utils import local_session, type_schema
from .core import MethodAction
from c7n_gcp.provider import resources as gcp_resources
class PostFinding(MethodAction):
"""Post finding for matched resources to Cloud Security Command Center.
:Example:
.. code-block:: yaml
policies:
- name: gcp-instances-with-label
resource: gcp.instance
filters:
- "tag:name": "bad-instance"
actions:
- type: post-finding
org-domain: example.io
category: MEDIUM_INTERNET_SECURITY
The source for custodian can either be specified inline to the policy, or
custodian can generate one at runtime if it doesn't exist given a org-domain
or org-id.
Finding updates are not currently supported, due to upstream api issues.
"""
schema = type_schema(
'post-finding',
**{
'source': {
'type': 'string',
'description': 'qualified name of source to post to CSCC as'},
'org-domain': {'type': 'string'},
'org-id': {'type': 'integer'},
'category': {'type': 'string'}})
schema_alias = True
method_spec = {'op': 'create', 'result': 'name', 'annotation_key': 'c7n:Finding'}
# create throws error if already exists, patch method has bad docs.
ignore_error_codes = (409,)
CustodianSourceName = 'CloudCustodian'
DefaultCategory = 'Custodian'
Service = 'securitycenter'
ServiceVersion = 'v1beta1'
_source = None
def validate(self):
if not any([self.data.get(k) for k in ('source', 'org-domain', 'org-id')]):
raise PolicyValidationError(
"policy:%s CSCC post-finding requires one of source, org-domain, org-id" % (
self.manager.ctx.policy.name))
def process(self, resources):
self.initialize_source()
return super(PostFinding, self).process(resources)
def get_client(self, session, model):
return session.client(
self.Service, self.ServiceVersion, 'organizations.sources.findings')
def get_resource_params(self, model, resource):
return self.get_finding(resource)
def initialize_source(self):
# Ideally we'll be given a source, but we'll attempt to auto create it
# if given an org_domain or org_id.
if self._source:
return self._source
elif 'source' in self.data:
self._source = self.data['source']
return self._source
session = local_session(self.manager.session_factory)
# Resolve Organization Id
if 'org-id' in self.data:
org_id = self.data['org-id']
else:
orgs = session.client('cloudresourcemanager', 'v1', 'organizations')
res = orgs.execute_query(
'search', {'body': {
'filter': 'domain:%s' % self.data['org-domain']}}).get(
'organizations')
if not res:
raise PolicyExecutionError("Could not determine organization id")
org_id = res[0]['name'].rsplit('/', 1)[-1]
# Resolve Source
client = session.client(self.Service, self.ServiceVersion, 'organizations.sources')
source = None
res = [s for s in
client.execute_query(
'list', {'parent': 'organizations/{}'.format(org_id)}).get('sources')
if s['displayName'] == self.CustodianSourceName]
if res:
source = res[0]['name']
if source is None:
source = client.execute_command(
'create',
{'parent': 'organizations/{}'.format(org_id),
'body': {
'displayName': self.CustodianSourceName,
'description': 'Cloud Management Rules Engine'}}).get('name')
self.log.info(
"policy:%s resolved cscc source: %s, update policy with this source value",
self.manager.ctx.policy.name,
source)
self._source = source
return self._source
def get_name(self, r):
"""Given an arbitrary resource attempt to resolve back to a qualified name."""
namer = ResourceNameAdapters[self.manager.resource_type.service]
return namer(r)
def get_finding(self, resource):
policy = self.manager.ctx.policy
resource_name = self.get_name(resource)
# ideally we could be using shake, but its py3.6+ only
finding_id = hashlib.sha256(
b"%s%s" % (
policy.name.encode('utf8'),
resource_name.encode('utf8'))).hexdigest()[:32]
finding = {
'name': '{}/findings/{}'.format(self._source, finding_id),
'resourceName': resource_name,
'state': 'ACTIVE',
'category': self.data.get('category', self.DefaultCategory),
'eventTime': datetime.datetime.utcnow().isoformat('T') + 'Z',
'sourceProperties': {
'resource_type': self.manager.type,
'title': policy.data.get('title', policy.name),
'policy_name': policy.name,
'policy': json.dumps(policy.data)
}
}
request = {
'parent': self._source,
'findingId': finding_id[:31],
'body': finding}
return request
@classmethod
def register_resource(klass, registry, resource_class):
if resource_class.resource_type.service not in ResourceNameAdapters:
return
if 'post-finding' in resource_class.action_registry:
return
resource_class.action_registry.register('post-finding', klass)
# CSCC uses its own notion of resource id, if we want our findings on
# a resource to be linked from the asset view we need to post w/ the
# same resource name. If this conceptulization of resource name is
# standard, then we should move these to resource types with
# appropriate hierarchies by service.
def name_compute(r):
prefix = urlparse(r['selfLink']).path.strip('/').split('/')[2:][:-1]
return "//compute.googleapis.com/{}/{}".format(
"/".join(prefix),
r['id'])
def name_iam(r):
return "//iam.googleapis.com/projects/{}/serviceAccounts/{}".format(
r['projectId'],
r['uniqueId'])
def name_resourcemanager(r):
rid = r.get('projectNumber')
if rid is not None:
rtype = 'projects'
else:
rid = r.get('organizationId')
rtype = 'organizations'
return "//cloudresourcemanager.googleapis.com/{}/{}".format(
rtype, rid)
def name_container(r):
return "//container.googleapis.com/{}".format(
"/".join(urlparse(r['selfLink']).path.strip('/').split('/')[1:]))
def name_storage(r):
return "//storage.googleapis.com/{}".format(r['name'])
def name_appengine(r):
return "//appengine.googleapis.com/{}".format(r['name'])
ResourceNameAdapters = {
'appengine': name_appengine,
'cloudresourcemanager': name_resourcemanager,
'compute': name_compute,
'container': name_container,
'iam': name_iam,
'storage': name_storage,
}
gcp_resources.subscribe(PostFinding.register_resource)
| [
"[email protected]"
] | |
51bc17d770c472514c34e571a0a26ef96a715551 | f9235e935286e8b1b0d86957ebf843179ee1f733 | /PasswordLocker/platform.py | 38cabbd9ae56024350f327d77848bfab89a8fcab | [] | no_license | Oladunsi/automate_the_boring_stuff | 4a37203dec24f4c898161e7e517dd02184e247ef | 72963c38b42992c83826f141b0990c1aaf1abfac | refs/heads/master | 2023-02-20T01:52:28.796959 | 2021-01-19T14:02:57 | 2021-01-19T14:02:57 | 282,612,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,235 | py | import csv
import pandas as pd
def platform():
UserLocker = {}
Begin = True
while Begin:
platform = input("Enter the name of the platform You want to save its' username and password!!! ").upper()
if platform != "":
UserLocker["Platform"] = platform
UserName = input(f"Enter The UserName You used on {platform}: ")
if UserName != "":
UserLocker["UserName"] = UserName
Password = input(f"Enter the Password You used on {platform}: ")
if Password != "":
UserLocker["Password"] = Password
Begin = False
return UserLocker
else:
continue
else:
continue
else:
continue
if __name__ == "__main__":
# the input is intended to be converted into pandas dataframe
userlocker_data = platform()
with open('PasswordBank.csv', 'a+', newline='') as write_obj:
fieldnames = ['Platform','UserName','Password']
writer = csv.DictWriter(write_obj,fieldnames=fieldnames)
writer.writerow(userlocker_data)
| [
"[email protected]"
] | |
e27268bc22f9a86622a304e5caac2b5d7ec1d426 | ac8b9ab24164f5f282e9eaad2b2453690e5871c7 | /app.py | f54b84b37867cc222edf5c1d5bbfb3047cc0d0c7 | [] | no_license | Akrosys/M1_Python_EDA | 811cad373c49fea4a007db4213188162d1e199c9 | 7b17b05c5f2afc4da8042585fb385643cb288f18 | refs/heads/master | 2023-01-30T02:21:19.783569 | 2020-12-02T15:59:53 | 2020-12-02T15:59:53 | 313,864,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | from flask import Flaskr
import os
import socket
# Connect to Redis
redis = Redis(host="redis", db=0, socket_connect_timeout=2, socket_timeout=2)
app = Flask(__name__)
@app.route("/")
def hello():
try:
visites = redis.incr("compteur")
except RedisError:
visites = "<i>Erreur de connection Redis, compteur desactive</i>"
html = "<h3>Bonjour {nom}!</h3>" \
"<b>Hostname:</b> {hostname}<br/>" \
"<b>Visites:</b> {visites} <br/>" \
"<p>Abonne toi!</p>"
return html.format(nom=os.getenv("NOM", "youtube"), hostname=socket.gethostname(), visites=visites)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=80)
| [
"[email protected]"
] | |
beebfb738ee3a7772df0488cef9794e2a300da83 | 2a197966a829ceb02f240f85d70f1c276efd91a3 | /MCP3008_VOC.py | adbd4db4c0f32e40230d0b0275e1e0704bd64273 | [] | no_license | Tinymaxi/Luftmessdaten | 16443a9e0276699569ff7a24942fa4538729a535 | 274859f0cc0758b40b2e8f0796a96699bf4ffa1e | refs/heads/master | 2020-07-13T06:17:53.996156 | 2019-08-28T20:08:33 | 2019-08-28T20:08:33 | 205,014,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | import busio
import digitalio
import board
import adafruit_mcp3xxx.mcp3008 as MCP
import time
from time import sleep
from adafruit_mcp3xxx.analog_in import AnalogIn
#spi = busio.SPI(clock=board.SCK, MISO=board.MISO, MOSI=board.MOSI)
spi = busio.SPI(clock=board.D21, MISO=board.D19, MOSI=board.D20)
cs = digitalio.DigitalInOut(board.D25)
mcp = MCP.MCP3008(spi, cs)
channel = AnalogIn(mcp, MCP.P0)
##print('VOC Raw ADC Value: ', channel.value)
##print('VOC ADC Voltage: ' + str(channel.voltage) + 'V')
def MCP3008_VOC():
MCP3008_VOC = channel.value
sleep(1)
return MCP3008_VOC
#print(MCP3008_VOC())
| [
"[email protected]"
] | |
b4819e1ec3e683284917e6a9291f28ae1220f9c7 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/DELL-NETWORKING-TC.py | a5184bbe6b70262343039230a6f6eb6c4efb5c16 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 18,834 | py | #
# PySNMP MIB module DELL-NETWORKING-TC (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DELL-NETWORKING-TC
# Produced by pysmi-0.3.4 at Wed May 1 12:37:51 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint")
dellNetModules, = mibBuilder.importSymbols("DELL-NETWORKING-SMI", "dellNetModules")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
ModuleIdentity, ObjectIdentity, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, Counter64, MibIdentifier, iso, Gauge32, TimeTicks, Bits, Counter32, NotificationType, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "ObjectIdentity", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "Counter64", "MibIdentifier", "iso", "Gauge32", "TimeTicks", "Bits", "Counter32", "NotificationType", "Integer32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
dellNetTextualConventions = ModuleIdentity((1, 3, 6, 1, 4, 1, 6027, 4, 2))
dellNetTextualConventions.setRevisions(('2009-04-07 12:00', '2008-09-16 12:00', '2008-09-02 12:00', '2007-06-28 12:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: dellNetTextualConventions.setRevisionsDescriptions(('Added new Nemesis card type.', 'Added ExaScale chassis mode and Nemesis card type.', 'Added DellNetCardOperStatus.', 'Added DellNetChassisType and DellNetHundredthdB.',))
if mibBuilder.loadTexts: dellNetTextualConventions.setLastUpdated('200904071200Z')
if mibBuilder.loadTexts: dellNetTextualConventions.setOrganization('Dell Inc')
if mibBuilder.loadTexts: dellNetTextualConventions.setContactInfo('http://www.dell.com/support')
if mibBuilder.loadTexts: dellNetTextualConventions.setDescription('The Textual Convention of Dell Networking OS MIB.')
class DellNetChassisType(TextualConvention, Integer32):
description = 'Dell Networking OS chassis type.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48))
namedValues = NamedValues(("e1200", 1), ("e600", 2), ("e300", 3), ("e150", 4), ("e610", 5), ("c150", 6), ("c300", 7), ("e1200i", 8), ("s2410cp", 9), ("s2410p", 10), ("s50", 11), ("s50e", 12), ("s50v", 13), ("s50nac", 14), ("s50ndc", 15), ("s25pdc", 16), ("s25pac", 17), ("s25v", 18), ("s25n", 19), ("s60", 20), ("s55", 21), ("s4810", 22), ("s6410", 23), ("z9000", 24), ("m-MXL", 25), ("m-IOA", 26), ("s4820", 27), ("s6000", 28), ("s5000", 29), ("s-FN410S-IOA", 30), ("s-FN410T-IOA", 31), ("s-FN2210S-IOA", 32), ("z9500", 33), ("c9010", 34), ("c1048p", 35), ("s4048on", 36), ("s4810on", 37), ("s6000on", 38), ("s3048on", 39), ("z9100", 40), ("s6100", 41), ("s3148p", 42), ("s3124p", 43), ("s3124f", 44), ("s3124", 45), ("s3148", 46), ("s4048ton", 47), ("s6010", 48))
class DellNetInterfaceType(TextualConvention, Integer32):
description = 'Interface types supported by the Dell Networking OS line cards. '
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))
namedValues = NamedValues(("ethernetManagement", 1), ("ethernet100M", 2), ("ethernet1GB", 3), ("ethernet1GBCopper", 4), ("ethernet10GB", 5), ("ethernet10GBCopper", 6), ("sonetOC3OC12", 7), ("sonetOC48OC96", 8), ("sonetOC192", 9), ("ethernet40GB", 10))
class DellNetSystemPortType(TextualConvention, Integer32):
description = 'Port type available in Dell Networking OS series of products.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 99))
namedValues = NamedValues(("portSerial", 1), ("portAux", 2), ("portFastEther", 3), ("port0210E2TV", 4), ("port0210E2TE", 5), ("port2401E24S", 6), ("port2401E24L", 7), ("port12OC12OC3", 8), ("port01OC192", 9), ("port2401E24SEC", 10), ("port2401E24LEC", 11), ("port0210E2TY", 12), ("port0210E2TU", 13), ("port0110EW1YB", 14), ("port0110EW1YC", 15), ("port02S48YC2", 16), ("port0110EX1YB", 17), ("port0110EX1YC", 18), ("port1201F12PB", 19), ("port1201F12PC", 20), ("port0110EX1EB", 21), ("port0110EX1EC", 22), ("port0110EX1YBL", 23), ("port0210EX2YD", 24), ("port0210EX2ED", 25), ("port0210EX2ZD-DEP", 26), ("port0210EW2YD", 27), ("port0110EX1YD", 28), ("port0110EX1ED", 29), ("port0110EX1ZD", 30), ("port0110EW1YD", 31), ("port2401E24PD", 32), ("port0210EX2YD2", 33), ("port0210EX2YE", 34), ("port0110EX1YD2", 35), ("port0110EX1YE", 36), ("port0210EW2YD2", 37), ("port0210EW2YE", 38), ("port0110EW1YE", 39), ("port01OC192SE", 40), ("port2401E24TD", 41), ("port2401E24PE", 42), ("port1201F12PC2", 43), ("port0210EX2ZD", 44), ("port0210EW2YD3", 45), ("port0210EX2ZE", 46), ("port1201F12PE", 47), ("port2401E24PD2", 48), ("port1201E12TD3", 49), ("port0210EX2YD3", 50), ("port0110EX1YD3", 51), ("port1201E12PD3", 52), ("port02S48YE2", 53), ("port0110EX1YE3", 54), ("port1201E12PE3", 55), ("port4801E48PF", 56), ("port2401E24PF3", 57), ("port4801E48TF3", 58), ("port4801E48TF", 59), ("port0410EXW4PF", 60), ("port0210EXW2PF3", 61), ("port9001E90MF", 62), ("port4801E48T1F", 63), ("port1610EXW16PF", 64), ("port0810EXW8PF", 65), ("port0410EXW4PG", 66), ("port4801E48PG", 67), ("port4801E48TG", 68), ("port0210EXW2PG3", 69), ("port2401E24PG3", 70), ("port2401E24TG3", 71), ("port04S48P4G", 72), ("port04S48P4G3", 73), ("port1610EXW16PG", 74), ("port0810EXW8PG3", 75), ("port9001E90MH", 76), ("port1010EXW10SH", 77), ("port1010EXW10SJ", 78), ("port9001E90MJ", 79), ("port5001E50PH", 80), ("port5001E50PJ", 81), ("port1010EXW10PH", 82), ("port1010EXW10PJ", 83), ("port4010EXW40SH", 84), ("port4010EXW40SJ", 
85), ("portUnknown", 99))
class DellNetSystemCardType(TextualConvention, Integer32):
description = 'The processor card supported by the Dell Networking OS products .'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 200, 201, 202, 203, 204, 205, 206, 207, 208, 250, 259))
namedValues = NamedValues(("notPresented", 0), ("lc0210E2TV", 1), ("lc0210E2TE", 2), ("lc2401E24S", 3), ("lc2401E24L", 4), ("lc12OC12OC3", 5), ("lc01OC192", 6), ("lcReserve", 7), ("lc2401E24SEC", 8), ("lc2401E24lEc", 9), ("lc0210E2TY", 10), ("lc0210E2TU", 11), ("lc0110EW1YB", 12), ("lc0110EW1YC", 13), ("lc02S48YC2", 14), ("lc0110EX1YB", 15), ("lc0110EX1YC", 16), ("lc1201F12PB", 17), ("lc1201F12PC", 18), ("lc0110EX1EB", 19), ("lc0110EX1EC", 20), ("lc0110EX1YBL", 21), ("lc0210EX2YD", 22), ("lc0210EX2ED", 23), ("lc0210EX2ZDdep", 24), ("lc0210EW2YD", 25), ("lc0110EX1YD", 26), ("lc0110EX1ED", 27), ("lc0110EX1ZD", 28), ("lc0110EW1YD", 29), ("lc2401E24PD", 30), ("lc0210EX2YD2", 31), ("lc0210EX2YE", 32), ("lc0110EX1YD2", 33), ("lc0110EX1YE", 34), ("lc0210EW2YD2", 35), ("lc0210EW2YE", 36), ("lc0110EW1YE", 37), ("lc01OC192SE", 38), ("lc2401E24TD", 39), ("lc2401E24PE", 40), ("lc1201F12PC2", 41), ("lc0210EX2ZD", 42), ("lc0210EW2YD3", 43), ("lc0210EX2ZE", 44), ("lc1201F12PE", 45), ("lc2401E24PD2", 46), ("lc0210EX2ZD2", 47), ("lc1201E12TD3", 48), ("lc0210EX2YD3", 49), ("lc0110EX1YD3", 50), ("lc1201E12PD3", 51), ("lc02S48YE2", 52), ("lc0110EX1YE3", 53), ("lc1201E12PE3", 54), ("lc4801E48PF", 55), ("lc2401E24PF3", 56), ("lc4801E48TF3", 57), ("lc4801E48TF", 58), ("lc0410EXW4PF", 59), ("lc0210EXW2PF3", 60), ("lc9001E90MF", 61), ("lc4801E48T1F", 62), ("lc1610EXW16PF", 63), ("lc0810EXW8PF", 64), ("lc0410EXW4PG", 65), ("lc4801E48PG", 66), ("lc4801E48TG", 67), ("lc0210EXW2PG3", 68), ("lc2401E24PG3", 69), ("lc2401E24TG3", 70), ("lc04S48P4G", 71), ("lc04S48P4G3", 72), ("lc1610EXW16PG", 73), ("lc0810EXW8PG3", 74), ("lc9001E90MH", 75), ("lc1010EXW10SH", 76), ("lc1010EXW10SJ", 77), ("lc9001E90MJ", 78), ("lc5001E50PH", 79), ("lc5001E50PJ", 80), ("lc1010EXW10PH", 81), ("lc1010EXW10PJ", 82), ("lc4010EXW40SH", 83), ("lc4010EXW40SJ", 84), ("z9500LC12", 85), ("z9500LC36", 86), ("z9500LC48", 87), ("c9000LC24X10GCu", 88), ("c9000LC24X10GOptics", 89), ("c9000LC6X40G", 90), ("rpmCard", 200), 
("rpmCardEB", 201), ("rpmCardED", 202), ("rpmCardEE", 203), ("rpmCardEE3", 204), ("rpmCardEF", 205), ("rpmCardEF3", 206), ("rpmCardEH", 207), ("supCard", 208), ("sfmCard", 250), ("cardUnknown", 259))
class DellNetCardOperStatus(TextualConvention, Integer32):
description = "The operational status provides further condition of the card. If AdminStatus is changed to 'up', then the valid state is 'ready' - the card is present and ready and operational packets can be passed If AdminStatus is changed to 'down', the states can be as followed: 'cardNotmatch'- the card does not matche what is configured 'cardProblem' - the card detects hardware problems 'diagMode' - the card in the diagnostic mode 'cardAbsent' - the card is not present 'offline' - the card is not used."
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
namedValues = NamedValues(("ready", 1), ("cardNotmatch", 2), ("cardProblem", 3), ("diagMode", 4), ("cardAbsent", 5), ("offline", 6))
# ---------------------------------------------------------------------------
# Auto-generated pysnmp TextualConvention classes for the DELL-NETWORKING-TC
# MIB module.  Each class couples a base SNMP type with the value constraints
# and enumeration labels declared by the MIB; `description`/`status` strings
# are carried over verbatim from the MIB source.  Do not hand-edit values.
# ---------------------------------------------------------------------------

# Physical port type of an interface.
class DellNetIfType(TextualConvention, Integer32):
    description = 'Port type available in Dell Networking OS products.'
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 99))
    namedValues = NamedValues(("portSerial", 1), ("portFastEther", 2), ("portGigEther", 3), ("port10GigEther", 4), ("port40GigEther", 5), ("portFibreChannel", 6), ("portAux", 7), ("portUnknown", 99))
# Processor-card models used in the C-Series chassis.
class DellNetCSeriesCardType(TextualConvention, Integer32):
    description = 'The processor card supported by the Dell Networking OS C-Series system products .'
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 99, 1024, 1026, 1027, 1028, 1280, 1284, 2049, 200))
    namedValues = NamedValues(("notPresented", 0), ("cardUnknown", 99), ("lc4802E48TB", 1024), ("lc0410EX4PB", 1026), ("lc4801E48PB", 1027), ("lc4610E46TB", 1028), ("lc4802E48VB", 1280), ("lc4610E46VB", 1284), ("lc0810EX8PB", 2049), ("rpmCard", 200))
# Kind of processor module hosted on a card.
class DellNetProcessorModuleType(TextualConvention, Integer32):
    description = 'The processor modules supported by the Dell Networking OS card.'
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
    namedValues = NamedValues(("controlProcessor", 1), ("routingProcessor1", 2), ("routingProcessor2", 3), ("linecardProcessor", 4), ("rpmProcessor", 5), ("routingProcessor", 6))
# Bitmask of occupied chassis slots (see the MIB description for bit layout).
class DellNetSlotState(TextualConvention, Integer32):
    description = 'A bit string that represents the status of the slot in a E1200 chassis. Slot# 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 1 1 1 0 1 1 1 0 1 1 1 1 1 1 1 1 | | | | | Least Significant bit <-----+ | +-----> Most Significant bit The slot numbers starts with the most significant bit. The most significant bit represents slot number 1 and the least significant bit is slot 16. A bit string that represents the status of the slot in a E600 chassis. Slot# 1 2 3 4 5 6 7 8 9 1 1 1 0 1 1 1 0 1 | | | V | Least Significant bit | +-----> Most Significant bit The slot numbers starts with the most significant bit. The most significant bit represents slot number 1 and the least significant bit is slot 9. Each slot occupies a bit. The value 1 indicates slot is in used and 0 indicates slot is empty.'
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 65535)
# Numeric identifier of a chassis slot.
class DellNetSlotID(TextualConvention, Integer32):
    description = 'Dell Networking OS Chassis Slot ID. '
    status = 'current'
# Software build date rendered as MM/DD/YYYY.
class DellNetSwDate(DisplayString):
    description = 'The date format is MM/DD/YYYY. MM = Month DD = Day YYYY = Year For example, January 24, 2002 would be displayed as: 01/24/2002 '
    status = 'current'
# Manufacturing date code rendered as PPWWYYYY.
class DellNetMfgDate(DisplayString):
    description = 'The manufacturing date format is PPWWYYYY PP = Plant #(ie, what building made the board;01= Sanmina Bldg 4,02=Sanmina Bldg 2) WW = Week number (01 = First full week of the year ie, Sunday through Saturday) YYYY = Year For example, 01482001 would have been produced at Samina Bldg 4 during the first week of December, 2001. '
    status = 'current'
# Bitmap of bridge ports, eight ports per octet (MSB = lowest port number).
class PortList(TextualConvention, OctetString):
    description = "Each octet within this value specifies a set of eight ports, with the first octet specifying ports 1 through 8, the second octet specifying ports 9 through 16, etc. Within each octet, the most significant bit represents the lowest numbered port, and the least significant bit represents the highest numbered port. Thus, each port of the bridge is represented by a single bit within the value of this object. If that bit has a value of '1' then that port is included in the set of ports; the port is not included if its bit has a value of '0'."
    status = 'current'
# VLAN identifier; 1-4094 are IEEE 802.1Q global IDs, >4095 are agent-local.
class DellNetVlanID(TextualConvention, Integer32):
    description = 'Dell Networking OS VLAN ID. A value used to index per-VLAN tables: values of 0 and 4095 are not permitted; if the value is between 1 and 4094 inclusive, it represents an IEEE 802.1Q VLAN-ID with global scope within a given bridged domain (see VlanId textual convention). If the value is greater than 4095 then it represents a VLAN with scope local to the particular agent, i.e. one without a global VLAN-ID assigned to it. Such VLANs are outside the scope of IEEE 802.1Q but it is convenient to be able to manage them in the same way using this MIB.'
    status = 'current'
# Operating mode of the chassis platform.
class DellNetChassisMode(TextualConvention, Integer32):
    description = 'The chassis mode in Dell Networking series of products.'
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6))
    namedValues = NamedValues(("nonJumbo", 0), ("etherScale", 1), ("mixed", 2), ("teraScale", 3), ("cseries1", 4), ("sseries1", 5), ("exaScale", 6))
# Numeric identifier of a QoS queue.
class DellNetQueueID(TextualConvention, Integer32):
    description = 'Dell Networking OS Queue ID. '
    status = 'current'
# Numeric identifier of a port-pipe (forwarding pipeline).
class DellNetPortPipeID(TextualConvention, Integer32):
    description = 'Dell Networking OS PortPipe ID. '
    status = 'current'
# Hardware revision of Cyclone-based platforms.
class DellNetCycloneVersion(TextualConvention, Integer32):
    description = 'the Dell Networking OS Cyclone based hardware version'
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
    namedValues = NamedValues(("onePointFive", 1), ("twoPointZero", 2), ("threePointZero", 3))
# CAM partition identifier; value ranges encode the layer/direction (see description).
class DellNetCamPartitionType(TextualConvention, Integer32):
    description = 'The CAM partition supported in the Dell Networking OS line card. The sequecing used here is Layer 2 Ingress CAM range is 1 - 30 Layer 2 Egress CAM range is 31 - 60 Layer 3 Ingress CAM range is 61 - 90 Layer 3 Egress CAM range is 91 - 120 Layer 3 Host abd LPM CAM (BCM specific) range is 121 - 150 '
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 31, 61, 62, 63, 64, 65, 66, 67, 91, 121, 122))
    namedValues = NamedValues(("layer2AclIngress", 1), ("layer2AclPvstIngress", 2), ("layer2FibIngress", 3), ("layer2FibEgress", 31), ("layer3AclIngress", 61), ("layer3FibIngress", 62), ("layer3SysFlowIngress", 63), ("layer3TrcListIngress", 64), ("layer3McastFibIngress", 65), ("layer3QosIngress", 66), ("layer3PbrIngress", 67), ("layer3AclEgress", 91), ("layer3ExtHost", 121), ("layer3ExtLPM", 122))
# Power level in hundredths of a dB (display hint 'd-2': -7.23 dB -> -723).
class DellNetHundredthdB(TextualConvention, Integer32):
    description = 'This data type represents power levels that are normally expressed in dB. Units are in hundredths of a dB; for example, -7.23 dB will be represented as -723.'
    status = 'current'
    displayHint = 'd-2'
# Category of device running Dell Networking OS.
class DellNetDeviceType(TextualConvention, Integer32):
    description = 'The device category running the Dell Networking OS'
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
    namedValues = NamedValues(("chassis", 1), ("stack", 2), ("rpm", 3), ("supervisor", 4), ("linecard", 5), ("port-extender", 6))
# Operational status of a port extender.
class DellNetPEOperStatus(TextualConvention, Integer32):
    description = 'The operational status of the port extender'
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
    namedValues = NamedValues(("up", 1), ("down", 2))
# Publish every textual convention under the DELL-NETWORKING-TC module name.
mibBuilder.exportSymbols("DELL-NETWORKING-TC", dellNetTextualConventions=dellNetTextualConventions, DellNetSwDate=DellNetSwDate, DellNetPEOperStatus=DellNetPEOperStatus, DellNetInterfaceType=DellNetInterfaceType, DellNetPortPipeID=DellNetPortPipeID, DellNetCamPartitionType=DellNetCamPartitionType, DellNetIfType=DellNetIfType, DellNetCardOperStatus=DellNetCardOperStatus, DellNetSlotID=DellNetSlotID, DellNetCSeriesCardType=DellNetCSeriesCardType, PortList=PortList, DellNetVlanID=DellNetVlanID, DellNetDeviceType=DellNetDeviceType, DellNetChassisMode=DellNetChassisMode, PYSNMP_MODULE_ID=dellNetTextualConventions, DellNetCycloneVersion=DellNetCycloneVersion, DellNetMfgDate=DellNetMfgDate, DellNetQueueID=DellNetQueueID, DellNetSlotState=DellNetSlotState, DellNetSystemPortType=DellNetSystemPortType, DellNetHundredthdB=DellNetHundredthdB, DellNetChassisType=DellNetChassisType, DellNetProcessorModuleType=DellNetProcessorModuleType, DellNetSystemCardType=DellNetSystemCardType)
| [
"[email protected]"
] | |
a32df99969cc2b00821ca9dfd9e146584b61aad7 | ed63b9b615c0f1484746e87b54a0c0b233ddf5c2 | /tests/test_parser.py | 90b755aedee63b62e364f59f3cb3c53381aaf2e0 | [
"MIT"
] | permissive | timofurrer/embedeval | ae02026da6cd5601b16afe1cbb543552cbf461ac | 08a69c950c9a0ac59a8a0ca728af2627c7bcc43a | refs/heads/master | 2020-07-29T10:41:56.322842 | 2020-01-20T10:32:46 | 2020-01-20T10:32:51 | 209,766,108 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,581 | py | """
embedeval
~~~~~~~~~
NLP Embedding Evaluation Tool
:copyright: (c) 2019 by Timo Furrer <[email protected]>
:license: MIT, see LICENSE for more details.
"""
import textwrap
import uuid
import numpy as np
import pytest
from embedeval.parsers.word2vec_gensim import load_embedding as gensim_load_embedding
from embedeval.parsers.word2vec_simple import load_embedding as simple_load_embedding
def create_tmp_word_embedding(path, embedding_content):
    """Write *embedding_content* (dedented and stripped) to a fresh file.

    A random UUID is used as the file name under *path*; the new file's
    path is returned so tests can feed it to an embedding parser.
    """
    # FIXME(TF): maybe refactor interface so that file system can be avoided in unit tests.
    target = path / str(uuid.uuid4())
    payload = textwrap.dedent(embedding_content).strip()
    with open(target, "w", encoding="utf-8") as handle:
        handle.write(payload)
    return target
# Runs once per parser implementation (simple and gensim-backed).
@pytest.mark.parametrize(
    "load_embedding_func",
    [
        pytest.param(simple_load_embedding, id="simple parser"),
        pytest.param(gensim_load_embedding, id="gensim parser"),
    ],
)
def test_should_parse_word2vec_with_single_entry(load_embedding_func, tmp_path):
    """Loading a Word2Vec Embedding should pass for single word"""
    # GIVEN
    # Header "1 2" declares one vocabulary entry with 2-dimensional vectors;
    # create_tmp_word_embedding dedents and strips the literal before writing.
    word2vec_path = create_tmp_word_embedding(
        tmp_path,
        """
        1 2
        word 1.0 2.0
        """,
    )
    # WHEN
    embedding = load_embedding_func(word2vec_path)
    # THEN
    assert embedding.get_words() == ["word"]
    assert np.array_equal(embedding.get_word_vector("word"), np.array([1.0, 2.0]))
# Runs once per parser implementation (simple and gensim-backed).
@pytest.mark.parametrize(
    "load_embedding_func",
    [
        pytest.param(simple_load_embedding, id="simple parser"),
        pytest.param(gensim_load_embedding, id="gensim parser"),
    ],
)
def test_should_parse_word2vec_with_multiple_entires(load_embedding_func, tmp_path):
    """Loading a Word2Vec Embedding should pass for multiple word entries"""
    # GIVEN
    # Header "4 2" declares four vocabulary entries with 2-dimensional vectors.
    word2vec_path = create_tmp_word_embedding(
        tmp_path,
        """
        4 2
        word1 1.0 2.0
        word2 3.0 4.0
        word3 5.0 6.0
        word4 7.0 8.0
        """,
    )
    # WHEN
    embedding = load_embedding_func(word2vec_path)
    # THEN
    # Word order and each word's vector must survive the round trip.
    assert embedding.get_words() == ["word1", "word2", "word3", "word4"]
    assert np.array_equal(embedding.get_word_vector("word1"), np.array([1.0, 2.0]))
    assert np.array_equal(embedding.get_word_vector("word2"), np.array([3.0, 4.0]))
    assert np.array_equal(embedding.get_word_vector("word3"), np.array([5.0, 6.0]))
    assert np.array_equal(embedding.get_word_vector("word4"), np.array([7.0, 8.0]))
| [
"[email protected]"
] | |
b4b13074223e847e9ab9ad0da398eb22f327322b | 5b8325ad8ec8e87fe85a74ada8f064b0a3ed8b94 | /main.py | 3d1afd8c64df97b7e9f99d3584ece9b90c09f0ec | [] | no_license | Ilyaslat/teensinAI_equipe6 | a9e48fe0a9d7ef5567c057dbf30b5fa88ed917db | 4d60eb9fffe646f759d916579e2a1dce771a3e99 | refs/heads/main | 2023-08-13T18:31:11.203045 | 2021-10-11T19:53:09 | 2021-10-11T19:53:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,667 | py | import tensorflow as tf
# NOTE(review): trains a tiny dense network on the XOR truth table; assumes a
# TensorFlow 2.x install where `keras` ships inside `tensorflow`.
from tensorflow import keras
from tensorflow.keras import layers
# Define our training input and output data with type 16 bit float
# Each input maps to an output
X = tf.constant([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=tf.float16)
Y = tf.constant([[0], [1], [1], [0]], dtype=tf.float16)
# Create a new Sequential Model
model = keras.Sequential()
# Add our layers
model.add(layers.Dense(
    4, # Amount of Neurons
    input_dim=2, # Define an input dimension because this is the first layer
    activation='relu' # Use relu activation function because all inputs are positive
))
model.add(layers.Dense(
    1, # Amount of Neurons. We want one output
    activation='sigmoid' # Use sigmoid because we want to output a binary classification
))
# Compile our layers into a model
model.compile(
    loss='mean_squared_error', # The loss function that is being minimized
    optimizer='adam', # Our optimization function
    # Metrics are different values that you want the model to track while training
    metrics=['binary_accuracy']
)
# Our function to take in two numerical inputs and output the relevant boolean
def cleanPredict(a, b):
    """Return True when the model classifies input pair (a, b) as XOR-true."""
    inputTens = tf.constant([[a, b]])
    # model.predict(input) yields a 2d tensor
    return round(model.predict(inputTens)[0][0]) == 1
# Will yield a random value because model isn't yet trained
print(cleanPredict(1, 0))
model.fit(
    X, # Input training data
    Y, # Output training data
    epochs=2000, # Amount of iterations we want to train for
    verbose=1 # Amount of detail you want shown in terminal while training
)
print(cleanPredict(1, 0)) # Should Yield True
| [
"[email protected]"
] | |
32b305acaf5f83143bcdae4ddd616f2adcd7642a | 686248eaf40284ed593768057c58aeebaeae1497 | /cg.py | 9e0807e3b58cd3dd8e51a7fcd66bf9cd276a3220 | [] | no_license | chyyuu/kernel-call-graph | 76e6d3b4599886b3eac46083457fad472dc7d9a8 | 76fb610220bfae24f686f0bd37d8d806bf374548 | refs/heads/master | 2021-07-11T12:31:06.361565 | 2017-10-15T16:05:39 | 2017-10-15T16:05:39 | 106,992,794 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import networkx as nx
import matplotlib.pyplot as plt
#G=nx.path_graph(4)
#nx.write_adjlist(G, "/chycode/kernel-call-graph/g2.txt")
#G=nx.read_adjlist("/chycode/kernel-call-graph/g2.txt", create_using=nx.DiGraph())
#nx.write_adjlist(G, "/chycode/kernel-call-graph/g.txt")
G=nx.read_adjlist("/chycode/kernel-call-graph/g.txt", nodetype=str, create_using=nx.DiGraph())
#nx.draw(G)
#plt.savefig("path.png")
print "Nodes: ", G.nodes()
print "Edges: ", G.edges() | [
"[email protected]"
] | |
7968b2e1e5caed24d71f1e3153d7880c5f146136 | ea9704f1bddb32cc608525147f5d414184063de5 | /app/api/tokens.py | a98a8df86f54b70cf34ef45c2a98af54f62b59ba | [] | no_license | bastienbeurier/partners-web | f7fbcdf473873c76fe1b487e788e5c0391aef6a5 | 2903ccacd89f1c699e35df07d093cd7219ffadaf | refs/heads/master | 2022-12-13T07:52:38.645855 | 2018-12-25T10:52:03 | 2018-12-25T10:52:03 | 161,174,359 | 0 | 0 | null | 2022-12-08T01:28:19 | 2018-12-10T12:49:59 | Python | UTF-8 | Python | false | false | 495 | py | from flask import jsonify, g
from app import db
from app.api import bp
from app.api.auth import basic_auth, token_auth
@bp.route('/tokens', methods=['POST'])
@basic_auth.login_required
def get_token():
    """Issue (or return) the API token for the basic-auth'd user as JSON."""
    token = g.current_user.get_token()
    # Commit so a freshly generated token is persisted before being returned.
    db.session.commit()
    return jsonify({'token': token})
@bp.route('/tokens', methods=['DELETE'])
@token_auth.login_required
def revoke_token():
    """Invalidate the caller's current API token.

    NOTE(review): returns 200 with a JSON body; a 204 No Content response is
    the more conventional choice for a DELETE endpoint.
    """
    g.current_user.revoke_token()
    db.session.commit()
    return jsonify({'message': 'success'})
| [
"[email protected]"
] | |
813f053bea3bd35889f9553328b8c671b5afb626 | f23959561944599f02943350429eb76d8ad4cf81 | /hangman.py | b82bbc9228432b32684e559efabda769a60ab9bd | [] | no_license | yuuNishimura/hangman | 822b7df229215c6c4462bf1db775553473606e0b | 9b7f98cc6df93d9ce0fefd96916b5ed6f9ec9678 | refs/heads/master | 2023-06-17T18:11:22.030088 | 2021-07-13T09:26:07 | 2021-07-13T09:26:07 | 385,514,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,138 | py | import random
def hangman():
    """Play one interactive round of hangman (Japanese prompts).

    The secret word is picked at random from a two-word list; the player
    loses after len(stages) - 1 wrong guesses.
    """
    a = ["cat", "dog"]
    n = random.randint(0, 1)
    word = a[n]
    wrong = 0
    # Gallows art revealed one line per wrong guess (index 0 is empty).
    stages = ["",
              "________ ",
              "| | ",
              "| 0 ",
              "| /|\ ",
              "| | ",
              "| / \ "
              ]
    # rletters tracks unguessed letters; matched slots are replaced with "$"
    # so a repeated guess reveals the next occurrence (or counts as wrong
    # once all occurrences are revealed).
    rletters = list(word)
    board = ["_"] * len(word)
    win = False
    print("ハングマンへようこそ!")
    while wrong < len(stages) - 1:
        print("\n")
        msg = "1文字を予想してね"
        char = input(msg)
        if char in rletters:
            cind = rletters.index(char)
            board[cind] = char
            rletters[cind] = "$"
        else:
            wrong += 1
        print(" ".join(board))
        e = wrong + 1
        print("\n".join(stages[0:e]))
        if "_" not in board:
            print("あなたの勝ち!")
            print(" ".join(board))
            win = True
            break
    if not win:
        print("\n".join(stages[0:wrong + 1]))
        print("あなたの負け!正解は{}。".format(word))
# Start a game immediately when the module is executed/imported.
hangman()
| [
"[email protected]"
] | |
1e7fd967ad595fb9792cb574c9219de21724fb93 | ac652ff7636d4c3336918d0f96aa8ea1bba3ab28 | /fastvid/posts/serializers/postmodel.py | c0152d21e28e0e4c646800e2244e7b172f680400 | [] | no_license | pn101/fastvid | eebff58e9dd6b967a52361713ed34462e0713d88 | 9f57c577c558906e3fd5c3ab44f112588ae84ed2 | refs/heads/develop | 2021-01-20T18:58:34.398325 | 2016-07-05T09:29:49 | 2016-07-05T09:29:49 | 61,545,668 | 0 | 0 | null | 2016-06-20T13:14:42 | 2016-06-20T12:22:08 | null | UTF-8 | Python | false | false | 412 | py | from rest_framework import serializers
from posts.models import Post
class PostModelSerializer(serializers.ModelSerializer):
    """DRF serializer for Post exposing the author's username alongside
    the post fields."""
    # Flattens the related user's username onto the serialized post.
    username = serializers.CharField(source='user.username')
    class Meta:
        model = Post
        fields = [
            'pk',
            'username',
            'title',
            'content',
            'youtube_original_url',
            'youtube_embed_url',
        ]
| [
"[email protected]"
] | |
5dd69b1619ff38f19f642f1981ad84e205ad6583 | 49f6ecfbf4cf66b0867311ce7d4ba5f690410b2f | /greeter_client.py | bc93f9ed2c90fa240e53385b66881e941c070aaa | [] | no_license | jha8/grpc | b93d434d6c1c58517de52a4158092069cc29c900 | 0d24e265addfd25806b807c0457a275bc1805887 | refs/heads/master | 2021-09-24T15:27:00.380727 | 2018-10-10T22:25:00 | 2018-10-10T22:25:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,235 | py | # Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Python implementation of the GRPC helloworld.Greeter client."""
from __future__ import print_function
import grpc
import reverse_pb2
import reverse_pb2_grpc
def run():
    """Open an insecure channel to localhost:50051 and call SendInteger once."""
    # NOTE(gRPC Python Team): .close() is possible on a channel and should be
    # used in circumstances in which the with statement does not fit the needs
    # of the code.
    with grpc.insecure_channel('localhost:50051') as channel:
        stub = reverse_pb2_grpc.integer_messageStub(channel)
        # Sends the fixed value 32; presumably the server reverses/echoes it --
        # confirm against the service definition in reverse.proto.
        response = stub.SendInteger(reverse_pb2.requestInteger(value = 32))
        print("Greeter client received: " + str(response.value))
if __name__ == '__main__':
run() | [
"[email protected]"
] | |
def make_pizza(size, *toppings):
    """Print a summary of the pizza we are about to make.

    Args:
        size: Pizza diameter in inches.
        *toppings: Any number of topping names, printed one per line.
    """
    # Restored: the original `def` line was garbled by dataset-extraction
    # metadata fused onto it.
    print(f"\nMaking a {size}-inch pizza with the following toppings:")
    for topping in toppings:
        print(f"- {topping}")
| [
"[email protected]"
] | |
def string_match(a, b):
    """Return how many positions hold the same length-2 substring in a and b.

    Only the overlapping prefix of the two strings is compared; strings
    shorter than 2 characters yield 0.
    """
    # Restored: the original first and last lines were garbled by
    # dataset-extraction metadata fused onto them.  Also replaced the
    # build-a-list-of-1s-then-len() pattern with a direct sum().
    length = min(len(a), len(b))
    return sum(1 for i in range(length - 1) if a[i:i + 2] == b[i:i + 2])
"[email protected]"
] | |
03f01b12540d4dda0403bbe2e399a4ae7b21bdfd | 76caf7060397c9b697e61613a40bd4d27dd93128 | /manage.py | 9105de0fd8553cd0f7e1d10ee894681e49aaf073 | [] | no_license | balaprasadmb/Eblogger | a0bb0f894e876ca86290bb70291271f43b64c5a3 | 115f931fb55914477db142909c755fc4cb431537 | refs/heads/master | 2022-12-02T19:10:22.442511 | 2020-02-25T12:27:55 | 2020-02-25T12:27:55 | 89,342,554 | 0 | 0 | null | 2022-11-22T04:28:20 | 2017-04-25T09:29:32 | CSS | UTF-8 | Python | false | false | 260 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "eblogger.BootCamp.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
c35af357be8ae118dde3133aaaff753973cb786b | f756eedd0fd4cee9b369978d1e20287dd579e4da | /InmoovScript/services/7_Inmoov.py | ac1ef25dc0796c3cb41c2579757db642007edf0a | [] | no_license | linuxrodo/inmoov | cf02421443d6976f153a64c898e2c209e32cc246 | fe8391d6d59ccdf6bdf5b382872fdf4bf77f4b09 | refs/heads/master | 2021-01-21T09:06:52.835689 | 2017-02-23T04:07:38 | 2017-02-23T04:07:38 | 82,861,443 | 0 | 0 | null | 2017-02-22T23:05:24 | 2017-02-22T23:05:24 | null | UTF-8 | Python | false | false | 934 | py | # ##############################################################################
# INMOOV SERVICE
# ##############################################################################
# ##############################################################################
# MRL SERVICE CALL
# ##############################################################################
# NOTE(review): `i01`, `Runtime`, `CheckArduinos`, `ScriptType` and
# `MyRightPort` are injected by earlier InmoovScript service files loaded into
# the same MyRobotLab interpreter -- this script is not standalone.
inMoov=i01
#varduinoright = Runtime.start("varduinoright","VirtualArduino")
#varduinoright.connect(MyRightPort)
#varduinoleft = Runtime.start("varduinoleft","VirtualArduino")
#varduinoleft.connect(MyLeftPort)
#Inmoov Left / right arduino connect
# Attach the right-side Arduino when this install covers the right side.
if ScriptType=="RightSide" or ScriptType=="Full":
 right = Runtime.createAndStart("i01.right", "Arduino")
 RightPortIsConnected=CheckArduinos(right,MyRightPort)
if ScriptType=="LeftSide" or ScriptType=="Full":
left = Runtime.createAndStart("i01.left", "Arduino")
LeftPortIsConnected=CheckArduinos(left,MyLeftPort) | [
"[email protected]"
] | |
427df9e17f2ecf2d62cae9972583d48410810886 | 46def0fe8f0c3106e7abd5675730ec1905dc3a29 | /customer_transfer/models.py | 3327a7c616316ce3f47c0e570c86c8bfa172087d | [] | no_license | PillaiManish/Bank-Website-Django- | 23c88d4e694f8534918b58ed9cf31e246acaf020 | dd63deb94b52f429a6ff3560a4d5f522e9a56c1c | refs/heads/master | 2022-11-29T04:53:17.784472 | 2020-08-17T13:33:51 | 2020-08-17T13:33:51 | 245,742,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | from django.db import models
# Create your models here.
class customer_transfer(models.Model):
    """A transfer of `to_amount` from user `self_userid` to user `to_userid`.

    Restored: the original final line was garbled by dataset-extraction
    metadata fused onto it.
    """
    self_userid = models.IntegerField()  # sender's user id
    to_userid = models.IntegerField()  # recipient's user id
    # auto_now stamps the date on *every* save, not only on creation --
    # auto_now_add would be the usual choice for a transfer timestamp.
    date = models.DateField(auto_now=True)
    to_amount = models.IntegerField()  # amount transferred
    def __str__(self):
        return str(self.self_userid)
"[email protected]"
] | |
f5c2f6f667942466f986c459ee041e49ea69a197 | c1975972f4ebf8ce40515a0f09a6cb4aca951d94 | /venv/Scripts/easy_install-3.8-script.py | d2540252aaa23d2747328ccf4c55798dc84f918e | [] | no_license | anqier-lala/PO_001 | e30eb77b9a4be2614831b32ec47b8aa75fde87f6 | 0f0a437b2b4d1d7b393d41af1a1afcef92975d41 | refs/heads/master | 2022-07-03T19:00:51.789440 | 2020-05-19T15:29:57 | 2020-05-19T15:29:57 | 259,664,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | #!D:\Git_code\PO_001\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.8'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Generated setuptools console-script wrapper: strip the "-script.py" /
    # ".exe" suffix so argv[0] matches the registered console name, then
    # dispatch to the easy_install entry point and propagate its exit code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.8')()
    )
| [
"[email protected]"
] | |
1523aeb6c776c3d3f4d1591ad4e5cc42e0987fcf | fe149dc01dd185f43197038c97d6cc049fb6ba68 | /blog/migrations/0004_auto_20190419_0845.py | 7b8c4632fef3d25a4ea6ad97561f8a19c285b5b1 | [] | no_license | LBarry97/mysite | eee6e0971d394cf20f75d99919e067944b5ec714 | 5715add5c362e90e731afc6544007aafdfa6e0a3 | refs/heads/master | 2020-05-02T23:10:37.837629 | 2019-04-19T09:21:36 | 2019-04-19T09:21:36 | 178,274,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,273 | py | # Generated by Django 2.1.7 on 2019-04-19 08:45
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.contrib.taggit
import modelcluster.fields
class Migration(migrations.Migration):
    """Auto-generated: adds the BlogPageTag through-model and a tags manager
    on BlogPage (wagtail/taggit tagging support)."""
    dependencies = [
        ('taggit', '0002_auto_20150616_2121'),
        ('blog', '0003_blogpagegalleryimage'),
    ]
    operations = [
        # Through-table linking BlogPage to taggit.Tag.
        migrations.CreateModel(
            name='BlogPageTag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content_object', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='tagged_items', to='blog.BlogPage')),
                ('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_blogpagetag_items', to='taggit.Tag')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Expose the tags on BlogPage via the new through-model.
        migrations.AddField(
            model_name='blogpage',
            name='tags',
            field=modelcluster.contrib.taggit.ClusterTaggableManager(blank=True, help_text='A comma-separated list of tags.', through='blog.BlogPageTag', to='taggit.Tag', verbose_name='Tags'),
        ),
    ]
| [
"[email protected]"
] | |
a11834a8740c99d04887a515be714497458a6f92 | 33b3ab4ac3f1c4f8db6be538777a9d16c0c9045d | /textmine.py | 8f197587fa7ca7523dfaaa48ba51603e19610c63 | [
"MIT"
] | permissive | fredryce/stocker | fbe643d3406fbf63af6a13562afd8822e65e244a | 041fbe8348f7a035a607a214477cf423c4259171 | refs/heads/main | 2023-07-12T20:00:54.328703 | 2021-08-03T22:13:33 | 2021-08-03T22:13:33 | 392,459,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,964 | py | import spacy
import sqlite3
import pandas as pd
import json
import robin_stocks as r
import yfinance as yf
import re, string
from os import makedirs, getcwd, path
import threading
from datetime import datetime, timedelta
#for the user, associate the user with the most recent stock he/she disccussed about
#we can use the basic youtube trading algo for long term invtestment
#use kelly formula, based the percentage on the faith of the discord chat
#https://www.youtube.com/watch?v=Hi-zhLgP_TQ&ab_channel=%E5%BC%82%E7%95%8C%E7%95%AA%E8%8C%84%E8%AF%B4%E7%BE%8E%E8%82%A1
#https://www.youtube.com/watch?v=FZ9Kf1xfA40&ab_channel=%E5%BC%82%E7%95%8C%E7%95%AA%E8%8C%84%E8%AF%B4%E7%BE%8E%E8%82%A1
#theory of large numbers maybe each user in discord's call is following a prob distribution
'''
high cred: first to call out stock
shortest duration
highest gain
low number of people call out the same stock
#returns prob of wining vs prob of losing and the amount to win and lose maxmize profit pass in kelly for each investment interval
#the formula should mimic the behavior of a sigmoid function where x is the result from the parameters and y is the cred score
#\frac{6}{\frac{1}{6}+e^{-x}\ }
low:
'''
#
import logging
class VideoModel(object):
    #uses youtube model with kelly and discord chat faith determination
    """Placeholder for the long-term strategy model sketched in the comments
    above; no behavior is implemented yet."""
    def __init__(self):
        pass
    def kelly_formula(self):
        """Stub: intended to size positions via the Kelly criterion."""
        pass
#this user can be removed each user its own table with
#this allows to see which user have more influence on the stock market price is more accurate
class NLPstock(object):
    """Extract candidate stock tickers from chat messages and track per-ticker
    call statistics in a local SQLite database (``data/<db_name>``).

    NOTE(review): ticker strings (derived from chat content) are interpolated
    directly into SQL as table names via ``%`` formatting.  sqlite3 cannot
    parameterize identifiers, but since the text originates from users this
    is an injection risk -- tickers should be validated against a strict
    pattern (e.g. ``[A-Z]{2,4}``) before being used as table names.
    """
    def __init__(self, db_name="stocks.db"):
        # spaCy English model used for ORG entity extraction in get_stocks().
        self.nlp = spacy.load("en_core_web_sm")
        self.db_name = db_name
        self._current_time = datetime.now()
        # Reads through the property getter, so no pre-market date shift here.
        self._date = self.current_time.date()
    @property
    def current_time(self):
        """Most recent timestamp assigned via the ``current_time`` setter."""
        return self._current_time
    @current_time.setter
    def current_time(self, ct):
        #self.start_hours = ct.replace(hour=9, minute=30, second=0, microsecond=0)
        #self.end_hours = ct.replace(hour=16, minute=00, second=0, microsecond=0)
        # 05:00-13:59 is attributed to the previous calendar day -- presumably
        # pre-market activity belongs to the prior trading date; confirm window.
        if (ct.hour >= 5) and (ct.hour < 14):
            self._date = (ct + timedelta(days=-1)).date()
            logging.info(f"setting time.. current hour is {ct.hour}, {self._date} ")
        self._current_time = ct
    def update_stock_table(self, stock_tk, message, c):
        """Bump today's mention count in the per-ticker table, inserting the
        first row of the day (recording the caller) when none exists.

        ``c`` is an open sqlite3 cursor; the caller owns commit/close.
        """
        # WARNING: table name interpolated into SQL -- see class docstring.
        c.execute("SELECT * FROM %s WHERE today = ?" % (stock_tk), (str(self._date),))
        rows = c.fetchall()
        logging.info(f"try to fetch for {str(self._date)} stock is {stock_tk} result {rows}")
        if rows:
            c.execute("UPDATE %s SET today_count = today_count + 1 WHERE today = ?" % (stock_tk), (str(self._date),))
            logging.info(f"find existing {str(self._date)} for stock {stock_tk}")
        else:
            #first time of the day
            c.execute('INSERT INTO %s VALUES (?,?,?,?,?)'% (stock_tk), (
                self._date,
                0,
                None,
                message['author']['id'],
                message['timestamp']
            ))
            logging.info(f"NO existing {str(self._date)} for stock {stock_tk} creating..... ")
    def insert_stock(self, stock_tk, tk_value, message):
        """Register a brand-new ticker: add a row to ``stocks`` and create the
        ticker's own daily-stats table.  ``tk_value`` is a yfinance Ticker.
        """
        logging.info(f"inserting stock {stock_tk}.......")
        dbdir = path.join(getcwd(), 'data')
        if not path.exists(dbdir):
            makedirs(dbdir)
        dbfile = path.join(dbdir, self.db_name)
        db = sqlite3.connect(dbfile)
        c = db.cursor()
        c.execute('''CREATE TABLE IF NOT EXISTS stocks (
            ticker TEXT NOT NULL PRIMARY KEY,
            name TEXT,
            count INTEGER,
            call_user TEXT,
            call_price REAL,
            call_time TEXT
            )''')
        # .info['longName'] / .history() hit the network; KeyError here is
        # caught by process_org() as "unknown ticker".
        c.execute('INSERT INTO %s VALUES (?,?,?,?,?,?)'% ("stocks"), (
            stock_tk,
            tk_value.info['longName'],
            0,
            message['author']['id'],
            tk_value.history('1d')['Close'][0],
            message['timestamp']
        ))
        #when the stock is already made sure to be true
        # WARNING: ticker used as a table name -- see class docstring.
        c.execute('''CREATE TABLE IF NOT EXISTS %s (
            today TEXT NOT NULL PRIMARY KEY,
            today_count INTEGER,
            top_user TEXT,
            first_call TEXT,
            call_time TEXT
            )''' %(stock_tk))
        self.update_stock_table(stock_tk, message, c)
        logging.info(f"{stock_tk} Insert Sucess")
        db.commit()
        db.close()
    def stock_in_table(self, stock_tk, message):
        """Return True (and bump counters) when ``stock_tk`` already exists in
        ``stocks``; False otherwise.  Opens/closes its own DB connection.
        """
        logging.info(f"Finding stock {stock_tk} in tab")
        dbdir = path.join(getcwd(), 'data')
        if not path.exists(dbdir):
            makedirs(dbdir)
        dbfile = path.join(dbdir, self.db_name)
        db = sqlite3.connect(dbfile)
        c = db.cursor()
        c.execute('''CREATE TABLE IF NOT EXISTS stocks (
            ticker TEXT NOT NULL PRIMARY KEY,
            name TEXT,
            count INTEGER,
            call_user TEXT,
            call_price REAL,
            call_time TEXT
            )''')
        c.execute("SELECT * FROM stocks WHERE ticker = ?", (stock_tk,))
        rows = c.fetchall()
        if rows:
            c.execute("UPDATE stocks SET count = count + 1 WHERE ticker = ?", (stock_tk,))
            self.update_stock_table(stock_tk, message, c)
            db.commit()
            db.close()
            return True
        else:
            db.close()
            return False
    def get_stocks(self, message):
        """Return ticker candidates found in ``message['content']``.

        Candidates are spaCy ORG entities plus any run of 2+ capital letters;
        each surviving candidate has been validated by process_org().
        """
        string_value = message['content']
        self.doc = self.nlp(string_value)
        stock_list = [x.text for x in self.doc.ents if x.label_ == "ORG"]
        stock_list += re.findall("[A-Z]{2,}", string_value)
        # Deduplicate before hitting the database / yfinance.
        stock_list = set(stock_list)
        stock_string = []
        for stock in stock_list:
            processed_stock = self.process_org(stock, message)
            if processed_stock:
                stock_string.append(processed_stock)
        return stock_string
    def process_org(self, stock, message):#for processing the org into a ticker
        """Normalize a candidate and return it when it looks like a ticker
        (2-4 letters, resolvable); otherwise fall through and return None.
        """
        stock =stock.strip()
        # Keep only alphabetic runs, e.g. "$TSLA!" -> "TSLA".
        stock = " ".join(re.findall("[a-zA-Z]+", stock))
        if (len(stock) > 4) or (len(stock) < 2):
            #print(f"Failed: {stock}")
            pass
        else:
            try:
                if self.stock_in_table(stock, message):
                    logging.info(f"{stock} already in table")
                    return stock
                # Unknown tickers presumably surface as KeyError from
                # insert_stock's .info['longName'] lookup -- handled below.
                tk = yf.Ticker(stock)
                #t = threading.Thread()
                self.insert_stock(stock, tk, message)
                return stock
            except KeyError:
                logging.info(f"Yahoo cant find {stock}")
            except Exception as e:
                logging.info(f'Weird stock bugg {stock}')
                #this means either it contains the $ or its not a stock we are looking for
if __name__ == "__main__":
pass
| [
"[email protected]"
] | |
1e3299112d0d4a422e71d7d55d2a4869b4e74dc6 | 917e376668f325c0452fe05fcf3f6348a6ac4336 | /tests/xla_interpreter_test.py | d3b758aa0cb0e09d3959f3ad74c8e0192d75cc0a | [
"Apache-2.0"
] | permissive | wusixer/jax | 5f8d78a89679db74d0d62806725cc820246d4b4e | 66de981e1dfbe04a41b2c003f171fea7bb92585f | refs/heads/main | 2023-06-15T09:10:45.599555 | 2021-07-06T01:58:11 | 2021-07-06T01:58:11 | 383,305,925 | 0 | 0 | NOASSERTION | 2021-07-06T01:32:55 | 2021-07-06T01:32:55 | null | UTF-8 | Python | false | false | 1,252 | py | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from absl.testing import absltest
from jax import test_util as jtu
from jax._src import api
from jax.interpreters import xla
class XlaInterpreterTest(jtu.JaxTestCase):
    """Tests for the XLA interpreter's jit-argument pruning."""
    @unittest.skipIf(not xla._ALLOW_ARG_PRUNING, "Test requires jaxlib 0.1.66")
    def test_prune_jit_args(self):
        """Unused jit arguments should be pruned down to the one that is used."""
        def f(*args):
            return args[0]
        closed_jaxpr = api.make_jaxpr(f)(*range(10))
        pruned_jaxpr, kept_const_idx, kept_var_idx = xla._prune_unused_inputs(
            closed_jaxpr.jaxpr)
        # Only args[0] is live, so exactly one invar survives and no consts.
        assert len(pruned_jaxpr.invars) == 1
        assert kept_const_idx == set()
        assert kept_var_idx == {0}
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| [
"[email protected]"
] | |
6f47ad10c4d8add20d063805aae912c0a742a686 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/bd_-11162/sdB_bd_-11162_lc.py | b076fadab2c00cc4cf60f263d06b3a4df11907ba | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | from gPhoton.gAperture import gAperture
def main():
    # Pure I/O: query GALEX NUV photometry around the given sky position and
    # write a 30 s-cadence light curve CSV for sdB star BD -11 162.
    gAperture(band="NUV", skypos=[13.062792,-10.662778], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_bd_-11162/sdB_bd_-11162_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
    main()
| [
"[email protected]"
] | |
90040b20dd66779595a0aa99340b7c09f6b8acaf | 235da299e64f217d5fc581bbe20d2020b989ea45 | /server/api/mongo/mongo.py | 5c31178c466ab317e207089a97118722ec687a55 | [] | no_license | DAWZayas-Projects/CORTINA-GUDE-JACOBO | 5f902c225e3bf78ba74758abe1c92168bca5b561 | 10c9cfd10fbbdade3a2c048c985aa289b1aa826f | refs/heads/master | 2021-01-12T01:40:13.236300 | 2017-06-19T19:38:36 | 2017-06-19T19:38:36 | 78,414,986 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | from pymongo import MongoClient
# Module-level connection setup: exposes `client` and `db` for importers.
try:
    client = MongoClient()
    db = client.AIPowerDB
    print("Connected successfully!!!")
except Exception as ex:
    # NOTE: MongoClient() connects lazily, so this branch rarely fires here;
    # real connection errors usually surface on the first query instead.
    print("Could not connect to MongoDB: %s" % ex)
# (removed a stray bare `client` expression that was a no-op statement)
| [
"[email protected]"
] | |
d67eb25ac2e4909f2eeb6a8ba705f13acca87cad | 9ce6a0eaba9f82d536ca4348a1594f90f5d67638 | /zException.py | 33d0d6ef5a48bf8d56b71c83d909615745a662ae | [] | no_license | Jeevankv/LearnPython | 028d57ac7b6b68d129e9769541ae509df8ef204d | 504b96795b6ccd107f0b176adc142246f9a26094 | refs/heads/master | 2022-12-21T15:53:27.669206 | 2020-09-01T10:36:38 | 2020-09-01T10:36:38 | 279,399,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | #Exception Handling
a=int(input("Enter A\n"))
b=int(input("Enter B\n"))
def Divide(a, b):
    """Return a / b; on any failure, print the exception and return None."""
    try:
        return a / b
    except Exception as err:
        print(err)
        return None
x = Divide(a, b)
print(x)
| [
"[email protected]"
] | |
8432bf99fe707d484f269c98c4d0077f351f625e | e931efb116e93cc2b92b472a7cf37bbf7be0598f | /DB_project/manage.py | 16f16eb5e9813dc12720e594bb6ee70b08668355 | [] | no_license | ghazalb76/DB_project | f4a2dd38c5e2be4a1ac5335ca468f0f541b08f88 | ac1edafffcc40f2779cbd0180534a22dc586d4f8 | refs/heads/master | 2021-01-04T20:23:18.186963 | 2020-02-15T16:25:09 | 2020-02-15T16:25:09 | 240,747,130 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Django's command-line entry point: point at the settings module, then dispatch."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DB_project.settings')
    try:
        # Imported lazily so the friendly error below fires when Django is absent.
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Hand the raw argv (e.g. ``runserver``, ``migrate``) to Django.
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
cfc2f2d584cba1ecced6cada0df5775d6baf7951 | e8b23cacc1856e87319491200c2b24778fcd6699 | /Blackjack/blackjack.py | 1cfeddf18158a678cc2bc60cf68bcf08acc53d22 | [] | no_license | ACNoonan/PythonMasterclass | 69fa204fea1588458853172483b2b85075ac9829 | 479cfb4c1bc55e4c3480b214fa4028e0f1ed9f2a | refs/heads/master | 2022-12-13T04:28:57.573020 | 2020-09-19T17:23:47 | 2020-09-19T17:23:47 | 263,746,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,830 | py | import random
try:
import tkinter
except ImportError: # python 2
import Tkinter as tkinter
player_hit_count = 0
def load_images(card_images):
    """Populate *card_images* with (value, PhotoImage) tuples for a 52-card deck.

    Face cards are stored with value 10; aces are stored as 1 (scoring
    promotes one ace to 11 when it fits).  Mutates the list in place.
    """
    suits=['heart', 'club', 'diamond', 'spade']
    face_cards = ['jack', 'queen', 'king']
    # Tk 8.6+ can load PNG natively; older Tk needs PPM images.
    if tkinter.TkVersion >= 8.6:
        extension = 'png'
    else:
        extension = 'ppm'
    # for each suit, retrieve the image for the cards
    for suit in suits:
        # first the number cards 1 to 10
        for card in range(1, 11):
            name = 'cards/{}_{}.{}'.format(str(card), suit, extension)
            image = tkinter.PhotoImage(file=name)
            card_images.append((card, image,))
        # then face cards, all worth 10
        for card in face_cards:
            name = 'cards/{}_{}.{}'.format(str(card), suit, extension)
            image = tkinter.PhotoImage(file=name)
            card_images.append((10, image,))
def _deal_card(frame):
    """Deal the top card of the global deck into *frame*; return the card tuple.

    The dealt card is cycled to the back of the deck so the deck never empties.
    Side effect: packs a new tkinter Label showing the card image.
    """
    # pop the next cards off the top of the deck
    next_card = deck.pop(0)
    # and add it to the back of the pack
    deck.append(next_card)
    # add the image to a Label and display the label
    tkinter.Label(frame, image=next_card[1], relief='raised').pack(side='left')
    # return the (value, image) card tuple
    return next_card
def score_hand(hand):
    """Score a blackjack hand; the first ace counts as 11 unless that busts."""
    total = 0
    have_soft_ace = False
    for value, *_ in hand:
        # Promote the first ace to 11; any further aces stay worth 1.
        if value == 1 and not have_soft_ace:
            value = 11
            have_soft_ace = True
        total += value
        # Demote the soft ace from 11 back to 1 the moment the hand busts.
        if total > 21 and have_soft_ace:
            total -= 10
            have_soft_ace = False
    return total
def deal_dealer():
    """Play out the dealer's hand (hit until 17+), then settle the round.

    Updates the score/record labels and the result banner.  A busted player
    loses regardless of the dealer's final score.
    """
    global dealer_record
    global player_record
    dealer_score = score_hand(dealer_hand)
    # Dealer must hit on 16 or less (standard house rule).
    while 0 < dealer_score < 17:
        dealer_hand.append(_deal_card(dealer_card_frame))
        dealer_score = score_hand(dealer_hand)
    dealer_score_label.set(dealer_score)
    player_score = score_hand(player_hand)
    if player_score > 21:
        result_text.set('Dealer Wins!')
        dealer_record += 1
        dealer_record_label.set(dealer_record)
    elif dealer_score > 21 or dealer_score < player_score:
        result_text.set('Player Wins!')
        player_record += 1
        player_record_label.set(player_record)
    elif dealer_score > player_score:
        result_text.set('Dealer Wins!')
        dealer_record += 1
        dealer_record_label.set(dealer_record)
    else:
        result_text.set('Draw!')
def deal_player():
    """Deal one card to the player and update score, records and result text.

    A natural blackjack is a score of 21 on exactly the first two cards.
    Busting immediately awards the round to the dealer.
    """
    global player_record
    global dealer_record
    global player_hit_count
    player_hand.append(_deal_card(player_card_frame))
    player_score = score_hand(player_hand)
    # BUGFIX: count one card per deal (was `+= 2`, which made the two-card
    # blackjack check `player_hit_count == 2` below unreachable).
    player_hit_count += 1
    player_score_label.set(player_score)
    if player_score == 21 and player_hit_count == 2:
        result_text.set('Blackjack!')
        player_record += 1
        player_record_label.set(player_record)
    elif player_score > 21:
        result_text.set('Dealer Wins!')
        dealer_record += 1
        dealer_record_label.set(dealer_record)
def initial_deal():
    """Opening deal: two cards to the player, one face-up card to the dealer."""
    deal_player()
    dealer_hand.append(_deal_card(dealer_card_frame))
    dealer_score_label.set(score_hand(dealer_hand))
    deal_player()
def new_game():
    """Reset the table for a fresh round: new shuffled deck, empty hands.

    Destroys and recreates the card frames so the old card images disappear,
    clears the result banner, and deals the opening hands.
    """
    global dealer_card_frame
    global player_card_frame
    global dealer_hand
    global player_hand
    global player_hit_count
    # BUGFIX: `deck` must be declared global here; previously this freshly
    # shuffled deck was a discarded local and the old deck kept being used.
    global deck
    # Create a new deck of cards and shuffle 'em
    deck = list(cards)
    random.shuffle(deck)
    # embedded frame to hold the card images
    dealer_card_frame.destroy()
    dealer_card_frame = tkinter.Frame(card_frame, background='green')
    dealer_card_frame.grid(row=0, column=1, sticky='ew', rowspan=2)
    player_card_frame.destroy()
    player_card_frame = tkinter.Frame(card_frame, background='green')
    player_card_frame.grid(row=2, column=1, sticky='ew', rowspan=2)
    result_text.set('')
    # BUGFIX: restart the per-round hit counter so a later two-card 21 can
    # still be recognised as blackjack.
    player_hit_count = 0
    # Create the lists to store the dealer's & player's hands
    dealer_hand = []
    player_hand = []
    initial_deal()
def play():
    """Deal the opening hands and start the tkinter main loop (blocks)."""
    initial_deal()
    mainWindow.mainloop()
# Instantiate screen and frames for the dealer and player
mainWindow = tkinter.Tk()
mainWindow.title('Black Jack')
mainWindow.geometry('640x480')
mainWindow.configure(background='green')
result_text = tkinter.StringVar()
result = tkinter.Label(mainWindow, textvariable=result_text)
result.grid(row=0, column=0, columnspan=3)
card_frame = tkinter.Frame(mainWindow, relief='sunken', borderwidth=1, background='green')
card_frame.grid(row=1, column=0, sticky='ew', columnspan=3, rowspan=2)
dealer_score_label = tkinter.IntVar()
tkinter.Label(card_frame, text='Dealer', background='green', fg='white').grid(row=0, column=0)
tkinter.Label(card_frame, textvariable=dealer_score_label, background='green', fg='white').grid(row=1, column=0)
# embedded frame holds the dealer's card images
dealer_card_frame = tkinter.Frame(card_frame, background='green')
dealer_card_frame.grid(row=0, column=1, sticky='ew', rowspan=2)
player_score_label = tkinter.IntVar()
tkinter.Label(card_frame, text='Player', background='green', fg='white').grid(row=2, column=0)
tkinter.Label(card_frame, textvariable=player_score_label, background='green', fg='white').grid(row=3, column=0)
# embedded frame holds the player's card images
player_card_frame = tkinter.Frame(card_frame, background='green')
player_card_frame.grid(row=2, column=1, sticky='ew', rowspan=2)
dealer_record_label = tkinter.IntVar()
tkinter.Label(card_frame, text='Dealer Wins', background='green', fg='white').grid(row=0, column=4)
tkinter.Label(card_frame, textvariable=dealer_record_label, background='green', fg='white').grid(row=1, column=4)
player_record_label = tkinter.IntVar()
tkinter.Label(card_frame, text='Player Wins', background='green', fg='white').grid(row=2, column=4)
tkinter.Label(card_frame, textvariable=player_record_label, background='green', fg='white').grid(row=3, column=4)
button_frame = tkinter.Frame(mainWindow)
button_frame.grid(row=3, column=0, columnspan=2, sticky='w')
dealer_button = tkinter.Button(button_frame, text='Dealer', command=deal_dealer)
dealer_button.grid(row=0, column=0)
player_button = tkinter.Button(button_frame, text='Player', command=deal_player)
player_button.grid(row=0, column=1)
new_game_button = tkinter.Button(button_frame, text='New Game', command=new_game)
new_game_button.grid(row=0, column=2)
# load cards
cards = []
load_images(cards)
print(cards)
# Create a new deck of cards and shuffle 'em
deck = list(cards) + list(cards) + list(cards)
random.shuffle(deck)
# Create the lists to store the dealer's & player's hands
dealer_hand = []
player_hand = []
# Create win record
dealer_record = 0
player_record = 0
if __name__ == '__main__':
play()
| [
"[email protected]"
] | |
2324633dffd51ed394a8b21d1ac6260a2f93d21b | 598caea4a632c7fbf1598a341b192fe19329f5cc | /bi-gram/test-new.py | e6b18bf534aea2ca91c274d2e78d16ef4e51f2d9 | [] | no_license | vijaym123/EECS-337-TweetAnalysis | c8917f6ab88413e9b9b67885e9d6d2db6b68cc09 | 3cba4c16b56832f2a91e81cc022c11ae3002622e | refs/heads/master | 2020-05-22T14:34:28.410722 | 2014-02-25T19:10:13 | 2014-02-25T19:10:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,389 | py | import re
import json
import nltk
from nltk.corpus import wordnet as wn
from collections import defaultdict
import math
from lxml import html
import requests
import sys
import unicodedata
year = raw_input("Golden Globes year: ")
pagename = "http://www.goldenglobes.com/awards/" + year
print "\n"
page = requests.get(pagename)
tree = html.fromstring(page.text)
winners = tree.xpath('//div[@class="views-field views-field-nominee-name gold"]/text()')
noms = tree.xpath('//div[@class="views-field views-field-nominee-name grey"]/text()')
nominees = []
for z in winners:
nominees.append(z.replace("-"," "))
for z in noms:
z = z.replace(u"\xe9","e")
nominees.append(z)
def levenshtein(s1, s2):
    """Edit distance between s1 and s2 (insertions, deletions, substitutions)."""
    # Keep the shorter string as the inner (column) dimension.
    if len(s1) < len(s2):
        s1, s2 = s2, s1
    if not s2:
        return len(s1)
    prev = list(range(len(s2) + 1))
    for row, ch1 in enumerate(s1, 1):
        curr = [row]
        for col, ch2 in enumerate(s2, 1):
            cost_sub = prev[col - 1] + (ch1 != ch2)
            cost_ins = prev[col] + 1
            cost_del = curr[col - 1] + 1
            curr.append(min(cost_sub, cost_ins, cost_del))
        prev = curr
    return prev[-1]
def issubstr(substr, mystr, start_index=0):
    """Return True if *substr* occurs in *mystr* as a subsequence.

    Characters of *substr* must appear in *mystr* in order (not necessarily
    contiguously), with the search starting at *start_index*.
    """
    try:
        for letter in substr:
            # str.index raises ValueError when the character is not found.
            start_index = mystr.index(letter, start_index) + 1
        return True
    except ValueError:
        # BUGFIX: catch only the expected "not found" error instead of a bare
        # except, which also swallowed programming errors (e.g. TypeError).
        return False
def bigrams(words):
    """Yield consecutive word pairs, skipping any pair whose second word is a
    'golden'/'globe(s)' token referring to the event itself."""
    banned = ("golden", "Golden", "globe", "Globe",
              "globes", "Globes", "goldenglobes")
    previous = None
    for current in words:
        if previous is not None and current not in banned:
            yield (previous, current)
        previous = current
def getFreqDistribution(filename,freqDict):
    """Re-weight *freqDict* counts with a TF-IDF-like score.

    Reads one JSON-encoded token list per line from *filename* to build
    per-word document frequencies, then replaces each count in *freqDict*
    in place.  Returns the mutated *freqDict*.
    """
    # NOTE(review): the file handle is never closed; harmless in a one-shot
    # script, but `with open(...)` would be cleaner.
    f = open(filename,'r')
    base = defaultdict(int)
    count = 0
    for line in f:
        count += 1
        tweet = json.loads(line)
        for word in tweet:
            base[word]+=1
    #print count
    for word in freqDict:
        #print len(freqDict.keys())-1/base[word]
        # NOTE(review): operator precedence makes this `len(...) - (1/base[word])`,
        # not `(len(...) - 1)/base[word]` as an IDF formula would suggest — confirm.
        freqDict[word]=math.log10(freqDict[word])*1.0*math.log10(len(freqDict.keys())-1/base[word])
    return freqDict
def buildHistogram(filename,tags):
    """Count bigram frequencies over tweets that match every tag.

    Each line of *filename* is a JSON list of tokens.  A tweet qualifies when,
    for every tag, some token contains that tag as a substring.  Returns a
    dict mapping "word1 word2" -> count.
    """
    f = open(filename,'r')
    freqDict = defaultdict(int)
    #print filename,tags
    #porter = nltk.PorterStemmer()
    for line in f:
        tweet = json.loads(line)
        # require each tag to be a substring of at least one token
        if len(tweet)>=2 and all(any(e in word for word in tweet) for e in tags):
            #print tweet
            bigramList = bigrams(tweet)
            for i in bigramList:
                freqDict[i[0]+" "+i[1]] += 1
    return freqDict
def getAnswer(filename, tags):
    """Return (count, "w1 w2") pairs for matching tweets, most frequent first."""
    histogram = buildHistogram(filename, tags)
    pairs = [(count, bigram) for bigram, count in histogram.items()]
    pairs.sort(reverse=True)
    return pairs
def filterResults(result, tags):
    """Drop bigram entries whose first or second word is itself a tag."""
    def is_tag_pair(bigram):
        words = bigram.split(" ")
        # `or` short-circuits, mirroring the original evaluation order.
        return words[0] in tags or words[1] in tags
    return [entry for entry in result if not is_tag_pair(entry[1])]
def getName(theWinner):
    """Map a raw bigram guess to the closest official nominee name.

    Tries the full guess as a case-insensitive subsequence of each nominee
    (ranked by Levenshtein distance), then falls back to matching each word
    of the guess separately.  Returns the raw guess when nothing matches.
    Reads the module-level `nominees` list scraped at import time.
    """
    minimum = 99
    best = None
    for i in nominees:
        if issubstr(theWinner.lower(), i.lower()):
            temp = levenshtein(i.lower(),theWinner.lower())
            if temp < minimum:
                best = i
                minimum = temp
    if best == None:
        # No nominee matched the whole guess: try the first word alone...
        firstWinner = theWinner.split()[0]
        for i in nominees:
            if issubstr(firstWinner.lower(), i.lower()):
                temp = levenshtein(i.lower(),firstWinner.lower())
                if temp < minimum:
                    best = i
                    minimum = temp
        # ...then the second word.  NOTE(review): raises IndexError when the
        # guess is a single word — assumes bigram input; confirm with callers.
        firstWinner = theWinner.split()[1]
        for i in nominees:
            if issubstr(firstWinner.lower(), i.lower()):
                temp = levenshtein(i.lower(),firstWinner.lower())
                if temp < minimum:
                    best = i
                    minimum = temp
    if best == None:
        return theWinner
    return best
def guessWinner(results):
    """Top-ranked bigram mapped to a nominee name: the predicted winner."""
    return getName(results[0][1].encode('ascii', 'ignore'))
def guessSecond(results):
    """Map the 4th-ranked bigram to a nominee name (used e.g. for a co-host)."""
    return getName(results[3][1].encode('ascii', 'ignore'))
def guessNoms(results):
    """Print nominee guesses from ranks 2-5 of *results*; always returns None."""
    print "Nominees might be: "
    print getName(results[1][1].encode('ascii', 'ignore'))
    print getName(results[2][1].encode('ascii', 'ignore'))
    print getName(results[3][1].encode('ascii', 'ignore'))
    print getName(results[4][1].encode('ascii', 'ignore'))
    return None
if __name__ == "__main__":
filename = "goldenglobes-processedTweets.json"
question = "best picture drama"
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:20],tags)
print "\nBest Picture - Drama: "
temp = guessWinner(results)
print temp
print guessNoms(results)
question = "present best picture drama " + temp.lower()
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:20],tags)
print "\nPresenter for Best Picture - Drama: "
print results[0][1].encode('ascii', 'ignore')
question = "best actor drama"
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:20],tags)
print "\nBest Actor - Drama: "
temp = guessWinner(results)
print temp
print guessNoms(results)
question = "presents present best actor"
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:20],tags)
print "\nPresenter for Best Actor - Drama: "
print guessWinner(results)
question = "best actress drama"
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:20],tags)
print "\nBest Actress - Drama: "
temp = guessWinner(results)
print temp
print guessNoms(results)
question = "presents present " + temp.lower()
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:20],tags)
print "\nPresenter for Best Actress - Drama: "
print guessWinner(results)
# Fix
# No presenter
question = "best picture comedy musical"
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:20],tags)
print "\nBest Picture - Comedy or Musical: "
print guessWinner(results)
print guessNoms(results)
question = "best actor comedy"
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:20],tags)
print "\nBest Actor - Comedy or Musical: "
temp = guessWinner(results)
print temp
print guessNoms(results)
question = "presents presenter best actress comedy musical"
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:20],tags)
print "\nPresenter for Best Actor - Comedy or Musical: "
print results[0][1].encode('ascii', 'ignore')
question = "best actress comedy"
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:20],tags)
print "\nBest Actress - Comedy or Musical: "
print guessWinner(results)
print guessNoms(results)
question = "presents presenter best actress comedy musical"
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:20],tags)
print "\nPresenter for Best Actress - Comedy or Musical: "
print guessWinner(results)
question = "best supporting actor"
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:20],tags)
print "\nBest Supporting Actor: "
temp = guessWinner(results)
print temp
print guessNoms(results)
question = "presents presenter best supporting actor"
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:20],tags)
print "\nPresenter for Best Supporting Actor: "
print guessWinner(results)
# No presenter
question = "best supporting actress"
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:20],tags)
print "\nBest Supporting Actress: "
temp = guessWinner(results)
print temp
print guessNoms(results)
question = "best director"
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:20],tags)
print "\nBest Director: "
print guessWinner(results)
print guessNoms(results)
question = "presents presenter best director"
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:20],tags)
print "\nPresenter for Best Director: "
print guessWinner(results)
question = "best screenplay"
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:20],tags)
print "\nBest Screenplay: "
print guessWinner(results)
print guessNoms(results)
question = "presents present screenplay"
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:20],tags)
print "\nPresenter for Best Screenplay: "
print guessWinner(results)
# No presenter
question = "best original song"
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:20],tags)
print "\nBest Original Song: "
temp = guessWinner(results)
print temp
print guessNoms(results)
# No presenter
question = "best actor television series drama"
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:20],tags)
print "\nBest Actor in a Television Series - Drama: "
temp = guessWinner(results)
print temp
print guessNoms(results)
# No presenter
question = "best actress television series drama"
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:20],tags)
print "\nBest Actress in a Television Series - Drama: "
print guessWinner(results)
print guessNoms(results)
# No presenter
question = "best actor television series comedy"
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:20],tags)
print "\nBest Actor in a Television Series - Comedy: "
print guessWinner(results)
print guessNoms(results)
# No presenter
question = "best actress performance comedy series"
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:20],tags)
print "\nBest Actress in a Television Series - Comedy: "
print guessWinner(results)
print guessNoms(results)
# No presenter
question = "best actor miniseries"
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:20],tags)
print "\nBest Actor - Miniseries or Television Film: "
temp = guessWinner(results)
print temp
print guessNoms(results)
# No presenter
question = "best actress miniseries motion picture television"
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:20],tags)
print "\nBest Actress - Miniseries or Television Film: "
temp = guessWinner(results)
print temp
print guessNoms(results)
# No presenter
question = "best supporting actor miniseries motion picture"
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:20],tags)
print "\nBest Supporting Actor - Miniseries or Television Film: "
temp = guessWinner(results)
print temp
print guessNoms(results)
# No presenter
question = "best supporting actress miniseries"
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:20],tags)
print "\nBest Supporting Actress - Miniseries or Television Film: "
temp = guessWinner(results)
print temp
print guessNoms(results)
question = "host hosts"
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:40],tags)
print "\nHosts: "
print guessWinner(results)
print guessSecond(results)
question = "best dress"
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:40],tags)
print "\nBest Dress: "
print results[0][1].encode('ascii', 'ignore')
question = "speech"
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:40],tags)
print "\nNoteworthy Speech: "
print guessWinner(results)
question = "awesome"
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:40],tags)
print "\nPeople Thought He/She Was Awesome: "
print results[0][1].encode('ascii', 'ignore')
question = "hated"
porter = nltk.PorterStemmer()
tags = [porter.stem(i) for i in question.split()]
results = filterResults(getAnswer(filename,tags)[:40],tags)
print "\nPeople Hated: "
print guessWinner(results)
| [
"[email protected]"
] | |
9df31fb3505037651d94f77ee635edd5db359530 | 710c6104124e57f778701291d573e746bbc559f3 | /teambuildingapp/env/bin/virtualenv | a574068755fad926eb2cb1903956c89cd0f2fa64 | [] | no_license | ckjoon/teambuilding | 711791952828529ac4c5a44d23a883ae66680836 | ee49e95e1cd56f4e7d8efbb46ab1c38b200c08a6 | refs/heads/master | 2020-04-09T13:33:33.205704 | 2016-12-04T20:20:25 | 2016-12-04T20:20:25 | 68,007,148 | 2 | 0 | null | 2016-12-04T20:20:25 | 2016-09-12T12:31:40 | HTML | UTF-8 | Python | false | false | 255 | #!/Users/JChoi/teambuilding/teambuildingapp/env/bin/python3.4
# -*- coding: utf-8 -*-
import re
import sys
from virtualenv import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
7f574f502fc6e92a306eb7d28de67fe32e7f5549 | 3e3038c14e081bb110a702bcd385b54cd41a6261 | /879/879.py | f9e5434cbee5596262fbd6fb2addf7800dec5024 | [] | no_license | michael153/leetcode | 0f1535ae511f74fd60f479ed644c677de7ece1cb | 4be3b0d6892028f6abff1e0ec9c05e0b1e721c0b | refs/heads/master | 2020-07-04T09:09:12.192768 | 2019-08-13T23:35:57 | 2019-08-13T23:35:57 | 202,235,595 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 878 | py | class Solution(object):
def profitableSchemes(self, G, P, group, profit):
"""
:type G: int
:type P: int
:type group: List[int]
:type profit: List[int]
:rtype: int
"""
mod = (10**9 + 7)
dp = [[0 for __ in range(G + 1)] for ___ in range(P+1)]
dp[0][0] = 1
for c in range(len(group)):
freeze = [r[:] for r in dp]
reqppl = group[c]
prof = profit[c]
for k in range(P + 1):
for p in range(G - reqppl, -1, -1):
b = min(k + prof, P)
freeze[b][p + reqppl] += (dp[k][p] % mod)
freeze[b][p + reqppl] %= mod
dp = freeze
ans = 0
for p in range(G + 1):
ans += dp[P][p]
ans %= mod
return ans
| [
"[email protected]"
] | |
304cf36358cb78ff18c9f2fa5b28c3e25c49d023 | 8b3426e770056ae7d3d946dc2dc324bbaab4391c | /collatz.py | 0b9ac12199daace07475c0a719d631f4fefff323 | [] | no_license | ctrl101/journey | 9743896ca64cf2a2913823ffab4213b4e1988b5e | 893f8ce08005c5e89f4b5ccba870f8b726ae21f8 | refs/heads/master | 2023-01-13T15:08:14.346450 | 2020-11-17T01:12:54 | 2020-11-17T01:12:54 | 313,557,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | def collatz(number):
if number % 2 == 0:
print(number // 2)
elif number % 2 == 1:
print(number += 1)
try:
number = int(input("Enter number :"))
while number > 1:
collatz(number)
number = number // 2
except:
print("must be an interger")
| [
"[email protected]"
] | |
71626b5ee0ccfe0ba075fb9842f193a99eeca3fa | 030b801cb9bed35be80bb305d60c155f806513a5 | /regex/regex.py | ef3129779cd88011001158299266d448c4056e85 | [] | no_license | Yerkonite/py4e | 67dea6f2843266cf789cc05ae1c9ed7f96614fae | dfaf2a8c4eaf0575762a810b79bb332128bbdef9 | refs/heads/main | 2023-01-03T17:29:40.070949 | 2020-11-02T11:29:22 | 2020-11-02T11:29:22 | 306,399,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | # Search for lines that start with 'X' followed by any non
# whitespace characters and ':'
# followed by a space and any number.
# The number can include a decimal.
import re
fname = input("Enter file:")
hand = open(fname)
y = []
for line in hand:
line = line.rstrip()
x = re.findall('[0-9]+', line)
if len(x) > 0:
y = y + x
jiyn = 0
for z in y:
jiyn = jiyn + int(z)
print(jiyn) | [
"[email protected]"
] | |
ea844bd080f216727d29578c7e83e37d9e5000c6 | e949a970f404a501d54ec0ea5cce6a9d8b0abc3e | /10chapter13某城市财政收入影响因素分析及预测模型(未完成)/code/chapter13Adaptive_Lasso变量选择模型.py | 6df38f606feb93dbacdf4287cdd433851b38653b | [] | no_license | xuhande/Data-analysis-and-mine-using-python | 3b20a25b9fc2d88e1e29d71b38a9a7589a92ff12 | 34a97afdc60a52d4ef72c5edf6f715182ddb57f7 | refs/heads/master | 2023-09-06T06:52:05.505839 | 2019-03-31T08:32:04 | 2019-03-31T08:32:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,106 | py | """模型参数
x1:社会从业人数; x2: 在岗职工工资总额;x3: 社会消费品零售总额; x4: 城镇居民人均可支配收入
x5: 城镇居民人均消费性支出;x6: 年末总人口;x7: 全社会固定资产投资额; x8: 地区生产总值
x9: 第一产业产值; x10: 税收; x11: 居民消费价格指数;x12: 第三产业与第二产业产值比
x13:居民消费水平;y: 财政收入
"""
import pandas as pd
from sklearn.linear_model import Lasso
import numpy as np
data = pd.read_csv('../../data2/C13_data1.csv')
# print(data)
model = Lasso(alpha=10, max_iter=10000)
model.fit(data.iloc[:, :13], data['y'])
print(model.coef_)
# [-1.85085555e-04 -3.15519378e-01 4.32896206e-01 -3.15753523e-02
# 7.58007814e-02 4.03145358e-04 2.41255896e-01 -3.70482514e-02
# -2.55448330e+00 4.41363280e-01 5.69277642e+00 -0.00000000e+00
# -3.98946837e-02]
result = pd.DataFrame({'特征': data.columns[:13], '系数': model.coef_})
result = result.set_index('特征')
result = result.T
print(result)
# result.to_excel('../outputfiles/Apaptive_lasso变量选择模型系数表.xls')
| [
"[email protected]"
] | |
4c1f5b0e0a57e6c3f61c17a248587224aafe2656 | 9910a1e38dc02abd0e0684b83601c950598aa179 | /Bookmarks/models.py | c2045fe8ca931b508736934afbd17c660fd23bfc | [] | no_license | crispad/Bookmark-app | 2197b8bc25b927f02a7c61512203759e82e7e34b | 4430240799a0b67d7f196cf256b2a2a1676d9829 | refs/heads/master | 2020-03-21T14:18:07.095404 | 2018-06-27T07:33:36 | 2018-06-27T07:33:36 | 138,650,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | from django.db import models
from uuid import uuid4
from django.contrib.auth.models import User
class Bookmark(models.Model):
    """A saved URL with title, timestamps and a free-form category."""
    # UUID primary key generated server-side; hidden from forms/admin.
    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
    title = models.CharField(max_length=200)
    # unique=True: the same URL cannot be bookmarked twice (table-wide).
    url = models.URLField(unique=True)
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    last_modified = models.DateTimeField(auto_now=True)  # refreshed on each save
    category = models.CharField(max_length=20)
class PersonalBookmark(Bookmark):
    """A Bookmark owned by a user (Django multi-table inheritance of Bookmark)."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)  # removed with its user
| [
"[email protected]"
] | |
84d255ed685f170cc51fdbac351069866c092930 | 64740d39a04f00730ad078280ef30707bcba2129 | /blog/migrations/0004_auto_20170210_1408.py | efc9de3c452c324f5d9cbcebedd10da9b56e16be | [
"MIT"
] | permissive | sighill/blog | 1f4b4985146a7cf97ab186d23d8f14bd596e2c39 | 7ab126549398daf9dbf996a2ee9f96d7f3b44868 | refs/heads/master | 2021-01-13T17:12:00.540574 | 2017-02-20T13:18:58 | 2017-02-20T13:18:58 | 81,751,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,559 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-02-10 13:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0003_auto_20170130_1440'),
]
operations = [
migrations.CreateModel(
name='Galery',
fields=[
('uid', models.AutoField(db_index=True, primary_key=True, serialize=False)),
('title', models.CharField(max_length=100)),
('legend', models.CharField(blank=True, max_length=500, null=True)),
],
),
migrations.CreateModel(
name='Image',
fields=[
('uid', models.AutoField(db_index=True, primary_key=True, serialize=False)),
('title', models.CharField(max_length=255)),
('thumbnail', models.CharField(blank=True, max_length=500, null=True)),
('full_img', models.CharField(blank=True, max_length=500, null=True)),
('external_link', models.CharField(blank=True, max_length=500, null=True)),
('legend', models.CharField(blank=True, max_length=500, null=True)),
],
),
migrations.RenameModel(
old_name='Media',
new_name='Video',
),
migrations.AddField(
model_name='galery',
name='img_content',
field=models.ManyToManyField(blank=True, related_name='galery_content', to='blog.Image'),
),
]
| [
"[email protected]"
] | |
5d7a771e779f0b24d4bc1ae2bf01ac98e9d0c325 | 9423dd5312d6c05f61ec902a26ff627c6ef58f97 | /Python/functions/get_middle_point.py | 6d5bb3a5c5f3d271d454c9c6da74dc57df5a617c | [] | no_license | NehvedovichVlad/small_tasks | 01c093b07d521da59c559591559d61e81829df0f | 1c4085e3a2f0a4530c82f57b98f0f83b18e68567 | refs/heads/main | 2023-03-17T15:49:04.480092 | 2021-03-11T20:29:05 | 2021-03-11T20:29:05 | 308,935,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,013 | py | """"
Середина отрезка
Напишите функцию get_middle_point(x1, y1, x2, y2),
которая принимает в качестве аргументов координаты концов отрезка
(x_1; \, y_1)(x 1;y1) и (x_2; \, y_2)(x2;y2)
и возвращает координаты точки являющейся серединой данного отрезка.
"""
# -------------------------------------------------------------------------------------------------
# 1)вариант
def get_middle_point(x1, y1, x2, y2):
return (x1+x2)/2, (y1+y2)/2
x_1, y_1 = int(input()), int(input())
x_2, y_2 = int(input()), int(input())
x, y = get_middle_point(x_1, y_1, x_2, y_2)
print(x, y)
# -------------------------------------------------------------------------------------------------
# 2)вариант
def get_middle_point(x1, y1, x2, y2):
return (x1 + x2) / 2, (y1 + y2) / 2
print(*get_middle_point(int(input()), int(input()), int(input()), int(input())))
| [
"[email protected]"
] | |
a092eb7078a59ea3dcfd2d364d4380b098066ba6 | c6da89d2af85263fe73cdaa74735c95deae3e951 | /not_used/mod_fair_stochastic_dominance.py | cf2721fadb06eef2fc9fb44c976063a54ae0e97e | [] | no_license | antonm94/Fairness_in_Bandits | 7489fb97f94c1b2116756ab76f1ac705687e088d | f37960efd1ef84e5d21f9c402393851f6c050305 | refs/heads/master | 2021-09-10T21:48:21.001625 | 2018-04-02T21:46:39 | 2018-04-02T21:46:39 | 106,277,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,165 | py | import random
import numpy as np
from thompson_sampling.calc_c import c_alg2
from fairness_calc import smooth_fairness
class ModFairStochasticDominance(object):
    """Thompson-sampling bandit that mixes plain TS with fair stochastic
    dominance TS, controlled by the mixing probability ``lam``.

    Each round, with probability ``lam`` the fair (stochastic-dominance)
    policy is played; otherwise plain Thompson sampling.  Per-round fairness
    statistics (smooth fairness, fairness regret) are accumulated and
    averaged over iterations in ``analyse``.
    """

    def __init__(self, bandits, T, e1, e2, delta, lam, distance):
        """
        :param bandits: Bandit problem; must expose ``k``, ``arms`` and ``theta``.
        :param T: Horizon (rounds per run).
        :param e1: List of epsilon_1 values for smooth fairness.
        :param e2: List of epsilon_2 values.
        :param delta: List of confidence parameters.
        :param lam: Probability of playing the fair policy in a round.
        :param distance: Divergence used by ``smooth_fairness``.
        """
        self.k = bandits.k
        self.arm = bandits.arms
        self.r_theta = bandits.theta
        self.T = T
        self.e1 = e1
        self.e2 = e2
        self.delta = delta
        self.lam = lam
        self.distance = distance
        # Beta-posterior pseudo-counts (successes / failures), Beta(.5, .5) prior.
        self.s = np.full(self.k, .5)
        self.f = np.full(self.k, .5)
        # Per-round fairness bookkeeping for the current run.
        self.not_smooth_fair = np.zeros(self.T)
        self.smooth_fair = np.zeros(self.T)
        self.fairness_regret = np.zeros(self.T)
        self.theta = np.zeros((self.T, self.k))
        self.n = np.zeros((self.T, self.k))
        self.pi = np.zeros((self.T, self.k))
        # Optimal selection distribution: proportional to the true means.
        self.p_star = [float(i) / sum(self.r_theta) for i in self.r_theta]
        self.rounds_exploring = 0
        self.rounds_exploiting = 0
        # Accumulators averaged over analyse() iterations.
        self.average_smooth_fair = np.zeros((len(e1), len(e2), len(delta), self.T))
        self.average_not_smooth_fair = np.zeros((len(e1), len(e2), len(delta), self.T))
        self.average_fair_ratio = np.zeros((len(e1), len(e2), len(delta), self.T))
        self.average_fairness_regret = np.zeros((len(e2), len(delta), T))
        self.regret = np.zeros((len(e2), len(delta), T))
        self.average_n = np.zeros((len(e2), len(delta), self.T, self.k))
        if lam == 0.:
            self.name = 'Thompson Sampling'
        elif lam == 1.:
            self.name = 'Fair Stochastic Dominance Thompson Sampling'
        else:
            self.name = 'Thompson Sampling - Fair Stochastic Dominance Thompson Sampling trade-off' \
                        ' with Lambda = {}'.format(self.lam)

    def reset(self):
        """Reset all per-run state (posterior counts, fairness arrays, counters)."""
        self.s = np.full(self.k, .5)
        self.f = np.full(self.k, .5)
        self.not_smooth_fair = np.zeros(self.T)
        self.smooth_fair = np.zeros(self.T)
        self.fairness_regret = np.zeros(self.T)
        self.n = np.zeros((self.T, self.k))
        self.rounds_exploring = 0
        self.rounds_exploiting = 0

    def update_smooth_fairness(self, e1, e2):
        """Recompute per-round (not-)smooth-fairness indicators for the last run."""
        for t in range(self.T):
            [self.not_smooth_fair[t], self.smooth_fair[t]] = smooth_fairness(e1, e2, self.theta[t], self.r_theta,
                                                                             self.distance)

    def update_fairness_regret(self):
        """Per-round fairness regret: total mass by which pi falls short of p_star."""
        for t in range(self.T):
            self.fairness_regret[t] = sum([max(self.p_star[i] - self.pi[t][i], 0.) for i in range(self.k)])

    def get_not_fair_ratio(self):
        """Fraction of rounds that were NOT smooth-fair (element-wise)."""
        return np.divide(self.average_not_smooth_fair, self.average_not_smooth_fair + self.average_smooth_fair)

    def get_fair_ratio(self):
        """Fraction of rounds that were smooth-fair (element-wise)."""
        return np.divide(self.average_smooth_fair, self.average_not_smooth_fair + self.average_smooth_fair)

    def get_rounds(self):
        """Return (rounds spent exploring, rounds spent exploiting)."""
        return self.rounds_exploring, self.rounds_exploiting

    def get_regret(self, n_average):
        """Cumulative reward regret from average pull counts per (e2, delta)."""
        distance_to_max = max(self.r_theta) - self.r_theta
        for j in range(len(self.e2)):
            for d in range(len(self.delta)):
                self.regret[j][d] = np.apply_along_axis(lambda x: sum(x * distance_to_max), 1, n_average[j][d])

    def run(self, e2, delta):
        """Play one run of T rounds for one (e2, delta) setting."""
        for t in range(self.T):
            # Bernoulli(lam) coin: 1 -> fair policy, 0 -> plain Thompson sampling.
            b = np.random.binomial(1, [self.lam])[0]
            if b == 1:
                # O(t)={i:n_j,i(t) <=C(e2,delta)}
                if t > 0:
                    self.n[t] = self.n[t - 1]
                o = set()
                for i in range(self.k):
                    if self.n[t, i] <= c_alg2(e2, delta, self.r_theta, i, self.k):
                        o.add(i)
                if len(o) == 0:
                    # Exploitation: sample posteriors and pick the best guess.
                    self.rounds_exploiting = self.rounds_exploiting + 1
                    self.theta[t] = np.random.beta(self.s, self.f, self.k)
                    # guessed bernoulli reward for each arm
                    guessed_r = np.random.binomial(1, self.theta[t])
                    # selected arm with random tie - breaking
                    a = np.random.choice(np.where(guessed_r == guessed_r.max())[0])
                    self.pi[t] = self.theta[t] / sum(self.theta[t])
                else:
                    # Exploration: play uniformly over the under-sampled arms.
                    self.rounds_exploring = self.rounds_exploring + 1
                    self.theta[t] = np.full(self.k, .5)
                    # FIX: np.random.choice cannot sample from a set; pass a list.
                    a = np.random.choice(list(o))
                    for i in o:
                        self.pi[t][i] = 1. / len(o)
                    # FIX: was `print pi[t]` (Py2 syntax and undefined name `pi`).
                    print(self.pi[t])
            else:
                self.theta[t] = np.random.beta(self.s, self.f, self.k)
                max_theta = np.where(self.theta[t] == self.theta[t].max())[0]
                a = np.random.choice(max_theta)
                for i in range(self.k):
                    if i in max_theta:
                        self.pi[t][i] = 1. / len(max_theta)
                    else:
                        self.pi[t][i] = 0.
            # real bernoulli reward for each arm
            reward = random.choice(self.arm[a])
            if reward:
                self.s[a] = self.s[a] + 1
            else:
                self.f[a] = self.f[a] + 1
            if t > 0:
                self.n[t] = self.n[t - 1]
            self.n[t][a] = self.n[t][a] + 1
        print('Rounds Exploring: {}'.format(self.rounds_exploring))
        print('Rounds Exploiting: {}'.format(self.rounds_exploiting))

    def analyse(self, n_iterations):
        """Run the bandit n_iterations times per setting and average the stats."""
        for it in range(int(n_iterations)):
            for j in range(len(self.e2)):
                for d in range(len(self.delta)):
                    self.run(self.e2[j], self.delta[d])
                    self.update_fairness_regret()
                    self.average_fairness_regret[j][d] = self.average_fairness_regret[j][d] + np.add.accumulate(
                        self.fairness_regret)
                    self.average_n[j][d] = self.average_n[j][d] + self.n
                    for i in range(len(self.e1)):
                        self.update_smooth_fairness(self.e1[i], self.e2[j])
                        self.average_smooth_fair[i][j][d] = self.average_smooth_fair[i][j][d] + np.add.accumulate(self.smooth_fair)
                        self.average_not_smooth_fair[i][j][d] = self.average_not_smooth_fair[i][j][d] + np.add.accumulate(self.not_smooth_fair)
                    self.reset()
        self.average_n = np.divide(self.average_n, n_iterations)
        self.get_regret(self.average_n)
        self.average_fairness_regret = np.divide(self.average_fairness_regret, n_iterations)
        self.average_smooth_fair = np.divide(self.average_smooth_fair, n_iterations)
        self.average_not_smooth_fair = np.divide(self.average_not_smooth_fair, n_iterations)
        for i in range(len(self.e1)):
            for j in range(len(self.e2)):
                self.average_fair_ratio[i][j] = np.divide(self.average_smooth_fair[i][j],
                                                          self.average_not_smooth_fair[i][j] + self.average_smooth_fair[i][j])
| [
"[email protected]"
] | |
2b2276e3dd8f08b1dbd9b020b05b911e0447b425 | ec2a196f582effd10d1f1c80a99748597371294b | /2018/cyb_notify/models/model.py | 21736203da773772afb38cd45f7f15213c62d7fc | [] | no_license | baiqianyi/starfish | 71bffd4f2718a7b50c4ff7e6d66afd18dbccff4c | 89e63a0dc06f4cb73b87fae44206cda275f12fbd | refs/heads/master | 2020-03-25T13:48:10.978468 | 2018-08-26T00:27:48 | 2018-08-26T00:27:48 | 143,843,514 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,878 | py | import datetime
import utils.email_notify as en
import pymysql
class model:
    """Intraday trading-signal model.

    Computes a smoothed "diff" signal from index inputs, emails buy/sell
    notifications when the signal crosses fixed thresholds, and logs each
    evaluation into a local MySQL ``analysis`` database.

    NOTE(review): "cyb" / "sz50" presumably refer to the ChiNext and SSE 50
    indices -- confirm against the data feed.
    """
    def __init__(self,log=None):
        # log: logger used by position(); position() calls
        # self.logger.info() unconditionally, so a real logger is required
        # in practice even though the default is None.
        self.logger = log
        self.begin_delta_time = datetime.timedelta(seconds=360)
        # Fixed decision thresholds for the smoothed diff signal.
        self.std_buy = -0.826
        self.std_sell = 0.37
        # 0.34453064057819965, -0.58066720314595333
        # Morning-session end / afternoon-session start for today's date.
        self.amEnd = datetime.datetime.combine(datetime.datetime.now().date(), datetime.time(hour=11, minute=30, second=30))
        self.pmBegin = datetime.datetime.combine(datetime.datetime.now().date(), datetime.time(hour=13, minute=0, second=0))
        self.last_report_time = None
        self.tmp_six_indst = None
        # Running extremes of the smoothed diff and when they occurred.
        self.max_diff = 0
        self.min_diff = 0
        # Sign-change counters and accumulator used by base_diff().
        self.diff_change_0_time = 0
        self.base_change_0_time = 0
        self.base_factor = 0
        self.tmp_diff = 0
        self.base = 0
        self.max_diff_time = None
        self.min_diff_time = None
        # NOTE(review): hard-coded credentials to a local MySQL instance.
        self.analysis_cursor = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='1111',
                                               db='analysis')
        self.diff_list = []
    # Position codes: 1 = the "500" index, 2 = the "50" index, 3 = no action, 0 = flat (no position).
    # 0.38850739380403643, -0.3196062508549381, 0.18273999548929595, 0.25591966139562783
    #0.3885, -0.31960, 0.18274, 0.256
    def position(self,six_indst = 0.2,cyb_real=None, sz50_real=None,cyb_b=0.73,sz50_b=0.73,cyb_industry=0.0):
        """Evaluate the signal for new input data and send notifications.

        Acts only when six_indst changed since the last call.  Emails a
        sell/buy notification when the smoothed diff crosses the fixed
        thresholds, plus a periodic status report (at most every 300s,
        only after 09:36), and always logs the status message.
        """
        if self.tmp_six_indst != six_indst:
            sz50_diff = sz50_real - six_indst + sz50_b
            self.tmp_six_indst = six_indst
            # cyb_six_industry = 0.256 * cyb_real - (1-0.183)*six_indst + cyb_b + 0.183*cyb_industry#- cyb_b2
            # 0.37, -0.826, 0.266
            cyb_diff = 0.266 * cyb_real + (- six_indst + cyb_b)# + 0.2415 * cyb_industry
            # base_cyb_diff = self.base_diff(cyb_diff)
            # Sliding window of the last 10 raw diffs, smoothed by weight_mean.
            self.diff_list.append(cyb_diff)
            if len(self.diff_list) > 10:
                del self.diff_list[0]
            cyb_diff = self.weight_mean(self.diff_list)
            self.store_analysis(cyb_real, cyb_industry, 0, six_indst, cyb_b)
            self.store_m(cyb_diff)
            sell_notify = False
            buy_notify = False
            msg = ""
            if cyb_diff > self.std_sell:
                sell_notify = True
            if cyb_diff < self.std_buy:
                buy_notify = True
            if sell_notify:
                msg = " <font color=\"red\">sell_point_time : "+datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')+"</font>"
                en.email_notify(title="sell notify", msg=msg)
                import time
                # Throttle: block 30s so repeated crossings don't spam email.
                time.sleep(30)
            if buy_notify:
                msg = " <font size=\"5\" face=\"verdana\" color=\"red\">buy_point_time : " + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + "</font>"
                en.email_notify(title="buy notify", msg=msg)
                import time
                time.sleep(30)
            # Status body with every intermediate value, for debugging.
            msg = msg + "vmware <br> cyb_diff : " + str(cyb_diff) + "<br> cyb_min_diff : " + str(self.min_diff) +" , cyb_min_diff_time : " + str(self.min_diff_time) +"<br> cyb_max_diff : " + str(self.max_diff) +" , cyb_max_diff_time : " + str(self.max_diff_time) +"<br> cyb_std_sell : " + str(self.std_sell) +"<br> cyb_std_buy : " + str(self.std_buy)+" <br> cyb_industry : " + str(cyb_industry) + "<br> six_indst : "+str(six_indst)+"<br> six_i : " + str(six_indst) + "<br> a50_diff : " + str(sz50_diff) + " <br> sz50_real : " + str(sz50_real) + "<br> cyb_real : " + str(cyb_real) + "<br> cyb_b : " + str(cyb_b) + "<br> sz50_b : " + str(sz50_b)
            # Periodic report: at most once per 300s, and only after 09:36.
            if (self.last_report_time == None or self.last_report_time + datetime.timedelta(seconds=300) < datetime.datetime.now()) and datetime.datetime.now() > datetime.datetime.combine(datetime.datetime.now().date(), datetime.time(hour=9, minute=36, second=0)):
                self.last_report_time = datetime.datetime.now()
                en.email_notify(title="5 minutes notify",msg=msg)
            self.logger.info(msg)
    def weight_mean(self,list, mean_num=5):
        """Linearly weighted mean of the last ``mean_num`` values, newest
        weighted highest; returns 0 for empty input.

        NOTE(review): the parameter name shadows the builtin ``list``.
        """
        mean = 0.0
        t_list = []
        if len(list) > mean_num:
            t_list = list[-1 * mean_num:]
        else:
            t_list = list
        if len(t_list) > 0:
            i_sum = 0
            # i_sum = 1 + 2 + ... + len(t_list): normalizer for the weights.
            for i in range(len(t_list)):
                i_sum = i_sum + i + 1
            for i in range(len(t_list)):
                mean = mean + float(i + 1) / float(i_sum) * t_list[i]
        else:
            return 0
        return mean
    def var(self,list):
        """Root-mean-square of the values (deviation from a fixed mean of 0),
        computed only when more than 50 samples exist; otherwise returns 1.

        NOTE(review): despite the name this is an RMS about zero, not the
        sample variance; the parameter name shadows the builtin ``list``.
        """
        if len(list) > 50:
            mean = 0
            vs = 0
            for l in list:
                vs = vs + (l-mean)**2
            re = (vs/float(len(list)))**0.5
            return re
        else:
            return 1
    def store_m(self,cyb_diff):
        """Track the running max/min of the smoothed diff and their timestamps."""
        if self.max_diff < cyb_diff:
            self.max_diff = cyb_diff
            self.max_diff_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        if self.min_diff > cyb_diff:
            self.min_diff = cyb_diff
            self.min_diff_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    def base_diff(self,diff):
        """Return diff re-based against an intraday baseline.

        Before 09:40 the raw diff is returned unchanged; afterwards sign
        changes of diff and of the running accumulator base_factor are
        counted, a baseline (half the first post-flip diff) is latched, and
        diff - base is returned while diff agrees in sign with base_factor
        (0 otherwise).
        """
        self.base_factor = self.base_factor + diff
        now = datetime.datetime.now()
        if now > datetime.datetime.combine(now.date(),datetime.time(hour=9,minute=40,second=0)):
            if self.tmp_diff * diff < 0:
                self.diff_change_0_time = self.diff_change_0_time + 1
            if diff * self.base_factor < 0:
                self.base_change_0_time = self.base_change_0_time + 1
                self.tmp_diff = diff
                return 0
            if self.base_change_0_time > 0 and diff * self.base_factor > 0 and self.base == 0:
                self.base = diff/2.0
        else:
            return diff
        self.tmp_diff = diff
        if diff * self.base_factor > 0:
            return diff - self.base
        else :
            return 0
    def store_analysis(self,cyb,cyb_industry_diff, cyb_six_industry_diff,six_indst,cyb_b):
        """Upsert one evaluation row into the MySQL analysis_data table."""
        try:
            sql = "replace into analysis_data (datetime, cyb,cyb_industry_diff, cyb_six_industry_diff,six_indst,cyb_b) VALUES (%s,%s,%s,%s,%s,%s);"
            self.analysis_cursor.cursor().execute(sql,(datetime.datetime.now(), cyb,cyb_industry_diff, cyb_six_industry_diff,six_indst,cyb_b))
            self.analysis_cursor.commit()
        except Exception as e:
            # Roll back the transaction on any error.
            print(e)
            self.analysis_cursor.rollback()
if __name__ == "__main__":
    # Manual smoke test: requires the local MySQL instance opened in
    # __init__ and a reachable email notifier.
    m = model()
    print(m.base_diff(-1))
    print(m.base_diff(-1))
    m.position(six_indst=-1,cyb_real=1,sz50_real=-1,cyb_b=0.7,sz50_b = 0.6,cyb_industry=1)
    m.position(six_indst=-1.1, cyb_real=1, sz50_real=-1, cyb_b=0.7, sz50_b=0.6, cyb_industry=1)
    m.position(six_indst=-1, cyb_real=1, sz50_real=-1, cyb_b=0.7, sz50_b=0.6, cyb_industry=1)
| [
"[email protected]"
] | |
acce8e1a21ccf28ffadc38b8002f50cdbcf6987b | afdda9b5185826747814dd82fdf74f809cfa62ef | /Python/tdw/librarian.py | ce2301ec287ecfd7eebbf45a81bb85c00ca3ed8d | [
"BSD-2-Clause"
] | permissive | lijin929/tdw | e011b831c650a383a22e7e16d934c7940416fcd0 | 957cff2b400fbd24e31bbae886c307ecb7a74cdb | refs/heads/master | 2023-08-14T22:35:29.172322 | 2021-09-14T13:52:30 | 2021-09-14T13:52:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,269 | py | import json
from typing import List, Dict, TypeVar, Union, Generic, Optional, Tuple
import pkg_resources
from pathlib import Path
import platform
from secrets import token_hex
class _Record:
"""
Abstract class for a metadata record.
"""
_PLATFORM = platform.system()
def __init__(self, data: Optional[dict] = None):
"""
:param data: JSON data for the record. If None, the record will initialize with default values.
"""
if data is None:
self.name: str = ""
self.urls: Dict[str, str] = {"Windows": "", "Darwin": "", "Linux": ""}
else:
self.name = data["name"]
self.urls: Dict[str, str] = data["urls"]
def get_url(self) -> str:
"""
Returns the URL of the asset bundle for this platform. This is a wrapper for record.urls.
"""
return self.urls[_Record._PLATFORM]
def get_serializable(self) -> dict:
"""
Returns the serializable dictionary of this record.
"""
return self.__dict__
class ModelRecord(_Record):
    """
    A record of a model asset bundle.
    """

    def __init__(self, data: Optional[dict] = None):
        super().__init__(data)

        if data is not None:
            # Copy each model-specific field from the source dictionary.
            # A missing key raises KeyError, matching _Record's strictness.
            for key in ("wnid", "wcategory", "scale_factor", "do_not_use",
                        "do_not_use_reason", "flex", "substructure", "bounds",
                        "canonical_rotation", "physics_quality",
                        "asset_bundle_sizes", "composite_object"):
                setattr(self, key, data[key])
        else:
            self.wnid: str = ""
            self.wcategory: str = ""
            self.scale_factor: float = 1
            self.do_not_use: bool = False
            self.do_not_use_reason: str = ""
            self.flex: bool = False
            self.substructure: List[dict] = []
            # One fresh zero vector per side so the sub-dicts are not shared.
            self.bounds: Dict[str, Dict[str, float]] = {side: {"x": 0, "y": 0, "z": 0}
                                                        for side in ("back", "bottom", "center",
                                                                     "front", "left", "right", "top")}
            self.canonical_rotation: Dict[str, float] = {"x": 0, "y": 0, "z": 0}
            self.physics_quality: float = -1
            self.asset_bundle_sizes: Dict[str, int] = {"Windows": -1, "Darwin": -1, "Linux": -1}
            self.composite_object = False
class MaterialRecord(_Record):
    """
    A record of a visual material asset bundle.
    """

    def __init__(self, data: Optional[dict] = None):
        super().__init__(data)

        # Semantic material category; "Ceramic" is the default for a blank record.
        self.type: str = "Ceramic" if data is None else data["type"]
class SceneRecord(_Record):
    """
    A record of a scene asset bundle.
    """

    def __init__(self, data: Optional[dict] = None):
        super().__init__(data)

        if data is not None:
            self.description: str = data["description"]
            self.hdri: bool = data["hdri"]
            self.location: str = data["location"]
        else:
            # Blank record: empty description/location, no HDRI support.
            self.description: str = ""
            self.hdri: bool = False
            self.location: str = ""
class HDRISkyboxRecord(_Record):
    """
    A record of an HDRI skybox asset bundle.
    """

    # Lighting parameters that default to 0 for a blank record.
    _NUMERIC_FIELDS = ("color_temperature", "sun_elevation", "sun_initial_angle",
                       "sun_intensity", "initial_skybox_rotation", "exposure")

    def __init__(self, data: Optional[dict] = None):
        super().__init__(data)

        if data is None:
            for field in HDRISkyboxRecord._NUMERIC_FIELDS:
                setattr(self, field, 0)
            self.location: str = ""
        else:
            for field in HDRISkyboxRecord._NUMERIC_FIELDS:
                setattr(self, field, data[field])
            self.location = data["location"]
class HumanoidAnimationRecord(_Record):
    """
    A record for a humanoid animation asset bundle.
    """

    def __init__(self, data: Optional[dict] = None):
        super().__init__(data)

        if data is not None:
            self.duration: float = data["duration"]
            self.loop: bool = data["loop"]
            self.framerate: int = data["framerate"]
        else:
            # Blank record: zero-length, non-looping clip.
            self.duration: float = 0
            self.loop: bool = False
            self.framerate: int = 0

    def get_num_frames(self) -> int:
        """
        Returns the number of frames, given the duration and framerate.
        """

        total = self.duration * self.framerate
        return int(total)
class HumanoidRecord(_Record):
    """
    A record for a humanoid asset bundle.
    """
    def __init__(self, data: Optional[dict] = None):
        # No humanoid-specific fields; name and per-platform URLs are
        # handled entirely by _Record.
        super().__init__(data)
class RobotRecord(_Record):
    """
    A record for a robot asset bundle.
    """

    def __init__(self, data: Optional[dict] = None):
        """
        :param data: JSON data for the record. If None, the record will initialize with default values.
        """

        super().__init__(data)

        if data is None:
            # FIX: the fields were previously read from `data` unconditionally,
            # so the documented default data=None crashed with a TypeError.
            # Defaults mirror the other record subclasses.
            self.source: str = ""
            self.immovable: bool = False
            self.targets: dict = {}
        else:
            self.source: str = data["source"]
            self.immovable: bool = data["immovable"]
            self.targets: dict = data["targets"]
# Generic record type handled by a librarian; must derive from _Record.
T = TypeVar("T", bound=_Record)
class _Librarian(Generic[T]):
    """
    Base abstract class for a metadata librarian.
    """

    def __init__(self, library: str = ""):
        """
        :param library: The absolute path to the library .json file. If empty, a default path in the tdw module will be used.
        """

        if library == "":
            self.library = pkg_resources.resource_filename(__name__, "metadata_libraries/" + self.get_default_library())
        else:
            # Prefer a bundled library of that name; fall back to the raw path.
            module_path = pkg_resources.resource_filename(__name__, "metadata_libraries/" + library)
            if Path(module_path).exists():
                self.library = module_path
            else:
                self.library = library

        with open(self.library, "rt") as f:
            self.data = json.load(f)

        self.description = self.data["description"]

        self.records: List[T] = []
        for key in self.data["records"]:
            record = self._generate_record(self.data["records"][key])
            temp_urls = dict()
            # De-localize URLs
            for p in record.urls:
                # Set an absolute path.
                absolute = False
                for prefix in ["file:///", "http://", "https://"]:
                    if record.urls[p].startswith(prefix):
                        temp_urls[p] = record.urls[p]
                        absolute = True
                # De-localize a local path relative to the library file.
                if not absolute:
                    temp_urls[p] = f"file:///{str(Path(self.library).parent.joinpath(record.urls[p]).resolve())}"
                temp_urls[p] = temp_urls[p].replace("\\", "/")
            record.urls = temp_urls
            self.records.append(record)

    def get_default_library(self) -> str:
        """
        Returns the default library path (which is always the first in the list of `get_library_filenames()`)
        """

        return self.get_library_filenames()[0]

    @staticmethod
    def create_library(description: str, path: str) -> None:
        """
        Create a new library JSON file.

        :param path: The absolute filepath to the .json records database file.
        :param description: A brief description of the library.
        """

        path = Path(path)
        data = {"description": description,
                "records": {}}
        path.write_text(json.dumps(data), encoding="utf-8")
        print(f"Created new library: {path}")

    @staticmethod
    def get_library_filenames() -> List[str]:
        """
        Returns a list of the filenames of the libraries of this type in the tdw module.
        """

        # Abstract: subclasses must override (message matches _generate_record).
        raise Exception("Not defined.")

    def get_record(self, name: str) -> Optional[T]:
        """
        Returns a record with the specified name. If that record can't be found, returns None.

        :param name: The name of the record.
        """

        records = [r for r in self.records if r.name == name]
        if len(records) == 0:
            return None
        else:
            return records[0]

    def search_records(self, search: str) -> List[T]:
        """
        Returns a list of records whose names include the search keyword.

        :param search: The string to search for in the model name.
        """

        return [r for r in self.records if search in r.name]

    def add_or_update_record(self, record: T, overwrite: bool, write: bool = True, quiet: bool = True) -> bool:
        """
        Add a new record or update an existing record.

        :param record: The record.
        :param overwrite: If true, overwrite the record if it already exists.
        :param write: If true, write the library data to disk (overwriting the existing file).
        :param quiet: If true, silently correct the model name if need be.
        """

        # Validate the name of the record.
        name_ok, name, problems = self.get_valid_record_name(record.name, overwrite)
        record.name = name
        if not name_ok and not quiet:
            print(f"Renaming this record to {name} because:")
            for p in problems:
                print(f"\t{p}")

        added = False
        if len([r for r in self.records if r.name == record.name]) > 0:
            # If this record exists and we want to overwrite, update the record.
            if overwrite:
                records_list = [r for r in self.records if r.name != record.name]
                records_list.append(record)
                # FIX: keep the in-memory list in sync with self.data;
                # previously the rebuilt list was discarded.
                self.records = records_list
                added = True
        # Add the record.
        else:
            self.records.append(record)
            added = True
        # Write to disk.
        if added:
            if record.name in self.data["records"]:
                self.data["records"][record.name] = record.get_serializable()
            else:
                self.data["records"].update({record.name: record.get_serializable()})
            if write:
                self.write()
        return added

    def remove_record(self, record: Union[str, T], write: bool = True) -> bool:
        """
        Remove a record. Returns true if the record was removed.

        :param record: The record or the name of the record.
        :param write: If true, write the library data to disk (overwriting the existing file).
        """

        if isinstance(record, str):
            record_name = record
        else:
            record_name = record.name
        records_list = [r for r in self.records if r.name != record_name]
        removed = len(records_list) < len(self.records)
        if removed:
            del self.data["records"][record_name]
            self.records = records_list
        if write:
            self.write()
        return removed

    def write(self, pretty=True) -> None:
        """
        Write the data to disk.

        :param pretty: Pretty print.
        """

        with open(self.library, "wt") as f:
            if pretty:
                json.dump(self.data, f, sort_keys=True, indent=4)
            else:
                json.dump(self.data, f)

    def get_valid_record_name(self, name: str, overwrite: bool) -> Tuple[bool, str, List[str]]:
        """
        Generates a valid record name. Returns: true if the name is good as-is, the new name, and a list of problems with the old name.

        :param name: The name of a record we'd like to add.
        :param overwrite: If true, raise an exception if the record doesn't exist. Otherwise, overwrite. If False: If the record exists, suggest a new name.
        """

        record_names = [r.name for r in self.records]
        if overwrite and name not in record_names:
            return False, name, [f"Can't override a record named {name} because no such record exists!"]
        ok = True
        problems: List[str] = []
        underscored = name.replace(" ", "_")
        if underscored != name:
            ok = False
            problems.append("Name has spaces. They have been replaced with underscores.")
        good_name = underscored.lower()
        # FIX: compare against the underscored intermediate, not the original
        # name; otherwise a name that merely had spaces was also (falsely)
        # reported as having uppercase letters.
        if good_name != underscored:
            ok = False
            problems.append("Name has uppercase letters. They are now all lowercase.")
        if not overwrite and good_name in record_names:
            ok = False
            while good_name in record_names:
                good_name = good_name + token_hex(2)
            problems.append(f"A record named {name} already exists, and we don't want to overwrite it.")
        return ok, good_name, problems

    def _generate_record(self, data: dict) -> T:
        """
        Generate a record of type T from JSON data.

        :param data: The record JSON data.
        """

        raise Exception("Not defined.")
class ModelLibrarian(_Librarian[ModelRecord]):
    """
    Librarian class for model metadata.
    """

    def get_model_wnids_and_wcategories(self) -> Dict[str, str]:
        """
        Returns a dictionary of all model wnids and categories.
        Key=wnid Value=category
        """

        mapping: Dict[str, str] = {}
        for record in self.records:
            if record.wnid not in mapping:
                mapping[record.wnid] = record.wcategory
            elif mapping[record.wnid] != record.wcategory:
                # Same wnid mapped to two different categories: report it.
                print(f"WARNING: Model {record.name} wcategory is {record.wcategory} (expected: {mapping[record.wnid]})")
        return mapping

    def get_model_wnids(self) -> List[str]:
        """
        Returns a list of all unique wnids in the database, sorted numerically.
        """

        return sorted({record.wnid for record in self.records})

    def get_all_models_in_wnid(self, wnid: str) -> List[ModelRecord]:
        """
        Returns a list of all models with the same wnid.

        :param wnid: The WordNet ID.
        """

        return [record for record in self.records if record.wnid == wnid]

    def get_flex_models(self) -> List[ModelRecord]:
        """
        Returns a list of all Flex-compatible models.
        """

        return [record for record in self.records if record.flex]

    @staticmethod
    def get_library_filenames() -> List[str]:
        return ["models_core.json", "models_full.json", "models_special.json", "models_flex.json"]

    def _generate_record(self, data: dict) -> T:
        return ModelRecord(data)
class MaterialLibrarian(_Librarian[MaterialRecord]):
    """
    Librarian class for material metadata.
    """

    def get_all_materials_of_type(self, material_type: str) -> List[MaterialRecord]:
        """
        Returns a list of all material records of a given type.

        :param material_type: The type of material.
        """

        return [record for record in self.records if record.type == material_type]

    def get_material_types(self) -> List[str]:
        """
        Returns a list of all types of materials, sorted alphabetically.
        """

        return sorted({record.type for record in self.records})

    @staticmethod
    def get_library_filenames() -> List[str]:
        return ["materials_med.json", "materials_low.json", "materials_high.json"]

    def _generate_record(self, data: dict) -> T:
        return MaterialRecord(data)
class SceneLibrarian(_Librarian[SceneRecord]):
    """
    Librarian class for scene metadata.
    """
    @staticmethod
    def get_library_filenames() -> List[str]:
        # Scenes ship as a single default library file.
        return ["scenes.json"]
    def _generate_record(self, data: dict) -> T:
        # Deserialize one JSON entry into a SceneRecord.
        return SceneRecord(data)
class HDRISkyboxLibrarian(_Librarian[HDRISkyboxRecord]):
    """
    Librarian class for HDRI skybox metadata.
    """
    @staticmethod
    def get_library_filenames() -> List[str]:
        # HDRI skyboxes ship as a single default library file.
        return ["hdri_skyboxes.json"]
    def _generate_record(self, data: dict) -> T:
        # Deserialize one JSON entry into an HDRISkyboxRecord.
        return HDRISkyboxRecord(data)
class HumanoidAnimationLibrarian(_Librarian[HumanoidAnimationRecord]):
    """
    Librarian class for humanoid animation metadata.
    """
    @staticmethod
    def get_library_filenames() -> List[str]:
        # Humanoid animations ship as a single default library file.
        return ["humanoid_animations.json"]
    def _generate_record(self, data: dict) -> T:
        # Deserialize one JSON entry into a HumanoidAnimationRecord.
        return HumanoidAnimationRecord(data)
class HumanoidLibrarian(_Librarian[HumanoidRecord]):
    """
    Librarian class for humanoid metadata.
    """
    @staticmethod
    def get_library_filenames() -> List[str]:
        # Humanoids ship as a single default library file.
        return ["humanoids.json"]
    def _generate_record(self, data: dict) -> T:
        # Deserialize one JSON entry into a HumanoidRecord.
        return HumanoidRecord(data)
class RobotLibrarian(_Librarian[RobotRecord]):
    """
    Librarian class for robot metadata.
    """
    @staticmethod
    def get_library_filenames() -> List[str]:
        # Robots ship as a single default library file.
        return ["robots.json"]
    def _generate_record(self, data: dict) -> T:
        # Deserialize one JSON entry into a RobotRecord.
        return RobotRecord(data)
| [
"[email protected]"
] | |
01cfcefe303540943ca361d2cab65b79310ffdd5 | fb7e69d5dfe309c90b7eba06e5cfef3ecaa141a7 | /v2.0/exercise9.7.2.py | 46f4e072ff6b6e30b2f25cfa775728f2170965da | [] | no_license | ajsaule/Python | 9e74c46b8f804faaaa18c2506f01d9d09d6f7a8d | d3546001822e6a514411e5ed5369f6b2cc18cee4 | refs/heads/master | 2020-05-09T16:12:22.800696 | 2020-03-15T23:02:58 | 2020-03-15T23:02:58 | 181,262,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | file = open(input('Please type filename to open: '))
read = file.read()
count = dict()
# NOTE(review): `read` is a single string, so this iterates CHARACTERS, not
# lines; a one-character string never startswith('From'), so the branch below
# never executes and `count` stays empty. Iterating `read.splitlines()` (or
# the file object itself) was presumably intended -- confirm before changing.
for line in read:
    if line.startswith('From'):
        # NOTE(review): dict(str(...)) raises ValueError for any non-empty
        # string, and the result is never used; this looks unfinished
        # (perhaps meant to tally line[34:36] into `count`). Left as-is.
        select = dict(str(line[34:36]))
print(count)
| [
"[email protected]"
] | |
a6f533ee1a748cadb33dd88731d1d874b6732849 | b138195f988aa6ce71ad35fc4d5707c2127739bb | /sesc_mate/wsgi.py | 6b7c2401184455e99900682d72ae1d9ac59abd21 | [] | no_license | WoodieDudy/SESC_MATE-backend | 46866cf449ab9b7be2401088967d593b4db396e9 | e46df5d9d6f8bca798965618963ba27a31e2dd23 | refs/heads/master | 2023-04-17T02:05:46.836769 | 2021-04-22T12:20:09 | 2021-04-22T12:20:09 | 359,822,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
WSGI config for sesc_mate project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sesc_mate.settings')
# WSGI entry point used by application servers (gunicorn, uWSGI, mod_wsgi).
application = get_wsgi_application()
| [
"[email protected]"
] | |
26413742d0a2765fe4de84c03b8f3b80530d12a0 | ba0e5bd46a81a0bbbaabc9f1bdb5ed08f04b5d1e | /boards/models.py | 6cd9eba0dbbba05989ef57081d24a9ac4eadf5a2 | [] | no_license | okok0415/Cheating_Detection | 2bdecbae289d946723269cd3cf9e47c63f9b5f14 | 4bdf63e987f32b3b511f361c6e5224da5a682205 | refs/heads/main | 2023-04-24T18:59:27.406963 | 2021-05-04T10:23:37 | 2021-05-04T10:23:37 | 353,056,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | from django.db import models
from users import models as user_models
class Board(models.Model):
    """ Board Model Definition """
    # Post title, capped at 200 characters.
    title = models.CharField(max_length=200)
    # Free-form post body.
    content = models.TextField()
    # Author; nullable so posts survive user deletion (SET_NULL).
    # NOTE(review): default="1" is a string pk value -- presumably the pk of
    # a seed user; confirm whether an integer pk was intended.
    user = models.ForeignKey(
        user_models.User,
        on_delete=models.SET_NULL,
        null=True,
        default="1"
    )
    def __str__(self):
        # Admin/list display shows the post title.
        return self.title
| [
"[email protected]"
] | |
681900145067710e4f147f7537e281b59275efcb | e1a0a211adfdeaac830607ef01922ad2d7c602b5 | /cifar10.py | 0072978dc078387e4f3e6a51c60651a0d7d019ca | [] | no_license | yixu34/tftest | 7a0f294072ef491d3750917ce42afd09db561bf8 | 45efd9854faf217180350b4a9a42a2ad417e2c3d | refs/heads/master | 2022-12-09T12:38:07.948849 | 2020-08-24T00:23:08 | 2020-08-24T00:23:08 | 289,611,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 803 | py | import tensorflow as tf
from tensorflow.keras import layers
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
model = tf.keras.models.Sequential([
layers.Conv2D(
filters=32,
kernel_size=(3, 3),
activation='relu',
input_shape=(32, 32, 3)
),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation='relu'),
layers.MaxPooling2D((2, 2)),
layers.Conv2D(64, (3, 3), activation='relu'),
layers.Flatten(),
layers.Dense(64, activation='relu'),
layers.Dense(10)
])
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(optimizer='adam',
loss=loss_fn,
metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5)
| [
"[email protected]"
] | |
fa81b6f980eefe4e61b156f0138eb67cf63c9040 | 34ec71e93a70c951e02f54f764ab401b9f739a70 | /FIEK-UDP/UDP_Klient.py | cab73eb248a6e06527dca912c336ca9003958618 | [] | no_license | FortesaHysenaj/Socket-Programming | b2057e66c7250cbdd69fe0d7d9a51c15e193b2b5 | 753a2a9313ff9d3796d6a30d70a923fe69b94631 | refs/heads/master | 2020-05-05T08:49:31.819994 | 2019-04-29T23:57:30 | 2019-04-29T23:57:30 | 179,879,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,054 | py | import socket
# UDP client: reads an operation name from stdin, sends it to the server,
# and prints the raw reply. Quit with 'Q' or an empty line.
host = 'localhost'
port = 12000
print("----------------------------UDP KLIENTI-----------------------")
socketClient = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print("Zgjedhni nje metode: \nIPADRESA\nNUMRIIPORTIT\nHOST\nBASHKETINGELLORE (+shkruaj nje fjale-fjali)\n"
      "\nPRINTIMI (+shkruaj nje fjale/fjali)\nPALINDROME (+shkruaj nje fjale-fjali)\nDUPLIKIMI (+shkruaj nje fjale-fjali)\n"
      "KOHA\nLOJA\nFIBONACCI (nr>2)\nKONVERTIMI [(KilowattToHorsepower, HorsepowerToKilowatt,\n "
      "DegreesToRadians, RadiansToDegrees,\n GallonsToLiters, LitersToGallons)+vlera]")
print("--------------------------------------------------------------")
message = input("OPERACIONI >>> ")
while (message != 'Q' and (message != "")):
    socketClient.sendto(message.encode(), (host, port))
    # Blocking receive; replies longer than 128 bytes would be truncated.
    data = socketClient.recv(128)
    # The triple-quoted block below is a disabled code path kept as a
    # string literal (a no-op statement), not executed.
    '''
    if not data:
        print("Kjo mundesi nuk ekziston")
        message=input("OPERACIONI >>> ")
        continue
    '''
    # Prints the raw bytes object (e.g. b'...'), not a decoded string.
    print(data)
    message = input("OPERACIONI >>> ")
socketClient.close(); | [
"[email protected]"
] | |
defeb5a0a197d38f8b8809a81f0557ed0285e0b3 | dcf4df0c24fb2ba4f44ccc3e625e56aeabce5d9f | /calificaciones/urls.py | 021f927d58bdbab25719afd8c89f3099886c7c26 | [] | no_license | arelyibarrrivas13/calificaciones_Django | 19f5acfe051079d86fb4194f1f2487a2cdc34ddb | ead6a96db2f838ba045bb73f2683ceb6bf04c7ac | refs/heads/master | 2022-07-29T13:43:46.827219 | 2020-06-18T01:50:29 | 2020-06-18T01:50:29 | 273,113,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,109 | py | """calificaciones URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from gestionCalificaciones import views
# URL routing table: maps each path prefix to a view in gestionCalificaciones.
# All routes are function-based views except the built-in admin site.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('consulta/', views.consulta),
    path('busqueda/', views.busqueda),
    path('ingreso/', views.ingreso),
    path('nuevoingreso/', views.nuevoingreso),
    path('buscar_calificacion_materia/', views.buscar_calificacion_materia),
    path('buscar_calificacion_alumno/', views.buscar_calificacion_alumno),
]
| [
"[email protected]"
] | |
ddf9ff835fbc973f18c4f56f5c0ff7440397ec56 | 2ab444e427955d3dbf1f8c0947837c7d73e66a0a | /25-刘杰-北京/第九周/datasets/__init__.py | b57add28328dba5e5f4d023c2bafaed95b186619 | [] | no_license | Yang-chen205/badou-Turing | 6bfc0a4622cb0882f89117e73e2868d40601e7ff | f2a1b2f8b6b292815d92a294d49954616d3624d5 | refs/heads/main | 2023-08-07T03:57:07.471322 | 2021-09-26T08:20:10 | 2021-09-26T08:20:10 | 380,449,981 | 1 | 0 | null | 2021-06-26T08:12:02 | 2021-06-26T08:12:02 | null | UTF-8 | Python | false | false | 157 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
@Project :badou-Turing
@File :__init__.py.py
@Author :luigi
@Date :2021/8/31 4:18 下午
'''
| [
"[email protected]"
] | |
4c8658c14906c41768a4f2f85fcae70ef46c9d23 | c78c1919b78751e88a8fa6816c27b6b173ba245f | /06_1_loss_and_optimizer.py | 224cac213a97ce3b013893ec7525074da72eab1c | [
"MIT"
] | permissive | umit-ai/pytorchTutorial | 742055e6abccc167f3bc821e800afca34ea0e769 | aa2573f1e829e7f1201e53dbb9df351785229fac | refs/heads/master | 2022-04-21T13:53:06.504027 | 2020-04-13T17:01:38 | 2020-04-13T17:01:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,015 | py | # 1) Design model (input, output, forward pass with different layers)
# 2) Construct loss and optimizer
# 3) Training loop
# - Forward = compute prediction and loss
# - Backward = compute gradients
# - Update weights
import torch
import torch.nn as nn
# Linear regression
# f = w * x
# here : f = 2 * x
# 0) Training samples, watch the shape!
X = torch.tensor([[1], [2], [3], [4]], dtype=torch.float32)
Y = torch.tensor([[2], [4], [6], [8]], dtype=torch.float32)
n_samples, n_features = X.shape
print(f'#samples: {n_samples}, #features: {n_features}')
# 0) create a test sample
X_test = torch.tensor([5], dtype=torch.float32)
# 1) Design Model, the model has to implement the forward pass!
# Here we can use a built-in model from PyTorch
input_size = n_features
output_size = n_features
# we can call this model with samples X
model = nn.Linear(input_size, output_size)
'''
class LinearRegression(nn.Module):
def __init__(self, input_dim, output_dim):
super(LinearRegression, self).__init__()
# define diferent layers
self.lin = nn.Linear(input_dim, output_dim)
def forward(self, x):
return self.lin(x)
model = LinearRegression(input_size, output_size)
'''
print(f'Prediction before training: f(5) = {model(X_test).item():.3f}')
# 2) Define loss and optimizer
learning_rate = 0.01
n_iters = 100
loss = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# 3) Training loop
for epoch in range(n_iters):
# predict = forward pass with our model
y_predicted = model(X)
# loss
l = loss(Y, y_predicted)
# calculate gradients = backward pass
l.backward()
# update weights
optimizer.step()
# zero the gradients after updating
optimizer.zero_grad()
if epoch % 10 == 0:
[w, b] = model.parameters() # unpack parameters
print('epoch ', epoch+1, ': w = ', w[0][0].item(), ' loss = ', l)
print(f'Prediction after training: f(5) = {model(X_test).item():.3f}')
| [
"[email protected]"
] | |
cec69182b84e9aa6bff4f48d54f59182d811ddf5 | de847b2e9a5236887fb6a164fedc0e0c86b84e6c | /pythonturorial/workshopprograms/userinput.py | 0b0ce93aae289361bd5e6a95386c281114c27be5 | [] | no_license | raghuprasadks/pythonmicrosoftexam | 9a6bcafcdbc5bb6727278f421bb1a31dc5b7427b | 68dacab8aa98d0ff39f1f36c3ce8e666be3760a0 | refs/heads/master | 2020-09-15T02:51:06.809959 | 2020-02-12T01:18:42 | 2020-02-12T01:18:42 | 223,330,626 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | name = input("Enter your name")
print(type(name))
print('your name is ',name)
age = int(input("enter your age"))
print('your age is ',age)
nextyear = age +1
print('your age after one year',nextyear)
amount = float(input("Enter the payment made for purchase of fruits"))
print('float conversion',amount)
print("Enter names of your friends")
friends = eval(input("Enter names as a list"))
print('evaluated as list ',type(friends))
print('here comes your friends ',friends)
| [
"[email protected]"
] | |
2c8c9cb25515b039215ca4a231c839dec248cb22 | 236e6d7c644d8e0f8c2e0e1d0bf222a31163abbb | /shop/models/user.py | 7e0ea29f83a39f9800174060bdfb42b14a90c995 | [] | no_license | itsluja/digi | 6687c91f149f4bdb15987e576c069b8851d9c715 | 047cdf40d33263c7e9b9e7b8d54bd499e339c5bd | refs/heads/main | 2023-03-15T05:45:56.020204 | 2021-03-18T19:38:42 | 2021-03-18T19:38:42 | 345,308,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | from django.db import models
class User(models.Model):
name = models.CharField(max_length= 50)
active = models.BooleanField(default=True)
email = models.CharField(max_length= 100 , unique= True)
password = models.CharField(max_length= 500)
phone = models.CharField(max_length= 10)
city = models.CharField(max_length= 200,null=True,blank=True)
gender = models.CharField(max_length=10,default="")
def __str__(self):
return self.name
| [
"[email protected]"
] | |
96c4eded58823e78c863134df63303135ce70051 | 69dcbd7053449b2522448545927604cc8c8e16f7 | /exporter.py | c3ecf3da53e86efc5ada4dc01ec89350b4fb2eb8 | [] | no_license | namikiri/vk-dialogue-export | 29d701b6be086d4122d3fced8f3f6da646985746 | 0293715f73db611e2545959c9375b9b898d207cc | refs/heads/master | 2020-03-27T15:16:10.176653 | 2018-08-30T06:59:19 | 2018-08-30T06:59:19 | 146,707,475 | 1 | 0 | null | 2018-08-30T06:45:50 | 2018-08-30T06:45:50 | null | UTF-8 | Python | false | false | 15,592 | py | import os
import urllib
from progress import *
from utils import *
class ExportContext:
    """Shared state threaded through a dialog export.

    Holds the user-info cache (shared across all nesting levels) and the
    current attachment-nesting depth, which the exporter compares against
    the --audio-depth / --docs-depth limits.
    """

    def __init__(self, user_fetcher, depth=0, users=None):
        self.depth = depth
        self.user_fetcher = user_fetcher
        # The users dict is intentionally shared with parent contexts.
        self.users = users if users is not None else dict()

    def add_user(self, user_id, exporter=None):
        """Resolve and cache info for user_id (skips falsy ids and cache hits)."""
        if user_id and user_id not in self.users:
            self.users[user_id] = self.user_fetcher.get_data(user_id, exporter)

    def next_level(self):
        """Return a child context one nesting level deeper (same user cache).

        BUG FIX: previously the depth was passed through unchanged, so
        `context.depth` stayed 0 forever and the depth-limit checks in
        handle_audio/handle_doc could never take effect.
        """
        return ExportContext(self.user_fetcher, self.depth + 1, self.users)
class UserFetcher:
    """Resolves VK user/group ids to display info via the API, with caching."""

    def __init__(self, api):
        self.api = api
        # user_id -> info dict; each id hits the API at most once.
        self.cache = dict()

    def get_data(self, user_id, exporter=None):
        """Return cached info for user_id, fetching it on first use.

        Negative ids denote groups (VK convention), non-negative ids denote
        users.  If an exporter is given, its avatar is downloaded too and the
        local filename recorded under 'filename'.
        """
        if not (user_id in self.cache):
            if user_id < 0:
                # Group: the API takes the positive group id.
                groups = self.api.call("groups.getById", [("group_id", str(-user_id))])
                data = groups[0]
                downloaded = None
                if exporter is not None:
                    downloaded = exporter.download_image(data)
                self.cache[user_id] = {
                    'name': data['name'],
                    'first_name': data['name'],
                    'last_name': '',
                    'link': 'https://vk.com/%s' % data['screen_name'],
                    'filename': downloaded
                }
            else:
                users = self.api.call("users.get", [("user_ids", str(user_id)), ("fields", "photo_50")])
                data = users[0]
                downloaded = None
                if exporter is not None:
                    downloaded = exporter.download_image(data)
                self.cache[user_id] = {
                    'name': '%s %s' % (data['first_name'], data['last_name']),
                    'first_name': data['first_name'],
                    'last_name': data['last_name'],
                    'link': 'https://vk.com/id%s' % data['id'],
                    'filename': downloaded
                }
        return self.cache[user_id]
# Module-wide progress reporter shared by all exporter methods below.
progress = Progress()
class DialogExporter:
    """Exports one VK dialog (user, chat or group) to a JSON structure.

    Messages are paged through the API; attachments (photos, stickers,
    videos, audio, docs, wall posts, gifts) are downloaded into a
    per-dialog subdirectory of options.output_dir.
    """

    def __init__(self, api, dlg_type, dlg_id, options):
        self.api = api
        # 'user', 'chat' or group — controls peer-id computation in fetch_messages.
        self.type = dlg_type
        self.id = dlg_id
        # Attachments live in "<output_dir>/<dialog id>/".
        self.attach_dir = str(self.id)
        self.output_dir = options.output_dir
        self.options = options
        self.user_fetcher = UserFetcher(api)
        self.json_out = {
            'messages': []
        }

    def find_largest(self, obj, key_override='photo_'):
        """Return the dict KEY (e.g. 'photo_1280') of the largest image size.

        VK attachments carry keys like photo_75 ... photo_1280; the numeric
        suffix is the pixel size, so the max suffix is the biggest image.
        """
        def get_photo_keys():
            for k, v in iter(obj.items()):
                if k.startswith(key_override):
                    yield k[len(key_override):]
        return "%s%s" % (key_override, max(map(lambda k: int(k), get_photo_keys())))

    def download_file(self, url, out_filename, auto_image_ext=False, size=-1):
        """Download url into the attachment dir; return the relative path.

        Returns None for empty urls or after 3 failed attempts.  When
        auto_image_ext is True and out_filename has no extension, the
        extension is inferred from the response Content-Type.  Already
        downloaded files are reused.
        """
        if not url:
            # blocked documents or audio files go here
            return None

        abs_attach_dir = os.path.join(self.output_dir, self.attach_dir)
        if not os.path.exists(abs_attach_dir):
            os.makedirs(abs_attach_dir)
        elif not os.path.isdir(abs_attach_dir):
            raise OSError("Unable to create attachments directory %s" % abs_attach_dir)

        rel_out_path = esc("%s/%s" % (self.attach_dir, out_filename))
        abs_out_path = os.path.join(self.output_dir, rel_out_path)
        has_ext = len(os.path.splitext(rel_out_path)[1]) > 0

        if has_ext and os.path.exists(abs_out_path) and os.stat(abs_out_path).st_size > 0:
            return rel_out_path  # file was already downloaded?
        elif not has_ext and auto_image_ext:
            # An image may have been downloaded earlier with an inferred
            # extension — look for any matching image file.
            downloaded_image = has_downloaded_image(abs_attach_dir, out_filename)
            if downloaded_image is not None:
                return os.path.join(self.attach_dir, downloaded_image)

        def update_progress():
            display_filename = out_filename
            if auto_image_ext and not has_ext:
                display_filename = out_filename + '.jpg' # we cannot determine it right now, but jpg is common, so...
            if size > 0:
                display_filename += ', ' + fmt_size(size)
            progress.step_msg('%s -> %s' % (url, display_filename))

        def try_download(src_url):
            # Mutates the outer path variables once the real extension is known.
            nonlocal out_filename
            nonlocal rel_out_path
            nonlocal abs_out_path
            nonlocal has_ext
            try:
                request = urllib.request.urlopen(src_url, timeout=20)
                if not has_ext and auto_image_ext and 'Content-Type' in request.info():
                    ext = '.' + guess_image_ext(request.info()['Content-Type'])
                    out_filename = out_filename + ext
                    rel_out_path = rel_out_path + ext
                    abs_out_path = abs_out_path + ext
                    has_ext = True
                    update_progress()

                with open(abs_out_path, 'wb') as f:
                    f.write(request.read())
                return True
            except Exception:
                # Any failure (network, disk) counts as one failed attempt.
                return None

        update_progress()

        try:
            try_count = 0
            while try_count < 3:
                # sys.stdout.write("Downloading photo %s\n" % (message["id"]))
                if try_download(url):
                    return rel_out_path
                try_count += 1
        finally:
            progress.clear_step_msg()

        progress.error("Failed to retrieve file (%s) after 3 attempts, skipping\n" % url)
        return None

    def download_image(self, attachment, key_override="photo_"):
        """Download the largest available image of an attachment."""
        filename = str(attachment['id'])
        url = attachment[self.find_largest(attachment, key_override)]
        return self.download_file(url, filename, True)

    def fetch_messages(self):
        """Yield (message, total_count) pairs, oldest first, paging by 200."""
        offset = 0
        selector = 'user_id' if self.type == 'user' else 'peer_id'
        # VK peer-id convention: chats are offset by 2e9, groups are negative.
        author_id = self.id if self.type == 'user' else (2000000000 + self.id if self.type == 'chat' else -self.id)
        while True:
            messages = self.api.call('messages.getHistory',
                                     [('offset', offset), ('count', 200), (selector, author_id), ('rev', 1)])
            if len(messages['items']) == 0:
                break
            for msg in messages['items']:
                yield (msg, messages['count'])
            offset += len(messages['items'])

    def handle_link(self, context, link):
        """Export a link attachment (downloads its preview photo if any)."""
        downloaded = None
        if 'photo' in link:
            downloaded = self.download_image(link['photo'])
        return {
            'type': 'link',
            'url': link.get('url', ''),
            'title': link.get('title', ''),
            'caption': link.get('caption', ''),
            'description': link.get('description', ''),
            'filename': downloaded
        }

    def handle_photo(self, context, photo):
        """Export a photo attachment."""
        downloaded = self.download_image(photo)
        return {
            'type': 'photo',
            'filename': downloaded,
            # FIXME(review): find_largest() returns the dict KEY (e.g.
            # 'photo_1280'), not the URL — other handlers index the
            # attachment with it.  'url' here likely should be
            # photo[self.find_largest(photo)].
            'url': self.find_largest(photo),
            'description': photo.get('text', ''),
            'owner_id': photo.get('owner_id', 0),
            'width': photo.get('width', 0),
            'height': photo.get('height', 0),
            'date': photo.get('date', 0),
            'id': photo.get('id', 0),
            'album_id': photo.get('album_id', 0)
        }

    def handle_sticker(self, context, sticker):
        """Export a sticker attachment, downloading its largest image."""
        # find the largest sticker image file
        largest = None
        if 'images' in sticker:
            for image in sticker['images']:
                if largest is None or image['width'] > largest['width']:
                    largest = image

        url = largest['url'] if largest is not None else ''
        downloaded = self.download_file(url, str(sticker.get('sticker_id', 0)), True) if largest is not None else None
        return {
            'type': 'sticker',
            'filename': downloaded,
            'url': url
        }

    def handle_video(self, context, video):
        """Export a video attachment (thumbnail only — videos aren't downloaded)."""
        video_thumb = self.download_image(video)
        context.add_user(video.get('owner_id', 0), self)
        return {
            'type': 'video',
            'description': video.get('description', ''),
            'url': "https://vk.com/video%s_%s" % (video.get('owner_id', 0), video.get('id', 0)),
            'title': video.get("title", ''),
            'duration': video.get("duration", 0),
            'views': video.get('views', 0),
            'comments': video.get('comments', 0),
            'thumbnail_filename': video_thumb,
            'platform': video.get('platform', '?'),
            'date': video.get('date', 0),
            'owner_id': video.get('owner_id', 0)
        }

    def handle_wall(self, context, wall):
        """Export a wall post, recursing into its attachments and reposts."""
        if 'from_id' in wall:
            context.add_user(wall['from_id'], self)
        if 'to_id' in wall:
            context.add_user(wall['to_id'], self)

        exported_post = {
            'type': 'post',
            'from_id': wall.get('from_id', 0),
            'to_id': wall.get('to_id', 0),
            'post_type': wall.get('post_type', ''),
            'date': wall.get('date', 0),
            'text': wall.get('text', ''),
            'url': "https://vk.com/wall%s_%s" % (wall.get('from_id', 0), wall.get('id', 0)),
            'views': wall.get('views', {}).get('count', 0),
            'likes': wall.get('likes', {}).get('count', 0),
            'comments': wall.get('comments', {}).get('count', 0),
            'reposts': wall.get('reposts', {}).get('count', 0),
            'source': wall.get('post_source', {'type': 'api', 'platform': 'unknown'})
        }

        if "attachments" in wall:
            exported_post['attachments'] = self.export_attachments(context.next_level(), wall['attachments'])

        if "copy_history" in wall:
            # this is a repost
            for repost in wall['copy_history']:
                # NOTE(review): the list is re-created on every iteration, so
                # only the LAST repost in copy_history survives — the
                # assignment probably belongs before the loop.
                exported_post['repost'] = []
                post_type = repost.get('post_type', '')
                if post_type == "post":
                    exported_post['repost'].append(self.handle_wall(context.next_level(), repost))
                else:
                    progress.error("No handler for post type: %s\n" % post_type)
        return exported_post

    def handle_audio(self, context, audio):
        """Export an audio attachment; downloads the mp3 when --audio allows."""
        filename = '%s.mp3' % audio.get('id', 0)
        url = audio.get('url', '')
        downloaded = None
        if self.options.arguments.audio and context.depth <= self.options.arguments.audio_depth:
            if not url or "audio_api_unavailable.mp3" in url:
                progress.error("Audio file [%s - %s] is no more available, skipping\n"
                               % (audio.get('artist', ''), audio.get('title', '')))
            else:
                downloaded = self.download_file(url, filename)
        return {
            'type': 'audio',
            'artist': audio.get('artist', ''),
            'title': audio.get('title', ''),
            'duration': audio.get('duration', 0),
            'filename': downloaded,
            'url': url
        }

    def handle_voice_msg(self, context, audio_msg):
        """Export a voice message (a doc with an audio_msg preview)."""
        filename = '%s.%s' %(audio_msg.get('id', 0), audio_msg.get('ext', 'mp3'))
        msg_preview = audio_msg.get('preview', {}).get('audio_msg', {})
        # Prefer mp3 over ogg when both formats are offered.
        url = msg_preview.get('link_mp3') or msg_preview.get('link_ogg') or ''
        downloaded = None
        if not self.options.arguments.no_voice:
            if url:
                downloaded = self.download_file(url, filename)
            else:
                progress.error("Voice message is no more available, skipping\n")
        return {
            'type': 'voice',
            'filename': downloaded,
            'url': url,
            'duration': msg_preview.get('duration', 0),
            'id': audio_msg.get('id', 0),
            'owner_id': audio_msg.get('owner_id', 0),
            'date': audio_msg.get('date', 0)
        }

    def handle_doc(self, context, doc):
        """Export a document attachment; voice messages are dispatched separately."""
        if 'preview' in doc and 'audio_msg' in doc['preview']:
            return self.handle_voice_msg(context, doc)

        filename = '%s.%s' % (doc.get('id', 0), doc.get('ext', 'unknown'))
        url = doc.get('url', '')
        downloaded = None
        if self.options.arguments.docs and context.depth <= self.options.arguments.docs_depth:
            if url:
                downloaded = self.download_file(url, filename, False, doc.get('size', -1))
            else:
                progress.error("Document [%s] is no more available, skipping\n" % doc.get('title', ''))
        return {
            'type': 'doc',
            'filename': downloaded,
            'url': url,
            'title': doc.get('title', ''),
            'size': doc.get('size', 0),
            'ext': doc.get('ext', '')
        }

    def handle_gift(self, context, gift):
        """Export a gift attachment (thumbnail image only)."""
        gift_thumb = self.download_image(gift, 'thumb_')
        return {
            'type': 'gift',
            'thumbnail': gift_thumb
        }

    def handle_unknown(self, context, attachment):
        """Fallback for attachment types without a dedicated handler."""
        return {
            'type': attachment['type']
        }

    def export_attachments(self, context, attachments):
        """Dispatch each attachment to handle_<type>; unknown types pass through."""
        known_types = ('photo', 'video', 'audio', 'doc', 'wall', 'sticker', 'link', 'gift')
        results = []
        for att in attachments:
            if att['type'] in known_types:
                results.append(getattr(self, 'handle_' + att['type'])(context, att[att['type']]))
            else:
                results.append(self.handle_unknown(context, att))
        return results

    def export_message(self, ctx, vk_msg):
        """Convert one raw API message (recursively, incl. forwards) to JSON."""
        # write message head
        exported_msg = {
            'date': vk_msg.get('date', 0),
            'message': vk_msg.get('body', ''),
            'is_important': vk_msg.get('important', False),
            'is_updated': 'update_time' in vk_msg and vk_msg['update_time']
        }
        # 'is_updated' is normalized to a plain bool just below.
        is_updated = False
        if 'update_time' in vk_msg and vk_msg['update_time']:
            is_updated = True
            exported_msg['updated_at'] = vk_msg['update_time']
        exported_msg['is_updated'] = is_updated

        sender_id = vk_msg.get('from_id', 0) or vk_msg.get('user_id', 0)
        ctx.add_user(sender_id, self)
        exported_msg['sender'] = {
            'id': sender_id
        }

        # handle forwarded messages
        if len(vk_msg.get('fwd_messages', [])) > 0:
            exported_msg['forwarded'] = []
            for fwd_msg in vk_msg['fwd_messages']:
                exported_msg['forwarded'].append(self.export_message(ctx, fwd_msg))

        # handle attachments
        if 'attachments' in vk_msg:
            exported_msg['attachments'] = self.export_attachments(ctx, vk_msg['attachments'])

        # Service-message fields (chat renamed, user invited, ...).
        if 'action' in vk_msg:
            exported_msg['action'] = vk_msg['action']
        if 'action_text' in vk_msg:
            exported_msg['action_text'] = vk_msg['action_text']
        if 'action_mid' in vk_msg:
            exported_msg['action_mid'] = vk_msg['action_mid']

        if self.options.arguments.save_raw:
            exported_msg['raw'] = vk_msg
        return exported_msg

    def export(self):
        """Run the full export and return the JSON-serializable result dict."""
        cur_step = 0
        ctx = ExportContext(self.user_fetcher)
        for msg, total in self.fetch_messages():
            if cur_step == 0:
                progress.update(0, total)
            exported_msg = self.export_message(ctx, msg)
            self.json_out['messages'].append(exported_msg)
            cur_step += 1
            progress.update(cur_step, total)
        self.json_out['users'] = ctx.users
        return self.json_out
| [
"[email protected]"
] | |
4d7ed2a1a08f6964c45a24c6f61dfc5c70731d1c | b9de1691e91ea5dba082ae3e3bd876a6fe4a2d45 | /cogs/worksheets.py | 0dbf614affdb86f7fdd627cb046cdac9d069e887 | [
"MIT"
] | permissive | Developing-Studio/ci-Administrator | ee313758219222e6a2c338bd8d8ac018ba578871 | 087748cb73b3c0edd186f82987b4ffedbe198ac4 | refs/heads/main | 2023-02-07T10:23:06.317291 | 2020-12-28T21:58:10 | 2020-12-28T21:58:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,209 | py | import datetime
import io
import operator
import os
import random
import re
from typing import List
from typing import Optional
from typing import Tuple
import discord
from discord.ext import commands
from discord.ext import flags
from discord.ext import tasks
from base import custom
from converters import OperationConverter
from errors import WorksheetsError
from objects import Operation
from objects import MST
def positive_int(arg: str):
    """Converter: parse *arg* as an ``int`` and require it to be > 0.

    Raises ``commands.BadArgument`` for zero or negative values; a
    non-numeric string raises ``ValueError`` from ``int()`` as before.
    """
    value = int(arg)
    if value <= 0:
        raise commands.BadArgument("integer must be positive")
    return value
class Worksheets(custom.Cog):
    """Cog that generates, schedules and grades arithmetic worksheets."""

    def __init__(self, bot):
        self.bot = bot
        # Daily reminder drills multiplication by default.
        operation = OperationConverter.OPERATORS["mul"]

        # Filled in asynchronously by __ainit__ once the bot is ready.
        self.message: str = None
        # Parses lines like "3 x 4 = 12" (answer optional).
        # FIXME(review): inside a character class, `\+-x` is the RANGE
        # '+'..'x' (which matches digits and most letters), not the three
        # characters + - x.  Intended form is probably r"[+\-x÷]".
        self.question_format = re.compile(
            r"(?P<x>[0-9]{1,2})\s*"
            r"(?P<operator>[\+-x÷])\s*"
            r"(?P<y>[0-9]{1,2})\s*=\s*"
            r"(?P<answer>[0-9]{1,3})?"
        )
        self.MST = MST()

        self.bot.loop.create_task(self.__ainit__())
        self._remind.start(operation, questions=30)

    def cog_unload(self):
        # Stop the scheduled reminder loop when the cog is removed.
        self._remind.cancel()

    async def __ainit__(self):
        await self.bot.wait_until_ready()

        self.message = f"{self.kai.mention} Study"

    @property
    def bot_channel(self):
        # Channel the daily worksheet is posted to.
        return self.bot.home.get_channel(531807782916194314)

    @property
    def kai(self):
        # The member who gets pinged with the worksheet.
        return self.bot.home.get_member(297874688145752066)

    def _get_next_target_date(self):
        """Return the next noon (MST) — today if it hasn't passed, else tomorrow."""
        now = datetime.datetime.now(tz=self.MST)
        target = datetime.datetime(now.year,
                                   now.month,
                                   now.day,
                                   hour=12,
                                   tzinfo=self.MST)

        if now < target:
            return target
        return target + datetime.timedelta(days=1)

    def create_worksheet(self, operation: Operation, questions: int = 30):
        """Build a text worksheet of random 1-12 problems as a discord.File.

        NOTE(review): the computed answer is written into each line, so the
        generated sheet already contains the solutions — confirm that is
        intentional.
        """
        now = datetime.datetime.now()
        filename = now.strftime("%d-%m-%Y.txt")
        stream = io.BytesIO()

        for _ in range(questions):
            x = random.randint(1, 12)
            y = random.randint(1, 12)
            answer = operation(x, y)

            stream.write(str.encode(f"{x} {operation.symbol} {y} = {answer}\n"))
        stream.write(str.encode("\nTime: \n"))
        stream.seek(0)
        return discord.File(stream, filename)

    async def validate_worksheets(self,
                                  operation: Optional[Operation],
                                  attachments: List[discord.Attachment]):
        """Grade the first attached worksheet; return an annotated answers file.

        Wrong answers are marked with ❌ plus the correct value, and a
        "Results: n/total" summary is appended.
        """
        stream = io.BytesIO()
        success = 0
        total = 0

        # Only the first attachment is graded.
        attachment = attachments[0]
        name, _ = os.path.splitext(attachment.filename)
        filename = f"{name}-ANSWERS.txt"
        content = await attachment.read()

        # Strip a UTF-8 BOM if the file was saved by e.g. Notepad.
        if content[:3] == b"\xef\xbb\xbf":
            content = content[3:]
        content = content.decode()

        for line in content.split("\n"):
            question_found = self.question_format.match(line)

            if question_found:
                append = line.strip()
                total += 1

                # UNTESTED
                if operation is None:
                    # Infer the operation from the symbol in the first question.
                    operation = discord.utils.get(
                        OperationConverter.OPERATORS.values(),
                        symbol=question_found.group("operator")
                    )

                # UNTESTED
                args = ("x", "y", "answer")
                x, y, response = map(int, question_found.group(*args))
                answer = operation(x, y)

                if response == answer:
                    success += 1
                else:
                    append += f" ❌ {answer}"
                stream.write(str.encode(f"{append}\n"))
        stream.write(str.encode(f"\nResults: {success}/{total}\n"))
        stream.seek(0)
        return discord.File(stream, filename)

    @tasks.loop()
    async def _remind(self, operation: Operation, questions: int):
        # Sleep until the next noon MST, then post a fresh worksheet.
        date = self._get_next_target_date()
        await discord.utils.sleep_until(date)

        file = self.create_worksheet(operation, questions)
        await self.bot_channel.send(self.message, file=file)

    @_remind.before_loop
    async def _before_remind(self):
        await self.bot.wait_for_display()
        print("Running _remind.start()")

    @flags.add_flag("--questions", type=positive_int, default=30)
    @flags.add_flag("--validate", action="store_true")
    @flags.command()
    async def worksheets(self,
                         ctx,
                         operation: Optional[OperationConverter],
                         **flags):
        """Command: generate a worksheet, or grade one with --validate."""
        content: Optional[str] = self.message

        if flags["validate"]:
            content = None

            if not ctx.message.attachments:
                raise WorksheetsError("no attachment found")
            file = await self.validate_worksheets(operation,
                                                  ctx.message.attachments)
        else:
            file = self.create_worksheet(operation, flags["questions"])
        await ctx.send(content, file=file)
await ctx.send(content, file=file)
def setup(bot):
    # Standard discord.py extension entry point: register the cog on load.
    bot.add_cog(Worksheets(bot))
| [
"[email protected]"
] | |
2196349d34fe90de1d957e98c7117e16312f2e30 | d5080d96e40525f6d3cbf3f97910234fb4c35ee2 | /program/classifierExp.py | 12c35c44b61ff4c502d0903d0bc4a31aaf1442f0 | [] | no_license | kojotek/contest_at_least_we_tried_public | 2e1d60ae86f3a72340495d2d8d9d8f57f37ef506 | dc9959f3aee618b7b65a9b0bd17f2499d8529b39 | refs/heads/master | 2020-06-15T05:23:02.547145 | 2016-12-10T16:30:58 | 2016-12-10T16:30:58 | 75,324,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 746 | py | print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of Neighbours Classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(X) | [
"[email protected]"
] | |
ef4cf60a83f3320c1d17fdc3aa26f2e066eb5006 | d2cc300faf038c018ad6612bb93dbb5defb83e2d | /tests/test_utils.py | 4b61c414166027f3ffbfe007bf6f9802a88e6c10 | [] | no_license | paliwal90/winning_price_pred | addadeca5285b22c8ef02b2d5958177bcd22d598 | c126ac40a1ed13baabe096e5ff55072b428d1a55 | refs/heads/master | 2020-04-17T04:16:50.272407 | 2017-11-14T05:00:17 | 2017-11-14T05:00:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | import numpy as np
from scipy.sparse import csr_matrix
from nose.tools import assert_equal, assert_true
from winning_price_pred import utils as testee
def test_add_bias():
    """add_bias must prepend a column of ones for both sparse and dense input."""
    # scipy.sparse.csr_matrix
    X = csr_matrix([[1,2],[2,3],[3,4]])
    got = testee.add_bias(X)
    expected = csr_matrix([[1,1,2],[1,2,3],[1,3,4]])
    # Equal sparse matrices differ in zero stored entries (nnz == 0).
    assert_equal((got - expected).nnz, 0)

    # numpy.matrix
    X = np.matrix([[1,2],[2,3],[3,4]])
    got = testee.add_bias(X)
    expected = np.matrix([[1,1,2],[1,2,3],[1,3,4]])
    assert_true(np.array_equal(got, expected))
| [
"[email protected]"
] | |
af45066f57cba7e2d31be99095220cd6aaec789a | 353e4113d9763ef04ed49de02cf16e4a25a27aed | /mysite/ads/forms.py | c1636aae456e52f70a7726abd0e06cdefe0e089d | [] | no_license | niveditaprity/Django-for-Everybody | 31ea1093b6f09a77769f05e17ae5ffa95ba449a2 | a9ce3b5a584a32790342f2c4ff53676b3112a32d | refs/heads/master | 2022-12-23T20:23:05.194503 | 2020-09-28T11:32:56 | 2020-09-28T11:32:56 | 299,275,681 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,258 | py | from django import forms
from ads.models import Ad
from django.core.files.uploadedfile import InMemoryUploadedFile
from ads.humanize import naturalsize
# Create the form class.
class CreateForm(forms.ModelForm):
    """ModelForm for Ad that stores the uploaded picture as raw bytes.

    The 2 MB size limit is enforced in clean(); save() copies the uploaded
    file's bytes and content type onto the model instance.
    """
    max_upload_limit = 2 * 1024 * 1024
    max_upload_limit_text = naturalsize(max_upload_limit)

    # Call this 'picture' so it gets copied from the form to the in-memory model
    # It will not be the "bytes", it will be the "InMemoryUploadedFile"
    # because we need to pull out things like content_type
    picture = forms.FileField(required=False, label='File to Upload <= '+max_upload_limit_text)
    upload_field_name = 'picture'

    # Hint: this will need to be changed for use in the ads application :)
    class Meta:
        model = Ad
        fields = ['title', 'text', 'price', 'picture']  # Picture is manual

    # Validate the size of the picture
    def clean(self) :
        cleaned_data = super().clean()
        ad = cleaned_data.get('picture')
        if ad is None : return
        if len(ad) > self.max_upload_limit:
            self.add_error('picture', "File must be < "+self.max_upload_limit_text+" bytes")

    # Convert uploaded File object to a picture
    def save(self, commit=True) :
        instance = super(CreateForm, self).save(commit=False)

        # We only need to adjust picture if it is a freshly uploaded file
        f = instance.picture  # Make a copy
        if isinstance(f, InMemoryUploadedFile):  # Extract data from the form to the model
            bytearr = f.read();
            instance.content_type = f.content_type
            instance.picture = bytearr  # Overwrite with the actual image data

        if commit:
            instance.save()
        return instance
class CommentForm(forms.Form):
    """Plain form for a 3-500 character comment (whitespace stripped)."""
    comment = forms.CharField(required=True, max_length=500, min_length=3, strip=True)
# https://docs.djangoproject.com/en/3.0/topics/http/file-uploads/
# https://stackoverflow.com/questions/2472422/django-file-upload-size-limit
# https://stackoverflow.com/questions/32007311/how-to-change-data-in-django-modelform
# https://docs.djangoproject.com/en/3.0/ref/forms/validation/#cleaning-and-validating-fields-that-depend-on-each-other | [
"niveditaprity@gmail"
] | niveditaprity@gmail |
Subsets and Splits