| column | type |
| --- | --- |
| blob_id | string (length 40) |
| directory_id | string (length 40) |
| path | string (length 5-283) |
| content_id | string (length 40) |
| detected_licenses | sequence (length 0-41) |
| license_type | string (2 classes) |
| repo_name | string (length 7-96) |
| snapshot_id | string (length 40) |
| revision_id | string (length 40) |
| branch_name | string (58 classes) |
| visit_date | timestamp[us] |
| revision_date | timestamp[us] |
| committer_date | timestamp[us] |
| github_id | int64 (12.7k-662M, nullable ⌀) |
| star_events_count | int64 (0-35.5k) |
| fork_events_count | int64 (0-20.6k) |
| gha_license_id | string (11 classes) |
| gha_event_created_at | timestamp[us] |
| gha_created_at | timestamp[us] |
| gha_language | string (43 classes) |
| src_encoding | string (9 classes) |
| language | string (1 class) |
| is_vendor | bool (2 classes) |
| is_generated | bool (2 classes) |
| length_bytes | int64 (7-5.88M) |
| extension | string (30 classes) |
| content | string (length 7-5.88M) |
| authors | sequence (length 1) |
| author | string (length 0-73) |
2203668db377a10685419a408b6e9a15f41782ba | a8927d693f885e202021379da0244d5991fdcba5 | /classe4/test_devices.py | 2df110cbbaf95cc28efbc41158c59fd8e530b16c | [] | no_license | HassanHbar/pynet_ansible | 28c007193c612752b212763c3f38d0f5c024dc3b | a0cc9cd696bf1e9d0448876d39022da1140a55be | refs/heads/master | 2020-04-15T13:37:21.042826 | 2016-08-31T10:32:43 | 2016-08-31T10:32:43 | 58,642,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py |
pynet1 = {
'device_type': 'cisco_ios',
'username': 'pyclass',
}
pynet2 = {
'device_type': 'cisco_ios',
'username': 'pyclass',
'secret': '',
}
juniper_srx = {
'device_type': 'juniper',
'username': 'pyclass',
}
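# Hedged usage sketch (not in the original file): dicts shaped like these are
# netmiko-style connection parameter sets; they would typically be completed
# with 'ip' and 'password' fields and passed to ConnectHandler. The concrete
# values below are assumptions:
#
#     from netmiko import ConnectHandler
#     pynet1.update(ip='10.0.0.1', password='secret')
#     device = ConnectHandler(**pynet1)
#     print(device.find_prompt())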
cb09beba8a6df9801c13f0845c0c182a4d71c943 | 5c729eb7b7d294482f5c614e99205f12047006bb | /Modulo_4_Ciencia da Computacao/Bloco 36: Algoritmos e Estrutura de Dados/dia_3: Algoritmos de ordenação e busca/conteudo/merge-sort.py | 8ff4b733daa4ab78e1efcea7ac8912dd55997841 | [] | no_license | marcelosoliveira/trybe-exercises | 132c42304cdaf9a82d3d1b030fdf12a456a096da | 025d81eba6e14dc2abf1600f4a271dde06d593d9 | refs/heads/master | 2023-07-17T20:35:30.013110 | 2021-08-24T21:38:22 | 2021-08-24T21:38:22 | 288,297,968 | 1 | 0 | null | 2020-08-18T22:23:49 | 2020-08-17T22:19:09 | Shell | UTF-8 | Python | false | false | 2,554 | py | # # Suppose we start with the unsorted numbers
# - collection = 7 5 1 2 8 4 6 3
# # We split our array into smaller portions
# - 7 5 1 2   8 4 6 3
# # and we keep doing this,
# # down to the smallest possible portion (1)
# - 7 5   1 2   8 4   6 3
# # down to the smallest possible portion (1)
# - 7   5   1   2   8   4   6   3
# # With the divide step done, we move on to the conquer step.
# # We regroup the partitions, but in sorted order
# - 5 7   1 2   8 4   6 3
# - 5 7   1 2   8 4   6 3
# - 5 7   1 2   4 8   6 3
# - 5 7   1 2   4 8   3 6
# # We keep regrouping
# - 1 2 5 7   4 8   3 6
# - 1 2 5 7   3 4 6 8
# # Finally we merge all the elements together
# - 1 2 3 4 5 6 7 8
def merge_sort(array):
    # base case: we have already reached the smallest portion (1)
    if len(array) <= 1:
        # return the array
        return array
    # compute the pivot: the index at which the array will be partitioned
    # (here, the middle)
    mid = len(array) // 2
    # for each half of the array,
    # call the merge_sort function recursively
    left, right = merge_sort(array[:mid]), merge_sort(array[mid:])
    # merge the parts that were divided
    return merge(left, right, array.copy())
# helper function that performs the merge of the two arrays
def merge(left, right, merged):
    left_cursor, right_cursor = 0, 0
    # while neither of the parts has been fully traversed
    while left_cursor < len(left) and right_cursor < len(right):
        # compare the two items and insert the smaller one into the merged array
        if left[left_cursor] <= right[right_cursor]:
            merged[left_cursor + right_cursor] = left[left_cursor]
            left_cursor += 1
        else:
            merged[left_cursor + right_cursor] = right[right_cursor]
            right_cursor += 1
    # the loop above inserts the elements in sorted order;
    # when one of the parts runs out, we must guarantee
    # that the other is copied in full into the merged array
    # iterate over the elements remaining in the "left" partition,
    # inserting them into the merged array
    for left_cursor in range(left_cursor, len(left)):
        merged[left_cursor + right_cursor] = left[left_cursor]
    # iterate over the elements remaining in the "right" partition,
    # inserting them into the merged array
for right_cursor in range(right_cursor, len(right)):
merged[left_cursor + right_cursor] = right[right_cursor]
return merged
print(merge_sort([100, 4, 6, 33, 56, 67]))
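# Quick sanity check on the numbers from the walkthrough above
# (added for illustration; not part of the original exercise):
assert merge_sort([7, 5, 1, 2, 8, 4, 6, 3]) == [1, 2, 3, 4, 5, 6, 7, 8]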
b515d720289ec7fabc547f59cebd2a8436f4be7e | 7f8d6b47c41d7ca312c94779230dd00101eb305d | /TaxiFareModel/encoders.py | 0c35c630f670aaa069a215164d766b10ded70e75 | [] | no_license | pierrevermeulen/TaxiFareModel | ee3a7cab334b4c247e690615d76613bfe5d585ad | f871e85fdcacefd6831f3a17779370157e64a6f5 | refs/heads/master | 2023-01-21T13:26:17.978691 | 2020-12-03T20:20:05 | 2020-12-03T20:20:05 | 317,551,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,530 | py | import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
import pygeohash as gh
from TaxiFareModel.utils import haversine_vectorized, minkowski_distance
DIST_ARGS = dict(start_lat="pickup_latitude",
start_lon="pickup_longitude",
end_lat="dropoff_latitude",
end_lon="dropoff_longitude")
# Implement DistanceTransformer and TimeFeaturesEncoder
class TimeFeaturesEncoder(BaseEstimator, TransformerMixin):
def __init__(self, time_column, time_zone_name='America/New_York'):
self.time_column = time_column
self.time_zone_name = time_zone_name
def transform(self, X, y=None):
assert isinstance(X, pd.DataFrame)
X.index = pd.to_datetime(X[self.time_column])
X.index = X.index.tz_convert(self.time_zone_name)
X["dow"] = X.index.weekday
X["hour"] = X.index.hour
X["month"] = X.index.month
X["year"] = X.index.year
return X[["dow", "hour", "month", "year"]].reset_index(drop=True)
def fit(self, X, y=None):
return self
class AddGeohash(BaseEstimator, TransformerMixin):
def __init__(self, precision=6):
self.precision = precision
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
assert isinstance(X, pd.DataFrame)
X['geohash_pickup'] = X.apply(
lambda x: gh.encode(x.pickup_latitude, x.pickup_longitude, precision=self.precision), axis=1)
X['geohash_dropoff'] = X.apply(
lambda x: gh.encode(x.dropoff_latitude, x.dropoff_longitude, precision=self.precision), axis=1)
return X[['geohash_pickup', 'geohash_dropoff']]
class DistanceTransformer(BaseEstimator, TransformerMixin):
def __init__(self, distance_type="euclidian", **kwargs):
self.distance_type = distance_type
def transform(self, X, y=None):
assert isinstance(X, pd.DataFrame)
if self.distance_type == "haversine":
X["distance"] = haversine_vectorized(X, **DIST_ARGS)
if self.distance_type == "euclidian":
X["distance"] = minkowski_distance(X, p=2, **DIST_ARGS)
if self.distance_type == "manhattan":
X["distance"] = minkowski_distance(X, p=1, **DIST_ARGS)
return X[["distance"]]
def fit(self, X, y=None):
return self
class DistanceToCenter(BaseEstimator, TransformerMixin):
def __init__(self, verbose=False):
self.verbose = verbose
def transform(self, X, y=None):
nyc_center = (40.7141667, -74.0063889)
X["nyc_lat"], X["nyc_lng"] = nyc_center[0], nyc_center[1]
args_pickup = dict(start_lat="nyc_lat", start_lon="nyc_lng",
end_lat="pickup_latitude", end_lon="pickup_longitude")
args_dropoff = dict(start_lat="nyc_lat", start_lon="nyc_lng",
end_lat="dropoff_latitude", end_lon="dropoff_longitude")
X['pickup_distance_to_center'] = haversine_vectorized(X, **args_pickup)
X['dropoff_distance_to_center'] = haversine_vectorized(X, **args_dropoff)
return X[["pickup_distance_to_center", "dropoff_distance_to_center"]]
def fit(self, X, y=None):
return self
class DistanceTojfk(BaseEstimator, TransformerMixin):
def __init__(self, verbose=False):
self.verbose = verbose
def transform(self, X, y=None):
jfk_center = (40.6441666667, -73.7822222222)
X["jfk_lat"], X["jfk_lng"] = jfk_center[0], jfk_center[1]
args_pickup = dict(start_lat="jfk_lat", start_lon="jfk_lng",
end_lat="pickup_latitude", end_lon="pickup_longitude")
args_dropoff = dict(start_lat="jfk_lat", start_lon="jfk_lng",
end_lat="dropoff_latitude", end_lon="dropoff_longitude")
X['pickup_distance_to_jfk'] = haversine_vectorized(X, **args_pickup)
X['dropoff_distance_to_jfk'] = haversine_vectorized(X, **args_dropoff)
X['from_to_airport'] = (np.logical_or(X['pickup_distance_to_jfk']<2,\
X['dropoff_distance_to_jfk']<2))*1
#return X[["pickup_distance_to_center", "dropoff_distance_to_center",'from_to_airport']]
return X[['from_to_airport']]
def fit(self, X, y=None):
return self
class Direction(BaseEstimator, TransformerMixin):
def __init__(self,
start_lat="pickup_latitude",
start_lon="pickup_longitude",
end_lat="dropoff_latitude",
end_lon="dropoff_longitude"):
self.start_lat = start_lat
self.start_lon = start_lon
self.end_lat = end_lat
self.end_lon = end_lon
def transform(self, X, y=None):
def calculate_direction(d_lon, d_lat):
result = np.zeros(len(d_lon))
l = np.sqrt(d_lon ** 2 + d_lat ** 2)
result[d_lon > 0] = (180 / np.pi) * np.arcsin(d_lat[d_lon > 0] / l[d_lon > 0])
idx = (d_lon < 0) & (d_lat > 0)
result[idx] = 180 - (180 / np.pi) * np.arcsin(d_lat[idx] / l[idx])
idx = (d_lon < 0) & (d_lat < 0)
result[idx] = -180 - (180 / np.pi) * np.arcsin(d_lat[idx] / l[idx])
return result
X['delta_lon'] = X[self.start_lon] - X[self.end_lon]
X['delta_lat'] = X[self.start_lat] - X[self.end_lat]
X['direction'] = calculate_direction(X.delta_lon, X.delta_lat)
return X[["delta_lon", "delta_lat", "direction"]]
def fit(self, X, y=None):
return self
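# Minimal usage sketch (assumed wiring, not part of this module): each encoder
# takes the raw dataframe and returns a feature block, so on a taxi-fare
# dataframe `df` with a 'pickup_datetime' column and the four coordinate
# columns named in DIST_ARGS one could write:
#
#     time_feats = TimeFeaturesEncoder(time_column='pickup_datetime').fit_transform(df)
#     dist_feats = DistanceTransformer(distance_type='haversine').fit_transform(df)
#     dir_feats = Direction().fit_transform(df)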
8ec54b6d8b7c076822df3455b18119ff1f057e01 | 13058b4d2b4a8d02f529e1a06d3080fc07afaf40 | /orderportal/scripts/messenger.py | 945cad0f1ee6cae11e29b238c24f92335ed46c03 | [
"MIT"
] | permissive | ewels/OrderPortal | c23f2c2c76235373d2cdfd20a12cfd799f575ab4 | 4c15981558e84389eda8136a4e56993ce1c7d659 | refs/heads/master | 2021-01-15T09:46:34.414406 | 2015-12-11T14:59:33 | 2015-12-11T14:59:33 | 29,193,166 | 0 | 0 | null | 2015-12-11T14:59:35 | 2015-01-13T14:19:56 | Python | UTF-8 | Python | false | false | 11,247 | py | """OrderPortal: Send messages to users about recent events from log records.
The logic is that it gets the timestamp for the latest message, and looks
through all log entries since that timestamp for new events that require
messages to be sent.
The messaging rules and texts are configured in the files defined by
settings ACCOUNT_MESSAGES_FILENAME and ORDER_MESSAGES_FILENAME.
This script is to be run as a cron job.
"""
from __future__ import print_function, absolute_import
import email.mime.text
import smtplib
import urllib
import couchdb
import yaml
from orderportal import constants
from orderportal import saver
from orderportal import settings
from orderportal import utils
class MessageSaver(saver.Saver):
doctype = constants.MESSAGE
def log(self):
"No log entry for message; its creation is a log in itself."
pass
class Messenger(object):
"Process log records and send messages for interesting events."
def __init__(self, db, verbose=False, dry_run=False):
self.db = db
self.verbose = verbose
self.dry_run = dry_run
if self.verbose:
print('Messenger', utils.timestamp())
try:
with open(settings['ACCOUNT_MESSAGES_FILENAME']) as infile:
self.account_messages = yaml.safe_load(infile)
except (IOError, KeyError):
self.account_messages = {}
try:
with open(settings['ORDER_MESSAGES_FILENAME']) as infile:
self.order_messages = yaml.safe_load(infile)
except (IOError, KeyError):
self.order_messages = {}
@property
def server(self):
try:
return self._server
except AttributeError:
host = settings['EMAIL']['HOST']
try:
port = settings['EMAIL']['PORT']
except KeyError:
self._server = smtplib.SMTP(host)
else:
self._server = smtplib.SMTP(host, port=port)
if settings['EMAIL'].get('TLS'):
self._server.starttls()
try:
user = settings['EMAIL']['USER']
password = settings['EMAIL']['PASSWORD']
except KeyError:
pass
else:
self._server.login(user, password)
return self._server
def __del__(self):
try:
self._server.quit()
except AttributeError:
pass
def absolute_url(self, *args, **query):
path = '/'
if args:
path += '/'.join(args)
if query:
path += '?' + urllib.urlencode(query)
return settings['BASE_URL'].rstrip('/') + path
def process(self):
"""Go through unprocessed log entries for items to send messages about.
Currently, account and order logs are checked.
"""
view = self.db.view('message/modified', descending=True, limit=1)
messages = list(view)
try:
endkey = messages[0].key
except IndexError:
endkey = None
if self.verbose:
print('latest message', endkey)
view = self.db.view('log/modified',
include_docs=True,
descending=True,
startkey=constants.CEILING,
endkey=endkey)
for row in view:
if self.verbose:
print('log', row.id)
if row.value == constants.ACCOUNT:
self.process_account(row.doc)
elif row.value == constants.ORDER:
self.process_order(row.doc)
def process_account(self, logdoc):
"Check for relevant event in account log entry and send message(s)."
        # Account was created, is pending.
        if logdoc['changed'].get('status') == constants.PENDING:
            self.process_account_pending(logdoc)
# Account has been enabled.
elif logdoc['changed'].get('status') == constants.ENABLED:
self.process_account_enabled(logdoc)
# Account password has been reset; must be checked after 'enabled'!
elif logdoc['changed'].get('code'):
self.process_account_reset(logdoc)
def get_account_params(self, account, **kwargs):
"Get the template parameters for the account message."
result = dict(site=settings['SITE_NAME'],
support=settings.get('SITE_SUPPORT', '[not defined]'),
account=account['email'],
url=self.absolute_url('account', account['email']))
result.update(kwargs)
return result
def process_account_pending(self, logdoc):
"Account was created, is pending. Tell the admins to enable it."
message = self.account_messages.get('pending')
if not message:
if self.verbose: print('No message for account pending.')
return
try:
account = self.db[logdoc['entity']]
except couchdb.ResourceNotFound:
return
params = self.get_account_params(account)
self.send_email(self.get_admins(), message, params)
def process_account_enabled(self, logdoc):
"""Account was enabled. Send URL and code for setting password."""
message = self.account_messages.get('enabled')
if not message:
if self.verbose: print('No message for account enabled.')
return
try:
account = self.db[logdoc['entity']]
except couchdb.ResourceNotFound:
return
params = self.get_account_params(
account,
password=self.absolute_url('password'),
password_code=self.absolute_url('password',
email=account['email'],
code=account['code']),
code=account['code'])
self.send_email([account['owner']], message, params)
def process_account_reset(self, logdoc):
"Account password was reset. Send URL and code for setting password."
message = self.account_messages.get('reset')
if not message:
if self.verbose: print('No message for account reset.')
return
try:
account = self.db[logdoc['entity']]
except couchdb.ResourceNotFound:
return
params = self.get_account_params(
account,
password=self.absolute_url('password'),
password_code=self.absolute_url('password',
email=account['email'],
code=account['code']),
code=account['code'])
self.send_email([account['owner']], message, params)
def process_order(self, logdoc):
"Check for relevant event in order log entry and send message(s)."
status = logdoc['changed'].get('status')
message = self.order_messages.get(status)
if not message:
if self.verbose:
print("No message for order status {0}.".format(status))
return
try:
order = self.db[logdoc['entity']]
except couchdb.ResourceNotFound:
return
owner = self.get_account(order['owner'])
# Owner may have disappeared (OK, OK, unlikely)
if not owner: return
        params = self.get_order_params(order, owner)
# Send to administrators, if so configured
for role in message['recipients']:
if role == 'admin':
self.send_email(self.get_admins(), message, params)
break
# Send to owner and group, if so configured
recipients = set()
for role in message['recipients']:
if role == 'owner':
recipients.add(owner['email'])
elif role == 'group':
recipients.update(self.get_colleagues(owner['email']))
self.send_email(list(recipients), message, params)
    def get_order_params(self, order, owner, **kwargs):
        "Get the template parameters for the order message."
        result = dict(site=settings['SITE_NAME'],
                      support=settings.get('SITE_SUPPORT', '[not defined]'),
                      owner=owner['email'],
order=order.get('title') or order['_id'],
url=self.absolute_url('order', order['_id']))
result.update(kwargs)
return result
def send_email(self, recipients, message, params):
"Actually send the message as email; not if the dry_run flag is set."
if not recipients: return
subject = message['subject'].format(**params)
text = message['text'].format(**params)
sender = settings['MESSAGE_SENDER_EMAIL']
mail = email.mime.text.MIMEText(text)
mail['Subject'] = subject
mail['From'] = sender
        # Assigning 'To' in a loop would append duplicate headers; join instead.
        mail['To'] = ', '.join(recipients)
if self.dry_run:
print(mail.as_string())
else:
self.server.sendmail(sender, recipients, mail.as_string())
with MessageSaver(db=self.db) as saver:
saver['sender'] = sender
saver['recipients'] = recipients
saver['subject'] = subject
saver['text'] = text
saver['type'] = 'email'
if self.verbose:
print("sent email '{0}' to {1}".format(subject,
', '.join(recipients)))
def get_account(self, email):
"Get the account document for the email."
view = self.db.view('account/email', include_docs=True)
try:
return [r.doc for r in view[email]][0]
except IndexError:
return None
def get_admins(self):
"Get the list of enabled admin emails."
view = self.db.view('account/role', include_docs=True)
admins = [r.doc for r in view[constants.ADMIN]]
return [a['email'] for a in admins if a['status'] == constants.ENABLED]
def get_colleagues(self, email):
"Get list of emails for accounts in same groups as the given email."
colleagues = set()
for row in self.db.view('group/member', include_docs=True, key=email):
for member in row.doc['members']:
account = self.get_account(member)
if account['status'] == constants.ENABLED:
colleagues.add(account['email'])
return list(colleagues)
def get_args():
parser = utils.get_command_line_parser(description=
'Send messages for recent log record events.')
parser.add_option('-d', '--dry-run',
action='store_true', dest='dry_run', default=False,
help='do not send messages; for debug')
return parser.parse_args()
if __name__ == '__main__':
(options, args) = get_args()
utils.load_settings(filepath=options.settings,
verbose=options.verbose)
messenger = Messenger(utils.get_db(),
verbose=options.verbose,
dry_run=options.dry_run)
messenger.process()
9ced7efaf19574dd504e7070e03dd19af25de5e0 | 230bc6445bac6aa009284eb7bbd0665bba0ef3c6 | /run.py | a083ada2e3eb1038e298fc9955f43fd45687737c | [] | no_license | bioinfo-fr/pythonfaitlanumba | a8f7373f987ae17ddf14b19d2186959dd659f726 | 13fd5a5183d8a160af1494e1d618e914267d787e | refs/heads/master | 2021-01-20T04:32:29.791015 | 2014-10-30T15:47:52 | 2014-10-30T15:47:52 | 25,976,666 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,322 | py | # coding: utf-8
import random
import timeit
import matplotlib.pyplot as plt
from numba import autojit
from taille_cython import cython_test, cython_test_nocdef
####################
## TEST FUNCTIONS ##
####################
### TRIPLE FOR
def triplefor(taille):
count = 0
for i in xrange(taille):
for j in xrange(taille):
for k in xrange(taille):
count += i * j + k
return count / taille
tripleforN = autojit(triplefor)
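# (autojit is the legacy Numba API; on modern Numba this would be
# numba.jit(triplefor) instead.)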
#######################
## TIMINGS FUNCTIONS ##
#######################
def timings(nb_tries, start=1, stop=200, step=10):
values = range(start, stop, step)
setup = {
'triplefor': ('triplefor(%s)', 'from __main__ import triplefor'),
'tripleforN': ('tripleforN(%s)', 'from __main__ import tripleforN'),
'tripleforC': ('cython_test(%s)', 'from __main__ import cython_test'),
'tripleforCnocdef': ('cython_test_nocdef(%s)', 'from __main__ import cython_test_nocdef'),
}
def do(setup):
result = {}
for k, v in setup.iteritems():
print k, v
result[k] = [timeit.timeit(v[0] % x, number=nb_tries, setup=v[1]) for x in values]
return result
return do(setup), values
####################
## PLOT FUNCTIONS ##
####################
def plot(timings, title, ranked_labels, labels, orders_n):
plt.rcParams.update({'font.size': 12})
plt.figure(figsize=(11, 10))
for lb in ranked_labels:
plt.plot(orders_n, timings[lb], alpha=0.5, label=labels[lb], marker='o', lw=3)
plt.xlabel('sample size n (items in the list)')
plt.ylabel('time per computation')
plt.xlim([min(orders_n) / 10, max(orders_n) * 10])
plt.legend(loc=2)
plt.grid()
plt.xscale('log')
plt.yscale('log')
plt.title(title)
plt.savefig('myfilename.png')
##########
## MAIN ##
##########
if __name__ == '__main__':
    N = 5  # number of trials
    t, values = timings(N, start=10, stop=2000, step=10)
labels = {
'triplefor': 'For loop',
'tripleforN': 'For loop (numba)',
'tripleforC': 'For loop (cython)',
'tripleforCnocdef': 'For loop (cython_nocdef)',
}
plot(t, 'Title', [
#'triplefor',
'tripleforN', 'tripleforC',
#'tripleforCnocdef'
], labels, values)
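The two `cython_test` functions are imported from a separate `taille_cython` extension module whose source is not included here. A plausible sketch of that `.pyx`, inferred from the import names and the pure-Python `triplefor` above (an assumption, not the actual source):

```cython
# taille_cython.pyx -- assumed companion module, mirroring triplefor().
def cython_test(int taille):
    # typed counters let Cython compile the loops down to C speed
    cdef long long count = 0
    cdef int i, j, k
    for i in range(taille):
        for j in range(taille):
            for k in range(taille):
                count += i * j + k
    return count / taille

def cython_test_nocdef(taille):
    # the same loop without cdef declarations, for comparison
    count = 0
    for i in range(taille):
        for j in range(taille):
            for k in range(taille):
                count += i * j + k
    return count / taille
```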
3bc05a9a9df7031514ea03d8d64ed1360cc8e2e9 | 789a8d480b2b1accb30a01e5c7e8eb8698418c19 | /setup.py | c61a6ed7eeedbf172b4801ac92118e500c1e7290 | [
"MIT"
] | permissive | wuyeguo/qqbot | edeb0b126ef228ad8a7539e652f12e3c8eb80d47 | 77fd6c927e9700598f4ded449d370244c8a82cb6 | refs/heads/master | 2021-05-11T05:21:36.455917 | 2018-01-15T11:59:38 | 2018-01-15T11:59:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 799 | py | # -*- coding: utf-8 -*-
from setuptools import setup
version = 'v2.3.9'
setup(
name = 'qqbot',
version = version,
packages = ['qqbot', 'qqbot.qcontactdb', 'qqbot.plugins'],
entry_points = {
'console_scripts': [
'qqbot = qqbot:RunBot',
'qq = qqbot:QTerm'
]
},
install_requires = ['requests', 'certifi', 'apscheduler'],
description = "QQBot: A conversation robot base on Tencent's SmartQQ",
author = 'pandolia' ,
author_email = '[email protected]',
url = 'https://github.com/pandolia/qqbot/',
download_url = 'https://github.com/pandolia/qqbot/archive/%s.tar.gz' % version,
keywords = ['QQBot', 'conversation robot', 'tencent', 'qq',
'web', 'network', 'python', 'http'],
classifiers = [],
)
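After installation, the `entry_points` above make setuptools generate two console commands: `qqbot` (wired to `qqbot:RunBot`) and `qq` (wired to `qqbot:QTerm`).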
e376c31e4f81b43df7a1f8bd768f7d4051f9029b | 0eb0fc983eab95e06d412249fbf3d0f52727e34c | /party.py | b9616e34bc1637e58005c53bb2a1317ec30c8fec | [] | no_license | groovycol/testing-py | 0b1f79559ed8742aaad5608d7ddc717c429cf308 | 0cd38c04f44709aed02887e012458ac93381edb1 | refs/heads/master | 2016-09-12T20:47:59.648040 | 2016-04-22T19:54:12 | 2016-04-22T19:54:12 | 56,881,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,781 | py | """Flask site for Balloonicorn's Party."""
from flask import Flask, session, render_template, request, flash, redirect
from flask_debugtoolbar import DebugToolbarExtension
app = Flask(__name__)
app.secret_key = "SECRETSECRETSECRET"
def is_mel(name, email):
"""Is this user Mel?
>>> is_mel('Mel Melitpolski', '[email protected]')
True
>>> is_mel('Colleen Michelle', '[email protected]')
False
>>> is_mel('Mel Melitpolski', '[email protected]')
True
>>> is_mel('Runtime Rabbit', '[email protected]')
True
"""
return name == "Mel Melitpolski" or email == "[email protected]"
def most_and_least_common_type(treats):
"""Given list of treats, return {most, least} common types.
>>> treats = [{'type': 'dessert'}, {'type': 'dessert'}, {'type': 'drink'}, {'type': 'drink'}, {'type': 'appetizer'}, {'type': 'drink'}]
>>> most_and_least_common_type(treats)
('drink', 'appetizer')
>>> treats = [{'type': 'dessert'}, {'type': 'drink'}, {'type': 'appetizer'}]
>>> most_and_least_common_type(treats)
('dessert', 'dessert')
>>> treats = [{'type': 'appetizer'}]
>>> most_and_least_common_type(treats)
('appetizer', 'appetizer')
>>> most_and_least_common_type([])
(None, None)
"""
types = {}
# Count number of each type and store as value in `types`
for treat in treats:
types[treat['type']] = types.get(treat['type'], 0) + 1
most_count = most_type = None
least_count = least_type = None
# Find most, least common by comparing each item
for ttype, count in types.items():
if most_count is None or count > most_count:
most_count = count
most_type = ttype
if least_count is None or count < least_count:
least_count = count
least_type = ttype
return (most_type, least_type)
def get_treats():
"""Get treats being brought to the party.
One day, I'll move this into a database! -- Balloonicorn
"""
return [
{'type': 'dessert',
'description': 'Chocolate mousse',
'who': 'Heather'},
{'type': 'dessert',
'description': 'Cardamom-Pear pie',
'who': 'Joel'},
{'type': 'appetizer',
'description': 'Humboldt Fog cheese',
'who': 'Meggie'},
{'type': 'dessert',
'description': 'Lemon bars',
'who': 'Cynthia'},
{'type': 'appetizer',
'description': 'Mini-enchiladas',
'who': 'David'},
{'type': 'drink',
'description': 'Sangria',
'who': 'Kari'},
{'type': 'dessert',
'description': 'Chocolate-raisin cookies',
'who': 'Denise'},
{'type': 'dessert',
'description': 'Brownies',
'who': 'Lavinia'}
]
@app.route("/")
def homepage():
"""Show homepage."""
return render_template("homepage.html")
@app.route("/treats")
def show_treats():
"""Show treats people are bringing."""
treats = get_treats()
most, least = most_and_least_common_type(get_treats())
return render_template("treats.html",
treats=treats,
most=most,
least=least)
@app.route("/rsvp", methods=['POST'])
def rsvp():
"""Register for the party."""
name = request.form.get("name")
email = request.form.get("email")
if not is_mel(name, email):
session['rsvp'] = True
flash("Yay!")
return redirect("/")
else:
flash("Sorry, Mel. This is kind of awkward.")
return redirect("/")
if __name__ == "__main__":
app.debug = True
app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False
DebugToolbarExtension(app)
app.run()
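The doctests embedded in `is_mel` and `most_and_least_common_type` above can be exercised with the standard runner, e.g. `python -m doctest party.py -v`.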
37f1f7cf6b1d633e238e4303987ac629fc97486e | e4de33a639446e329e058a67e0418d97f5c0d1a9 | /venv/Scripts/pip3.7-script.py | a2f0581dbbe1e0da6ea7535ae01c4550378892d8 | [] | no_license | bopopescu/LyricsSearchEngine_python_API | 1fd761f8a36758c142a10d9d39c235d235f5a989 | a321a92cf9874e7cf84df58f0265d8a83b139a6c | refs/heads/master | 2022-11-22T19:01:39.712625 | 2020-02-26T15:25:29 | 2020-02-26T15:25:29 | 281,265,993 | 0 | 0 | null | 2020-07-21T01:36:32 | 2020-07-21T01:36:31 | null | UTF-8 | Python | false | false | 420 | py | #!D:\PyCharm\PyCharm_workspace\0208exercise\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
)
24fb13323f0a0122a05a534bc09c6891d1ff7451 | f505bc6cb86d783569b3c3dcfc8dc6047cfe7ebe | /Loading model.py | 3a66d947872ffe298cfd9c0d902d35ab225265c2 | [] | no_license | se7ven012/Keras | 58627d6863e255865bfa3da80d03170034452efa | 0fa0880fa3c5bd655ed9200deb7daf7fff8ac72d | refs/heads/master | 2020-07-25T19:40:55.266541 | 2019-09-14T07:33:27 | 2019-09-14T07:33:27 | 208,405,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,013 | py | #%%
import keras
import numpy as np
import matplotlib.pyplot as plt
# Sequential: the simple, layer-by-layer way to define a network
from keras.models import Sequential
# Dense: fully-connected layer
from keras.layers import Dense,Activation
# SGD optimizer
from keras.optimizers import SGD
from keras.models import load_model
#%%
# generate 200 evenly spaced sample points
x_data = np.linspace(-0.5,0.5,200)
noise = np.random.normal(0,0.02,x_data.shape)
y_data = np.square(x_data) + noise
# display the random points
plt.scatter(x_data,y_data)
plt.show()
#%%
# load the saved model
model = load_model('test.h5')
# print weight, bias
W,b = model.layers[0].get_weights()
print('W:',W,'b:',b)
#predict y_pred
y_pred = model.predict(x_data)
#display random dots
plt.scatter(x_data,y_data)
#display prediction result
plt.plot(x_data,y_pred)
plt.show()
#save weights
model.save_weights('weights.h5')
model.load_weights('weights.h5')
# save the network structure as JSON, then reload it
from keras.models import model_from_json
json_string = model.to_json()
model=model_from_json(json_string)
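# note: model_from_json restores only the architecture; weights must be
# reloaded separately (e.g. with model.load_weights) before predicting.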
#%%
print(json_string)
#%%
380aeccf87ff7ee654c887306c7c69d1c1b96697 | 8f24e443e42315a81028b648e753c50967c51c78 | /python/ray/data/datasource/parquet_datasource.py | 674410a1c29b24325030d270b2eab7d5f232104d | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | simon-mo/ray | d07efdada8d05c6e10417f96e8dfc35f9ad33397 | 1e42e6cd15e2fb96c217cba8484e59ed0ef4b0c8 | refs/heads/master | 2023-03-06T00:09:35.758834 | 2022-12-23T18:46:48 | 2022-12-23T18:46:48 | 122,156,396 | 4 | 2 | Apache-2.0 | 2023-03-04T08:56:56 | 2018-02-20T04:47:06 | Python | UTF-8 | Python | false | false | 18,880 | py | import itertools
import logging
from typing import TYPE_CHECKING, Callable, Iterator, List, Optional, Union
import numpy as np
from ray.data._internal.output_buffer import BlockOutputBuffer
from ray.data._internal.progress_bar import ProgressBar
from ray.data._internal.remote_fn import cached_remote_fn
from ray.data._internal.util import _check_pyarrow_version
from ray.data.block import Block
from ray.data.context import DatasetContext
from ray.data.datasource.datasource import Reader, ReadTask
from ray.data.datasource.file_based_datasource import _resolve_paths_and_filesystem
from ray.data.datasource.file_meta_provider import (
DefaultParquetMetadataProvider,
ParquetMetadataProvider,
_handle_read_os_error,
)
from ray.data.datasource.parquet_base_datasource import ParquetBaseDatasource
from ray.types import ObjectRef
from ray.util.annotations import PublicAPI
import ray.cloudpickle as cloudpickle
if TYPE_CHECKING:
import pyarrow
from pyarrow.dataset import ParquetFileFragment
logger = logging.getLogger(__name__)
PIECES_PER_META_FETCH = 6
PARALLELIZE_META_FETCH_THRESHOLD = 24
# The number of rows to read per batch. This is sized to generate 10MiB batches
# for rows about 1KiB in size.
PARQUET_READER_ROW_BATCH_SIZE = 100000
FILE_READING_RETRY = 8
# The default size multiplier for reading Parquet data source in Arrow.
# Parquet data format is encoded with various encoding techniques (such as
# dictionary, RLE, delta), so Arrow in-memory representation uses much more memory
# compared to Parquet encoded representation. Parquet file statistics only record
# encoded (i.e. uncompressed) data size information.
#
# To estimate real-time in-memory data size, Datasets will try to estimate the correct
# inflation ratio from Parquet to Arrow, using this constant as the default value for
# safety. See https://github.com/ray-project/ray/pull/26516 for more context.
PARQUET_ENCODING_RATIO_ESTIMATE_DEFAULT = 5
# The lower bound size to estimate Parquet encoding ratio.
PARQUET_ENCODING_RATIO_ESTIMATE_LOWER_BOUND = 2
# The percentage of files (1% by default) to be sampled from the dataset to estimate
# Parquet encoding ratio.
PARQUET_ENCODING_RATIO_ESTIMATE_SAMPLING_RATIO = 0.01
# The minimal and maximal number of file samples to take from the dataset to estimate
# Parquet encoding ratio.
# This is to restrict `PARQUET_ENCODING_RATIO_ESTIMATE_SAMPLING_RATIO` within the
# proper boundary.
PARQUET_ENCODING_RATIO_ESTIMATE_MIN_NUM_SAMPLES = 2
PARQUET_ENCODING_RATIO_ESTIMATE_MAX_NUM_SAMPLES = 10
# The number of rows to read from each file for sampling. Try to keep it low to avoid
# reading too much data into memory.
PARQUET_ENCODING_RATIO_ESTIMATE_NUM_ROWS = 1024
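# For intuition: with the defaults above, a 500-file dataset samples
# clamp(500 * 0.01, 2, 10) = 5 files, reading at most 1024 rows from the
# first row group of each.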
# TODO(ekl) this is a workaround for a pyarrow serialization bug, where serializing a
# raw pyarrow file fragment causes S3 network calls.
class _SerializedPiece:
def __init__(self, frag: "ParquetFileFragment"):
self._data = cloudpickle.dumps(
(frag.format, frag.path, frag.filesystem, frag.partition_expression)
)
def deserialize(self) -> "ParquetFileFragment":
# Implicitly trigger S3 subsystem initialization by importing
# pyarrow.fs.
import pyarrow.fs # noqa: F401
(file_format, path, filesystem, partition_expression) = cloudpickle.loads(
self._data
)
return file_format.make_fragment(path, filesystem, partition_expression)
# Visible for test mocking.
def _deserialize_pieces(
serialized_pieces: List[_SerializedPiece],
) -> List["pyarrow._dataset.ParquetFileFragment"]:
return [p.deserialize() for p in serialized_pieces]
# This retry helps when the upstream datasource is not able to handle an
# overloaded read request or fails with some retriable failure.
# For example, when reading data from an HA HDFS service, HDFS might
# lose the connection for some unknown reason, especially when
# simultaneously running many hyperparameter tuning jobs
# with the ray.data parallelism setting at a high value like the default 200.
# Such connection failures can be recovered from with some waiting and retries.
def _deserialize_pieces_with_retry(
serialized_pieces: List[_SerializedPiece],
) -> List["pyarrow._dataset.ParquetFileFragment"]:
min_interval = 0
final_exception = None
for i in range(FILE_READING_RETRY):
try:
return _deserialize_pieces(serialized_pieces)
except Exception as e:
import random
import time
retry_timing = (
""
if i == FILE_READING_RETRY - 1
else (f"Retry after {min_interval} sec. ")
)
log_only_show_in_1st_retry = (
""
if i
else (
f"If earlier read attempt threw certain Exception"
f", it may or may not be an issue depends on these retries "
f"succeed or not. serialized_pieces:{serialized_pieces}"
)
)
logger.exception(
f"{i + 1}th attempt to deserialize ParquetFileFragment failed. "
f"{retry_timing}"
f"{log_only_show_in_1st_retry}"
)
if not min_interval:
# to make retries of different process hit hdfs server
# at slightly different time
min_interval = 1 + random.random()
# exponential backoff at
# 1, 2, 4, 8, 16, 32, 64
time.sleep(min_interval)
min_interval = min_interval * 2
final_exception = e
raise final_exception
@PublicAPI
class ParquetDatasource(ParquetBaseDatasource):
"""Parquet datasource, for reading and writing Parquet files.
The primary difference from ParquetBaseDatasource is that this uses
PyArrow's `ParquetDataset` abstraction for dataset reads, and thus offers
automatic Arrow dataset schema inference and row count collection at the
cost of some potential performance and/or compatibility penalties.
Examples:
>>> import ray
>>> from ray.data.datasource import ParquetDatasource
>>> source = ParquetDatasource() # doctest: +SKIP
>>> ray.data.read_datasource( # doctest: +SKIP
... source, paths="/path/to/dir").take()
[{"a": 1, "b": "foo"}, ...]
"""
def create_reader(self, **kwargs):
return _ParquetDatasourceReader(**kwargs)
class _ParquetDatasourceReader(Reader):
def __init__(
self,
paths: Union[str, List[str]],
local_uri: bool = False,
filesystem: Optional["pyarrow.fs.FileSystem"] = None,
columns: Optional[List[str]] = None,
schema: Optional[Union[type, "pyarrow.lib.Schema"]] = None,
meta_provider: ParquetMetadataProvider = DefaultParquetMetadataProvider(),
_block_udf: Optional[Callable[[Block], Block]] = None,
**reader_args,
):
_check_pyarrow_version()
import pyarrow as pa
import pyarrow.parquet as pq
paths, filesystem = _resolve_paths_and_filesystem(paths, filesystem)
if len(paths) == 1:
paths = paths[0]
self._local_scheduling = None
if local_uri:
import ray
from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy
self._local_scheduling = NodeAffinitySchedulingStrategy(
ray.get_runtime_context().get_node_id(), soft=False
)
dataset_kwargs = reader_args.pop("dataset_kwargs", {})
try:
pq_ds = pq.ParquetDataset(
paths, **dataset_kwargs, filesystem=filesystem, use_legacy_dataset=False
)
except OSError as e:
_handle_read_os_error(e, paths)
if schema is None:
schema = pq_ds.schema
if columns:
schema = pa.schema(
[schema.field(column) for column in columns], schema.metadata
)
if _block_udf is not None:
# Try to infer dataset schema by passing dummy table through UDF.
dummy_table = schema.empty_table()
try:
inferred_schema = _block_udf(dummy_table).schema
inferred_schema = inferred_schema.with_metadata(schema.metadata)
except Exception:
logger.debug(
"Failed to infer schema of dataset by passing dummy table "
"through UDF due to the following exception:",
exc_info=True,
)
inferred_schema = schema
else:
inferred_schema = schema
try:
prefetch_remote_args = {}
if self._local_scheduling:
prefetch_remote_args["scheduling_strategy"] = self._local_scheduling
self._metadata = (
meta_provider.prefetch_file_metadata(
pq_ds.pieces, **prefetch_remote_args
)
or []
)
except OSError as e:
_handle_read_os_error(e, paths)
self._pq_ds = pq_ds
self._meta_provider = meta_provider
self._inferred_schema = inferred_schema
self._block_udf = _block_udf
self._reader_args = reader_args
self._columns = columns
self._schema = schema
self._encoding_ratio = self._estimate_files_encoding_ratio()
def estimate_inmemory_data_size(self) -> Optional[int]:
total_size = 0
for file_metadata in self._metadata:
for row_group_idx in range(file_metadata.num_row_groups):
row_group_metadata = file_metadata.row_group(row_group_idx)
total_size += row_group_metadata.total_byte_size
return total_size * self._encoding_ratio
def get_read_tasks(self, parallelism: int) -> List[ReadTask]:
# NOTE: We override the base class FileBasedDatasource.get_read_tasks()
# method in order to leverage pyarrow's ParquetDataset abstraction,
# which simplifies partitioning logic. We still use
# FileBasedDatasource's write side (do_write), however.
read_tasks = []
for pieces, metadata in zip(
np.array_split(self._pq_ds.pieces, parallelism),
np.array_split(self._metadata, parallelism),
):
if len(pieces) <= 0:
continue
serialized_pieces = [_SerializedPiece(p) for p in pieces]
input_files = [p.path for p in pieces]
meta = self._meta_provider(
input_files,
self._inferred_schema,
pieces=pieces,
prefetched_metadata=metadata,
)
if meta.size_bytes is not None:
meta.size_bytes = int(meta.size_bytes * self._encoding_ratio)
block_udf, reader_args, columns, schema = (
self._block_udf,
self._reader_args,
self._columns,
self._schema,
)
read_tasks.append(
ReadTask(
lambda p=serialized_pieces: _read_pieces(
block_udf,
reader_args,
columns,
schema,
p,
),
meta,
)
)
return read_tasks
def _estimate_files_encoding_ratio(self) -> float:
"""Return an estimate of the Parquet files encoding ratio.
To avoid OOMs, it is safer to return an over-estimate than an underestimate.
"""
if not DatasetContext.get_current().decoding_size_estimation:
return PARQUET_ENCODING_RATIO_ESTIMATE_DEFAULT
# Sample a few rows from Parquet files to estimate the encoding ratio.
# Launch tasks to sample multiple files remotely in parallel.
# Evenly distributed to sample N rows in i-th row group in i-th file.
# TODO(ekl/cheng) take into account column pruning.
num_files = len(self._pq_ds.pieces)
num_samples = int(num_files * PARQUET_ENCODING_RATIO_ESTIMATE_SAMPLING_RATIO)
min_num_samples = min(
PARQUET_ENCODING_RATIO_ESTIMATE_MIN_NUM_SAMPLES, num_files
)
max_num_samples = min(
PARQUET_ENCODING_RATIO_ESTIMATE_MAX_NUM_SAMPLES, num_files
)
num_samples = max(min(num_samples, max_num_samples), min_num_samples)
# Evenly distributed to choose which file to sample, to avoid biased prediction
# if data is skewed.
file_samples = [
self._pq_ds.pieces[idx]
for idx in np.linspace(0, num_files - 1, num_samples).astype(int).tolist()
]
sample_piece = cached_remote_fn(_sample_piece)
futures = []
scheduling = self._local_scheduling or "SPREAD"
for sample in file_samples:
# Sample the first rows batch in i-th file.
# Use SPREAD scheduling strategy to avoid packing many sampling tasks on
# same machine to cause OOM issue, as sampling can be memory-intensive.
serialized_sample = _SerializedPiece(sample)
futures.append(
sample_piece.options(scheduling_strategy=scheduling).remote(
self._reader_args,
self._columns,
self._schema,
serialized_sample,
)
)
sample_bar = ProgressBar("Parquet Files Sample", len(futures))
sample_ratios = sample_bar.fetch_until_complete(futures)
sample_bar.close()
ratio = np.mean(sample_ratios)
logger.debug(f"Estimated Parquet encoding ratio from sampling is {ratio}.")
return max(ratio, PARQUET_ENCODING_RATIO_ESTIMATE_LOWER_BOUND)
def _read_pieces(
block_udf, reader_args, columns, schema, serialized_pieces: List[_SerializedPiece]
) -> Iterator["pyarrow.Table"]:
# This import is necessary to load the tensor extension type.
from ray.data.extensions.tensor_extension import ArrowTensorType # noqa
# Deserialize after loading the filesystem class.
pieces: List[
"pyarrow._dataset.ParquetFileFragment"
] = _deserialize_pieces_with_retry(serialized_pieces)
# Ensure that we're reading at least one dataset fragment.
assert len(pieces) > 0
import pyarrow as pa
from pyarrow.dataset import _get_partition_keys
ctx = DatasetContext.get_current()
output_buffer = BlockOutputBuffer(
block_udf=block_udf,
target_max_block_size=ctx.target_max_block_size,
)
logger.debug(f"Reading {len(pieces)} parquet pieces")
use_threads = reader_args.pop("use_threads", False)
batch_size = reader_args.pop("batch_size", PARQUET_READER_ROW_BATCH_SIZE)
for piece in pieces:
part = _get_partition_keys(piece.partition_expression)
batches = piece.to_batches(
use_threads=use_threads,
columns=columns,
schema=schema,
batch_size=batch_size,
**reader_args,
)
for batch in batches:
table = pa.Table.from_batches([batch], schema=schema)
if part:
for col, value in part.items():
table = table.set_column(
table.schema.get_field_index(col),
col,
pa.array([value] * len(table)),
)
# If the table is empty, drop it.
if table.num_rows > 0:
output_buffer.add_block(table)
if output_buffer.has_next():
yield output_buffer.next()
output_buffer.finalize()
if output_buffer.has_next():
yield output_buffer.next()
def _fetch_metadata_remotely(
pieces: List["pyarrow._dataset.ParquetFileFragment"],
**ray_remote_args,
) -> List[ObjectRef["pyarrow.parquet.FileMetaData"]]:
remote_fetch_metadata = cached_remote_fn(_fetch_metadata_serialization_wrapper)
metas = []
parallelism = min(len(pieces) // PIECES_PER_META_FETCH, 100)
meta_fetch_bar = ProgressBar("Metadata Fetch Progress", total=parallelism)
for pcs in np.array_split(pieces, parallelism):
if len(pcs) == 0:
continue
metas.append(
remote_fetch_metadata.options(**ray_remote_args).remote(
[_SerializedPiece(p) for p in pcs]
)
)
metas = meta_fetch_bar.fetch_until_complete(metas)
return list(itertools.chain.from_iterable(metas))
def _fetch_metadata_serialization_wrapper(
pieces: str,
) -> List["pyarrow.parquet.FileMetaData"]:
pieces: List[
"pyarrow._dataset.ParquetFileFragment"
] = _deserialize_pieces_with_retry(pieces)
return _fetch_metadata(pieces)
def _fetch_metadata(
pieces: List["pyarrow.dataset.ParquetFileFragment"],
) -> List["pyarrow.parquet.FileMetaData"]:
piece_metadata = []
for p in pieces:
try:
piece_metadata.append(p.metadata)
except AttributeError:
break
return piece_metadata
def _sample_piece(
reader_args,
columns,
schema,
file_piece: _SerializedPiece,
) -> float:
# Sample the first rows batch from file piece `serialized_piece`.
# Return the encoding ratio calculated from the sampled rows.
piece = _deserialize_pieces_with_retry([file_piece])[0]
# Only sample the first row group.
piece = piece.subset(row_group_ids=[0])
batch_size = max(
min(piece.metadata.num_rows, PARQUET_ENCODING_RATIO_ESTIMATE_NUM_ROWS), 1
)
# Use the batch_size calculated above, and ignore the one specified by user if set.
# This is to avoid sampling too few or too many rows.
reader_args.pop("batch_size", None)
batches = piece.to_batches(
columns=columns,
schema=schema,
batch_size=batch_size,
**reader_args,
)
# Use first batch in-memory size as ratio estimation.
try:
batch = next(batches)
except StopIteration:
ratio = PARQUET_ENCODING_RATIO_ESTIMATE_LOWER_BOUND
else:
if batch.num_rows > 0:
in_memory_size = batch.nbytes / batch.num_rows
metadata = piece.metadata
total_size = 0
for idx in range(metadata.num_row_groups):
total_size += metadata.row_group(idx).total_byte_size
file_size = total_size / metadata.num_rows
ratio = in_memory_size / file_size
else:
ratio = PARQUET_ENCODING_RATIO_ESTIMATE_LOWER_BOUND
logger.debug(
f"Estimated Parquet encoding ratio is {ratio} for piece {piece} "
f"with batch size {batch_size}."
)
return ratio
9a6110eea7dd40e138e1870e4daa4a6be79d007a | 44b28bbe3f01428417c20ea712e5999aa3dc950a | /mmdet/datasets/extra_aug.py | 0e48c08869deab649bd1b618447abf6fd34e0252 | [
"Apache-2.0"
] | permissive | gfjiangly/AerialDetection | 1ce9e15685b19c8228b0269ee3d7a54e67b5008e | ee8a945c67c8e9ddef725900ac300d2d5a785e08 | refs/heads/master | 2023-04-02T17:03:28.711163 | 2021-04-08T03:10:24 | 2021-04-08T03:10:24 | 322,861,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,319 | py | import mmcv
import numpy as np
from numpy import random
from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps
from mmdet.core.mask.utils import mask_expand, mask_crop
class MixUp(object):
"""暂未实现在loss上对label按mix加权"""
def __init__(self, p=0.3, lambd=0.5):
self.lambd = lambd
self.p = p
self.img2 = None
self.boxes2 = None
self.labels2 = None
self.masks2 = None
def __call__(self, img1, boxes1, labels1, masks1=None):
if random.random() < self.p and self.img2 is not None \
and img1.shape[1] == self.img2.shape[1]:
height = max(img1.shape[0], self.img2.shape[0])
width = max(img1.shape[1], self.img2.shape[1])
mixup_image = np.zeros([height, width, 3], dtype='float32')
mixup_image[:img1.shape[0], :img1.shape[1], :] = \
img1.astype('float32') * self.lambd
mixup_image[:self.img2.shape[0], :self.img2.shape[1], :] += \
self.img2.astype('float32') * (1. - self.lambd)
mixup_image = mixup_image.astype('uint8')
mixup_boxes = np.vstack((boxes1, self.boxes2))
mixup_labels = np.hstack((labels1, self.labels2))
if masks1 is not None:
mixup_masks = np.vstack((masks1, self.masks2))
else:
mixup_image = img1
mixup_boxes = boxes1
mixup_labels = labels1
mixup_masks = masks1
        # cache the current sample as the mix-in source for the next call
self.img2 = img1
self.boxes2 = boxes1
self.labels2 = labels1
self.masks2 = masks1
return mixup_image, mixup_boxes, mixup_labels, mixup_masks
class CutOut(object):
"""CutOut operation.
Randomly drop some regions of image used in
`Cutout <https://arxiv.org/abs/1708.04552>`_.
Args:
n_holes (int | tuple[int, int]): Number of regions to be dropped.
If it is given as a list, number of holes will be randomly
selected from the closed interval [`n_holes[0]`, `n_holes[1]`].
cutout_shape (tuple[int, int] | list[tuple[int, int]]): The candidate
shape of dropped regions. It can be `tuple[int, int]` to use a
fixed cutout shape, or `list[tuple[int, int]]` to randomly choose
shape from the list.
cutout_ratio (tuple[float, float] | list[tuple[float, float]]): The
candidate ratio of dropped regions. It can be `tuple[float, float]`
to use a fixed ratio or `list[tuple[float, float]]` to randomly
choose ratio from the list. Please note that `cutout_shape`
and `cutout_ratio` cannot be both given at the same time.
fill_in (tuple[float, float, float] | tuple[int, int, int]): The value
of pixel to fill in the dropped regions. Default: (0, 0, 0).
"""
def __init__(self,
n_holes,
cutout_shape=None,
cutout_ratio=None,
fill_in=(0, 0, 0)):
assert (cutout_shape is None) ^ (cutout_ratio is None), \
'Either cutout_shape or cutout_ratio should be specified.'
assert (isinstance(cutout_shape, (list, tuple))
or isinstance(cutout_ratio, (list, tuple)))
if isinstance(n_holes, tuple):
assert len(n_holes) == 2 and 0 <= n_holes[0] < n_holes[1]
else:
n_holes = (n_holes, n_holes)
self.n_holes = n_holes
self.fill_in = fill_in
self.with_ratio = cutout_ratio is not None
self.candidates = cutout_ratio if self.with_ratio else cutout_shape
def __call__(self, img, bboxes, labels=None, masks=None):
"""Call function to drop some regions of image."""
h, w, c = img.shape
n_holes = np.random.randint(self.n_holes[0], self.n_holes[1] + 1)
if n_holes > len(bboxes):
n_holes = len(bboxes)
holes_idxs = random.choice(range(len(bboxes)), n_holes, replace=False)
for idx in holes_idxs:
bbox = bboxes[idx]
bbox_x1, bbox_y1 = bbox[0], bbox[1]
bbox_x2, bbox_y2 = bbox[2], bbox[3]
bbox_w = bbox_x2 - bbox_x1 + 1
bbox_h = bbox_y2 - bbox_y1 + 1
if bbox_x1 >= bbox_x2 or bbox_y1 >= bbox_y2:
continue
x1 = np.random.randint(bbox_x1, bbox_x2)
y1 = np.random.randint(bbox_y1, bbox_y2)
cutout_w = random.uniform(self.candidates[0], self.candidates[1])
cutout_h = random.uniform(self.candidates[0], self.candidates[1])
if self.with_ratio:
cutout_w = int(cutout_w * bbox_w)
cutout_h = int(cutout_h * bbox_h)
x2 = np.clip(x1 + cutout_w, 0, w)
y2 = np.clip(y1 + cutout_h, 0, h)
img[y1:y2, x1:x2, :] = self.fill_in
return img, bboxes, labels, masks
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(n_holes={self.n_holes}, '
repr_str += (f'cutout_ratio={self.candidates}, ' if self.with_ratio
else f'cutout_shape={self.candidates}, ')
repr_str += f'fill_in={self.fill_in})'
return repr_str
class MixCut(object):
"""暂未实现对label的mix"""
def __init__(self,
n_holes,
cutout_shape=None,
cutout_ratio=None):
assert (cutout_shape is None) ^ (cutout_ratio is None), \
'Either cutout_shape or cutout_ratio should be specified.'
assert (isinstance(cutout_shape, (list, tuple))
or isinstance(cutout_ratio, (list, tuple)))
if isinstance(n_holes, tuple):
assert len(n_holes) == 2 and 0 <= n_holes[0] < n_holes[1]
else:
n_holes = (n_holes, n_holes)
self.n_holes = n_holes
self.with_ratio = cutout_ratio is not None
self.candidates = cutout_ratio if self.with_ratio else cutout_shape
self.img2 = None
def __call__(self, img1, boxes1, labels1, masks1=None):
if self.img2 is not None:
h, w, _ = img1.shape
n_holes = np.random.randint(self.n_holes[0], self.n_holes[1] + 1)
if n_holes > len(boxes1):
n_holes = len(boxes1)
holes_idxs = random.choice(range(len(boxes1)), n_holes, replace=False)
for idx in holes_idxs:
bbox = boxes1[idx]
bbox_x1, bbox_y1 = bbox[0], bbox[1]
bbox_x2, bbox_y2 = bbox[2], bbox[3]
bbox_w = bbox_x2 - bbox_x1 + 1
bbox_h = bbox_y2 - bbox_y1 + 1
if bbox_x1 >= bbox_x2 or bbox_y1 >= bbox_y2:
continue
x1 = np.random.randint(bbox_x1, bbox_x2)
y1 = np.random.randint(bbox_y1, bbox_y2)
cutout_w = random.uniform(self.candidates[0], self.candidates[1])
cutout_h = random.uniform(self.candidates[0], self.candidates[1])
if self.with_ratio:
cutout_w = int(cutout_w * bbox_w)
cutout_h = int(cutout_h * bbox_h)
x2 = np.clip(x1 + cutout_w, 0, w)
y2 = np.clip(y1 + cutout_h, 0, h)
# cut from img2
img2_h, img2_w = self.img2.shape[:2]
cut2_w, cut2_h = x2 - x1, y2 - y1
if cut2_w >= img2_w - cut2_w or cut2_h >= img2_h - cut2_h:
continue
img2_x1 = np.random.randint(cut2_w, img2_w - cut2_w)
img2_y1 = np.random.randint(cut2_h, img2_h - cut2_h)
img2_cut = self.img2[img2_y1:img2_y1+cut2_h, img2_x1:img2_x1+cut2_w].copy()
# update img2
self.img2 = img1.copy()
try:
img1[y1:y2, x1:x2, :] = img2_cut
except Exception as e:
print(e)
else:
self.img2 = img1.copy()
return img1, boxes1, labels1, masks1
class PhotoMetricDistortion(object):
def __init__(self,
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18,
color_choose=0,
gray_p=0.3):
self.brightness_delta = brightness_delta
self.contrast_lower, self.contrast_upper = contrast_range
self.saturation_lower, self.saturation_upper = saturation_range
self.hue_delta = hue_delta
self.color_choose = color_choose
self.gray_p = gray_p
def __call__(self, img, boxes, labels, masks=None):
if self.color_choose == 0:
if random.uniform() < self.gray_p:
gray = mmcv.bgr2gray(img)
img = mmcv.gray2bgr(gray)
return img, boxes, labels, masks
# random brightness
if random.randint(2):
delta = random.uniform(-self.brightness_delta,
self.brightness_delta)
img += delta
# mode == 0 --> do random contrast first
# mode == 1 --> do random contrast last
mode = random.randint(2)
if mode == 1:
if random.randint(2):
alpha = random.uniform(self.contrast_lower,
self.contrast_upper)
img *= alpha
# convert color from BGR to HSV
img = mmcv.bgr2hsv(img)
# random saturation
if random.randint(2):
img[..., 1] *= random.uniform(self.saturation_lower,
self.saturation_upper)
# random hue
if random.randint(2):
img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta)
img[..., 0][img[..., 0] > 360] -= 360
img[..., 0][img[..., 0] < 0] += 360
# convert color from HSV to BGR
img = mmcv.hsv2bgr(img)
# random contrast
if mode == 0:
if random.randint(2):
alpha = random.uniform(self.contrast_lower,
self.contrast_upper)
img *= alpha
# randomly swap channels
if random.randint(2):
img = img[..., random.permutation(3)]
else:
if self.color_choose == 1:
# random brightness
if random.randint(2):
delta = random.uniform(-self.brightness_delta,
self.brightness_delta)
img += delta
elif self.color_choose == 2:
# random contrast first
if random.randint(2):
alpha = random.uniform(self.contrast_lower,
self.contrast_upper)
img *= alpha
else:
# convert color from BGR to HSV
img = mmcv.bgr2hsv(img)
if self.color_choose == 3:
# random saturation
if random.randint(2):
img[..., 1] *= random.uniform(self.saturation_lower,
self.saturation_upper)
if self.color_choose == 4:
# random hue
if random.randint(2):
img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta)
img[..., 0][img[..., 0] > 360] -= 360
img[..., 0][img[..., 0] < 0] += 360
# convert color from HSV to BGR
img = mmcv.hsv2bgr(img)
return img, boxes, labels, masks
class Expand(object):
def __init__(self, mean=(0, 0, 0), to_rgb=True, ratio_range=(1, 4)):
if to_rgb:
self.mean = mean[::-1]
else:
self.mean = mean
self.min_ratio, self.max_ratio = ratio_range
def __call__(self, img, boxes, labels, masks):
if random.randint(2):
return img, boxes, labels, masks
h, w, c = img.shape
ratio = random.uniform(self.min_ratio, self.max_ratio)
expand_img = np.full((int(h * ratio), int(w * ratio), c),
self.mean).astype(img.dtype)
left = int(random.uniform(0, w * ratio - w))
top = int(random.uniform(0, h * ratio - h))
expand_img[top:top + h, left:left + w] = img
img = expand_img
boxes += np.tile((left, top), 2)
if masks is not None:
masks = mask_expand(masks, expand_img.shape[0],
expand_img.shape[1], top, left)
return img, boxes, labels, masks
class RandomCrop(object):
def __init__(self, min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3):
# 1: return ori img
self.sample_mode = (1, *min_ious, 0)
self.min_crop_size = min_crop_size
def __call__(self, img, boxes, labels, masks):
h, w, c = img.shape
while True:
mode = random.choice(self.sample_mode)
if mode == 1:
return img, boxes, labels, masks
min_iou = mode
for i in range(50):
new_w = random.uniform(self.min_crop_size * w, w)
new_h = random.uniform(self.min_crop_size * h, h)
# h / w in [0.5, 2]
if new_h / new_w < 0.5 or new_h / new_w > 2:
continue
left = random.uniform(w - new_w)
top = random.uniform(h - new_h)
patch = np.array((int(left), int(top), int(left + new_w),
int(top + new_h)))
overlaps = bbox_overlaps(
patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1)
if overlaps.min() < min_iou:
continue
# center of boxes should inside the crop img
center = (boxes[:, :2] + boxes[:, 2:]) / 2
mask = (center[:, 0] > patch[0]) * (
center[:, 1] > patch[1]) * (center[:, 0] < patch[2]) * (
center[:, 1] < patch[3])
if not mask.any():
continue
boxes = boxes[mask]
labels = labels[mask]
# adjust boxes
img = img[patch[1]:patch[3], patch[0]:patch[2]]
boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:])
boxes[:, :2] = boxes[:, :2].clip(min=patch[:2])
boxes -= np.tile(patch[:2], 2)
if masks is not None:
masks = masks[mask]
masks = mask_crop(masks, patch)
return img, boxes, labels, masks
class ExtraAugmentation(object):
def __init__(self,
photo_metric_distortion=None,
expand=None,
random_crop=None,
mixup=None,
cutout=None,
mixcut=None):
self.transforms = []
if photo_metric_distortion is not None:
self.transforms.append(
PhotoMetricDistortion(**photo_metric_distortion))
if expand is not None:
self.transforms.append(Expand(**expand))
if random_crop is not None:
self.transforms.append(RandomCrop(**random_crop))
if mixup is not None:
self.transforms.append(MixUp(**mixup))
if cutout is not None:
self.transforms.append(CutOut(**cutout))
if mixcut is not None:
self.transforms.append(MixCut(**mixcut))
def __call__(self, img, boxes, labels, masks=None):
img = img.astype(np.float32)
for transform in self.transforms:
if masks is not None:
img, boxes, labels, masks = transform(img, boxes, labels, masks)
else:
img, boxes, labels = transform(img, boxes, labels)
if masks is not None:
return img, boxes, labels, masks
else:
return img, boxes, labels
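# Minimal usage sketch (assumed config values, not part of this module):
#
#     extra_aug = ExtraAugmentation(
#         photo_metric_distortion=dict(brightness_delta=32),
#         expand=dict(mean=(123.675, 116.28, 103.53), to_rgb=True,
#                     ratio_range=(1, 4)),
#         random_crop=dict(min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
#                          min_crop_size=0.3))
#     img, boxes, labels = extra_aug(img, boxes, labels)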
215b0cc5ee454bc4870b1fc88195a77867b0fa9c | a066134017b7d4cb7ac6f342b18f75c3b768fb87 | /3-exceptions/1-basics/FinallyBlock.py | da3b0ef05534f4166a184889876a3c38c8ca423d | [] | no_license | akyare/Python-Students-IoT | e6612af05629a32bae841225f17c5076a4499ca9 | 3886e7cced42622e3989873749d33476cc6b5f6f | refs/heads/main | 2023-01-22T04:43:41.723276 | 2020-12-03T10:56:01 | 2020-12-03T10:56:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | path = 'GeneralExceptions.py'
# There are times in Python that we need to work with resources like:
# files;
# DB connections;
# network connections;
# ...
# And if we are using certain resources we should always close those resources at the end of our program.
# We can handle this in the following way:
# BAD WAY:
try:
    file = open(path, 'w')  # must open for writing; the default 'r' mode rejects write()
    file.write('Would be too easy, right?')
except OSError as oe:
    file.close()
    print('Something went wrong with IO operation')
else:
    file.close()
# GOOD WAY, no code duplication [good programmers avoid that shit].
try:
    file = open(path, 'w')
    file.write('Would be too easy, right?')
except OSError as oe:
    print('Something went wrong with IO operation')
finally:
    file.close() # This way even if an exception is raised the resource used will be closed.
    # (Careful: if open() itself fails, 'file' is never bound, so this close()
    # raises a NameError; the context manager below avoids that problem.)
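# EVEN BETTER (illustrative addition, not in the original lesson): a context
# manager closes the file automatically, even when an exception is raised,
# and avoids the unbound-'file' problem entirely.
try:
    with open(path, 'w') as file:
        file.write('Would be too easy, right?')
except OSError as oe:
    print('Something went wrong with IO operation')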
| [
"[email protected]"
] | |
745ebd5a93fa292c9280ac4ca66f3ec83dcc627b | 6dee59b96671f2adcfbfe4d5732b7a585e05a419 | /bin/wheel | cf05771a23e8690b10679a2739fe9f0a3b0eae70 | [] | no_license | arnoldblandon/microserviciodocumento | dbd747a459caef63125bbf7fa1920ef5f66724b0 | 73889ab1acbbfef317c03b4a074a866e751a73d8 | refs/heads/master | 2021-06-27T07:18:26.632327 | 2017-09-14T17:10:41 | 2017-09-14T17:10:41 | 102,908,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | #!/home/arnold/Escritorio/microservicio/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"ay.blandon.mesfix.co"
] | ay.blandon.mesfix.co |
|
5316a4c200e07018aca93fa33bea460da0c3a5ad | e31fd8de16f5d928ad7f8a423bb6f26cfd0871b1 | /Python/Day 4/To find cubes of all numbers of list using 'map' fn.py | e6868cf4f2d4eba3ba1dd8b913057f586e5c40da | [] | no_license | adrija24/languages_learning | 47f8755b950058658501e69748457bbda9529c52 | 851bafca2c27cb87a6ea2f4edf316da178168d8b | refs/heads/master | 2023-08-24T07:28:08.233696 | 2021-10-02T13:38:03 | 2021-10-02T13:38:03 | 412,806,715 | 0 | 0 | null | 2021-10-02T13:37:52 | 2021-10-02T13:37:51 | null | UTF-8 | Python | false | false | 108 | py | """To find cubes of all numbers of list using 'map' fn"""
a=[1,2,3,4]
print(list(map(lambda x:pow(x,3),a))) | [
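# Equivalent one-liner added for comparison (not part of the original
# exercise); a list comprehension is often considered more idiomatic
# than map + lambda:
print([x ** 3 for x in a])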
"[email protected]"
] | |
58a1479231c6df07a1390e753e25667f72519694 | 7a2965bb69ca8e0966fd9077f8e9180e63e1b6a2 | /app.py | 81d2cf878b6628e4b362c45e5298506a9f1eac7d | [] | no_license | thorntcj/team1 | f085271e712da7f3ced87520765906e1b6731d65 | 26bf976e9979dff711132324e49751780c90f8e2 | refs/heads/main | 2023-06-21T22:12:07.039039 | 2021-07-12T22:19:25 | 2021-07-12T22:19:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 820 | py | #!/usr/bin/env python3
#
# Installed packages: Flask (and its dependency MarkupSafe)
#
from flask import (Flask, redirect, render_template, url_for)
from markupsafe import escape
app = Flask(__name__)
@app.route('/')
def index():
    # build the redirect with url_for instead of a hard-coded host and port
    return redirect(url_for('welcome'))
@app.route('/welcome')
def welcome():
return render_template('welcome.html')
@app.route('/budget')
def budget():
return render_template('budget.html')
@app.route('/lodging')
def lodging():
return render_template('lodging.html')
@app.route('/meals')
def meals():
return render_template('meals.html')
@app.route('/entertainment')
def entertainment():
return render_template('entertainment.html')
@app.route('/itinerary')
def itinerary():
return render_template('itinerary.html')
if __name__ == '__main__':
app.run(debug=True)
| [
"[email protected]"
] | |
b922221aa6a8d382df22ff335a91e74dc8ce015a | becf33483b318e0101f2e23cc57eda5c76626fbd | /model/bert.py | e28e6bef3e9abd6ba9125bf39bd6c9c8d9b00466 | [] | no_license | LearningHarder/query_similar_tianchi_2020 | fc74aa1d74423faf6ac53e254abba6f6f097ef3d | 6208a0b1ff9b896b56179989200a1c48372e3ad3 | refs/heads/master | 2023-08-11T02:25:11.982982 | 2021-09-17T07:14:46 | 2021-09-17T07:14:46 | 269,318,002 | 1 | 0 | null | 2020-06-04T09:36:17 | 2020-06-04T09:36:16 | null | UTF-8 | Python | false | false | 965 | py | from bert4keras.bert import build_bert_model
from tensorflow.keras.layers import Dropout, Dense
from tensorflow.keras import Model
class BertModel:
def __init__(self, config_path: str, checkpoint_path: str):
"""
初始化预训练的模型参数
:param config_path :
:param checkpoint_path:
"""
self.config_path = config_path
self.checkpoint_path = checkpoint_path
def get_model(self):
        # load the pretrained model
bert = build_bert_model(
config_path=self.config_path, checkpoint_path=self.checkpoint_path,
with_pool=True, return_keras_model=False, model="albert")
output = Dropout(rate=0.1)(bert.model.output)
output = Dense(units=2,
activation='softmax',
kernel_initializer=bert.initializer)(output)
model = Model(bert.model.input, output)
model.summary()
return model
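# Illustrative usage (not part of the original file; the paths below are
# placeholders for a downloaded ALBERT config and checkpoint):
#
# bert = BertModel('albert_config.json', 'albert_model.ckpt')
# model = bert.get_model()
# model.compile(optimizer='adam',
#               loss='sparse_categorical_crossentropy',
#               metrics=['accuracy'])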
| [
"[email protected]"
] | |
c3d0c5088b17dc4892ea8a5da270c9658f67e475 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/helpers/typeshed/stdlib/email/errors.pyi | 656cbd374ac4c6ac5b1341328348f5c65ab2666a | [
"Apache-2.0",
"MIT"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 1,532 | pyi | import sys
class MessageError(Exception): ...
class MessageParseError(MessageError): ...
class HeaderParseError(MessageParseError): ...
class BoundaryError(MessageParseError): ...
class MultipartConversionError(MessageError, TypeError): ...
class CharsetError(MessageError): ...
class MessageDefect(ValueError):
def __init__(self, line: str | None = ...) -> None: ...
class NoBoundaryInMultipartDefect(MessageDefect): ...
class StartBoundaryNotFoundDefect(MessageDefect): ...
class FirstHeaderLineIsContinuationDefect(MessageDefect): ...
class MisplacedEnvelopeHeaderDefect(MessageDefect): ...
class MultipartInvariantViolationDefect(MessageDefect): ...
class InvalidMultipartContentTransferEncodingDefect(MessageDefect): ...
class UndecodableBytesDefect(MessageDefect): ...
class InvalidBase64PaddingDefect(MessageDefect): ...
class InvalidBase64CharactersDefect(MessageDefect): ...
class InvalidBase64LengthDefect(MessageDefect): ...
class CloseBoundaryNotFoundDefect(MessageDefect): ...
class MissingHeaderBodySeparatorDefect(MessageDefect): ...
MalformedHeaderDefect = MissingHeaderBodySeparatorDefect
class HeaderDefect(MessageDefect): ...
class InvalidHeaderDefect(HeaderDefect): ...
class HeaderMissingRequiredValue(HeaderDefect): ...
class NonPrintableDefect(HeaderDefect):
def __init__(self, non_printables: str | None) -> None: ...
class ObsoleteHeaderDefect(HeaderDefect): ...
class NonASCIILocalPartDefect(HeaderDefect): ...
if sys.version_info >= (3, 10):
class InvalidDateDefect(HeaderDefect): ...
| [
"[email protected]"
] | |
3af0d9e86e8534f2ebc70878d3b6cc81f137dbe0 | 21700527a1a4d572331fc2d236fdf4e5920e7834 | /Domashka/env/bin/pip3 | c116a1464b98aa3b884ddc49dffacc16bbbb21f8 | [] | no_license | ilovepizzawithpineapples/dz | 7d68030554e2bec65472030348cd1745e09f873a | 1e7df1fbd51b1a552db58cf51abec66b0e2c4b43 | refs/heads/master | 2020-05-19T17:15:26.839101 | 2019-05-06T05:39:19 | 2019-05-06T05:39:19 | 185,129,404 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | #!/home/pixelsmaker/Projects_for_study_django/Youtube_molchanov/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
8fd967ac11f6f58b538c09958fa15f834b7c0f27 | 34bfba985cf6a2ae98e9ea19b58b3f7c25dda9d9 | /alembic/versions/23a622f768da_initial_revision.py | cc9e947b84af99fe1d293ecd12effd0d2336b86f | [] | no_license | buddyup/buddyup | 19c05fa5d83abe9611f4dad407538a9ea898969a | c5a64ff1680b48e3bc552434539d051166e665fd | refs/heads/master | 2021-03-27T20:15:13.772744 | 2015-08-11T03:21:39 | 2015-08-11T03:21:39 | 18,304,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | """Initial Revision
Revision ID: 23a622f768da
Revises: None
Create Date: 2013-11-19 21:15:03.210863
"""
# revision identifiers, used by Alembic.
revision = '23a622f768da'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
pass
def downgrade():
pass
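# For reference, a first real revision typically fills these in with Alembic
# operations like the sketch below (illustrative only; this initial revision
# is intentionally empty, which is why op and sa go unused above):
#
# def upgrade():
#     op.create_table(
#         'user',
#         sa.Column('id', sa.Integer, primary_key=True),
#         sa.Column('name', sa.String(50), nullable=False),
#     )
#
# def downgrade():
#     op.drop_table('user')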
| [
"[email protected]"
] | |
aa3a6e385c0912000d5014878d7a916a15c8ceb6 | 7dab8df68b1a11011d401555471cd74b965626b7 | /plotter.py | e78bbc1ac4339820ef86812b451755618523863d | [] | no_license | brilund/3D-PAWS | cb4cd31ec53850302d75cb4da55d6c6b0b9ffa0a | fb973d32cd3699ee01c02a7b68a803cdafc86833 | refs/heads/master | 2023-07-15T15:52:41.911569 | 2021-02-04T00:57:31 | 2021-02-04T00:57:31 | 269,197,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,133 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 4 16:05:12 2020
@author: blund
"""
###############################################################################
'''
_____ _____ _ ____ ___ ___ _____
| \ | / \ | \ | \ / | |
|____/ |__ /___\ | | | \/ | |__
| \ | / \ | | | | |
| \ |____ / \ |___/ | | |____
'''
#This code plots 3D-PAWS data based on user input from a parent program.
#
#Written by Brianna Lund
#
#QUESTIONS?
#Email me at [email protected]
#
#LICENSE:
#This code may be used and distributed freely, provided proper attribution is
# given to UCAR and the author.
#
#
#REQUIREMENTS:
# Python 3
# Numpy
# Pandas
# Sys
#
#
#HISTORY:
# Nov 04, 2020 - First Write; modified from original BMP_weekly_plotter.py
#
#
#PLANNED FEATURES:
#
#
#HOW TO USE:
# 1. Save this file/program in the same directory as the parent program you
# will use this function in conjunction with
# 2. Import this function in the parent program (no need for file
# extensions):
#
# a) from plotter import plotter
# ... or...
# b) import plotter as pltr
#
# 3. Call the function in the parent program, ensuring that you pass the
# appropriate attributes/parameters:
#
#      a) call_plotter = plotter(sensor, save_dir, site_ID, var_name, units,
#                                averaged, avg_window, mintime, maxtime,
#                                plot_opt, tag, df)
#         ... or...
#      b) call_plotter = pltr.plotter(sensor, save_dir, site_ID, var_name,
#                                     units, averaged, avg_window, mintime,
#                                     maxtime, plot_opt, tag, df)
#
# 4. Run the parent program within terminal (e.g. "python main.py"),
# or open the parent program in Spyder and run from there.
#
#
#Example header from files --> no file header(s)!!! (this could change...)
#
#Example data from files:
#
#
#
#NOTES: At the moment, this program reads in all data (the whole dataframe)
# even if a subset of time is requested for plotting, analysis, etc.,
#       which is a bit of overkill
##############################################################################
######################### IMPORTING MODULES ############################
##############################################################################
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sys
import datetime
##############################################################################
##################### UNIVERSAL PLOTTING PARAMETERS #####################
##############################################################################
def _universal_params(ax,df,mintime,maxtime,sensor,site_ID):
#add dashed grid lines
plt.grid(which='major', linestyle='--', color='dimgray')
plt.grid(which='minor', linestyle=':',color='gray')
#set x-axis limits/range
ax.set_xlim(df.time[mintime], df.time[maxtime])
#set the plot's title
if sensor.lower() == "wind_vane":
plt.title("%s : %s" % (site_ID, ''.join(sensor.replace('_', ' ')).upper()), fontsize=12)
elif sensor.lower == "rain":
plt.title("%s : TIPPING BUCKET" % site_ID, fontsize=12)
else: #all other sensors follow the standard plot title below
plt.title("%s : %s" % (site_ID, sensor.upper()), fontsize=12)
#set the x-axis label
plt.xlabel("Date / Time (UTC)")
#set the plot legend
plt.legend(loc='upper left', bbox_to_anchor=(1, 1), framealpha=0.95,
fancybox=True, shadow=True, fontsize=10)
return
##############################################################################
############################## SAVE FIGURE ##############################
##############################################################################
def _save_figure(sensor, save_dir, site_ID, var_name, units, averaged,
avg_window, mintime, maxtime, plot_opt, tag, df):
if plot_opt == "weekly":
#'tag' here represents the dates for which the figure's time frame is valid;
# it is used in the name of the figure
tag = str(df.time[mintime])[:10].split("-")+["-"]+str(df.time[maxtime])[:10].split("-")
tag = ''.join(x for x in tag)
elif plot_opt =="daily":
#'tag' here represents the date for which the figure's time frame is valid;
# it is used in the name of the figure
tag = str(df.time[mintime])[:10].split("-")
tag = ''.join(x for x in tag)
elif plot_opt == "monthly":
#'tag' here represents the dates for which the figure's time frame is valid;
# it is used in the name of the figure
tag = str(df.time[mintime])[:7]
#replace underscores in 'var_name' with hyphens/dashes; merely a personal
# preference for figure naming
var_name = ''.join(var_name.replace('_', '-'))
#save the figure
if sensor.lower() == "anemometer":
var_name = "wind-spd"
if averaged == False:
plt.savefig('%s%s_%s-%s_%s.png' % (save_dir, site_ID, var_name, units, tag), \
dpi=500, bbox_inches='tight')
elif averaged == True:
plt.savefig('%s%s_%s-%s_%s-min_%s.png' % (save_dir, site_ID, var_name, units, avg_window, tag), \
dpi=500, bbox_inches='tight')
elif averaged == "static" or averaged == "resampled":
plt.savefig('%s%s_%s-%s_%s-min-%s_%s.png' % (save_dir, site_ID, var_name, units, avg_window, averaged, tag), \
dpi=500, bbox_inches='tight')
elif sensor.lower() == "wind_vane":
var_name = "wind-dir"
plt.savefig('%s%s_%s_%s.png' % (save_dir, site_ID, var_name, tag), \
dpi=500, bbox_inches='tight')
elif sensor.lower() == "rain":
plt.savefig('%s%s_%s-%s_%s.png' % (save_dir, site_ID, sensor.upper(), units, tag), \
dpi=500, bbox_inches='tight')
else:
plt.savefig('%s%s_%s_%s_%s.png' % (save_dir, site_ID, sensor, var_name, tag),
dpi=500, bbox_inches='tight')
#show the figure that was generated
plt.show()
##############################################################################
############################## PLOTTERs #################################
##############################################################################
#to plot figures for the user-defined time frame, call this function; this is
# the default plotting function; the other plotting options depend on this
# function; think of this one as the PARENT plotter
def plotter(sensor, save_dir, site_ID, var_name, units, averaged, avg_window,
mintime, maxtime, plot_opt, tag, df):
#no print statement here telling the user that this function was called
# because this one gets called MANY times from the other plotting
# functions (daily, weekly and monthly)
#plot based on 'sensor'
if sensor.lower() == "anemometer":
#import matplotlib for anemometer only because we need to use that
# rcParams thing
import matplotlib as mpl
#not sure what this does, but supposedly it is necessary for larger datasets;
# this will not plot without it
mpl.rcParams['agg.path.chunksize'] = 10000
#plot based on the user-defined averaging/smoothing parameters
if averaged == False:
#plotting all raw data
ax = df.plot(x='time', y='wind_speed', color='b', label='wind_%s' % units,
figsize=(30,5))
elif averaged == True:
#plotting running averaged data
ax = df.plot(x='time', y='windspd_avg', color='b', label='%s_%s-min' % (units, avg_window),
figsize=(30,5))
elif averaged == "static":
#plotting static averaged data (fewer data points)
ax = df[::avg_window].plot(x='time', y='windspd_avg', color='b',label='%s_%s-min-%s' % (units, avg_window, averaged),
figsize=(30,5))
elif averaged == "resampled":
#plotting every nth raw data point (fewer data points)
ax = df[::avg_window].plot(x='time', y='wind_speed', color='b', label='wind_%s_%s' % (units, averaged),
figsize=(30,5))
        else:
            #fallback: plot all raw data; in practice this branch should be
            # unreachable because the averaging parameters are validated in
            # input_checker.py, which exits the program before this stage
ax = df.plot(x='time', y='wind_speed', color='b', label='wind_%s' % units,
figsize=(30,5))
##########
#some parameters specific to 'units'
if units == "mps": #plot wind speed in meters per second
#set y-axis limits/range
ax.set_ylim(0., 10.)
#set the y-axis label
plt.ylabel("Wind Speed (m s$^-1$)")
elif units == "kmph": #plot wind speed in kilometers per hour
#set y-axis limits/range
ax.set_ylim(0., 90.)
#set the y-axis label
plt.ylabel("Wind Speed (km h$^-1$)")
elif units == "mph":
#set y-axis limits/range
ax.set_ylim(0., 50.)
#set the y-axis label
plt.ylabel("Wind Speed (m h$^-1$)")
elif units == "kts": #plot wind speed in knots
#set y-axis limits/range
ax.set_ylim(0., 50.)
#set the y-axis label
plt.ylabel("Wind Speed (kts)")
else:
#in theory, we have already accounted for this potential error with
# the input_checker, so this is redundant; may remove at some
# point
print("'units' not recognized. Program exiting...")
sys.exit()
##########
elif sensor.lower() == "wind_vane":
ax = df.plot(x='time', y='wind_dir', color='b', label='wind_dir',
figsize=(30,5))
#set y-axis limits/range
ax.set_ylim(0., 360.)
#set the y-axis label
plt.ylabel("Wind Direction (degrees)")
##########
elif sensor.lower() == "rain":
ax = df.plot(x='time', y=['rain', 'no_rain'], color=['b','r'],
label=['rain','no-rain'], figsize=(30,5))
#set y-axis range and title based on "millimeters"
if units == "mm":
#set y-axis limits/range
ax.set_ylim(-0.1, 5.)
#set the y-axis label
plt.ylabel("Precipitation (mm)")
#set y-axis range and title based on "inches"
elif units == "inches":
#set y-axis limits/range
ax.set_ylim(-0.01, 0.2)
#set the y-axis label
plt.ylabel("Precipitation (in.)")
##########
else: #for all other sensors, we plot here
ax = df.plot(x='time', y=var_name, color='b', label=var_name, figsize=(30,5))
#plot parameters within these 'if' statements are those specific to each
# variable
if var_name == "temp_C": #plot temperature in Celsius
#set y-axis limits/range
ax.set_ylim(-20., 45.)
#set the y-axis label
plt.ylabel("Temperature ($^o$C)")
elif var_name == "temp_F": #plot temperature in Fahrenheit
#set y-axis limits/range
ax.set_ylim(-10., 110.)
#set the y-axis label
plt.ylabel("Temperature ($^o$F)")
elif var_name == "rel_hum": #plot relative humidity (%)
#set y-axis limits/range
ax.set_ylim(0., 100.)
#set the y-axis label
plt.ylabel("Relative Humidity (%)")
elif var_name == "station_P": #plot station pressure (hPa))
#set y-axis limits/range
ax.set_ylim(800., 875.)
#set the y-axis label
plt.ylabel("Station Pressure (hPa)")
elif var_name == "SLP_hPa": #plot sea-level pressure (hPa)
#set y-axis limits/range
ax.set_ylim(900., 1100.)
#set the y-axis label
plt.ylabel("Sea-Level Pressure (hPa)")
elif var_name == "SLP_inHg": #plot sea-level pressure (inches of Hg)
#set y-axis limits/range
ax.set_ylim(28., 32.)
#set the y-axis label
plt.ylabel("Sea-Level Pressure (inches of Hg)")
elif var_name == "alt": #plot altitude (m)
#set y-axis limits/range
ax.set_ylim(1400., 1800.)
#set the y-axis label
plt.ylabel("Altitude (m)")
#plot a horizontal line marking the actual altitude (according to Google
# Earth)
#plt.axhline(y=1617, color='r', linestyle=":", label="1617 m")
elif var_name == "vis": #plot temperature in Celsius
#set y-axis limits/range
ax.set_ylim(0., 1600.)
#set the y-axis label
plt.ylabel("Visible (W m$^-2$)")
elif var_name == "ir": #plot temperature in Fahrenheit
#set y-axis limits/range
ax.set_ylim(0., 10000.)
#set the y-axis label
plt.ylabel("Infrared (W m$^-2$)")
elif var_name == "uv": #plot temperature in Fahrenheit
#set y-axis limits/range
ax.set_ylim(0., 700.)
#set the y-axis label
plt.ylabel("Ultraviolet (W m$^-2$)")
elif var_name == "uvi": #plot temperature in Fahrenheit
#set y-axis limits/range
ax.set_ylim(0., 7.)
#set the y-axis label
plt.ylabel("UV Index")
else:
#if none of the conditions above are met, print an error statement
# pointing the user to the potential cause (i.e. spelling)
print("Variable name not found. Check the spelling of 'var_name'. Program exiting...")
sys.exit()
#Need to include a section that accounts for the wind BARB plotter
### UNIVERSAL PLOTTING PARAMETERS ###
#call the function that sets up all the universal plotting parameters:
# gridlines, x-axis limits, titles, labels, legends, etc.
_universal_params(ax,df,mintime,maxtime,sensor,site_ID)
#save the figure by calling the hidden '_save_figure' function
_save_figure(sensor, save_dir, site_ID, var_name, units, averaged,
avg_window, mintime, maxtime, plot_opt, tag, df)
return
##############################################################################
########################### DAILY PLOTTER ##############################
##############################################################################
#to plot figures on a daily basis within the user-defined time frame, call
# this function
def daily_plotter(sensor, save_dir, site_ID, var_name, units, averaged,
avg_window, mintime, maxtime, plot_opt, tag, df):
#tell the user that the function was called
print("------------------------------------------------------------------\n")
print("'daily_plotter' function called...\n")
''' Note that the very first occurrence of a 00:00 UTC time in the dataset
should never occur later than the 1,439th index because there are 1,440
minutes in a day'''
#set two counters: one to count the total number of days, and the other to
# count the number of days that were actually plotted (skipping days
# for which there are no data)
#NOTE: the total count is technically equivalent to n-2 total FULL days for
# time frames not perfectly divisible by 1440 minutes; if the last
# interval does not equate to a full day but contains data, it will
# still be counted in the total count of days
total_count = 0 #count for ALL weeks possible within the time frame
plotted_count = 0 #count for all weeks that were plotted within the time frame
#this line will find the first occurrence of a 00:00 UTC time in the
# selected timeframe; this is the start of your range to loop through
# for making daily plots
start = pd.DatetimeIndex(df.time[mintime:maxtime]).indexer_at_time('00:00:00')[0] + mintime
#before getting into the loop, to account for instances such that the
# dataset or the time frame set by the user begins with a partial day
# (e.g. the first timestamp is NOT 00:00 UTC) plot that partial day all
# by itself only up to the index marking the first occurrence of a
# 00:00 UTC timestamp in the dataset / timeframe (a.k.a. 'start'
# variable)
if df.time[mintime:start].dt.time.empty != True and df.time[mintime:start].dt.time[mintime] != datetime.time(0,0):
#increase total counter by 1
total_count += 1
#skip the creation of plot if there are no data within the current 1-day
# period
if (averaged == True or averaged == "static") and (df[df.columns[2]][mintime:start].notna().any() == False):
#we need this special except above because when 'averaged' is set
# to either of those two conditions, a new column is created in
# the dataframe and it contains a different number of NaNs than
# the column from which it was computed->created; the only
# change is that we are referring to df.columns[2] instead of
# df.columns[1]; this will likely have to change if I add the
# ability to average ANY variable (not just wind speed)
print("%s - %s not plotted --> No data" % (df.time[mintime], df.time[start]))
pass #don't plot
else:
if df[df.columns[1]][mintime:start].empty == True:
#this little condition avoids printing the statement that
# "2019-01-08 00:00 - 2019-01-08 00:00" has no data because, of
# course, this is a single time so it has no length/data; this
# will only happen for datasets or given time frames that
# exactly equal 1 day / 1440 minutes
pass
elif df[df.columns[1]][mintime:start].notna().any() == False:
#tell the user this time frame was not plotted due to the absence of
# any data
print("%s - %s not plotted --> No data" % (df.time[mintime], df.time[start]))
pass #move on the next iteration of the loop
#if there is even a single data point within the current time frame,
# plot it
else:
#increase the plotting counter by 1
plotted_count += 1
#call the default plotter function here; this also sets up the
# universal plotting parameters AND saves the figures all in one
plotter(sensor, save_dir, site_ID, var_name, units, averaged,
avg_window, mintime, start, plot_opt, tag, df)
#since data are recorded every minute, and there are 1440 minutes in 1 day,
# we set the range interval to 1440 (equivalent to 'every 1440th index');
#NOTE: this hard-coded method only works because we explicitly set each
# record at a 1-minute interval and we fill any gaps in data with NaNs;
# this will get more complicated if you start using raw seconds in
# your timestamps
interval = 1440
#find the ending index of the range over which to plot; this will be the
# index of df.time that equals the 'maxtime' specified by the user; this,
# too, is simply equal to 'maxtime', but we must add 1 to the index because
# the 'range' function does not include the end value
end = maxtime + 1
    #TODO: store a list of dates/times for days that were not plotted due to
    # missing data
#######################
    #now, loop through the entire time frame set by the user with 1-day intervals
    # making a day-long plot of the chosen variable for each iteration
for d in range(start,end,interval):
#increase total counter by 1
total_count += 1
        #'mintime' and 'maxtime' will now change through each iteration of the loop
# to serve as the indices defining the 1-day intervals throughout the
# dataset over which to plot; the difference between them, however, will
# always represent 1 day / 1440 minutes
mintime = d
maxtime = d + interval
#set 'maxtime' to the end of the range if 'maxtime' is out of the range
# specified; this will only occur if the range is not perfectly
# divisible by 1 day / 1440 minutes; in this instance, the very last
# figure generated would NOT represent a 1-day period
if maxtime > end:
maxtime = end - 1 #now we subtract 1 because set_xlim() includes the
# upper limit given when setting the range; if we
# did not subtract 1, 'maxtime' would be out of
# range for set_xlim()
#skip the creation of plot if there are no data within the current 1-day
# period
if (averaged == True or averaged == "static") and (df[df.columns[2]][mintime:maxtime].notna().any() == False):
#we need this special except above because when 'averaged' is set
# to either of those two conditions, a new column is created in
# the dataframe and it contains a different number of NaNs than
# the column from which it was computed->created; the only
# change is that we are referring to df.columns[2] instead of
# df.columns[1]; this will likely have to change if I add the
# ability to average ANY variable (not just wind speed)
print("%s - %s not plotted --> No data" % (df.time[mintime], df.time[maxtime]))
pass #don't plot
else:
if df[df.columns[1]][mintime:maxtime].empty == True:
#this little condition avoids printing the statement that
# "2019-01-08 00:00 - 2019-01-08 00:00" has no data because, of
# course, this is a single time so it has no length/data; this
# will only happen for datasets or given time frames that
# exactly equal 1 day / 1440 minutes
pass
elif df[df.columns[1]][mintime:maxtime].notna().any() == False:
#tell the user this time frame was not plotted due to the absence of
# any data
print("%s - %s not plotted --> No data" % (df.time[mintime], df.time[maxtime]))
pass #move on the next iteration of the loop
#if there is even a single data point within the current time frame,
# plot it
else:
#increase the plotting counter by 1
plotted_count += 1
#call the default plotter function here; this also sets up the
# universal plotting parameters AND saves the figures all in one
plotter(sensor, save_dir, site_ID, var_name, units, averaged,
avg_window, mintime, maxtime, plot_opt, tag, df)
    #report how many daily intervals were plotted out of the total possible
    print(f"Plotted {plotted_count} of {total_count} days.")
    return
##############################################################################
########################### WEEKLY PLOTTER #############################
##############################################################################
#to plot figures on a weekly basis within the user-defined time frame, call
# this function
def weekly_plotter(sensor, save_dir, site_ID, var_name, units, averaged,
avg_window, mintime, maxtime, plot_opt, tag, df):
#tell the user that the function was called
print("------------------------------------------------------------------\n")
print("'weekly_plotter' function called...\n")
#find the starting index of the range over which to plot; this is simply
# 'mintime' but we will give it a different name because 'mintime' has a
    # different purpose within the 'for' loop below
start = mintime
#find the ending index of the range over which to plot; this will be the
# index of df.time that equals the 'maxtime' specified by the user; this,
# too, is simply equal to 'maxtime', but we must add 1 to the index because
# the 'range' function does not include the end value
end = maxtime + 1
#since data are recorded every minute, and there are 10080 minutes in 1 week,
# we set the range interval to 10080 (equivalent to 'every 10080th index');
#NOTE: this hard-coded method only works because we explicitly set each
# record at a 1-minute interval and we fill any gaps in data with NaNs;
# this will get more complicated if you start using raw seconds in
# your timestamps
interval = 10080
#set two counters: one to count the total number of weeks, and the other to
# count the number of weeks that were actually plotted (skipping weeks
# for which there are no data)
#NOTE: the total count is technically equivalent to n-1 total FULL weeks for
# time frames not perfectly divisible by 7 days; if the last interval
# does not equate to a full week but contains data, it will still be
# counted in the total count of weeks
total_count = 0 #count for ALL weeks possible within the time frame
plotted_count = 0 #count for all weeks that were plotted within the time frame
    #TODO: store a list of dates/times for weeks that were not plotted due to
    # missing data
#######################
#now, loop through the entire time frame set by the user with 1-week intervals
# making a week-long plot of the chosen variable for each iteration
for w in range(start,end,interval):
#increase total counter by 1
total_count += 1
        #'mintime' and 'maxtime' will now change through each iteration of the loop
# to serve as the indices defining the 1-week intervals throughout the
# dataset over which to plot; the difference between them, however, will
# always represent 1 week / 7 days (i.e. 10080)
mintime = w
maxtime = w + interval
#set 'maxtime' to the end of the range if 'maxtime' is out of the range
# specified; this will only occur if the range is not perfectly
# divisible by 1 week / 7 days; in this instance, the very last figure
# generated would NOT represent a 7 day period
if maxtime > end:
maxtime = end - 1 #now we subtract 1 because set_xlim() includes the
# upper limit given when setting the range; if we
# did not subtract 1, 'maxtime' would be out of
# range for set_xlim()
#skip the creation of plot if there are no data within the current 7-day
# period
if df[df.columns[1]][mintime:maxtime].empty == True:
#this little condition avoids printing the statement that
# "2019-01-08 00:00 - 2019-01-08 00:00" has no data because, of
# course, this is a single time so it has no length/data; this
# will only happen for datasets or given time frames that
# exactly equal 1 week / 7 days
pass
elif df[df.columns[1]][mintime:maxtime].notna().any() == False:
#tell the user this time frame was not plotted due to the absence of
# any data
print("%s - %s not plotted --> No data" % (df.time[mintime], df.time[maxtime]))
pass #move on the next iteration of the loop
#if there is even a single data point within the current time frame,
# plot it
else:
#increase the plotting counter by 1
plotted_count += 1
#call the default plotter function here; this also sets up the
# universal plotting parameters AND saves the figures all in one
plotter(sensor, save_dir, site_ID, var_name, units, averaged,
avg_window, mintime, maxtime, plot_opt, tag, df)
    #report how many weekly intervals were plotted out of the total possible
    print(f"Plotted {plotted_count} of {total_count} weeks.")
    return
##############################################################################
########################### MONTHLY PLOTTER ############################
##############################################################################
#to plot figures on a monthly basis within the user-defined time frame, call
# this function
def monthly_plotter(sensor, save_dir, site_ID, var_name,
units, averaged, avg_window, mintime, maxtime, plot_opt, tag, df):
#tell the user that the function was called
print("------------------------------------------------------------------\n")
print("'monthly_plotter' function called...\n")
#set two counters: one to count the total number of months, and the other to
# count the number of months that were actually plotted (skipping months
# for which there are no data)
#NOTE: the total count is technically equivalent to n-2 total FULL months for
# time frames that do not exactly start(end) on the very beginning(end)
# of the month; if the last interval does not equate to a full month
# but contains data, it will still be counted in the total count of
# days
total_count = 0 #count for ALL months possible within the time frame
plotted_count = 0 #count for all months that were plotted within the time frame
#this line will find the index of the very first day/time for each month in
# the whole dataframe
day1_of_month_idx = df.index[df.set_index('time').index.day == 1][::1440]
#this line will find the index(indices) of the very first day/time for each
# month within the time frame set by the user (within 'mintime' and
# 'maxtime')
day1_of_month_idx = day1_of_month_idx[(day1_of_month_idx>mintime) & (day1_of_month_idx<maxtime)]
#the starting index of the range over which to plot; this is the first
# index of 'day1_of_month_idx'; notice that if 'day1_of_month_idx' has
# length of 1, 'start' and 'day1_of_month_idx' are the same
start = day1_of_month_idx[0]
#before getting into the loop, to account for instances such that the
# dataset or the time frame set by the user begins with a partial month
# (e.g. the first timestamp is NOT 00:00 UTC on the first of the mont)
# plot that partial month all by itself only up to the index marking the
# first of the month in the dataset / timeframe (a.k.a. 'start' variable)
if (day1_of_month_idx.empty == False) and (mintime not in day1_of_month_idx):
#increase total counter by 1
total_count += 1
#skip the creation of plot if there are no data within the current 1-day
# period
if (averaged == True or averaged == "static") and (df[df.columns[2]][mintime:start].notna().any() == False):
#we need this special except above because when 'averaged' is set
# to either of those two conditions, a new column is created in
# the dataframe and it contains a different number of NaNs than
# the column from which it was computed->created; the only
# change is that we are referring to df.columns[2] instead of
# df.columns[1]; this will likely have to change if I add the
# ability to average ANY variable (not just wind speed)
print("%s - %s not plotted --> No data" % (df.time[mintime], df.time[start]))
pass #don't plot
else:
if df[df.columns[1]][mintime:start].empty == True:
#this little condition avoids printing the statement that
# "2019-01-08 00:00 - 2019-01-08 00:00" has no data because, of
# course, this is a single time so it has no length/data; this
# will only happen for datasets or given time frames that
# exactly equal 1 day / 1440 minutes
pass
elif df[df.columns[1]][mintime:start].notna().any() == False:
#tell the user this time frame was not plotted due to the absence of
# any data
print("%s - %s not plotted --> No data" % (df.time[mintime], df.time[start]))
pass #move on the next iteration of the loop
#if there is even a single data point within the current time frame,
# plot it
else:
#increase the plotting counter by 1
plotted_count += 1
#call the default plotter function here; this also sets up the
# universal plotting parameters AND saves the figures all in one
plotter(sensor, save_dir, site_ID, var_name, units, averaged,
avg_window, mintime, start, plot_opt, tag, df)
#find the ending index of the range over which to plot; this will be the
# index of df.time that equals the 'maxtime' specified by the user; this,
# too, is simply equal to 'maxtime'
end = maxtime
    #TODO: store a list of dates/times for months that were not plotted due to
    # missing data
#######################
    #now, loop through the entire time frame set by the user with 1-month intervals
    # making a month-long plot of the chosen variable for each iteration
for m in range(len(day1_of_month_idx)):
#increase total counter by 1
total_count += 1
        #'mintime' and 'maxtime' will now change through each iteration of the loop
# to serve as the indices defining the 1-month intervals throughout
# the dataset over which to plot; the difference between them will
# vary depending on the date/time represented by them because the
# duration of months are not always equal
mintime = day1_of_month_idx[m]
if mintime == day1_of_month_idx[-1] and mintime == end:
#exit the loop if 'mintime' and 'end' are both equal to the last
# element of 'day1_of_month_idx'
break
elif mintime == day1_of_month_idx[-1] and mintime != end:
#set maxtime to 'end' if the last plot will not span a full month
# (i.e. the original 'maxtime' was not equal to the first of a
# month)
maxtime = end
else:
#set 'maxtime' to the next element of 'day1_of_month_idx', after
# 'mintime'
maxtime = day1_of_month_idx[m+1]
#skip the creation of plot if there are no data within the current 1-day
# period
if (averaged == True or averaged == "static") and (df[df.columns[2]][mintime:maxtime].notna().any() == False):
#we need this special except above because when 'averaged' is set
# to either of those two conditions, a new column is created in
# the dataframe and it contains a different number of NaNs than
# the column from which it was computed->created; the only
# change is that we are referring to df.columns[2] instead of
# df.columns[1]; this will likely have to change if I add the
# ability to average ANY variable (not just wind speed)
print("%s - %s not plotted --> No data" % (df.time[mintime], df.time[maxtime]))
pass #don't plot
else:
if df[df.columns[1]][mintime:maxtime].empty == True:
#this little condition avoids printing the statement that
# "2019-01-08 00:00 - 2019-01-08 00:00" has no data because, of
# course, this is a single time so it has no length/data; this
# will only happen for datasets or given time frames that
# exactly equal 1 day / 1440 minutes
pass
elif df[df.columns[1]][mintime:maxtime].notna().any() == False:
#tell the user this time frame was not plotted due to the absence of
# any data
print("%s - %s not plotted --> No data" % (df.time[mintime], df.time[maxtime]))
pass #move on the next iteration of the loop
#if there is even a single data point within the current time frame,
# plot it
else:
#increase the plotting counter by 1
plotted_count += 1
#call the default plotter function here; this also sets up the
# universal plotting parameters AND saves the figures all in one
plotter(sensor, save_dir, site_ID, var_name, units, averaged,
avg_window, mintime, maxtime, plot_opt, tag, df)
    #report how many monthly intervals were plotted out of the total possible
    print(f"Plotted {plotted_count} of {total_count} months.")
    return
''' From here, you will want to use the indices in that list as the starting
index from which to plot for each monthly plot. You only want to plot from
that index to the next index in the list -1 in the dataframe (e.g. index
for 2020-01-01 00:00 is 1236915 and index for 2020-02-01 00:00 is 1281555
    so you plot from df[1236915:1281555]; because indexing is exclusive
    of the ending value, you don't have to worry about subtracting 1). For the
very last plot in the dataset, you will have to make sure that it plots to
the end of the time frame, even if not a complete month. Something similar
to what you did with weekly plotting such that for the last iteration, you
set the ending index to the last index of the dataframe+1 but make sure to
subtract that +1 again so that set_xlim is not out of range'''
# #returns a boolean array indicating whether the datetimeindex given is the
# # start of the month (doesn't care about time so any time on the first of
# # the month will also return True); might be useful!
# pd.DatetimeIndex(df.time[mintime:maxtime]).is_month_start
##############################################################################
#this module is meant to be imported by the parent 3D_main.py program; the
# plotting functions all require arguments, so calling them bare here would
# raise a TypeError
if __name__ == "__main__":
    pass
    #plotter()
    #daily_plotter()
    #weekly_plotter()
    #monthly_plotter()
| [
"[email protected]"
] | |
9771e560a49f34cfc2e40084ce20457d90961989 | ab42fea7aaf909a9cdf63cbde2d169be841e4c01 | /SHDY/merchant_agent_foreign/There_is_no_card/银二交易/银二提现.py | 0661da3b483161be175ddd1e7636274f1db3096c | [] | no_license | niah161951/pytest | 0e9692086f9ec450a7b1d5a3b310db807ab8982f | 14b48950d956bbf05d226ba8dd39223a4bdd40e3 | refs/heads/master | 2022-11-05T16:54:10.408068 | 2020-06-22T08:18:36 | 2020-06-22T08:18:36 | 265,437,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 868 | py | # -*- coding: utf-8 -*-
# @The_author: Lenovo
# @Time : 2020/2/5 9:17
import requests
from SHDY.Public.conf import Config
tradeNo = Config().get_param("mer_order_no")
dyMchNo = Config().get_param("mercid")
dyMchNo1 = Config().get_param("id")
url = 'http://localhost:8080/DemoJava/settle/mer/pay'
data = {
    "orgNumber":"121",      #institution number
    "dyMchNo":dyMchNo,      #DianYin merchant number
    "amt":"449",            #withdrawal amount
    "fee":"1",              #withdrawal fee
    "tradeNo":tradeNo,      #transaction serial number, taken from the original trade
    "tradeDate":"20200219", #trade date
    "tradeTime":"174323",   #trade time
    "token":"e41669b81958498182f6550a02ff13c0"
}
reg = requests.post(url,json=data).json()
print(reg) | [
"[email protected]"
] | |
b2fd0b0e0edb47a2a5bbc7365ac7cf6d117dd589 | 1470cc68d35157084fca66ce76f342b3dd481d22 | /test.py | 18b1c2a3c4d4d315fec7a4d8b0116763cf30d26c | [] | no_license | desmondykl/AI-assignment-2 | c44a35d946945ee4e48f265d7ba92a1f1a29deeb | b5074e9fe00c5c6217e890d2c157442179410b9d | refs/heads/main | 2023-04-08T16:43:01.549268 | 2021-04-04T09:44:53 | 2021-04-04T09:44:53 | 354,506,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,165 | py | import argparse
import random
from environment import TreasureCube
import numpy as np
import matplotlib.pyplot as plt
import pprint
import pandas as pd
# you need to implement your agent based on one RL algorithm
class RandomAgent(object):
def __init__(self):
self.action_space = ['left','right','forward','backward','up','down'] # in TreasureCube
self.Q = []
def take_action(self, state):
action = random.choice(self.action_space)
return action
# implement your train/update function to update self.V or self.Q
# you should pass arguments to the train function
def train(self, state, action, next_state, reward):
pass
class Q_learnAgent(object):
def __init__(self):
self.action_space = ['left','right','forward','backward','up','down'] # in TreasureCube
self.dim = 4
        self.explorationRate = 0.01  # epsilon for epsilon-greedy action selection
        self.discountFactor = 0.99   # gamma
        self.learningRate = 0.5      # alpha
self.Q = {}
for z in range(self.dim):
for x in range(self.dim):
for y in range(self.dim):
allPossibleAction = {}
for a in self.action_space:
allPossibleAction[a] = 0
self.Q[str(z)+str(x)+str(y)] = allPossibleAction
def print_Qtable(self):
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 500)
pd.set_option('display.max_colwidth', None)
Qtable = pd.DataFrame.from_dict(self.Q, orient='index', columns=self.action_space)
print(Qtable)
        Qtable.to_csv(r'export_dataframe.csv', index=True, header=True)  # keep the state index so rows stay identifiable
    def take_action(self, state):
        # epsilon-greedy: explore with probability explorationRate, otherwise
        # pick uniformly among the highest-valued actions for this state
        if random.random() < self.explorationRate:
action = random.choice(self.action_space)
else:
Q_value_action = self.Q[state]
maxKey = max(Q_value_action, key=Q_value_action.get)
maxValue =Q_value_action[maxKey]
BestAction = []
for a in Q_value_action:
if Q_value_action[a] == maxValue:
BestAction.append(a)
action = np.random.choice(BestAction)
return action
# implement your train/update function to update self.V or self.Q
# you should pass arguments to the train function
    def train(self, state, action, next_state, reward):
        # Q-learning update:
        # Q(s,a) <- Q(s,a) + alpha * (r + gamma * max_a' Q(s',a') - Q(s,a))
        #old estimation
        Q_old = self.Q[state][action]
        #new sample: best action value available from the next state
        Q_old_next = self.Q[next_state]
        max_Q_old_next = Q_old_next[max(Q_old_next, key=Q_old_next.get)]
        #new estimation
        self.Q[state][action] = Q_old + self.learningRate*(reward + self.discountFactor*max_Q_old_next - Q_old)
def test_cube(max_episode, max_step):
env = TreasureCube(max_step=max_step)
agent = Q_learnAgent()
reward_per_episode = []
    for episode_num in range(0, max_episode):
state = env.reset()
terminate = False
t = 0
episode_reward = 0
while not terminate:
action = agent.take_action(state)
reward, terminate, next_state = env.step(action)
episode_reward += reward
# you can comment the following two lines, if the output is too much
#env.render() # comment
#print(f'step: {t} state:{state} action: {action}, reward: {reward}') # comment
t += 1
agent.train(state, action, next_state, reward)
state = next_state
reward_per_episode.append(episode_reward)
        print(f'episode: {episode_num}, total_steps: {t} episode reward: {episode_reward}')
agent.print_Qtable()
plt.title('Episode rewards vs Episodes')
plt.xlabel('Episodes')
plt.ylabel('Episode rewards')
    plt.plot(reward_per_episode)
    plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Test')
parser.add_argument('--max_episode', type=int, default=500)
parser.add_argument('--max_step', type=int, default=500)
args = parser.parse_args()
test_cube(args.max_episode, args.max_step)
| [
"[email protected]"
] | |
d78ef6ecd64be809897d19645a1bbe40685bf247 | b08d42933ac06045905d7c005ca9c114ed3aecc0 | /src/coefSubset/evaluate/ranks/twentyPercent/rank_2xra_D.py | d4d58ac3ba1c1b78c36a4f1f987552c3ea5935e1 | [] | no_license | TanemuraKiyoto/PPI-native-detection-via-LR | d148d53f5eb60a4dda5318b371a3048e3f662725 | 897e7188b0da94e87126a4acc0c9a6ff44a64574 | refs/heads/master | 2022-12-05T11:59:01.014309 | 2020-08-10T00:41:17 | 2020-08-10T00:41:17 | 225,272,083 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,392 | py | # 9 July 2019
# Kiyoto Aramis Tanemura
# Several metrics are used to assess the performance of the trained RF model, notably native ranking. This script returns a ranking of the native protein-protein complex among a decoy set. For convenience, I will define as a function and will call in a general performance assessment script.
# Modified 11 July 2019 by Kiyoto Aramis Tanemura. To parallelize the process, I will replace the for loop for the testFileList to a multiprocessing pool.
# Modified 9 September 2019 by Kiyoto Aramis Tanemura. I will use the function to perform the calculation on one CSV file only. Thus instead of a function to import in other scripts, they will be individual jobs parallelized as individual jobs in the queue.
import os
import pandas as pd
import numpy as np
import pickle
os.chdir('/mnt/scratch/tanemur1/')
# Read the model and trainFile
testFile = '2xra.csv'
identifier = 'D'
coefFrac = 0.2
testFilePath = '/mnt/scratch/tanemur1/CASF-PPI/nonb_descriptors/complete/'
modelPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/twentyPercent/'
outputPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/evaluate/twentyPercent/ranks/'
pdbID = testFile[:4]
with open(modelPath + 'model' + identifier + '.pkl', 'rb') as f:
clf = pickle.load(f)
result = pd.DataFrame()
scoreList = []
df1 = pd.read_csv(testFilePath + testFile)
dropList = ['Unnamed: 0', 'Unnamed: 0.1', 'ref']
df1 = df1.drop(dropList, axis = 1)
df1 = df1.set_index('Pair_name')
df1 = pd.DataFrame(df1.values.T, columns = df1.index, index = df1.columns)
df1.fillna(0.0, inplace = True)
#df1 = df1.reindex(sorted(df1.columns), axis = 1)
# Keep coefficients within the given fraction when ordered by decreasing order of coefficient magnitude
coefs = pd.read_csv('/mnt/home/tanemur1/6May2019/2019-11-11/results/medianCoefs.csv', index_col = 0, header = None, names = ['coefficients'])
coefs['absVal'] = np.abs(coefs['coefficients'])
coefs.sort_values(by = 'absVal', ascending = False, inplace = True)
coefs = coefs[:int(14028 * coefFrac + 0.5)]
keepList = list(coefs.index)
del coefs
df1 = df1[keepList]
df1 = df1.reindex(sorted(df1.columns), axis = 1)
with open(modelPath + 'standardScaler' + identifier + '.pkl', 'rb') as g:
scaler = pickle.load(g)
for i in range(len(df1)):
# subtract from one row each row of the dataframe, then remove the trivial row[[i]] - row[[i]]. Also some input files have 'class' column. This is erroneous and is removed.
df2 = pd.DataFrame(df1.iloc[[i]].values - df1.values, index = df1.index, columns = df1.columns)
df2 = df2.drop(df1.iloc[[i]].index[0], axis = 0)
    # Standardize input DF using the standard scaler used for training data.
df2 = scaler.transform(df2)
# Predict class of each comparison descriptor and sum the classes to obtain score. Higher score corresponds to more native-like complex
predictions = clf.predict(df2)
score = sum(predictions)
scoreList.append(score)
# Make a new DataFrame to store the score and corresponding descriptorID. Add rank as column. Note: lower rank corresponds to more native-like complex
result = pd.DataFrame(data = {'score': scoreList}, index = df1.index.tolist()).sort_values(by = 'score', ascending = False)
result['rank'] = range(1, len(result) + 1)
with open(outputPath + pdbID + identifier + '.csv', 'w') as h:
result.to_csv(h)
| [
"[email protected]"
] | |
af6fb6ffc288d9afb0dd158a13136688eab72366 | 4d2ece7d5ba6ac767ffac5d8a14a0f0bee4e2d70 | /class_linked.py | b51aaf8eb14a5f32876752c10d1301d3c5dd7895 | [] | no_license | HarshSharma009/Python | dec5ba5ab535d949b396e5b3d1f597d15eceb6c9 | a2393b88b58b740a0fd710eb4e9d67b8e26c6b61 | refs/heads/master | 2021-06-26T14:25:58.397856 | 2021-06-12T05:38:49 | 2021-06-12T05:38:49 | 228,650,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,470 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 2 14:43:41 2020
@author: Harsh Sharma
"""
class Node:
def __init__(self,value=None):
self.value=value
self.next=None
def isempty(self):
return (self.value==None)
def append(self,v):
if self.isempty():
self.value=v
elif self.next==None:
nn=Node(v)
self.next=nn
else:
(self.next).append(v)
def inserthead(self,v):
if self.isempty():
self.value=v
return
else:
nn=Node(v)
nn.value,self.value=self.value,nn.value
self.next,nn.next=nn,self.next
return
def bubblesort(self):
t1=self
t2=self.next
while t1!=None:
tmp=t1
t2=t1.next
while t2!=None:
if t1.value>t2.value:
t1.value,t2.value=t2.value,t1.value
t1=t1.next
t2=t2.next
t1=tmp.next
def bubblesort1(self):
t1=self
n=self.listLength()
for i in range(n):
for j in range(n-i-1):
if t1.next.value<t1.value:
t1.next.value,t1.value=t1.value,t1.next.value
t1=t1.next
t1=self
@classmethod
def addll(cls,l1,l2):
n=Node(l1.value+l2.value)
while l1.next!=None and l2.next!=None:
l1=l1.next;l2=l2.next
n.append(l1.value+l2.value)
return n
    def listLength(self):
        le=1
        l=self
        while l.next is not None:
            le+=1
            l=l.next  # advance the cursor; assigning to l.next here would destroy the list
        return le
def count(self,c=0):
if self.next==None:
return 1
else:
return 1+self.next.count()
def delete(self,v):
if self.isempty():
print('Empty List')
return
if self.value ==v:
if self.next == None:
self.value=None
return
else:
self.value=self.next.value
self.next=self.next.next
return
tmp=self
while tmp.next!=None:
if tmp.next.value==v:
tmp.next=tmp.next.next
return
else:
tmp=tmp.next
else:
print('not in list')
return
@classmethod
def createList(cls,l):
n=Node()
for i in l:
n.append(i)
return n
@classmethod
def concat(cls,l1,l2):
while l1.next != None:
l1=l1.next
l1.next=l2
@classmethod
def reverse(cls,l):
prev=None
current_node=l
while current_node is not None:
n=current_node.next
current_node.next=prev
prev=current_node
current_node=n
return prev
def swap(self):
if self.next==None:
return
else:
(self.value,self.next.value)=(self.next.value,self.value)
self.next.swap()
def leftrotate(self,n=1):
n=n%self.listLength()
for i in range(n):
self.swap()
def rswap(self):
if self.next==None:
return
else:
tmp=self
while tmp.next!=None:
tmp=tmp.next
while self.next.next!=None:
self.value,tmp.value=tmp.value,self.value
self=self.next
self.value,tmp.value=tmp.value,self.value
def riswap(self):
t1=self
while t1.next.next!=None:
t1=t1.next
t2=t1.next
t1.next=None
self.inserthead(t2.value)
def rightrotate(self,n=1):
n=n%self.listLength()
for i in range(n):
self.riswap()
def show(self):
if self.isempty():
return
elif self.next==None:
print(self.value)
return
else:
print(str(self.value)+'-->',end='')
(self.next).show()
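#Example usage (illustrative addition, not part of the original file):
if __name__ == '__main__':
    l = Node.createList([3, 1, 2])
    l.show()        #prints 3-->1-->2
    l.bubblesort()
    l.show()        #prints 1-->2-->3
    l.rightrotate()
    l.show()        #prints 3-->1-->2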
| [
"[email protected]"
] | |
d9e98f51953a04b22a2f8bc4fd66c62fb3eb046a | 18dca9a552f5aa9303536613ec39f19cebf6647c | /Evaluation/OnlineEvaluation/grouped_nlppapers.py | 6d8de263c13fa0316b9700846890cd35bbd31a54 | [
"MIT"
] | permissive | ashwath92/MastersThesis | 9a39ed7eec825ed559d09507721c21bd12e2ab9c | f74755dc0c32f316da3c860dd5dbfa4c9cad97b3 | refs/heads/master | 2021-08-16T12:01:33.282459 | 2020-06-27T16:00:16 | 2020-06-27T16:00:16 | 197,282,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,061 | py | import pandas as pd
import re
import contractions
from gensim.parsing import preprocessing
from HyperDoc2Vec import *
hd2vmodel = HyperDoc2Vec.load('/home/ashwath/Programs/MAGCS/MAG-hyperdoc2vec/models/magcsenglish_window20.model')
def clean_text(text):
""" Cleans the text in the only argument in various steps
ARGUMENTS: text: content/title, string
RETURNS: cleaned text, string"""
# Replace newlines by space. We want only one doc vector.
text = text.replace('\n', ' ').lower()
# Remove URLs
text = re.sub(r"http\S+", "", text)
# Expand contractions: you're to you are and so on.
text = contractions.fix(text)
# Remove stop words
text = preprocessing.remove_stopwords(text)
# Remove punctuation -- all special characters
text = preprocessing.strip_multiple_whitespaces(preprocessing.strip_non_alphanum(text))
return text
filename='AllNLPcontexts.tsv'
df = pd.read_csv(filename, sep='\t', encoding='utf-8') #17126
df = df[~df.duplicated()] # 17111
# KEEP ONLY CONTEXTS WHICH ARE IN THE TRAINING SET
df = df[df.groundtruth.isin(hd2vmodel.docvecs.offset2doctag)] # 9206
df['normcontext'] = df['context'].apply(clean_text)
contextdict = {normalized: original for normalized, original in df[['normcontext', 'context']].values}
df['wordcount'] = df['normcontext'].apply(lambda x: len(x.split()))
# Remove contexts with fewer than 9 non-stop words
df = df[df.wordcount>8] # 9019
grouped_df = df.groupby(['magid', 'title', 'normcontext'])['groundtruth'].apply(list).to_frame('ground_truth').reset_index() # 8356
grouped_df['context'] = grouped_df['normcontext'].map(contextdict)
grouped_df = grouped_df.drop('normcontext', axis=1)
# Convert the list to a string with a comma between mag ids (no space)
grouped_df['ground_truth'] = grouped_df['ground_truth'].apply(lambda x: ','.join(str(j) for j in x))
# 8356 left
grouped_df.to_csv('/home/ashwath/Programs/OnlineEvaluation/NLPcontexts_grouped.tsv', sep='\t', encoding='utf-8', index=False)
print(grouped_df.shape)
| [
"[email protected]"
] | |
62ac8f0793cc301b038d63a9b03e49348f29bf26 | 49e703cdf72110b2be85e94b71dcbe88660f05e7 | /ssz/sedes/integer.py | 2ec7bae305f54ce570acc64bc3aea1c19db8fd3a | [
"MIT"
] | permissive | vapory-staging/py-ssz | becf67f5b03e171719c7166ffe28eaa1a9f09b30 | 4968148ff7fd86d53d7d082fc0f218d18fc6a4c9 | refs/heads/master | 2021-10-10T10:20:53.913112 | 2019-01-07T05:48:51 | 2019-01-07T05:48:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,417 | py | from ssz.exceptions import (
DeserializationError,
SerializationError,
)
class UnsignedInteger:
"""
A sedes for integers (uint<N>).
"""
num_bytes = 0
def __init__(self, num_bits):
# Make sure the number of bits are multiple of 8
if num_bits % 8 != 0:
raise ValueError(
"Number of bits should be multiple of 8"
)
if num_bits <= 0:
raise ValueError(
"Number of bits should be greater than 0"
)
self.num_bytes = num_bits // 8
def serialize(self, val):
if isinstance(val, bool) or not isinstance(val, int):
raise SerializationError(
'As per specified sedes object, can only serialize non-negative integer values',
val
)
if val < 0:
raise SerializationError(
'As per specified sedes object, can only serialize non-negative integer values',
val
)
try:
serialized_obj = val.to_bytes(self.num_bytes, 'big')
except OverflowError as err:
raise SerializationError('As per specified sedes object, %s' % err, val)
return serialized_obj
def deserialize_segment(self, data, start_index):
"""
Deserialize the data from the given start_index
"""
# Make sure we have sufficient data for deserializing
        if len(data) - start_index < self.num_bytes:
raise DeserializationError(
'Insufficient data for deserializing',
data
)
end_index = start_index + self.num_bytes
return int.from_bytes(data[start_index:end_index], 'big'), end_index
def deserialize(self, data):
deserialized_data, end_index = self.deserialize_segment(data, 0)
if end_index != len(data):
raise DeserializationError(
'Data to be deserialized is too long',
data
)
return deserialized_data
uint8 = UnsignedInteger(8)
uint16 = UnsignedInteger(16)
uint24 = UnsignedInteger(24)
uint32 = UnsignedInteger(32)
uint40 = UnsignedInteger(40)
uint48 = UnsignedInteger(48)
uint56 = UnsignedInteger(56)
uint64 = UnsignedInteger(64)
uint128 = UnsignedInteger(128)
uint256 = UnsignedInteger(256)
uint384 = UnsignedInteger(384)
uint512 = UnsignedInteger(512)
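# Editor's sketch (not part of the original module): a minimal round-trip
# through one of the sedes objects defined above.
if __name__ == "__main__":
    encoded = uint16.serialize(513)
    assert encoded == b"\x02\x01"  # 513 == 0x0201, big-endian
    assert uint16.deserialize(encoded) == 513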
| [
"[email protected]"
] | |
d92679ec536a61db4990fe12808f260b7736ae90 | a43c965e30916dc197b0995efcc8370627fe4d70 | /runTrial.py | 91a81bb60e8f898103de159f63dbbc87bfe5dbb9 | [
"MIT"
] | permissive | avkhadiev/mdfun | 021200d369a6946d5b036fc741359f848abf2c6d | cbf5b5ab01f6f14dd2cb9e3078a035d62ef3228f | refs/heads/master | 2021-01-19T09:27:25.447428 | 2017-04-26T14:53:00 | 2017-04-26T14:53:00 | 82,113,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,010 | py | # -*- coding: utf-8 -*-
#
# 1D Simple Harmonic Oscillator MD simulation
# launches a single simulation based on the provided configuration
# TODO calculates and saves required observables in the file
#
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-n", "--name",
action="store", type="string", dest="name",
help="specify simulation type")
parser.add_option("-s", "--simulation",
action="store", type="string", dest="sim",
help="specify simulation type")
parser.add_option("-c", "--config",
action="store", type="string", dest="config",
help="specify configuration via ':'-separated string")
parser.add_option("-d", "--debug",
action="store_true", dest="debug",
help="turn on debugging mode")
parser.add_option("-v",
action="store_true", dest="verbose",
help ="print status messages to standartd output")
parser.add_option("-q",
action="store_false", dest="verbose",
help ="don't print status messages to standartd output")
parser.add_option("-t", "--test",
action="store_true", dest="test",
help ="run tests")
parser.set_defaults(name="newTrial",
simulation="sho",
config = ":".join([
"dt=0.001",
"integration=velocityVerlet"]),
debug = False,
verbose = True,
test = False)
(options, args) = parser.parse_args()
###############################################################################
from md import simulate
name = options.name
sim = options.sim
verbose = options.verbose
debug = options.debug
test = options.test
if (verbose): print "Verbose mode: on\n"
if (debug): print "Debug mode: on\n"
if (test): print "Test mode: on\n"
config = simulate.parse(options.config)
if (test):
simulate.time_reversal( sim, config, verbose, debug )
else:
simulate.run( sim, name, config, verbose, debug )
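# Editor's note (illustrative usage; the trial name and config values are made
# up, and the md package from this repo must be on PYTHONPATH):
#   python runTrial.py -n shoTrial -s sho -c "dt=0.001:integration=velocityVerlet" -v
#   python runTrial.py --test    # runs the time-reversal check instead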
| [
"[email protected]"
] | |
9b8b6cb47acb3e04aeeb079a08753d5a1a3e9123 | 3f47573a56d7347b125c91f69114b440f3f868ce | /bundle/jedi-vim/jedi/test/test_utils.py | 049dffd0f3cede659fb389acd518036d3558d647 | [
"MIT"
] | permissive | utylee/.vimwin | 25c53eabeea8a5e6e69e4b7fab761e1e442840e3 | a7714412d6cdc7744fe510617c62aa18358bdd99 | refs/heads/master | 2021-06-24T18:07:39.201913 | 2017-09-13T01:24:39 | 2017-09-13T01:24:39 | 103,337,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,663 | py | try:
import readline
except ImportError:
readline = False
from jedi import utils
from .helpers import unittest, cwd_at
@unittest.skipIf(not readline, "readline not found")
class TestSetupReadline(unittest.TestCase):
class NameSpace(object):
pass
def __init__(self, *args, **kwargs):
super(type(self), self).__init__(*args, **kwargs)
self.namespace = self.NameSpace()
utils.setup_readline(self.namespace)
def completions(self, text):
completer = readline.get_completer()
i = 0
completions = []
while True:
completion = completer(text, i)
if completion is None:
break
completions.append(completion)
i += 1
return completions
def test_simple(self):
assert self.completions('list') == ['list']
assert self.completions('importerror') == ['ImportError']
s = "print BaseE"
assert self.completions(s) == [s + 'xception']
def test_nested(self):
assert self.completions('list.Insert') == ['list.insert']
assert self.completions('list().Insert') == ['list().insert']
def test_magic_methods(self):
assert self.completions('list.__getitem__') == ['list.__getitem__']
assert self.completions('list().__getitem__') == ['list().__getitem__']
def test_modules(self):
import sys
import os
self.namespace.sys = sys
self.namespace.os = os
try:
assert self.completions('os.path.join') == ['os.path.join']
assert self.completions('os.path.join().upper') == ['os.path.join().upper']
c = set(['os.' + d for d in dir(os) if d.startswith('ch')])
assert set(self.completions('os.ch')) == set(c)
finally:
del self.namespace.sys
del self.namespace.os
def test_calls(self):
s = 'str(bytes'
assert self.completions(s) == [s, 'str(BytesWarning']
def test_import(self):
s = 'from os.path import a'
assert set(self.completions(s)) == set([s + 'ltsep', s + 'bspath'])
assert self.completions('import keyword') == ['import keyword']
import os
s = 'from os import '
goal = set([s + el for el in dir(os)])
# There are minor differences, e.g. the dir doesn't include deleted
# items as well as items that are not only available on linux.
assert len(set(self.completions(s)).symmetric_difference(goal)) < 20
@cwd_at('test')
def test_local_import(self):
s = 'import test_utils'
assert self.completions(s) == [s]
def test_preexisting_values(self):
self.namespace.a = range(10)
assert set(self.completions('a.')) == set(['a.' + n for n in dir(range(1))])
del self.namespace.a
def test_colorama(self):
"""
Only test it if colorama library is available.
This module is being tested because it uses ``setattr`` at some point,
which Jedi doesn't understand, but it should still work in the REPL.
"""
try:
# if colorama is installed
import colorama
except ImportError:
pass
else:
self.namespace.colorama = colorama
assert self.completions('colorama')
assert self.completions('colorama.Fore.BLACK') == ['colorama.Fore.BLACK']
del self.namespace.colorama
def test_version_info():
assert utils.version_info()[:2] > (0, 7)
| [
"[email protected]"
] | |
5643d6fa09f8461e8561a37f89cf1607c6f6d73f | 9e02354f6cbfd6529e0003b79c5a6ce0926ff573 | /rtcommit.py | 78fdc1e1b2b711327ec250fa79775745db3e3e30 | [] | no_license | jhuttner/rtcommit | 40997f1335d66a374e1df8db28fa2e75481ca55e | a0ca34131c0ae48fb484092980217cf6d338eae9 | refs/heads/master | 2021-03-12T23:48:11.178757 | 2011-03-22T21:33:56 | 2011-03-22T21:33:56 | 1,444,980 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,617 | py | #!/usr/local/bin/python
# Written by Joseph Huttner -- [email protected]
# Licensed under the GNU General Public License
#
# Change Log
#
# Jan 7, 2011.
# First release. Commit against an RT ticket and have the
# commit message automatically populate with the RT subject.
#
# Jan 17
# Add support for use as Git subcommand
# Add support for rt#id=0
#
# Feb 7 -
# Add support for committing against multiple tickets in one commit.
#
# Feb 28
# "git rt" without ticket number uses ticket IDs from
# last time they were passed
#
# March 5
# Add history option with -b flag
# Usage "git rt -b 3" populates commit file w/ last 3 rt tix that were
# referenced in commit messages. (Does not work for rt=0)
#
# March 12
# Add xmpp capability. Requires Git post commit hook.
# Note: Once I switch off of git-svn the post commit hook will become a post
# update hook.
#
# March 13
# Make 'git rtcommit init' set everything up.
# Only import xmpp when necessary
#
# March 14
# Put recipients and message inside --blast. Separate with colon.
import base64
from datetime import datetime
import getopt
import os
import shlex
import subprocess
import sys
try:
import json
except:
import simplejson as json
xmpp = None
TMP_FILE = '/tmp/rtcommit'
RT_HISTORY_FILE = os.path.join(os.getcwd(), '.rtcommit/history.json')
BLAST_FILE = os.path.join(os.getcwd(), '.rtcommit/blast.json')
ALIAS_FILE = os.path.join(os.getenv('HOME'), 'xmpp-aliases.json')
XMPP_CONFIG_FILE = os.path.join(os.getenv('HOME'), 'xmpp-config.json')
################################################################################
# Helper functions
def read(path, default='', as_json=False):
try:
fobj = open(path, 'r')
data = fobj.read()
fobj.close()
if as_json:
data = json.loads(data)
return data
except IOError:
return default
except ValueError:
return default
def write(path, data, as_json=False):
fobj = open(path, 'w')
if as_json:
data = json.dumps(data)
fobj.write(data)
fobj.close()
################################################################################
# RT functions
def format_ticket_id(ticket_id):
return '00000' if int(ticket_id) == 0 else ticket_id
def get_ticket_subject(ticket_id):
if int(ticket_id) == 0:
return 'YourMessageHere'
else:
command = 'rt show -t ticket ' + ticket_id + ' -f subject,id'
subprocess.call(shlex.split(command), stdout=file(TMP_FILE, 'w'))
subject = open(TMP_FILE).readline().split(":", 1)[1].strip()
return subject
def make_tmp_commit_file(ticket_ids):
# TODO - make the commit msg formatted correctly
commit_msg_parts = []
for tid in ticket_ids:
formatted_tid = format_ticket_id(str(tid));
msg = " ".join(['#rt#' + formatted_tid + ':', get_ticket_subject(tid)])
commit_msg_parts.append(msg)
commit_msg = '\n'.join(commit_msg_parts)
write(TMP_FILE, commit_msg)
def update_history_file(current, newest):
if not current:
result = newest
else:
for _id in reversed(newest):
            # compare against _id (the loop variable), not the builtin id();
            # skip the rt#0 placeholder ticket
            if current[0] != _id and int(_id) != 0:
current.insert(0, _id)
result = current
write(RT_HISTORY_FILE, result, True)
def exec_git_commit():
command = 'git commit -a --edit -F /tmp/rtcommit'
subprocess.call(shlex.split(command))
################################################################################
# Blast functions
class Blast(object):
def __init__(self):
self.aliases = read(ALIAS_FILE, {}, True)
self.blasts = read(BLAST_FILE, [], True)
self.xmpp_config = read(XMPP_CONFIG_FILE, as_json=True)
def not_at_no_op(self):
return self.blasts and 'no_op' not in self.blasts[0]
def store_blast(self, blast):
if blast.find(':') != -1:
to, msg = blast.split(':', 1)
else:
to = blast
msg = ''
to = [s.strip() for s in to.split(',')]
timestamp = datetime.utcnow()
new_blast = {
'to': to,
'msg': msg.strip(),
'timestamp': str(timestamp),
}
curr_blasts = read(BLAST_FILE, [], True)
curr_blasts.insert(0, new_blast)
write(BLAST_FILE, curr_blasts, True)
def xmpp_connect(self):
client = self.xmpp_config['client']
server = self.xmpp_config['server']
port = self.xmpp_config['port']
username = self.xmpp_config['username']
password = base64.b64decode(self.xmpp_config['password'])
self.cnx = xmpp.Client(client, debug=[])
# Turn on debugging
#self.cnx = xmpp.Client(client)
self.cnx.connect(server=(server, port))
self.cnx.auth(username, password, 'botty')
def _get_alias_type(self, alias):
if alias in self.aliases['group']:
return 'group'
elif alias in self.aliases['user']:
return 'user'
return 'unknown_type'
def _send_group_blast(self, room, blast):
nickname = self.xmpp_config['nickname']
self.cnx.send(xmpp.Presence(to="%s/%s" % (room, nickname)))
msg = xmpp.protocol.Message(body=blast)
msg.setTo(room)
msg.setType('groupchat')
self.cnx.send(msg)
def _send_user_blast(self, to, content):
msg = xmpp.Message(to, content)
self.cnx.send(msg)
def store_no_op(self):
"""No-op prohibits blast from executing."""
curr_blasts = read(BLAST_FILE, [], True)
curr_blasts.insert(0, {'no_op': 1})
write(BLAST_FILE, curr_blasts, True)
def get_git_commit_info(self):
command = 'git log -n 1'
subprocess.call(shlex.split(command), stdout=file(TMP_FILE, 'w'))
data = open(TMP_FILE).read()
return data
def send_blast(self):
xmpp_targets = {
'group': [],
'user': [],
'unknown_type': [],
}
if self.not_at_no_op():
blast = self.blasts[0]
for recipient in blast['to']:
_type = self._get_alias_type(recipient)
if _type in ['group', 'user']:
full = self.aliases[_type][recipient]
xmpp_targets[_type].append(full)
else:
xmpp_targets[_type].append(recipient)
parts = [blast['msg'], '', self.get_git_commit_info()]
msg = '\n'.join(parts)
for r in xmpp_targets['group']:
self._send_group_blast(r, msg)
# users.append(current_user) ?
# treat the unknowns as users
for r in xmpp_targets['user'] + xmpp_targets['unknown_type']:
self._send_user_blast(r, msg)
################################################################################
# Initialization functions
def is_initialized():
look_for = os.path.join(os.getcwd(), '.rtcommit')
return os.path.exists(look_for)
def initialize():
if is_initialized():
return 'Error. Directory already exists.'
else:
def prepend_cwd(*items):
"""Build up paths with os.getcwd more easily."""
items = list(items)
items.insert(0, os.getcwd())
return os.path.join(*items)
history_parts = ['touch', prepend_cwd('.rtcommit', 'history.json')]
blast_parts = ['touch', prepend_cwd('.rtcommit', 'blast.json')]
mkdir_cmd = ' '.join(['mkdir', prepend_cwd('.rtcommit')])
init_history_file_cmd = ' '.join(history_parts)
init_blast_file_cmd = ' '.join(blast_parts)
subprocess.call(shlex.split(mkdir_cmd))
subprocess.call(shlex.split(init_history_file_cmd))
subprocess.call(shlex.split(init_blast_file_cmd))
return 'RT Commit library initialized successfully. To remove, \
\'rm -rf .rtcommit\''
################################################################################
# Main function
def main(argv):
optlist, args = getopt.gnu_getopt(argv[0:], 'p:', ['blast=', 'send-blast'])
optlist = dict(optlist)
if 'init' in args:
result = initialize()
print result
return
else:
try:
assert is_initialized()
except AssertionError:
print 'You must initialize RT Commit with \'git rtcommit init\' \
before running that command.'
return
if '--send-blast' in optlist:
instance = Blast()
if instance.not_at_no_op():
global xmpp
xmpp = __import__('xmpp')
instance.xmpp_connect()
instance.send_blast()
instance.store_no_op()
return
if '--blast' in optlist:
instance = Blast()
blast = optlist['--blast']
instance.store_blast(blast)
cl_ticket_ids = args
history = read(RT_HISTORY_FILE, as_json=True)
if '-p' in optlist:
# Need 'map' because unicode breaks everything
from_history = map(str, history[:int(optlist['-p'])])
else:
from_history = []
ticket_ids = cl_ticket_ids + from_history
if ticket_ids:
make_tmp_commit_file(ticket_ids)
update_history_file(history, cl_ticket_ids)
exec_git_commit()
if __name__ == '__main__':
main(sys.argv[1:])
| [
"[email protected]"
] | |
a1bc22d85ebb4a4a6709a2b40fc1b66ac428833b | 312732cce4737a33bd0a13137936b14424db884e | /filter_month11_good_data.py | 38755d2f3cda21807aaa952864f9a829cb2bdd7e | [] | no_license | marlboroug/mscode | 21c7f6408c4869aeb90b08ae9a29a0f7d09344fb | ee453c958d3d1d07eb8cff693f913fe4176926c6 | refs/heads/master | 2021-01-21T17:13:48.932367 | 2017-12-22T08:19:18 | 2017-12-22T08:19:18 | 98,517,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,070 | py | # encoding: utf-8
"""cross_validation_month_detail
Usage:
cross_validation_month_fault_detail.py
[--fault_disk_take_top=<fault_disk_take_top>] [--good_disk_take_top=<good_disk_take_top>] [--source_file_name=<source_file_name>] [--model_type=<model_type>]
cross_validation_month_fault_detail.py (-h | --help)
cross_validation_month_fault_detail.py --version
Options:
--fault_disk_take_top=<fault_disk_take_top> epoch. [default: 10].
--good_disk_take_top=<good_disk_take_top> epoch. [default: None].
--source_file_name=<source_file_name> epoch. [default: backblaze_2016.csv].
--model_type=<model_type> model. [default: RandomForest].
-h --help Show this screen.
--version Show version.
"""
# (--fault_train_month=<fault_train_month>) (--fault_test_month=<fault_test_month>) (--good_month_start=<good_month_start>) (--good_month_stop=<good_month_stop>) (--good_training_ratio=<good_training_ratio>)
from docopt import docopt
import pandas as pd
import os
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from collections import Counter
import numpy as np
import logging
from calendar import monthrange
import datetime
import _pickle as cPickle
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn import tree
import pydotplus
import random
import multiprocessing
header_file_name = 'header.csv'
# nrows_test: set to an int to limit rows read; mind whether the row count matches the batched data
nrows_test = None
source_file_name1 = 'small_new_feature_11month_feature_top20_Good.csv'
index_beg = 0 #11 = 0, 18 = 7
index_end = 3
experiment_type = 'generate_good_month_11_filter_time_index'
year = 2016
def _get_experiment_base(fault_train_month, fault_test_month, good_month_start, good_month_stop, good_training_ratio,
fault_disk_take_top, good_disk_take_top):
return "file"
# return 'FTRM_{}-FTEM_{}-GM_{}_{}-GTR_{}-F_top_{}-G_top_{}'.format(
# fault_train_month, fault_test_month, good_month_start, good_month_stop, round(good_training_ratio, 2),
# fault_disk_take_top, good_disk_take_top
# )
def _get_train_prefix(fault_train_month, fault_test_month, good_month_start, good_month_stop, good_training_ratio,
fault_disk_take_top, good_disk_take_top):
return _get_experiment_base(fault_train_month, fault_test_month, good_month_start, good_month_stop,
good_training_ratio,
fault_disk_take_top, good_disk_take_top) + '_train'
def _get_test_prefix(fault_train_month, fault_test_month, good_month_start, good_month_stop, good_training_ratio,
fault_disk_take_top, good_disk_take_top):
return _get_experiment_base(fault_train_month, fault_test_month, good_month_start, good_month_stop,
good_training_ratio,
fault_disk_take_top, good_disk_take_top) + '_test'
def _get_train_file_name(fault_train_month, fault_test_month, good_month_start, good_month_stop, good_training_ratio,
fault_disk_take_top, good_disk_take_top):
return experiment_type + '_' + _get_train_prefix(fault_train_month, fault_test_month, good_month_start,
good_month_stop, good_training_ratio,
fault_disk_take_top, good_disk_take_top) + '.csv'
def _get_test_file_name(fault_train_month, fault_test_month, good_month_start, good_month_stop, good_training_ratio,
fault_disk_take_top, good_disk_take_top):
return experiment_type + '_' + _get_test_prefix(fault_train_month, fault_test_month, good_month_start,
good_month_stop, good_training_ratio,
fault_disk_take_top, good_disk_take_top) + '.csv'
def _get_result_folder(fault_train_month, fault_test_month, good_month_start, good_month_stop, good_training_ratio,
fault_disk_take_top, good_disk_take_top):
return experiment_type + '_' + _get_experiment_base(fault_train_month, fault_test_month, good_month_start,
good_month_stop, good_training_ratio,
fault_disk_take_top, good_disk_take_top)
def __get_first_day_in_month(month):
return datetime.datetime.strptime(
'{}-{}-01'.format(int(year),month), "%Y-%m-%d")
def __get_last_day_in_month(month):
return datetime.datetime.strptime(
'{}-{}-{}'.format(int(year),month, monthrange(int(year), month)[1]), "%Y-%m-%d")
def _do_statistics(df):
y = df["LabelId"]
fault_unique_disks = df[df['LabelId'] == 1]['SerialNumber'].unique()
good_unique_disks = df[df['LabelId'] == 0]['SerialNumber'].unique()
logging.warning(
'good disks:{}, fault disks:{}, item statistic:{}'.format(
len(good_unique_disks), len(fault_unique_disks),
Counter(y)))
def preprocessingGroup2(group, gid, topNum):
# logging.warning("id "+str(gid))
return group[index_beg:index_end]
def multiProcPreprocess2(dfgb, topNum, keys):
multiprocessing.freeze_support()
#cpus = multiprocessing.cpu_count()
cpus = 64
results = []
#to test serial version
with multiprocessing.Pool(processes=cpus) as pool:
for x in keys:
result = pool.apply_async(preprocessingGroup2, args=(dfgb.get_group(x), x, topNum,))
results.append(result)
data = pd.concat([result.get() for result in results])
print('multiprocess2 end one cycle')
return data
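# Editor's note (illustrative): multiProcPreprocess2 fans each SerialNumber
# group out to a pool worker and keeps rows index_beg:index_end per disk;
# generate_data() below calls it as
#   multiProcPreprocess2(file1.groupby('SerialNumber'), 1, file1['SerialNumber'].unique())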
def generate_data(fault_train_month, fault_test_month, good_month_start, good_month_stop, good_training_ratio,fault_disk_take_top, good_disk_take_top, source_file_name):
    # extract the two months' worth of content
# with open(header_file_name) as f:
# header = f.readline().strip('\n').split(',')
header = None
logging.warning('reading source file...')
logging.warning('reading source file...' + str(source_file_name1))
file1 = pd.read_csv(source_file_name1, names=header, nrows = nrows_test)
logging.warning('init data...')
logging.warning('file 1 : ')
_do_statistics(file1)
#bad get last data
filter_data = multiProcPreprocess2(file1.groupby('SerialNumber'),1,file1['SerialNumber'].unique())
# finish bad get last data
filter_data.to_csv('filter_' + str(index_beg) + '_' + str(index_end) + '_small_new_feature_11month_feature_Good.csv', index=None)
logging.warning('save filter 11month.')
#get all data
##replace nan with mode
# good9_mode_dict = dict()
# good10_mode_dict = dict()
# good11_mode_dict = dict()
#
# bad9_mode_dict = dict()
# bad10_mode_dict = dict()
# bad11_mode_dict = dict()
#
#
# logging.warning('getting mode of every columns in training data...')
# for name in bad9.columns:
# if name in ['Min_DiskPartFailedTime','Max_DiskPartFailedTime','NodeId', 'SerialNumber', 'PreciseTimeStamp','LabelId']:
# continue
#
# good9_mode_dict[name] = good9[name].mode()
# good10_mode_dict[name] = good10[name].mode()
# good11_mode_dict[name] = good11[name].mode()
#
# bad10_mode_dict[name] = bad10[name].mode()
# bad11_mode_dict[name] = bad11[name].mode()
# bad11_mode_dict[name] = bad11[name].mode()
#
# for name in bad9.columns:
# if name in ['Min_DiskPartFailedTime','Max_DiskPartFailedTime','NodeId', 'SerialNumber', 'PreciseTimeStamp','LabelId']:
# continue
#
# good9[name][np.isnan(good9[name])] = good9_mode_dict[name]
# good10[name][np.isnan(good10[name])] = good10_mode_dict[name]
# good11[name][np.isnan(good11[name])] = good11_mode_dict[name]
#
# bad9[name][np.isnan(bad9[name])] = bad9_mode_dict[name]
# bad10[name][np.isnan(bad10[name])] = bad10_mode_dict[name]
# bad11[name][np.isnan(bad11[name])] = bad11_mode_dict[name]
# logging.warning('done replace nan with mode in training and testing column {}'.format(name))
##replace nan with mode
# good_disks9 = good9['SerialNumber'].unique()
# good_disks10 = good10['SerialNumber'].unique()
# good_disks11 = good11['SerialNumber'].unique()
# all_good = set(good_disks9)|set(good_disks10)|set(good_disks11)
#
# logging.warning('good disks 8:{} .'.format(len(good_disks9)))
# logging.warning('good disks 9:{} .'.format(len(good_disks10)))
# logging.warning('good disks 10:{} .'.format(len(good_disks11)))
# logging.warning('all good disks :{} .'.format(len(all_good)))
# bad_disks8 = bad9['SerialNumber'].unique()
# bad_disks9 = bad10['SerialNumber'].unique()
# bad_disks10 = bad11['SerialNumber'].unique()
# all_bad= set(bad_disks8)|set(bad_disks9)|set(bad_disks10)
# logging.warning('bad disks 8:{} .'.format(len(bad_disks8)))
# logging.warning('bad disks 9:{} .'.format(len(bad_disks9)))
# logging.warning('bad disks 10:{} .'.format(len(bad_disks10)))
# logging.warning('all bad disks :{} .'.format(len(all_bad)))
#
# bad9.to_csv('bad9.csv', index=None)
# logging.warning('save bad9.')
# bad10.to_csv('bad10.csv', index=None)
# logging.warning('save bad10.')
# bad11.to_csv('bad11.csv', index=None)
# logging.warning('save bad11.')
# good9.to_csv('good9.csv', index=None)
# logging.warning('save good9.')
# good10.to_csv('good10.csv', index=None)
# logging.warning('save good10.')
# good11.to_csv('good11.csv', index=None)
# logging.warning('save good11.')
##good sample
# sample_disk1 = set()
# sample_disk2 = set()
# sample_disk3 = set()
#
# for single in good_disks9:
# value = random.randint(1, 100)
# if value % 10 == 5:
# sample_disk1.add(single)
# for single in good_disks10:
# value = random.randint(1, 100)
# if value % 10 == 5:
# sample_disk2.add(single)
# for single in good_disks11:
# value = random.randint(1, 100)
# if value % 10 == 5:
# sample_disk3.add(single)
#
# logging.warning('sample_disk1:{} .'.format(len(sample_disk1)))
# logging.warning('sample_disk2:{} .'.format(len(sample_disk2)))
# logging.warning('sample_disk3:{} .'.format(len(sample_disk3)))
# logging.warning('all sample disks :{} .'.format(len(sample_disk1|sample_disk2|sample_disk3)))
#
# import csv
# cw = csv.writer(open("sample_good_disk9_SerialNumber.csv",'w'))
# cw.writerow(list(sample_disk1))
# cw = csv.writer(open("sample_good_disk10_SerialNumber.csv",'w'))
# cw.writerow(list(sample_disk2))
# cw = csv.writer(open("sample_good_disk11_SerialNumber.csv",'w'))
# cw.writerow(list(sample_disk3))
#
# good_sample1 = good9[good9['SerialNumber'].isin(sample_disk1)]
# good_sample2 = good10[good10['SerialNumber'].isin(sample_disk2)]
# good_sample3 = good11[good11['SerialNumber'].isin(sample_disk3)]
#
# good_sample1.to_csv('good_sample9.csv', index=None)
# logging.warning('save good_sample9.')
# good_sample2.to_csv('good_sample10.csv', index=None)
# logging.warning('save good_sample10.')
# good_sample3.to_csv('good_sample11.csv', index=None)
# logging.warning('save good_sample11.')
# logging.warning('test:{} '.format(2))
if __name__ == "__main__":
# arguments = docopt(__doc__, version='cross_validation_month_detail 1.0')
fault_train_month = [1,2,3]
fault_test_month = [1,2,3]
good_month_start = 1
good_month_stop = 1
good_training_ratio = float(0.5)
fault_disk_take_top = 1
good_disk_take_top = 1
source_file_name = ''
folder = _get_result_folder(fault_train_month, fault_test_month, good_month_start, good_month_stop,good_training_ratio,fault_disk_take_top, good_disk_take_top)
if not os.path.exists(folder):
logging.warning('create result folder {}.'.format(folder))
os.makedirs(folder)
else:
logging.warning(
'result folder {} exists! will overwrite files under it and appending logging file!'.format(folder))
logging.basicConfig(level=logging.WARNING,format='%(asctime)-15s %(message)s')
# set logging
logging.warning('removing previous logging handler...')
rootLogger = logging.getLogger()
for h in rootLogger.handlers[1:]:
rootLogger.removeHandler(h)
logging.warning('adding new handler...')
fileHandler = logging.FileHandler(filename=os.path.join(folder, 'logging.txt'))
logFormatter = logging.Formatter('%(asctime)-15s %(message)s')
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)
logging.warning('analyse data...')
generate_data(fault_train_month, fault_test_month, good_month_start, good_month_stop, good_training_ratio,fault_disk_take_top, good_disk_take_top, source_file_name)
    # used to sample good-disk data and drop unused columns from the good and bad data
"[email protected]"
] | |
0d30e60a3c74c1f03015f06c6518907a8ddaad1f | 8d5ab4d889a596eb3f6e367fcf6a9cd849c5df94 | /node_modules/ccxt/python/ccxt/paymium.py | 4b01786877e77812bf7004170a588ac16b468ac0 | [
"MIT"
] | permissive | dleonard00/cryptoBot | 0ea9c1d73513ae446f3ab67ea1186cf786de4857 | 1195823102989b6d37698d0da676dcc37d342a4c | refs/heads/master | 2021-08-20T09:37:35.461416 | 2017-11-28T20:56:55 | 2017-11-28T20:56:55 | 112,361,285 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,890 | py | # -*- coding: utf-8 -*-
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
class paymium (Exchange):
def describe(self):
return self.deep_extend(super(paymium, self).describe(), {
'id': 'paymium',
'name': 'Paymium',
'countries': ['FR', 'EU'],
'rateLimit': 2000,
'version': 'v1',
'hasCORS': True,
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27790564-a945a9d4-5ff9-11e7-9d2d-b635763f2f24.jpg',
'api': 'https://paymium.com/api',
'www': 'https://www.paymium.com',
'doc': [
'https://github.com/Paymium/api-documentation',
'https://www.paymium.com/page/developers',
],
},
'api': {
'public': {
'get': [
'countries',
'data/{id}/ticker',
'data/{id}/trades',
'data/{id}/depth',
'bitcoin_charts/{id}/trades',
'bitcoin_charts/{id}/depth',
],
},
'private': {
'get': [
'merchant/get_payment/{UUID}',
'user',
'user/addresses',
'user/addresses/{btc_address}',
'user/orders',
'user/orders/{UUID}',
'user/price_alerts',
],
'post': [
'user/orders',
'user/addresses',
'user/payment_requests',
'user/price_alerts',
'merchant/create_payment',
],
'delete': [
'user/orders/{UUID}/cancel',
'user/price_alerts/{id}',
],
},
},
'markets': {
'BTC/EUR': {'id': 'eur', 'symbol': 'BTC/EUR', 'base': 'BTC', 'quote': 'EUR'},
},
})
def fetch_balance(self, params={}):
balances = self.privateGetUser()
result = {'info': balances}
for c in range(0, len(self.currencies)):
currency = self.currencies[c]
lowercase = currency.lower()
account = self.account()
balance = 'balance_' + lowercase
locked = 'locked_' + lowercase
if balance in balances:
account['free'] = balances[balance]
if locked in balances:
account['used'] = balances[locked]
account['total'] = self.sum(account['free'], account['used'])
result[currency] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, params={}):
orderbook = self.publicGetDataIdDepth(self.extend({
'id': self.market_id(symbol),
}, params))
result = self.parse_order_book(orderbook, None, 'bids', 'asks', 'price', 'amount')
result['bids'] = self.sort_by(result['bids'], 0, True)
return result
def fetch_ticker(self, symbol, params={}):
ticker = self.publicGetDataIdTicker(self.extend({
'id': self.market_id(symbol),
}, params))
timestamp = ticker['at'] * 1000
vwap = float(ticker['vwap'])
baseVolume = float(ticker['volume'])
quoteVolume = baseVolume * vwap
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': float(ticker['high']),
'low': float(ticker['low']),
'bid': float(ticker['bid']),
'ask': float(ticker['ask']),
'vwap': vwap,
'open': float(ticker['open']),
'close': None,
'first': None,
'last': float(ticker['price']),
'change': None,
'percentage': float(ticker['variation']),
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
def parse_trade(self, trade, market):
timestamp = int(trade['created_at_int']) * 1000
volume = 'traded_' + market['base'].lower()
return {
'info': trade,
'id': trade['uuid'],
'order': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'type': None,
'side': trade['side'],
'price': trade['price'],
'amount': trade[volume],
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
market = self.market(symbol)
response = self.publicGetDataIdTrades(self.extend({
'id': market['id'],
}, params))
return self.parse_trades(response, market)
def create_order(self, market, type, side, amount, price=None, params={}):
order = {
'type': self.capitalize(type) + 'Order',
'currency': self.market_id(market),
'direction': side,
'amount': amount,
}
        # market orders execute at the market price; only other order
        # types carry an explicit price
        if type != 'market':
            order['price'] = price
response = self.privatePostUserOrders(self.extend(order, params))
return {
'info': response,
'id': response['uuid'],
}
def cancel_order(self, id, symbol=None, params={}):
        # the REST map above declares DELETE user/orders/{UUID}/cancel
        return self.privateDeleteUserOrdersUUIDCancel(self.extend({
            'UUID': id,
        }, params))
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + self.version + '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
body = self.json(params)
nonce = str(self.nonce())
auth = nonce + url + body
headers = {
'Api-Key': self.apiKey,
'Api-Signature': self.hmac(self.encode(auth), self.secret),
'Api-Nonce': nonce,
'Content-Type': 'application/json',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = self.fetch2(path, api, method, params, headers, body)
if 'errors' in response:
raise ExchangeError(self.id + ' ' + self.json(response))
return response
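# Editor's sketch (not part of ccxt): minimal public-endpoint usage, assuming
# network access and that this module is imported through the ccxt package:
#   import ccxt
#   exchange = ccxt.paymium()
#   print(exchange.fetch_ticker('BTC/EUR'))  # public call, no API keys needed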
| [
"[email protected]"
] | |
a7c0223148294123bd1252311d60669270ab2677 | 421338e0f5ad029885b7ae1b198dfde85798e313 | /auxilaries/config_str.py | 09b1bab56017945c0488842f5e8813742d12a321 | [] | no_license | HaiFengZeng/nsynth_wavenet | 985b0b0bea59b3a8cdee6be267259cdad5242a6c | 1c276ea9f8f98d25aad7d9049c6e03101429ac53 | refs/heads/master | 2020-03-25T01:50:13.018446 | 2018-07-28T14:28:41 | 2018-07-28T14:28:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,617 | py | from wavenet import parallel_wavenet, wavenet, masked
import json
import subprocess
from argparse import Namespace
import time
from auxilaries import reader
def get_config_srt(hparams, model, tag=''):
prefix = 'ns_' # nsynth
if model == 'wavenet':
model_str = 'wn'
elif model == 'parallel_wavenet':
model_str = 'pwn'
else:
raise ValueError('unsupported model type {}'.format(model))
branch_names = subprocess.check_output(['git', 'branch'])
current_branch_name = [bn for bn in branch_names.decode('utf-8').split('\n')
if '*' in bn][0]
current_branch_name = current_branch_name.split()[1]
if getattr(hparams, 'use_mu_law', False):
mu_law_tag = 'MU'
else:
mu_law_tag = 'n_MU'
if getattr(hparams, 'use_weight_norm', False):
weight_norm_tag = 'WN'
if current_branch_name == 'data_dep_init':
weight_norm_tag += '_DDI'
if parallel_wavenet.MANUAL_FINAL_INIT and model == 'parallel_wavenet':
weight_norm_tag += '_mfinit'
else:
weight_norm_tag = 'n_WN'
loss_type = getattr(hparams, 'loss_type', '').upper()
cstr = '-'.join([prefix + model_str, mu_law_tag, weight_norm_tag])
if reader.USE_NEW_MEL_EXTRACTOR:
cstr += '-NM'
if getattr(hparams, 'use_resize_conv', False):
cstr += '-RS'
else:
cstr += '-TS'
upsample_act = getattr(hparams, 'upsample_act', 'tanh')
cstr += ('-' + upsample_act)
if model == 'parallel_wavenet':
if parallel_wavenet.USE_LOG_SCALE:
cstr += '-LOGS'
else:
cstr += '-n_LOGS'
if parallel_wavenet.CLIP:
cstr += '-CLIP'
else:
cstr += '-n_CLIP'
if parallel_wavenet.SPEC_ENHANCE_FACTOR == 0:
cstr += '-NLABS' if parallel_wavenet.NORM_FEAT else '-LABS'
elif parallel_wavenet.SPEC_ENHANCE_FACTOR == 1:
cstr += '-NABS' if parallel_wavenet.NORM_FEAT else '-ABS'
elif parallel_wavenet.SPEC_ENHANCE_FACTOR == 2:
cstr += '-NPOW' if parallel_wavenet.NORM_FEAT else '-POW'
elif parallel_wavenet.SPEC_ENHANCE_FACTOR == 3:
cstr += '-NCOM' if parallel_wavenet.NORM_FEAT else '-COM'
else:
raise ValueError("SPEC_ENHANCE_FACTOR Value Error.")
if parallel_wavenet.USE_MEL:
cstr += '-MEL'
else:
cstr += '-n_MEL'
if parallel_wavenet.USE_L1_LOSS:
cstr += '-L1'
else:
cstr += '-L2'
if parallel_wavenet.USE_PRIORITY_FREQ:
cstr += '-PFS'
else:
cstr += '-n_PFS'
if model == 'wavenet' and getattr(hparams, 'add_noise', False):
cstr += '-NOISE'
if loss_type:
cstr += '-{}'.format(loss_type)
if tag:
cstr += '-{}'.format(tag)
return cstr
def get_time_str():
return time.strftime("%m_%d", time.localtime())
def get_config_time_str(hparams, model, tag=''):
cstr = get_config_srt(hparams, model, tag) + '-' + get_time_str()
return cstr
if __name__ == '__main__':
config1 = '../config_jsons/wavenet_mol.json'
with open(config1, 'rt') as F:
configs = json.load(F)
hparams = Namespace(**configs)
print(get_config_srt(hparams, 'wavenet'))
config1 = '../config_jsons/parallel_wavenet.json'
with open(config1, 'rt') as F:
configs = json.load(F)
hparams = Namespace(**configs)
print(get_config_srt(hparams, 'parallel_wavenet'))
print(get_config_time_str(hparams, 'parallel_wavenet'))
| [
"[email protected]"
] | |
576f0fbb13855a3291afaad145ffb5d1e107c293 | b6152611cfccc0289844293d693f98a88287de2e | /mysite/settings.py | 8dc7584886955556b8e2a7065d68d8f86230e726 | [] | no_license | babelcisco/mysite | 059c30c3ce148fc15c4d8e06d46d5e012bde6af6 | 5a506c82e4ead1d463d9b7b03abe3253a88dc8b8 | refs/heads/master | 2021-04-12T02:41:07.395607 | 2018-03-19T16:10:40 | 2018-03-19T16:10:40 | 125,883,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,088 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_cp!jc(wb_v$qt600qs5k77l*1)t3&$-lb4rb!ki1-+ltqrku1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
bb370fde71b86ce2b71d05a6b9c496760a47ffed | af7f6e1a1e4d964c3cce0d224effed08e9096ad0 | /Garden.py | eb94cd01d530d383378342388ec3bb615de1b156 | [] | no_license | saharsh2008/Ballon-Fight | e1cd4028822b35fd142dca400c1dc2c1c1b49f9f | 3e70929a57f2328ac9fb604689bd20808a0c11e5 | refs/heads/master | 2022-11-26T22:42:57.174998 | 2020-08-03T16:45:38 | 2020-08-03T16:45:38 | 284,748,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,136 | py | import pgzrun
from random import randint
import time
WIDTH = 800
HEIGHT = 600
CENTER_X = WIDTH / 2
CENTER_Y = HEIGHT / 2
game_over = False
finalised = False
garden_happy = True
fangflower_collision = False
time_elapsed = 0
start_time = time.time()
cow = Actor("cow")
cow.pos = 100, 500
flower_list = [ ]
wilted_list = [ ]
fangflower_list = [ ]
fangflower_vy_list = [ ]
fangflower_vx_list = [ ]
def draw():
global game_over, time_elapsed, finalised
if not game_over:
screen.clear()
screen.blit("garden", (0, 0))
cow.draw()
for flower in flower_list:
flower.draw()
for fangflower in fangflower_list:
fangflower.draw()
time_elapsed = int(time.time() - start_time)
screen.draw.text(
"Garden happy for: " +
str(time_elapsed) + " seconds",
topleft=(10, 10), color="black"
)
else:
if not finalized:
cow.draw()
screen.draw.text(
"Garden happy for: " +
str(time_elapsed) + " seconds",
topleft=(10, 10), color="black"
)
if (not garden_happy):
screen.draw.text(
"GARDEN UNHAPPY - GAME OVER!", color="black",
topleft=(10, 50)
)
finalized = True
else:
screen.draw.text(
"FANGFLOWER ATTACK - GAME OVER!", color="black",
topleft=(10, 50)
)
finalized = True
return
def new_flower():
global flower_list, wilted_list
flower_new = Actor("flower")
flower_new.pos = randint(50, WIDTH - 50), randint(150, HEIGHT - 100)
flower_list.append(flower_new)
wilted_list.append("happy")
return
def add_flowers():
global game_over
if not game_over:
new_flower()
clock.schedule(add_flowers, 4)
return
def check_wilt_times():
global wilted_list, game_over, garden_happy
if wilted_list:
for wilted_since in wilted_list:
if (not wilted_since == "happy"):
time_wilted = int(time.time() - wilted_since)
if (time_wilted) > 10.0:
garden_happy = False
game_over = True
break
return
def wilt_flower():
global flower_list, wilted_list, game_over
if not game_over:
if flower_list:
rand_flower = randint(0, len(flower_list) - 1)
if (flower_list[rand_flower].image == "flower"):
                flower_list[rand_flower].image = "flower-wilt"  # assign, not compare
wilted_list[rand_flower] = time.time()
clock.schedule(wilt_flower, 3)
return
def check_flower_collision():
global cow, flower_list, wilted_list
index = 0
for flower in flower_list:
if (flower.colliderect(cow) and
flower.image == "flower-wilt"):
flower.image = "flower"
wilted_list[index] = "happy"
break
index = index + 1
return
def check_fangflower_collision():
global cow, fangflower_list, fangflower_collision
global game_over
for fangflower in fangflower_list:
if fangflower.colliderect(cow):
cow.image = "zap"
game_over = True
break
return
def velocity():
random_dir = randint(0, 1)
random_velocity = randint(2, 3)
if random_dir == 0:
return -random_velocity
else:
return random_velocity
def mutate():
global flower_list, fangflower_list, fangflower_vy_list
    global fangflower_vx_list, game_over
if not game_over and flower_list:
rand_flower = randint(0, len(flower_list) - 1)
fangflower_pos_x = flower_list[rand_flower].x
fangflower_pos_y = flower_list[rand_flower].y
del flower_list[rand_flower]
fangflower = Actor("fangflower")
fangflower.pos = fangflower_pos_x, fangflower_pos_y
fangflower_vx = velocity()
fangflower_vy = velocity()
        fangflower_list.append(fangflower)  # list.append returns None
fangflower_vx_list.append(fangflower_vx)
fangflower_vy_list.append(fangflower_vy)
clock.schedule(mutate, 20)
return
def update_fangflowers():
global fangflower_list, game_over
if not game_over:
index = 0
for fangflower in fangflower_list:
fangflower_vx = fangflower_vx_list[index]
fangflower_vy = fangflower_vy_list[index]
fangflower.x = fangflower.x + fangflower_vx
fangflower.y = fangflower.y + fangflower_vy
if fangflower.left < 0:
fangflower_vx_list[index] = -fangflower_vx
if fangflower.right > WIDTH:
fangflower_vx_list[index] = -fangflower_vx
if fangflower.top < 150:
fangflower_vy_list[index] = -fangflower_vy
if fangflower.bottom > HEIGHT:
fangflower_vy_list[index] = -fangflower_vy
index = index + 1
return
def reset_cow():
global game_over
if not game_over:
cow.image = "cow"
return
add_flowers()
wilt_flower()
def update():
global score, game_over, fangflower_collision
global flower_list, fangflower_list, time_elapsed
fangflower_collision = check_fangflower_collision()
check_wilt_times()
if not game_over:
if keyboard.space:
cow.image = "cow-water"
clock.schedule(reset_cow, 0.5)
check_flower_collision()
if keyboard.left and cow.x > 0:
cow.x -= 5
elif keyboard.right and cow.x < WIDTH:
cow.x += 5
elif keyboard.up and cow.y > 150:
cow.y -= 5
elif keyboard.down and cow.y < HEIGHT:
cow.y += 5
if time_elapsed > 15 and not fangflower_list:
mutate()
update_fangflowers()
pgzrun.go()
| [
"[email protected]"
] | |
3f4fc8fb10f2b958cdadc43fb4838be1ffd9d192 | e76683547c26a6f902e3c68ebfc207a1d9abe79f | /13_IntroductionToDjangoRESTFramework/djlibrary/app_api/v1/catalog/urls.py | 9982bc10639cbc000410d116ff4887b82bcf13b6 | [] | no_license | antoshkoo/python_django | 03b19f3307b9aef1cc4c2bf2858b550334442ebe | ecb8b1c3edfe3e1af6a531cb237bb5d9f4e50601 | refs/heads/master | 2023-03-19T18:52:54.257903 | 2021-03-05T16:09:56 | 2021-03-05T16:09:56 | 339,375,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | from django.urls import include, path
from rest_framework import routers
from app_api.v1.catalog.api import AuthorViewSet, BookViewSet
router = routers.DefaultRouter()
router.register('authors', AuthorViewSet)
router.register('books', BookViewSet)
urlpatterns = [
path('', include(router.urls)),
] | [
"[email protected]"
] | |
e72ba02c8a3a0527c9964bf64f11e21586fa69ad | 9b443b52a0eae62d624592a6fe7a73c05f046f64 | /ex11_second_intro_rover/robot.py | 462197bb9c615d8583b567c89ba2d4102d4043ee | [] | no_license | bruiken/DES-Exercises | 93cc4b84f02c6c2e4fddcc4e9f49bc710cda8aea | 2ae1c0d15bfdaf491e8b3dd7404a53fdea0bec2b | refs/heads/master | 2023-02-20T10:57:43.369508 | 2021-01-27T13:20:44 | 2021-01-27T13:20:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,460 | py | from ev3dev2.motor import OUTPUT_A, OUTPUT_D, MoveDifferential, SpeedRPM, SpeedPercent, SpeedDPS
from ev3dev2.unit import STUD_MM
from ev3dev2.wheel import EV3EducationSetTire
from ev3dev2.sensor.lego import UltrasonicSensor, TouchSensor, ColorSensor
from ev3dev2.led import Leds
from ev3dev2.sound import Sound
from datetime import datetime, timedelta
import time
import threading
class Robot:
"""
Robot is a class that instantiates and sets up the robot so that it is ready for use. It initializes the sensors
and the tank drive, and it sets up the sensors. On top of that it provides functions so that the robot can be
controlled.
"""
def __init__(self, bluetooth=None):
"""
Initializer for a Robot.
"""
self.bluetooth = bluetooth
if bluetooth:
self.bluetooth.initiate_connection()
print('connected')
self.cs = ColorSensor()
self.left_touch = TouchSensor('ev3-ports:in1')
self.right_touch = TouchSensor('ev3-ports:in4')
self.us = UltrasonicSensor()
self.tank_drive = Robot.get_tank_drive()
self.sound = Sound()
self.leds = Leds()
self.setup_sensors()
@staticmethod
def get_tank_drive():
"""
Loads the setup of the motors of a Robot. Contains the motor outputs, the type of tire and
the distance between the two wheels of the robot.
:return: A tank_drive setup with the two motors, tire type and distance between tires.
"""
return MoveDifferential(OUTPUT_A, OUTPUT_D, EV3EducationSetTire, 15 * STUD_MM)
def speak(self, text):
"""
Speak in separate thread so it does not block anything.
"""
threading.Thread(
target=lambda: self.sound.speak(text, Sound.PLAY_NO_WAIT_FOR_COMPLETE)
).start()
def setup_sensors(self):
"""
Sets up the "modes" of the sensors. For example sets the ultrasonic sensor to continuous centimeter measurement.
"""
self.us.mode = UltrasonicSensor.MODE_US_DIST_CM # continuous centimeter measurement
def start_drive(self, speed_percentage=40):
"""
Activates the motors of the robot to move forward.
:param speed_percentage: The speed of the Robot based on motor power. Percentage between 0 and 100.
"""
self.tank_drive.on(SpeedPercent(speed_percentage), SpeedPercent(speed_percentage))
def reverse_for_rotations(self, nr_rotations, rpm=60, lock=None):
"""
Reverses the Robot (makes it move backwards).
        :param nr_rotations: Number of wheel rotations to drive backwards.
:param rpm: Speed at which the Robot reverses in rotations per minute.
:param lock: Optional Lock to stop the operation when requested
"""
self.tank_drive.on_for_rotations(SpeedRPM(-rpm), SpeedRPM(-rpm), nr_rotations, block=False)
end_time = datetime.now() + timedelta(seconds=(nr_rotations*60)/rpm)
while datetime.now() < end_time:
            if lock is not None and lock.is_locked():
self.tank_drive.stop()
break
time.sleep(0.01)
def turn_for_rotations(self, rotations, rpm=30, lock=None):
"""
Turn for a number of degrees with the given speed.
Can be pre-empted when given a Lock.
:param rotations: The number of rotations to turn.
:param rpm: The speed to turn at.
:param lock: Optional Lock to stop the operation when requested.
"""
self.tank_drive.on_for_rotations(SpeedRPM(rpm), SpeedRPM(-rpm), abs(rotations), block=False)
end_time = datetime.now() + timedelta(seconds=(abs(rotations)*60)/abs(rpm))
while datetime.now() < end_time:
            if lock is not None and lock.is_locked():
self.tank_drive.stop()
break
time.sleep(0.01)
def rotate_degrees(self, rotations, reverse_before_continue=True, rpm=35, lock=None):
"""
Rotates the Robot.
:param rotations: Number of rotations the Robot rotates.
:param reverse_before_continue: True if Robot needs to reverse before turning, False if not.
:param rpm: Speed at which the Robot turns.
:param lock: Optional Lock to stop the operation when requested
"""
if reverse_before_continue:
self.reverse_for_rotations(.6, lock=lock)
self.turn_for_rotations(rotations, rpm=rpm, lock=lock)
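if __name__ == "__main__":
    # Editor's sketch (not in the original file): a short drive-and-turn
    # sequence, assuming the EV3 motors and sensors wired as declared above.
    robot = Robot()
    robot.start_drive(speed_percentage=30)
    time.sleep(2)
    robot.tank_drive.stop()
    robot.rotate_degrees(0.5, reverse_before_continue=False)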
| [
"[email protected]"
] | |
de81d1f7b6fa8546f1762a7e9b75f0a7e76b89af | adcdc01294ef1563f063b26c4513825879d34262 | /spider/spider_server/app/service/service_user.py | d7a52343623b1b1afd5b2719d983e9d119227d88 | [] | no_license | clearloveyin/Cararote | 0312d18061fd6b23e72450d956ff2a4f07deb31a | 2d764d5d682391a8909650387e84eb735044c5ff | refs/heads/master | 2023-01-29T07:08:32.696111 | 2019-08-03T04:12:15 | 2019-08-03T04:12:15 | 200,329,856 | 0 | 0 | null | 2023-01-04T06:14:17 | 2019-08-03T04:07:49 | Python | UTF-8 | Python | false | false | 5,560 | py | import json
import json
import ldap
from flask import g
from flask import current_app
from requests import post
from ..service import *
from token_manage import serializer
from app.db import db
from app.controller.ctrl_user import CtrlUser
ldapServer = 'LDAP://apolo.storm'
domain = 'storm'
class ServiceUser(object):
def __init__(self):
self.ctrl_object = CtrlUser()
def login_from_ldap(self, request_data):
"""
        Log in via the Storm (LDAP) account.
        :param request_data: dict carrying "username" and "password"
        :return: result dict with "type"/"content"/"message" fields
"""
ctrl_object = self.ctrl_object
result = {"type": SUCCESS, "content": None}
username = request_data.get("username")
password = request_data.get("password")
if not username or not password:
result["type"] = PROMPT_ERROR
message = "工号和密码不能为空!"
current_app.logger.info(message)
result["message"] = message
return result
res = self.login_check(username, password)
if res:
try:
user = ctrl_object.add_new_user(username)
g.username = username
g.password = password
token = serializer.dumps({'username': username})
user['LoginToken'] = str(token, encoding='utf-8')
result["content"] = user
db.session.commit()
return result
except Exception as e:
db.session.rollback()
result = return_exception_message(e)
return result
else:
result["type"] = PROMPT_ERROR
message = "登录失败,请输入正确的用户名或密码!"
current_app.logger.info(message)
result["message"] = message
return result
def login_check(self, username, password):
try:
conn = ldap.initialize(ldapServer)
domainUserName = domain + '\\' + username
conn.simple_bind_s(domainUserName, password)
return True
except:
return False
def login_from_cactus(self, request_data):
"""
        Log in via the Cactus service.
        :param request_data: dict carrying "username" (employee number) and "password"
        :return: result dict with "type"/"content"/"message" fields
"""
ctrl_object = self.ctrl_object
work_id = request_data.get("username")
password = request_data.get("password")
employ = {"employeeNo": work_id,
"password": password,
"clientType": 0
}
result = {"type": SUCCESS, "content": None}
if not work_id or not password:
result["type"] = PROMPT_ERROR
message = "工号和密码不能为空!"
current_app.logger.info(message)
result["message"] = message
return result
try:
user_dict = self.post_cactus(employ)
if user_dict:
username = user_dict.get('username')
work_id = user_dict.get("work_id")
user = ctrl_object.register(username=username, work_id=work_id)
g.username = work_id
g.password = password
token = serializer.dumps({'username': username})
user['LoginToken'] = str(token, encoding='utf-8')
result["content"] = user
return result
else:
result["type"] = PROMPT_ERROR
message = "登录失败,请输入正确的用户名或密码!"
current_app.logger.info(message)
result["message"] = message
return result
except Exception as e:
db.session.rollback()
result = return_exception_message(e)
return result
def post_cactus(self, employ):
login_post = current_app.config["LOGIN_URL"]
r = post(login_post, employ, timeout=30)
user_dict = dict()
        if r.status_code == 200:  # HTTP success
result = json.loads(r.content)
if result.get("code") == 0: # 表示成功
# accessToken = result.get("info").get("accessToken")
username = result.get("info").get("userName")
work_id = result.get("info").get("employeeNo")
user_dict = {'username': username, "work_id": work_id}
return user_dict
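    # Editor's note (illustrative response shape, inferred from the parsing
    # above): {"code": 0, "info": {"userName": "...", "employeeNo": "...",
    # "accessToken": "..."}}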
def get_user_list(self):
"""获取所有的用户"""
ctrl_object = self.ctrl_object
result = {"type": SUCCESS, "content": None}
try:
user_list = ctrl_object.get_all_users()
if not user_list:
result["type"] = NOT_DATA
message = "暂无用户!"
result["message"] = message
else:
result["content"] = user_list
return result
except Exception as e:
result = return_exception_message(e)
return result
def get_role_list(self):
"""获取所有的角色"""
ctrl_object = self.ctrl_object
result = {"type": SUCCESS, "content": None}
try:
role_list = ctrl_object.get_all_roles()
if not role_list:
result["type"] = NOT_DATA
message = "暂无用户!"
result["message"] = message
else:
result["content"] = role_list
return result
except Exception as e:
result = return_exception_message(e)
return result
| [
"1484091708@qq,com"
] | 1484091708@qq,com |
a4822140a61b3189d8c7e9ef0a5f04e97a365ed7 | 538f6481bcfcfacdd64d75e920862fd22a0e8dc0 | /references/cpsc2019/CPSC0437_qrs8921_hr928/unet_lstm_mse_simple_9pool.py | 81aafa06504eee130add55edc8495396ec82b76e | [
"MIT",
"BSD-3-Clause"
] | permissive | wenh06/cpsc2020 | cdf6097dc9d920b29a852a2896ba3f5a275a74f4 | 47acb884ea1f2f819e564d8a17ad37001ed0df27 | refs/heads/master | 2023-07-27T01:56:41.442556 | 2021-05-26T00:10:39 | 2021-05-26T00:10:39 | 370,730,725 | 0 | 0 | BSD-3-Clause | 2021-05-25T14:56:02 | 2021-05-25T14:56:01 | null | UTF-8 | Python | false | false | 9,833 | py | from keras import optimizers
from keras.models import Model
from keras.layers import Dropout, Lambda, Input, average, Reshape, UpSampling1D, Multiply,Concatenate
from keras.layers import Conv1D, Flatten, Dense, Add, AveragePooling1D
from keras.layers import ZeroPadding1D, Cropping1D, BatchNormalization, MaxPooling1D
from keras import backend as K
from keras.layers import *
from keras import losses
from keras.utils.np_utils import to_categorical
import numpy as np
from keras import regularizers
def crop(tensors):
    '''
    :param tensors: List of two tensors, the second tensor having the larger temporal dim
    :return: the second tensor, center-cropped to the first tensor's length
    '''
    len_dims = []
    for t in tensors:
        b, l, d = K.int_shape(t)
        len_dims.append(l)
    crop_l = len_dims[1] - len_dims[0]
    rem_l = crop_l % 2
    # Cropping1D expects a single (crop_left, crop_right) pair for the time axis
    cropped = Cropping1D(cropping=(crop_l // 2, crop_l // 2 + rem_l))(tensors[1])
    return cropped
def dice_loss(y_true, y_pred):
eps = 1e-5
intersection = K.sum(y_true * y_pred, axis=-1) + eps
summation = K.sum(y_true, axis=-1) + K.sum(y_pred,axis=-1) + eps
dice_loss = 1. - (2. * intersection/summation)
return dice_loss
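# Editor's note (worked example, illustrative): for a perfect binary prediction
# y_true = y_pred = [1, 1, 0], intersection ~ 2 and summation ~ 4, so the loss
# is 1 - (2 * 2) / 4 ~ 0; fully disjoint masks push the loss towards 1.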
# m: input tensor
# dim: number of output channels
# res: enables the residual (concatenation) connection
# drop: dropout rate (0 disables the dropout layer)
# **kwargs: shared Conv1D parameters (kernel size, initializer, etc.)
def convblock(m, dim, layername, res=0, drop=0.5, **kwargs):
n = Conv1D(filters=dim, name= layername + '_conv1', **kwargs)(m)
n = BatchNormalization(momentum=0.95, epsilon=0.001)(n)
n = Dropout(drop)(n) if drop else n
n = Conv1D(filters=dim, name= layername + '_conv2', **kwargs)(n)
n = BatchNormalization(momentum=0.95, epsilon=0.001)(n)
# m = Conv1D(filters=dim, name= layername + '_conv3', kernel_size=1, padding='same', activation='relu')(m)
# m = BatchNormalization(momentum=0.95, epsilon=0.001)(m)
return Concatenate()([m, n]) if res else n
def unet(input_shape, num_classes, lr, kernel_size=3, filter_num=32, res=0, maxpool=True, weights=None, drop_rate=0.5, use_lstm=True, loss_func='mse'):
'''initialization'''
kwargs = dict(
kernel_size=kernel_size,
strides=1,
activation='relu',
padding='same',
use_bias=True,
        kernel_initializer='glorot_uniform', # Xavier uniform initialization
#kernel_initializer='he_normal',
bias_initializer='zeros',
bias_regularizer=None,
        activity_regularizer=None, # regularizer applied to the layer output
kernel_constraint=None,
bias_constraint=None,
        trainable=True, # whether the weights are updated during training
)
kwargs2 = dict(
kernel_size=1,
strides=1,
activation='relu',
padding='same',
use_bias=True,
        kernel_initializer='glorot_uniform', # Xavier uniform initialization
#kernel_initializer='he_normal',
bias_initializer='zeros',
bias_regularizer=None,
        activity_regularizer=None, # regularizer applied to the layer output
kernel_constraint=None,
bias_constraint=None,
        trainable=True, # whether the weights are updated during training
)
data = Input(shape=input_shape, dtype='float', name='data')
# encoder
enconv1 = convblock(data, dim=filter_num, res=res, drop=drop_rate, layername='block1', **kwargs)
pool1 = MaxPooling1D(pool_size=3, strides=2,padding='same',name='pool1')(enconv1) if maxpool \
else Conv1D(filters=filter_num, strides=2, name='pool1')(enconv1)
enconv2 = convblock(pool1, dim=filter_num, res=res, drop=drop_rate, layername='block2', **kwargs)
pool2 = MaxPooling1D(pool_size=3, strides=2, padding='same', name='pool2')(enconv2) if maxpool \
else Conv1D(filters=filter_num, strides=2, name='pool2')(enconv2)
enconv3 = convblock(pool2, dim=2*filter_num, res=res, drop=drop_rate, layername='block3', **kwargs)
pool3 = MaxPooling1D(pool_size=3, strides=2, padding='same', name='pool3')(enconv3) if maxpool \
else Conv1D( filters=filter_num, strides=2, name='pool3')(enconv3)
enconv4 = convblock(pool3, dim=2*filter_num, res=res, drop=drop_rate, layername='block4', **kwargs)
pool4 = MaxPooling1D(pool_size=3, strides=2, padding='same', name='pool4')(enconv4) if maxpool \
else Conv1D(filters=filter_num, strides=2, name='pool4')(enconv4)
enconv5 = convblock(pool4, dim=4*filter_num, res=res, drop=drop_rate, layername='block5', **kwargs)
pool5 = MaxPooling1D(pool_size=3, strides=2, padding='same', name='pool5')(enconv5) if maxpool \
else Conv1D(filters=filter_num, strides=2, name='pool5')(enconv5)
enconv6 = convblock(pool5, dim=4*filter_num, res=res, drop=drop_rate, layername='block6', **kwargs)
pool6 = MaxPooling1D(pool_size=3, strides=2, padding='same', name='pool6')(enconv6) if maxpool \
else Conv1D(filters=filter_num, strides=2, name='pool6')(enconv6)
enconv7 = convblock(pool6, dim=8*filter_num, res=res, drop=drop_rate, layername='block7', **kwargs)
pool7 = MaxPooling1D(pool_size=3, strides=2, padding='same', name='pool7')(enconv7) if maxpool \
else Conv1D(filters=filter_num, strides=2, name='pool7')(enconv7)
enconv8 = convblock(pool7, dim=8*filter_num, res=res, drop=drop_rate, layername='block8', **kwargs)
pool8 = MaxPooling1D(pool_size=3, strides=2, padding='same', name='pool8')(enconv8) if maxpool \
else Conv1D(filters=filter_num, strides=2, name='pool8')(enconv8)
enconv9 = convblock(pool8, dim=16*filter_num, res=res, drop=drop_rate, layername='block9', **kwargs)
pool9 = MaxPooling1D(pool_size=3, strides=2, padding='same', name='pool9')(enconv9) if maxpool \
else Conv1D(filters=filter_num, strides=2, name='pool9')(enconv9)
enconv10 = convblock(pool9, dim=16*filter_num, res=res, drop=drop_rate, layername='block10', **kwargs)
if use_lstm:
# LSTM
lstm1 = Bidirectional(LSTM(8*filter_num, dropout=0.2, recurrent_dropout=0.2,
return_state=False, return_sequences=True), merge_mode = 'concat')(enconv10)
# decoder
up9 = Conv1D(filters=16*filter_num, kernel_size=1, padding='same', activation='relu',
name='up9')(UpSampling1D(size=2)(lstm1))
else:
up9 = Conv1D(filters=16*filter_num, kernel_size=1, padding='same', activation='relu',
name='up9')(UpSampling1D(size=2)(enconv10))
merge9 = Concatenate()([up9, enconv9])
deconv9 = convblock(merge9, dim=8*filter_num, res=res, drop=drop_rate, layername='deconv9', **kwargs2)
up8 = Conv1D(filters=8*filter_num, kernel_size=1, padding='same', activation='relu',
name='up8')(UpSampling1D(size=2)(deconv9))
merge8 = Concatenate()([up8, enconv8])
deconv8 = convblock(merge8, dim=8*filter_num, res=res, drop=drop_rate, layername='deconv8', **kwargs2)
up7 = Conv1D(filters=8*filter_num, kernel_size=1, padding='same', activation='relu',
name='up7')(UpSampling1D(size=2)(deconv8))
merge7 = Concatenate()([up7,enconv7])
deconv7 = convblock(merge7, dim=8*filter_num, res=res, drop=drop_rate, layername='deconv7', **kwargs2)
up6 = Conv1D(filters=4*filter_num, kernel_size=1, padding='same', activation='relu',
name='up6')(UpSampling1D(size=2)(deconv7))
merge6 = Concatenate()([up6,enconv6])
deconv6 = convblock(merge6, dim=filter_num, res=res, drop=drop_rate, layername='deconv6', **kwargs2)
up5 = Conv1D(filters=4*filter_num, kernel_size=1, padding='same', activation='relu',
name='up5')(UpSampling1D(size=2)(deconv6))
merge5 = Concatenate()([up5, enconv5])
deconv5 = convblock(merge5, dim=filter_num, res=res, drop=drop_rate, layername='deconv5', **kwargs2)
up4 = Conv1D(filters=2*filter_num, kernel_size=1, padding='same', activation='relu',
name='up4')(UpSampling1D(size=2)(deconv5))
merge4 = Concatenate()([up4, enconv4])
deconv4 = convblock(merge4, dim=filter_num, res=res, drop=drop_rate, layername='deconv4', **kwargs2)
up3 = Conv1D(filters=2*filter_num, kernel_size=1, padding='same', activation='relu',
name='up3')(UpSampling1D(size=2)(deconv4))
merge3 = Concatenate()([up3, enconv3])
deconv3 = convblock(merge3, dim=filter_num, res=res, drop=drop_rate, layername='deconv3', **kwargs2)
up2 = Conv1D(filters=filter_num, kernel_size=1, padding='same', activation='relu',
name='up2')(UpSampling1D(size=2)(deconv3))
merge2 = Concatenate()([up2, enconv2])
deconv2 = convblock(merge2, dim=filter_num, res=res, drop=drop_rate, layername='deconv2', **kwargs2)
up1 = Conv1D(filters=filter_num, kernel_size=1, padding='same', activation='relu',
name='up1')(UpSampling1D(size=2)(deconv2))
merge1 = Concatenate()([up1, enconv1])
deconv1 = convblock(merge1, dim=filter_num, res=res, drop=drop_rate, layername='deconv1', **kwargs2)
conv10 = Conv1D( filters=num_classes, kernel_size=1, padding='same', activation='relu',
name='conv10')(deconv1)
predictions = Conv1D(filters=num_classes, kernel_size=1, activation='sigmoid',
padding='same', name='predictions')(conv10)
model = Model(inputs=data, outputs= predictions)
#model.load_weights('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5', by_name=True)
if weights is not None:
model.load_weights(weights,by_name=True)
sgd = optimizers.Adamax(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
model.compile(optimizer=sgd, loss=loss_func)
return model
if __name__ == '__main__':
model = unet((2500, 1), 1, 0.001, maxpool=True, weights=None)
| [
"[email protected]"
] | |
eec69d7432435bb5123ce3378e2025ce0c3989f1 | b4da3119fbf64b789566066a3c96a1c8fc9c5370 | /cmframework/src/cmframework/lib/__init__.py | 09f53caf10b16878d66bab068b291122b62db446 | [
"Apache-2.0"
] | permissive | akraino-edge-stack/ta-config-manager | 0c2ccf3d786829a77ad427cbc17673864640d482 | 8a3f88d0dbf6afdb0130b9d35e563f8a54d15d44 | refs/heads/master | 2023-05-25T16:42:32.723250 | 2020-01-16T08:11:37 | 2020-01-16T10:42:08 | 199,550,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 607 | py | # Copyright 2019 Nokia
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cmclientimpl import CMClientImpl
| [
"[email protected]"
] | |
8130915fa36c2153f2eda28f95c56be4542177f8 | f93da4fe323a4fb403f3a57eecd2387c8a6789ac | /doctype/extend_the_test_period/test_extend_the_test_period.py | 8c81114919be0c1a658f57462ff429c1058c442c | [] | no_license | beshoyAtefZaki/hr-update | 35d893eff958a151b96b0a3477f292fb86fd9d95 | 2601ecf70c02d893a01872ad6d63336f51491e5c | refs/heads/master | 2020-08-18T09:20:04.126142 | 2019-10-17T11:16:36 | 2019-10-17T11:16:36 | 215,773,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestExtendthetestperiod(unittest.TestCase):
pass
| [
"[email protected]"
] | |
53a60bad2c5be52e6f5ce4f820cdccc85af98f97 | e60dd32858b162eb28e673ac9cfdb1b2a403c695 | /execptions_try_syntax.py | d9be623e83328f3919ab3a18116142ec8c1d70bb | [] | no_license | Hemanthtm2/playing_python | 8d3419a46d50b2801283aa020be89a8003d4ad31 | cb896916c6d84c564f7036a4ed6a2a5481bf2fd6 | refs/heads/master | 2020-05-23T13:10:51.753218 | 2019-02-20T21:52:34 | 2019-02-20T21:52:34 | 186,767,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | #!/usr/bin/python
def try_syntax(numerator, denominator):
    try:
        print('In the try block {}/{}'.format(numerator, denominator))
        result = numerator / denominator
    except ZeroDivisionError as zde:
        print(zde)
    else:
        # runs only when the try block raised no exception
        print('The result is:', result)
        return result
    finally:
        # always runs last, even after the return in the else block
        print('Exiting')
print(try_syntax(12,4))
print(try_syntax(12,0))
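# Illustrative expected output: the first call prints the result 3.0 and then
# 'Exiting'; the second prints the ZeroDivisionError message ('division by
# zero'), then 'Exiting', then None, since try_syntax returns None on failure.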
| [
"[email protected]"
] | |
037e831a39a806490ee2f27ca98d8361163fd71f | 6625575b7e6f020f25c7a099be940b5dae823b26 | /transform_utils/dict_utils.py | ba78796bcdd88e0b9539d9214238fcf5576af02f | [] | no_license | dparadise28/transform | 143cd43486f5f32938b2a18f7ff661069726099a | 03ad9992ae57b3015016643a64df422362759bee | refs/heads/master | 2020-12-24T07:55:04.508787 | 2016-11-10T06:59:08 | 2016-11-10T06:59:08 | 73,355,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,050 | py | from copy import deepcopy
def unpack(nested_dict, dict_name = 'main'):
'''
unpacks nested dicts
example:
nested_dict = {
"1": "1",
"2": {
"2": ["4"]
},
"3": [{
"1": "1",
"3": [{
"1": "1"
}]
}]
}
return:
[
{
"main": {"2": {}, "1": "1", "3": []}
},{
"3": {"1": "1","3": []}
},{
"3": {"1": "1"}
},{
"2": {"2": ["4"]}
}
]
'''
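    # Usage sketch (illustrative): calling unpack on the docstring example above,
    #   for part in unpack({"1": "1", "2": {"2": ["4"]}}):
    #       print(part)
    # prints one flattened dict per nesting level.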
extracted_lists = []
def unpacked(r, key): #todo: reference to parent in dicts, removal of duplicates
nonlocal extracted_lists
def l(k, v):
if isinstance(v[0], dict): extracted_dict.update({k: []}); unpacked(v[0], k)
else: extracted_dict.update({k: v})
def d(k, v): extracted_dict.update({k: unpacked(v, k)})
def s(k, v): extracted_dict.update({k: v})
protocol, extracted_dict = {list: l, dict: d, str : s,}, {}
for k,v in r.items():
protocol[type(v)](k, v)
extracted_lists.append({key: extracted_dict}); return {}
unpacked(nested_dict, dict_name); return list(reversed(extracted_lists)) | [
"[email protected]"
] | |
1c7f9d355a8043570fbd6cc9913253ef39384e0a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02771/s627765878.py | 85fc06f4995007009269de5397302bc187300a6f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | a,b,c = map(int,input().split())
# "Yes" exactly when two of the three values are equal and the third differs
if (a == b and b != c) or (a == c and a != b) or (b == c and c != a):
print('Yes')
else:
print('No')
| [
"[email protected]"
] | |
b4a2405497961bc1b2a51c82a38be173a43bf86e | de9204aa4a471c266e7654571c5f88c798d41519 | /rbaas/wsgi.py | 468ec99d01293df59580d40543505a062d0fb525 | [] | no_license | noxor0/async-rbaas | 2bd1289ffbeda50beaf76e672b76d3b6fa0f2afb | 8af47bc0904c6d170de0641689a603dc9a4bb002 | refs/heads/main | 2023-05-14T03:09:56.490615 | 2021-06-07T02:16:49 | 2021-06-07T02:16:49 | 374,504,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | """
WSGI config for rbaas project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rbaas.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
22d1a3fd1c35d3c2c8d208486e40387a58501dbb | 40aad3eefe91dfd9ec1673d35e98ad2e03b0cc37 | /deepleanrning_practice.py | 4c5f6e5698456de8cb2280ca46cec75a6fb99512 | [] | no_license | xuxvming/ML-practicals- | 7f3b8848113d7b707a816f167490dd5fec0c660f | 24757f754aa497b7a5a86473c1ff0c75d01fc2c2 | refs/heads/master | 2020-03-22T19:58:42.694501 | 2018-10-06T17:32:25 | 2018-10-06T17:32:25 | 140,565,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,187 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 6 15:35:41 2018
@author: xxiu
"""
'''
input > weights > hidden layer 1 (activation function) > weights > hidden layer 2
(activation function) > weights > output
1. pass data straight through: feed-forward neural network
2. compare output with intended output > cost or loss function ***(how close is the result)
3. optimization function > minimize the error function (the difference between
expected values and actual values of Y)
This is also known as the LOSS; optimization aims to minimize the loss
backpropagation > go backwards and manipulate the weights, which is one of the optimization methods
the simplest way of adjusting the weights is gradient descent
The function used in backpropagation is called the optimization function
feed forward + backpropagation = epoch (1 cycle)
'''
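# Illustrative aside: the gradient-descent weight update mentioned above,
# written as a single assignment (every name here is a placeholder):
#   weight = weight - learning_rate * gradient_of_loss_wrt_weight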
*** about cost function, loss function and objective function***
Loss function is usually a function defined on a data point, prediction and label, and measures the penalty.
For example:
square loss l(f(xi|θ),yi)=(f(xi|θ)−yi)2, used in linear regression
hinge loss l(f(xi|θ),yi)=max(0,1−f(xi|θ)yi), used in SVM
0/1 loss l(f(xi|θ),yi)=1⟺f(xi|θ)≠yi, used in theoretical analysis and definition of accuracy
Cost function is usually more general.
It might be a sum of loss functions over your training set plus some model complexity penalty (regularization).
For example:
Mean Squared Error MSE(θ)=1N∑Ni=1(f(xi|θ)−yi)2
SVM cost function SVM(θ)=∥θ∥2+C∑Ni=1ξi (there are additional constraints connecting ξi with C and with training set)
Objective function is the most general term for any function that you optimize during training.
For example, a probability of generating training set in maximum likelihood approach is a well defined objective function,
but it is not a loss function nor cost function (however you could define an equivalent cost function).
For example:
MLE is a type of objective function (which you maximize)
Divergence between classes can be an objective function but it is barely a cost function, unless you define something artificial, like 1-Divergence, and name it a cost
Long story short, I would say that:
A loss function is a part of a cost function which is a type of an objective function.
'''
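# Illustrative aside: a tiny numeric sketch of the loss-vs-cost distinction
# above; the underscore helpers are placeholders, not part of any library.
def _square_loss(prediction, label):
    # loss: defined on a single data point
    return (prediction - label) ** 2

def _mse(predictions, labels):
    # cost: the mean of the per-point square losses over a whole set
    return sum(_square_loss(p, y) for p, y in zip(predictions, labels)) / len(predictions)

# e.g. _mse([0.9, 0.2], [1.0, 0.0]) == ((-0.1)**2 + 0.2**2) / 2 == 0.025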
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/",one_hot = True)
# one_hot means exactly one element is on (1) and all the others are off (0)
# suppose we have 10 classes, 0-9
# 0 = [1,0,0,0,0,0,0,0,0,0] > class 0 turns on only the 0th element (class 3 would be [0,0,0,1,0,0,0,0,0,0])
#define hidden layers
n_nodes_hl1 = 500
n_nodes_hl2 = 500
n_nodes_hl3 = 500
n_classes = 10
batch_size =100
# height x width
# the 2nd parameter is the shape
#x is the data and y is the label of that data
x =tf.placeholder('float',[None,784])
y =tf.placeholder('float')
def neural_network_model(data):
    # make each layer's weights a TensorFlow Variable initialized from a random normal distribution
    ## a for loop would scale better in case there were, say, 100 hidden layers
hidden_1_layer = {'weights':tf.Variable(tf.random_normal([784,n_nodes_hl1])),
'biases':tf.Variable(tf.random_normal([n_nodes_hl1]))}
hidden_2_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl1,n_nodes_hl2])),
'biases':tf.Variable(tf.random_normal([n_nodes_hl2]))}
hidden_3_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl2,n_nodes_hl3])),
'biases':tf.Variable(tf.random_normal([n_nodes_hl3]))}
output_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl3,n_classes])),
'biases':tf.Variable(tf.random_normal([n_classes]))}
    # input_data*weights + biases
    # this is a linear model; the purpose of the biases is to avoid the case where
    # input_data is 0, which would mean no neurons ever fire
    # build the model: the behavior of the layers
    # matmul is used for multiplying matrices
l1 = tf.add(tf.matmul(data,hidden_1_layer['weights']), hidden_1_layer['biases'])
    # activation function: ReLU (Rectified Linear Unit)
    # relu function > f(x) = max(0, x), e.g. relu(-2) = 0, relu(3) = 3
    # the derivative of ReLU:
    #   1 if x > 0
    #   0 otherwise
l1 = tf.nn.relu(l1)
l2 = tf.add(tf.matmul(l1,hidden_2_layer['weights']), hidden_2_layer['biases'])
l2 = tf.nn.relu(l2)
l3 = tf.add(tf.matmul(l2,hidden_3_layer['weights']), hidden_3_layer['biases'])
l3 = tf.nn.relu(l3)
output = tf.add(tf.matmul(l3,output_layer['weights']), output_layer['biases'])
return output
def train_neural_network(x):
    # remember: the output is a one-hot array
prediction = neural_network_model(x)
    # cost function
    # this calculates the difference between the prediction and the known label
cost =tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = prediction, labels = y))
    # next, use an optimizer to minimize that cost
optimizer = tf.train.AdamOptimizer().minimize(cost)
#define epochs we want
hm_epochs = 50
with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())  # initialize_all_variables() is deprecated
for epoch in range(hm_epochs):
            # accumulate the loss as we go
            epoch_loss = 0
            # how many batches we need to cycle through
for _ in range(int(mnist.train.num_examples/batch_size)):
epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                # c is the cost; run the optimizer on the cost function with a feed_dict supplying x and y
_, c = sess.run([optimizer, cost], feed_dict = {x:epoch_x, y:epoch_y})
epoch_loss += c
print('Epoch', epoch,'completed out of',hm_epochs,'loss',epoch_loss)
        # tf.argmax(..., 1) returns the index of the largest entry along axis 1
        # a prediction is correct when the predicted index matches the true label's index
correct = tf.equal(tf.argmax(prediction,1),tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct,'float'))
print('Accuracy:', accuracy.eval({x:mnist.test.images, y:mnist.test.labels}))
train_neural_network(x) | [
"[email protected]"
] | |
0a0aca32b392add0215c19553757eddf63b407b5 | 5013057778b9e0b0bce78185d5367eb8495521ec | /CVbook/ch1/interActiveAnnot.py | 13577b6d39f9ee0b1cea35de0841ae02a4ccc367 | [] | no_license | AdamBioprinter/OpenCV-Python-Tutorials | a31358b2b8bdb3bbb5b6c349ce067d2da680f822 | 5ab082c514e6bc554436e263c8e3b3aa2b1a27ee | refs/heads/master | 2021-01-09T09:36:02.369649 | 2016-08-02T06:24:28 | 2016-08-02T06:24:28 | 60,224,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | # This script shows how to interacti with an image by marking points
# the function that is used to do this is ginput()
from PIL import Image
from matplotlib import pyplot as plt
import numpy as np
im = np.array(Image.open('building.jpg'))
plt.imshow(im)
print('Please Click 3 points')
x = plt.ginput(3)
print('you clicked: ', x)
plt.show()
| [
"[email protected]"
] | |
b04a2a475e28f99b108ae6b96553f7c9218c2d58 | cc8b0e1b215834aefb2cf0f9f025fe2a1763ccdd | /agents/sarsa/sarsa_fa.py | b358b5ba028054ea214047739cadd00403fcd47c | [
"MIT"
] | permissive | afcarl/DeepRL-arnomoonens | abf929814934feebdbc24ee6e4d920349d7ea369 | 3e5a3ed1dc77c0abc3a5b9f413f51b7054ff9798 | refs/heads/master | 2020-03-22T08:06:45.393738 | 2018-07-03T14:29:30 | 2018-07-03T14:29:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,632 | py | # -*- coding: utf8 -*-
from gym import wrappers
from policies.e_greedy import EGreedy
from agents.sarsa import Sarsa
from traces.eligibility_traces import EligibilityTraces
from functionapproximation.tile_coding import TileCoding
# def draw_3d(tile_starts):
# states = []
# for i in range(n_x_tiles):
# for j in range(n_y_tiles):
# states.append([i, j])
# states = np.array(states)
class SarsaFA(object):
"""Learner using Sarsa and function approximation"""
def __init__(self, env, monitor_path, video=True, **usercfg):
super(SarsaFA, self).__init__()
self.env = env
self.env = wrappers.Monitor(self.env, monitor_path, force=True, video_callable=(None if video else False))
m = usercfg.get("m", 10) # Number of tilings
self.config = dict(
m=m,
n_x_tiles=9,
n_y_tiles=9,
Lambda=0.9,
epsilon=0, # fully greedy in this case
alpha=(0.05 * (0.5 / m)),
gamma=1,
n_iter=1000,
steps_per_episode=env.spec.tags.get("wrapper_config.TimeLimit.max_episode_steps") # Maximum number of allowed steps per episode, as determined (for this environment) by the gym library
)
self.config.update(usercfg)
O = env.observation_space
self.x_low, self.y_low = O.low
self.x_high, self.y_high = O.high
self.nA = env.action_space.n
self.policy = EGreedy(self.config["epsilon"])
self.function_approximation = TileCoding(self.x_low, self.x_high, self.y_low, self.y_high, m, self.config["n_x_tiles"], self.config["n_y_tiles"], self.nA)
def learn(self):
for i in range(self.config["n_iter"]):
traces = EligibilityTraces(self.function_approximation.features_shape, self.config["gamma"], self.config["Lambda"])
state, action = self.env.reset(), 0
sarsa = Sarsa(self.config["gamma"], self.config["alpha"], self.policy, traces, self.function_approximation, range(self.nA), state, action)
            done = False  # done indicates whether the goal, or the maximum number of allowed steps for the episode, has been reached (determined by the gym library itself)
iteration = 0
while not(done):
iteration += 1
state, reward, done, _ = self.env.step(action)
if done and iteration < self.config["steps_per_episode"]:
print("Episode {}: Less than {} steps were needed: {}".format(i, self.config["steps_per_episode"], iteration))
action = sarsa.step(state, reward)
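# Minimal usage sketch (illustrative; assumes a MountainCar-style gym environment
# whose spec provides the max_episode_steps tag read in the config above):
#   import gym
#   agent = SarsaFA(gym.make("MountainCar-v0"), "/tmp/sarsa-monitor", video=False)
#   agent.learn()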
| [
"[email protected]"
] | |
57d4f0aa35b2142b608fef3059072af4dcb8f224 | cb98df00075e55ea39d9e73662c9c575d197430b | /todolist/settings.py | 615e51aff0f5aed5b26e36ac4203c4be623ee3d1 | [] | no_license | phil-fiess/TodoList | c66bcc99333cc96d35e8078e1d5e7e7fb15180a0 | 02d5b789d0c3f8ca3e07bb004405f4356f0bbc18 | refs/heads/main | 2023-01-13T19:51:41.466170 | 2020-11-21T00:15:08 | 2020-11-21T00:15:08 | 314,699,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,156 | py | """
Django settings for todolist project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'eqdvr6kq%m2@pm0*u1^qr86g0%iso65z8ly9e5mm!9i5kkv2q!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'todo',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'todolist.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'todolist.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
| [
"[email protected]"
] | |
470ab45981ad7834924041e8f6b3fadccffa08a2 | 3f0e403ccece5d35af9dcef40855e648d444f81d | /hw3/gene_finder.py | 06cf0005510bafadfc4769125a275c92675564b9 | [] | no_license | paulruvolo/SoftwareDesign | fbcf40c40cb8d62ae69d64a8cf15742fc1398e91 | 3c4f2d9b196653226dc5dba1b30583aca48731df | refs/heads/master | 2020-05-20T10:04:25.879779 | 2014-05-01T15:45:17 | 2014-05-01T15:45:17 | 16,159,399 | 1 | 1 | null | 2014-04-03T07:01:41 | 2014-01-23T02:09:47 | Python | UTF-8 | Python | false | false | 4,780 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 2 11:24:42 2014
@author: YOUR NAME HERE
"""
# you may find it useful to import these variables (although you are not required to use them)
from amino_acids import aa, codons
def collapse(L):
""" Converts a list of strings to a string by concatenating all elements of the list """
output = ""
for s in L:
output = output + s
return output
def coding_strand_to_AA(dna):
""" Computes the Protein encoded by a sequence of DNA. This function
does not check for start and stop codons (it assumes that the input
        DNA sequence represents a protein-coding region).
dna: a DNA sequence represented as a string
        returns: a string containing the sequence of amino acids encoded by
the input DNA fragment
"""
    # YOUR IMPLEMENTATION HERE
    pass
def coding_strand_to_AA_unit_tests():
""" Unit tests for the coding_strand_to_AA function """
    # YOUR IMPLEMENTATION HERE
    pass
def get_reverse_complement(dna):
""" Computes the reverse complementary sequence of DNA for the specfied DNA
sequence
dna: a DNA sequence represented as a string
returns: the reverse complementary DNA sequence represented as a string
"""
    # YOUR IMPLEMENTATION HERE
    pass
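# Illustrative sketch (not the assignment solution): one straightforward way to
# build a reverse complement, reusing the collapse() helper defined above.
def _reverse_complement_sketch(dna):
    complements = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    return collapse([complements[base] for base in reversed(dna)])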
def get_reverse_complement_unit_tests():
""" Unit tests for the get_complement function """
    # YOUR IMPLEMENTATION HERE
    pass
def rest_of_ORF(dna):
""" Takes a DNA sequence that is assumed to begin with a start codon and returns
the sequence up to but not including the first in frame stop codon. If there
is no in frame stop codon, returns the whole string.
dna: a DNA sequence
returns: the open reading frame represented as a string
"""
    # YOUR IMPLEMENTATION HERE
    pass
def rest_of_ORF_unit_tests():
""" Unit tests for the rest_of_ORF function """
    # YOUR IMPLEMENTATION HERE
    pass
def find_all_ORFs_oneframe(dna):
""" Finds all non-nested open reading frames in the given DNA sequence and returns
them as a list. This function should only find ORFs that are in the default
frame of the sequence (i.e. they start on indices that are multiples of 3).
By non-nested we mean that if an ORF occurs entirely within
another ORF, it should not be included in the returned list of ORFs.
dna: a DNA sequence
returns: a list of non-nested ORFs
"""
    # YOUR IMPLEMENTATION HERE
    pass
def find_all_ORFs_oneframe_unit_tests():
""" Unit tests for the find_all_ORFs_oneframe function """
    # YOUR IMPLEMENTATION HERE
    pass
def find_all_ORFs(dna):
""" Finds all non-nested open reading frames in the given DNA sequence in all 3
possible frames and returns them as a list. By non-nested we mean that if an
ORF occurs entirely within another ORF and they are both in the same frame,
it should not be included in the returned list of ORFs.
dna: a DNA sequence
returns: a list of non-nested ORFs
"""
    # YOUR IMPLEMENTATION HERE
    pass
def find_all_ORFs_unit_tests():
""" Unit tests for the find_all_ORFs function """
    # YOUR IMPLEMENTATION HERE
    pass
def find_all_ORFs_both_strands(dna):
""" Finds all non-nested open reading frames in the given DNA sequence on both
strands.
dna: a DNA sequence
returns: a list of non-nested ORFs
"""
    # YOUR IMPLEMENTATION HERE
    pass
def find_all_ORFs_both_strands_unit_tests():
""" Unit tests for the find_all_ORFs_both_strands function """
    # YOUR IMPLEMENTATION HERE
    pass
def longest_ORF(dna):
""" Finds the longest ORF on both strands of the specified DNA and returns it
as a string"""
    # YOUR IMPLEMENTATION HERE
    pass
def longest_ORF_unit_tests():
""" Unit tests for the longest_ORF function """
    # YOUR IMPLEMENTATION HERE
    pass
def longest_ORF_noncoding(dna, num_trials):
""" Computes the maximum length of the longest ORF over num_trials shuffles
        of the specified DNA sequence
dna: a DNA sequence
num_trials: the number of random shuffles
returns: the maximum length longest ORF """
    # YOUR IMPLEMENTATION HERE
    pass
def gene_finder(dna, threshold):
""" Returns the amino acid sequences coded by all genes that have an ORF
larger than the specified threshold.
dna: a DNA sequence
threshold: the minimum length of the ORF for it to be considered a valid
gene.
returns: a list of all amino acid sequences whose ORFs meet the minimum
length specified.
"""
    # YOUR IMPLEMENTATION HERE
    pass | [
"[email protected]"
] | |
0d8d9931fc39e117e208a6bbb14e8e319c51f5d8 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03618/s852100353.py | 4ec50b5b200e61ec44a631e25644e2e5e81fbb35 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | A = input()
N = len(A)
count = {}
ans = 1  # start by counting the original string itself
# tally how many times each character occurs in A
for i in range(N):
    if A[i] in count.keys():
        count[A[i]] += 1
    else:
        count[A[i]] = 1
# add one result per pair of positions holding different characters:
# for each pair of distinct character kinds, that is L[i] * L[j] such pairs
L = list(count.values())
M = len(L)
for i in range(M):
    for j in range(i+1, M):
        ans += L[i] * L[j]
print(ans) | [
"[email protected]"
] | |
0d8b708a8a3d87139068832ed3780276e9387b0f | 71e640ac44a47e88a813b0019f1ce3031edaad3a | /BookZelle/10_animation.py | 64fd9ba63655a7d3133700de5d96af58f17f8273 | [] | no_license | PyRPy/algorithms_books | ef19b45f24c08a189cb9914f8423f28d609700e2 | 1804863ca931abedbbb8053bcc771115d0c23a2d | refs/heads/master | 2022-05-25T20:22:03.776125 | 2022-05-10T13:47:33 | 2022-05-10T13:47:33 | 167,270,558 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,987 | py | # 10_animation.py
from math import sqrt, sin, cos, radians, degrees
from graphics import *
from projectile import Projectile
from button import Button
class ShortTracker:
def __init__(self, win, angle, velocity, height):
"""Win is the GraphWin to display the shot angle, velocity,
and height are initial projectile parameters.
"""
self.proj = Projectile(angle, velocity, height)
self.marker = Circle(Point(0, height), 3)
self.marker.setFill("red")
self.marker.setOutline("red")
self.marker.draw(win)
def update(self, dt):
""" Move the shot dt seconds farther along its flight """
# update the projectile
self.proj.update(dt)
# move the circle to the new projectile location
center = self.marker.getCenter()
dx = self.proj.getX() - center.getX()
dy = self.proj.getY() - center.getY()
self.marker.move(dx, dy)
def getX(self):
return self.proj.getX()
def getY(self):
return self.proj.getY()
def undraw(self):
self.marker.undraw()
class InputDialog:
""" A custom window for getting simulation values(angle, vel, and height) from the
user. """
def __init__(self, angle, vel, height):
self.win = win = GraphWin("Initial values", 200, 300)
win.setCoords(0, 4.5, 4, 0.5)
Text(Point(1,1), "Angle").draw(win)
self.angle = Entry(Point(3,1), 5).draw(win)
self.angle.setText(str(angle))
Text(Point(1,2), "velocity").draw(win)
self.vel = Entry(Point(3,2), 5).draw(win)
self.vel.setText(str(vel))
Text(Point(1,3), "Height").draw(win)
self.height = Entry(Point(3,3), 5).draw(win)
self.height.setText(str(height))
self.fire = Button(win, Point(1,4), 1.25, 0.5, "Fire!")
self.fire.activate()
self.quit = Button(win, Point(3,4), 1.25, 0.5, "Quit")
self.quit.activate()
def interact(self):
"""Wait for user to click Quit or Fire button
Returns a string indicating which button was clicked
"""
while True:
pt = self.win.getMouse()
if self.quit.clicked(pt):
return "Quit"
if self.fire.clicked(pt):
return "Fire!"
def getValues(self):
""" return input values """
a = float(self.angle.getText())
v = float(self.vel.getText())
h = float(self.height.getText())
return a,v,h
def close(self):
""" close the input window """
self.win.close()
def main():
# create animation window
win = GraphWin("projectile animation", 640, 480, autoflush = False)
win.setCoords(-10, -10, 210, 155)
# draw baseline
Line(Point(-10, 0), Point(210, 0)).draw(win)
# draw labeled ticks every 50 meters
for x in range(0, 210, 50):
Text(Point(x, -5), str(x)).draw(win)
Line(Point(x,0), Point(x,2)).draw(win)
# event loop, each time through fires a single shot
angle, vel, height = 45.0, 40.0, 2.0
while True:
# interact with the user
inputwin = InputDialog(angle, vel, height)
choice = inputwin.interact()
inputwin.close()
if choice == "Quit":
break
# create a shot and track until it hits ground or leaves window
angle, vel, height = inputwin.getValues()
shot = ShortTracker(win, angle, vel, height)
while 0<= shot.getY() and -10 < shot.getX() <= 210:
                shot.update(1/50)  # ShortTracker method: advance the shot by 1/50 of a second
                update(50)  # module-level function from graphics.py: cap redrawing at 50 frames per second
win.close()
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
3be9437daa2a35e743a7785af6fc9535115a66b2 | a61ffe4cde938b993202cd172c84d03cfc7ea955 | c3d_svm_py_develop/c3d_svm_create_inputlist_and_outputprefix.py | 6cb8a156db377a10685419a408b6e9a15f41782b | [] | no_license | wujinjun/wujinjun_py | 33f372944bcb0aa961503207df2c5743b309f46c | 6cb100b06ebe8a66c59198b2a4dcdf263f91f521 | master | 2021-01-19T10:54:06.250551 | 2017-11-20T09:14:44 | 2017-11-20T09:14:44 | 87,911,232 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,547 | py | # This Python script is for c3d_svm; use it to generate inputlist.txt and outputprefix.txt
#Usage: <dataset path> <inputlist filename> <outputprefix filename> <start video index> <end video index> <frame sampling interval>
#Example:create_inputlist_ouputprefix("/home/wjj/HDD/HDD/dataset_pytest/","input_list.txt","output_list_prefix.txt",1,13,8)
#Author:Wu jinjun
import re
def change_to_outputprefix_format(string): # convert a frame path into the outputprefix format
return re.split(r".jpg",string)[0].replace("dataset","output")
def change_to_inputlist_format(string,label): # convert a frame path into the inputlist format
    pattern_frame_num = re.compile(r"\d+") # regex that extracts every number in the path
    str_num = re.findall(pattern_frame_num,string)[-1] # take the last of those numbers
    int_num = int(str_num) # convert the zero-padded str number (e.g. "000128") to int
    str_num_withoutzero = str(int_num) # convert the int back to str, now without the leading zeros
    pattern_path = re.compile(r"000*") # locate the path part before the "000*" frame number
    str_path = re.split(pattern_path,string)[0] # slice the frame path by that rule and keep the leading part
    inputlist_string = str_path + ' '+ str_num_withoutzero + ' ' + str(label) # combine into the inputlist format
return inputlist_string
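# Usage sketch (illustrative): change_to_inputlist_format("/d/clip/000128.jpg", 3)
# -> "/d/clip/ 128 3"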
def match_item(string,floor,ceiling): # match an item, with configurable lower and upper bounds
    regex = re.compile(r"subject_*") # match the number after "subject_"
    int_num=int(re.split(regex,string)[-1]) # convert the matched str number to int, i.e. slice off the trailing number
    if int_num <= ceiling and int_num >= floor: # if the index lies within the expected range
return True
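# Usage sketch (illustrative): match_item("/data/class_a/subject_12", 1, 13) -> True
# (returns None, which is falsy, when the index falls outside the range)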
import os
def create_inputlist_ouputprefix(basepath,inputlist,outputprefix,floor,ceiling,sample_interval): # basepath is the dataset path; then the inputlist filename, outputprefix filename, and frame sampling interval
    f_outputfile = open(outputprefix,'w') # open the files for writing
    f_inputfile = open(inputlist,'w')
    classlist = os.listdir(basepath) # list the entries under basepath and store them in classlist
    label = 0
    classlist.sort() # sort the classes alphabetically
    #print classlist
    for classvar in classlist: # for each class in the dataset
        videodir = os.path.join('%s%s' % (basepath,classvar))
        if os.path.isdir(videodir) == True: # check that the entry is a directory
            videolist = os.listdir(videodir) # store the video entries in videolist
            #print videodir
            videolist.sort()
            for videovar in videolist: # for each video under the class
                clipdir = os.path.join('%s/%s' % (videodir,videovar))
                #print videolist
                if os.path.isdir(clipdir) == True: # check that the entry is a directory
                    if match_item(clipdir,floor=floor,ceiling=ceiling) == True:
                        #print clipdir
                        framelist = os.listdir(clipdir)
                        framelist.sort()
                        framelist = framelist[::sample_interval]
                        framelist = framelist[:-1]
                        for framevar in framelist:
                            framedir = os.path.join('%s/%s/%s' % (videodir,videovar,framevar))
                            #print framedir,label # print the frame path and its label
                            frameitem_outputfile = change_to_outputprefix_format('%sc3d/%s_%s/%s' % (basepath,classvar,videovar,framevar))
                            frameitem_inputfile = change_to_inputlist_format(framedir,label)
                            f_outputfile.write('%s\n' % (frameitem_outputfile)) # write the entry to the file
                            f_inputfile.write('%s\n' % (frameitem_inputfile)) # write the entry to the file
                            #print frameitem_inputfile
                            #print frameitem_outputfile
            label=label+1
    f_outputfile.close()
    f_inputfile.close()
print("Processing...")
#Usage: <dataset path> <inputlist filename> <outputprefix filename> <start video index> <end video index> <frame sampling interval>
#Example:create_inputlist_ouputprefix("/home/wjj/HDD/HDD/dataset_pytest/","input_list.txt","output_list_prefix.txt",1,13,8)
create_inputlist_ouputprefix("/samba/HDD/dataset_20170117_error/","input_list_ori.txt","output_list_prefix_ori.txt",1,45,8)
print("Done!")
| [
"[email protected]"
] | |
3f506c4228fece459ff8e304cb3ff7c1c8f44ed0 | 4273556f701f70baccd7151ce0c51cb89e68a610 | /moult/color.py | c8ae3a6dfebf203ea0be218df9986ab90917fda7 | [
"MIT"
] | permissive | intermezzo-fr/moult | bef6166a99a181a91e610232d599cd669ca5f6fe | 59f5f08931c7ed306d83a7a1696d95082c259c60 | refs/heads/master | 2021-01-16T17:59:35.345622 | 2015-04-27T18:51:02 | 2015-04-27T18:51:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,524 | py | import sys
FG_BLACK = 30
FG_RED = 31
FG_GREEN = 32
FG_YELLOW = 33
FG_BLUE = 34
FG_MAGENTA = 35
FG_CYAN = 36
FG_WHITE = 37
FG_RESET = 39
BG_BLACK = 40
BG_RED = 41
BG_GREEN = 42
BG_YELLOW = 43
BG_BLUE = 44
BG_MAGENTA = 45
BG_CYAN = 46
BG_WHITE = 47
BG_RESET = 49
enabled = True
_enabled = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
class ColorCombo(object):
def __init__(self, foreground=0, background=0, bright=None):
self.foreground = foreground or FG_RESET
self.background = background or BG_RESET
self.set_bright(bright)
def set_bright(self, bright):
if bright is None:
self.flag = 22
elif bright:
self.flag = 1
else:
self.flag = 2
def copy(self):
c = ColorCombo(self.foreground, self.background)
c.flag = self.flag
return c
def __repr__(self):
return u'<ColorCombo [{:d}, {:d}]>'.format(self.foreground, self.background)
HEY = ColorCombo(FG_RED)
YAY = ColorCombo(FG_GREEN)
MEH = ColorCombo(FG_YELLOW)
GOOD = ColorCombo(FG_BLUE)
NEAT = ColorCombo(FG_CYAN)
SHHH = ColorCombo(FG_MAGENTA)
NOOO = ColorCombo(FG_WHITE, BG_RED, bright=True)
MAN = ColorCombo(FG_BLACK, BG_YELLOW, bright=True)
class ColorTextRun(object):
'''String imposter that supports multiple color strings, mostly so len()
reports the actual text's length
'''
def __init__(self, *items):
self.items = list(items)
def __len__(self):
return sum(map(len, self.items))
def __unicode__(self):
return u''.join(map(str, self.items))
def __str__(self):
return self.__unicode__()
def __repr__(self):
return u'<ColorTextRun {}>'.format(repr(self.items))
def __add__(self, other):
self.items.append(other)
return self
def __radd__(self, other):
self.items.insert(0, other)
return self
def encode(self, *args, **kwargs):
return str(self).encode(*args, **kwargs)
def decode(self, *args, **kwargs):
return str(self).decode(*args, **kwargs)
class ColorText(object):
'''String imposter that supports colored strings, mostly so len()
reports the actual text's length
'''
fmt = u'\033[{fg:d};{bg:d};{f}m{t}\033[0m'
def __init__(self, text, foreground=0, background=0, ignore_setting=False):
self.text = text
if isinstance(foreground, ColorCombo):
self.color = foreground
else:
self.color = ColorCombo(foreground or FG_RESET,
background or BG_RESET)
self.ignore_setting = ignore_setting
def __len__(self):
return len(self.text)
def __unicode__(self):
if not _enabled or (not self.ignore_setting and not enabled):
return self.text
return self.fmt.format(fg=self.color.foreground,
bg=self.color.background,
f=self.color.flag,
t=self.text)
def __str__(self):
return self.__unicode__()
def __repr__(self):
return u'<ColorText "{}" ({})>'.format(self.text, repr(self.color))
def __add__(self, other):
return ColorTextRun(self, other)
def __radd__(self, other):
return ColorTextRun(other, self)
def encode(self, *args, **kwargs):
return str(self).encode(*args, **kwargs)
def decode(self, *args, **kwargs):
return str(self).decode(*args, **kwargs)
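if __name__ == '__main__':
    # Minimal demo (illustrative): len() reports the visible text length even
    # though str() wraps the text in ANSI escape codes when colors are enabled.
    warning = ColorText('careful!', HEY)
    print('%s (visible length: %d)' % (warning, len(warning)))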
| [
"[email protected]"
] | |
64bc51802cab2944f30ecdc0a80ee6c55b246561 | 966e52a65041fffb09cc66c5b3593a4af6d2aadb | /tut_01_hello_triangle/__init__.py | d015788299204f0c562468eac7b44ee0004c8c23 | [] | no_license | tymonpitts/pygltut | 489c0fd2a6a10076624b76cfc77c777e034e8c2a | 11868a764ffadae0bc439440e9f2273896ae7b3f | refs/heads/master | 2021-01-06T20:36:54.883409 | 2014-04-06T22:49:40 | 2014-04-06T22:49:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,292 | py | import glfw
from OpenGL import GL
from OpenGL.GL.shaders import compileShader, compileProgram
from gltut_framework import AbstractTutorial
VERTEX_SHADER = """
#version 330
layout(location = 0) in vec4 position;
void main()
{
gl_Position = position;
}
"""
FRAGMENT_SHADER = """
#version 330
out vec4 outputColor;
void main()
{
outputColor = vec4(1.0f, 1.0f, 1.0f, 1.0f);
}
"""
class Tutorial(AbstractTutorial):
def __init__(self, *args, **kwargs):
super(Tutorial, self).__init__(*args, **kwargs)
self.theProgram = None
self.vertexPositions = [
0.75, 0.75, 0.0, 1.0,
0.75, -0.75, 0.0, 1.0,
-0.75, -0.75, 0.0, 1.0]
self.positionBufferObject = None
self.vao = None
self.vert_components = 4
self.size_float = 4
def initializeProgram(self):
shaderList = []
shaderList.append(compileShader(VERTEX_SHADER, GL.GL_VERTEX_SHADER))
shaderList.append(compileShader(FRAGMENT_SHADER, GL.GL_FRAGMENT_SHADER))
self.theProgram = compileProgram(*shaderList)
def initializeVertexBuffer(self):
self.positionBufferObject = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER,self.positionBufferObject)
array_type = (GL.GLfloat*len(self.vertexPositions))
GL.glBufferData(
GL.GL_ARRAY_BUFFER,
len(self.vertexPositions)*self.size_float,
array_type(*self.vertexPositions),
GL.GL_STATIC_DRAW)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER,0)
def init(self):
self.vao = GL.glGenVertexArrays(1)
GL.glBindVertexArray(self.vao)
self.initializeProgram()
self.initializeVertexBuffer()
def display(self):
GL.glClearColor(0.0, 0.0, 0.0, 0.0)
GL.glClear(GL.GL_COLOR_BUFFER_BIT)
GL.glUseProgram(self.theProgram)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.positionBufferObject)
GL.glEnableVertexAttribArray(0)
GL.glVertexAttribPointer(0, self.vert_components, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
GL.glDrawArrays(GL.GL_TRIANGLES, 0, len(self.vertexPositions)//self.vert_components)
GL.glDisableVertexAttribArray(0)
GL.glUseProgram(0)
glfw.SwapBuffers()
def reshape(self, w, h):
GL.glViewport(0, 0, w, h);
def keyboard(self, key, press):
if key == glfw.KEY_ESC:
glfw.Terminate()
return
# import glfw
# from OpenGL.GL import *
# from OpenGL.raw.GL.ARB.vertex_array_object import \
# glGenVertexArrays, glBindVertexArray
# import numpy
# from gltut_framework import AbstractTutorial
# from OpenGL.GL import shaders
# from OpenGL.arrays import vbo
# from OpenGL.arrays.arraydatatype import ArrayDatatype
# # print OpenGL.GL.__file__
# class Tutorial(AbstractTutorial):
# VERTEX_SHADER = """
# #version 330
# layout(location = 0) in vec4 position;
# void main()
# {
# gl_Position = position;
# }
# """
# FRAGMENT_SHADER = """
# #version 330
# out vec4 outputColor;
# void main()
# {
# outputColor = vec4(1.0f, 1.0f, 1.0f, 1.0f);
# }
# """
# def __init__(self, *args, **kwargs):
# super(Tutorial, self).__init__(*args, **kwargs)
# self.theProgram = None
# self.vertexPositions = numpy.array([
# [0.75, 0.75, 0.0, 1.0],
# [0.75, -0.75, 0.0, 1.0],
# [-0.75, -0.75, 0.0, 1.0]], dtype='f')
# # self.positionBufferObject = None
# self.positionBufferObject = GLuint(0)
# self.vao = GLuint(0)
# def initializeProgram(self):
# shaderList = []
# shaderList.append(shaders.compileShader(self.VERTEX_SHADER, GL_VERTEX_SHADER))
# shaderList.append(shaders.compileShader(self.FRAGMENT_SHADER, GL_FRAGMENT_SHADER))
# self.theProgram = shaders.compileProgram(*shaderList)
# def initializeVertexBuffer(self):
# glGenBuffers(1, self.positionBufferObject)
# glBindBuffer(GL_ARRAY_BUFFER, self.positionBufferObject)
# glBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(self.vertexPositions), self.vertexPositions, GL_STATIC_DRAW);
# glBindBuffer(GL_ARRAY_BUFFER, 0);
# # self.positionBufferObject = vbo.VBO(self.vertexPositions)
# def init(self):
# glGenVertexArrays(1, self.vao)
# glBindVertexArray(self.vao)
# self.initializeProgram()
# self.initializeVertexBuffer()
# # glGenVertexArrays(1, self.vao)
# # glBindVertexArray(self.vao)
# def display(self):
# glClearColor(0.0, 0.0, 0.0, 0.0)
# glClear(GL_COLOR_BUFFER_BIT)
# glUseProgram(self.theProgram)
# # glBindBuffer(GL_ARRAY_BUFFER, self.positionBufferObject)
# self.positionBufferObject.bind()
# glEnableVertexAttribArray(0)
# glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 0, 0)
# glDrawArrays(GL_TRIANGLES, 0, 3)
# glDisableVertexAttribArray(0)
# glUseProgram(0)
# glfw.SwapBuffers()
# def reshape(self, w, h):
# glViewport(0, 0, w, h);
# def keyboard(self, key, press):
# if key == glfw.KEY_ESC:
# glfw.Terminate()
# return
| [
"[email protected]"
] | |
39d2d1b7c2d82e8e5f7f14bf5ac912849bac3e28 | ac06fe47b7b8bca32cce00823d5bf6b974634e53 | /Geospatial/texas_freeze_NDVI.py | 348b04388374000651d3e2cb5cf5bdcab1b418e9 | [
"BSD-2-Clause"
] | permissive | veeeology/portfolio_projects | 3277d99faad84785bcc67e52a353762292c23960 | 6ba7ae934bb678091815262f9af553dffc407f4f | refs/heads/main | 2023-03-30T16:36:19.553748 | 2021-04-06T14:56:15 | 2021-04-06T14:56:15 | 347,794,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,267 | py | import os, zipfile, time, datetime, re
import datetime as dt
import rasterio
import rasterio.mask
import geopandas as gpd
import numpy as np
import matplotlib.pyplot as plt
# File locations and configuration
raster_crs = '+proj=laea +lat_0=45 +lon_0=-100 +x_0=0 +y_0=0 +ellps=sphere +units=m +no_defs +type=crs'
raster_dir = 'eMODIS_NDVI_v6/'
states_shp_file = 'Culture/USA_States__Generalized_.geojson'
# Load in the Texas outline to crop the rasters
states_shp = gpd.read_file(states_shp_file)
shp_crs = states_shp.crs
texas = states_shp.loc[states_shp['STATE_NAME'] == 'Texas']
texas = texas.to_crs(raster_crs)
# Because the state outline includes lakes, etc, locate the largest of the
# multipolygons
texas = texas.iloc[0].geometry
texas_areas = [t.area for t in texas]
texas = [texas[i] for i in range(len(texas)) if texas_areas[i]==max(texas_areas)][0]
# # If needed, make sure the zip files have been extracted.
# zip_files = [f for f in os.listdir(raster_dir) if f.endswith('.zip')]
# for f in zip_files:
# data_zip = zipfile.ZipFile(raster_dir + f)
# data_zip.extractall(raster_dir)
# Get the unique list of file prefixes and dates to use for each raster
file_prefixes = dict()
for f in os.listdir(raster_dir):
# Look for the date part of the string
date_pattern = '\d{4}.\d{3}-\d{3}'
date_match = re.search(date_pattern, f)
# If the date is present, store it in the dictionary with the file prefix
if date_match != None:
date_str = date_match.group()
year = int(date_str[0:4])
start_days = int(date_str[5:8])
end_days = int(date_str[9:12])
# Convert from days since the beginning of the year to a date object
start_date = dt.date(year, 1, 1) + dt.timedelta(days=start_days)
end_date = dt.date(year, 1, 1) + dt.timedelta(days=end_days)
# Construct the file prefix
prefix = 'US_eMAH_NDVI_' + date_str + '.HKM.'
if prefix not in file_prefixes.keys():
file_prefixes[prefix] = dict()
file_prefixes[prefix]['start_date'] = start_date
file_prefixes[prefix]['end_date'] = end_date
if 'HKM.VI_NDVI' in f and f.endswith('.tif'):
file_prefixes[prefix]['NDVI_file'] = f
if 'HKM.VI_QUAL' in f and f.endswith('.tif'):
file_prefixes[prefix]['QUAL_file'] = f
# Iterate through each group of files to extract, filter, and plot the data
for fp in file_prefixes:
# First, open the NDVI raster and crop it to the state of Texas
img = rasterio.open(raster_dir + file_prefixes[fp]['NDVI_file'])
ndvi_masked, transform = rasterio.mask.mask(img, [texas]\
,crop=True, filled=False)
ndvi_masked = ndvi_masked[0]
# Plot the figure for display
f1 = plt.figure()
plt.title('NDVI from ' + str(file_prefixes[fp]['start_date']) + ' to ' \
+ str(file_prefixes[fp]['end_date']))
frame = plt.gca()
frame.axes.get_xaxis().set_visible(False)
frame.axes.get_yaxis().set_visible(False)
plt.imshow(ndvi_masked, cmap='gist_earth_r', vmin=-2000, vmax=10000)
plt.colorbar()
# Then open the quality file so that we can use it to filter our data
img = rasterio.open(raster_dir + file_prefixes[fp]['QUAL_file'])
qual_masked, transform = rasterio.mask.mask(img, [texas] \
,crop=True, filled=False)
qual_masked = qual_masked[0]
f1 = plt.figure()
plt.title('QUAL from ' + str(file_prefixes[fp]['start_date']) + ' to ' \
+ str(file_prefixes[fp]['end_date']))
frame = plt.gca()
frame.axes.get_xaxis().set_visible(False)
frame.axes.get_yaxis().set_visible(False)
plt.imshow(qual_masked, cmap='viridis', vmin=0, vmax=4)
plt.colorbar()
# Use the data quality layer to exclude pixels from the ndvi map
qual_flag = np.where(qual_masked>0.1, np.nan, 1.0)
ndvi_masked_filtered = np.multiply(ndvi_masked, qual_flag)
f1 = plt.figure()
plt.title('NDVI from ' + str(file_prefixes[fp]['start_date']) + ' to ' \
+ str(file_prefixes[fp]['end_date']))
frame = plt.gca()
frame.axes.get_xaxis().set_visible(False)
frame.axes.get_yaxis().set_visible(False)
plt.imshow(ndvi_masked_filtered, cmap='gist_earth_r', vmin=-2000, vmax=10000)
plt.colorbar()
file_prefixes[fp]['cropped_filtered_map'] = ndvi_masked_filtered
# Final step: Build the difference maps to calculate the difference in NDVI
# from before and after the freeze
start_comparison = 'US_eMAH_NDVI_2021.033-039.HKM.'
for fp in file_prefixes:
if fp != start_comparison:
diffmap = file_prefixes[fp]['cropped_filtered_map'] \
- file_prefixes[start_comparison]['cropped_filtered_map']
f1 = plt.figure()
plt.title('Difference in NDVI: pre-freeze (Feb 3-9) vs \n' \
+ file_prefixes[fp]['start_date'].strftime('%b %d') \
+ ' - ' + file_prefixes[fp]['end_date'].strftime('%b %d'))
frame = plt.gca()
frame.axes.get_xaxis().set_visible(False)
frame.axes.get_yaxis().set_visible(False)
plt.imshow(diffmap, cmap='RdYlGn', vmin=-1000, vmax=1000)
plt.colorbar() | [
"[email protected]"
] | |
1dc803b5cbb87ff7d2b65bfecab4e5e6aa6afee4 | c4a33b613ffc77dccf96d33c3a5cc127405c0e95 | /home/tests.py | 9124cedbe903776e5f9e6fcac0320003030903d1 | [] | no_license | tsokac2/new-irish-life | 25f49bd0b74dfa7c0a449772249f6cb51925b643 | d09934b60a1fd4fbd4540d412dc5dab726f5b502 | refs/heads/main | 2023-07-02T09:54:55.082587 | 2021-07-30T04:42:57 | 2021-07-30T04:42:57 | 379,245,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | from django.test import TestCase
class TestViews(TestCase):
def test_home_page(self):
""" Test home page renders correct page """
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'home/home.html') | [
"[email protected]"
] | |
fc819fae94e9a16c18e0c3eede6e3712240680ff | ade1ad9128f7c31c69d468225bbf032fe96e0783 | /src/StateHandler.py | 22c3048426a0b2190fe0430cb9a55c5059274123 | [] | no_license | UHSProgramming/StrategyGame | a59bef78916654bcf0f42a94693672b16af594eb | 0795dc656e32903818e9f5aea47b9751f8471f7e | refs/heads/master | 2021-09-02T12:49:20.202234 | 2018-01-02T19:44:37 | 2018-01-02T19:44:37 | 105,717,547 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | import pygame
"""
StateHandler
Handles all the given GameStates.
StateHandler handles swapping states and maintaining state
"""
class StateHandler:
def __init__(self):
self.states = []
self.stateIndex = 0
def addState(self, newState): # add a state to the list of states
self.states.append(newState)
def changeState(self, newState): # change current state to states[newState]
self.currentState = self.states[newState]
self.stateIndex = newState
def getCurrentState(self): # get the current state
return self.states[self.stateIndex]
def update(self):
self.getCurrentState().update()
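# Minimal usage sketch (illustrative; MenuState and PlayState stand in for any
# objects that expose an update() method):
#   handler = StateHandler()
#   handler.addState(MenuState())
#   handler.addState(PlayState())
#   handler.changeState(1)  # make states[1] the current state
#   handler.update()        # delegates to the current state's update()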
| [
"[email protected]"
] | |
744795bf937a40082cce84199fe73e053eeeed12 | 9983450fb504387c767a4ea59cc149786388b245 | /tests/mowgli_etl_test/pipeline/food_on/food_on_extractor_test.py | b76c05ca2a55c099f7c3cdf20e1dc5dc0ae4cd8a | [
"MIT"
] | permissive | tetherless-world/mowgli-etl | 2e354f00df22f70323c3e05b3504c272903ded6d | 28c19eba41e03e053ae4addff56a313d926e18d7 | refs/heads/master | 2023-04-18T15:16:14.051190 | 2021-01-04T15:32:01 | 2021-01-04T15:32:01 | 235,866,968 | 6 | 1 | MIT | 2021-04-20T20:05:35 | 2020-01-23T19:11:35 | Python | UTF-8 | Python | false | false | 416 | py | import os.path
from mowgli_etl.pipeline.food_on.food_on_extractor import FoodOnExtractor
def test_food_on_extractor(pipeline_storage):
result = FoodOnExtractor().extract(force=False, storage=pipeline_storage)
assert len(result) == 1
file_path = result["food_on_owl_file_path"]
assert os.path.dirname(file_path) == str(pipeline_storage.extracted_data_dir_path)
assert os.path.exists(file_path)
| [
"[email protected]"
] | |
30c8425b09e3e4075bc5a238bd98f31b25ea39e6 | a4dbabcae5bbc6f9aef63bd7dfad12eb92e3b1f9 | /venv/Scripts/easy_install-3.7-script.py | c39a3d704356584c70032a4e0d652d4f704a4dc6 | [] | no_license | RFarrow9/Zeitgeist | 18de1ffdb4d8c2bbe3354d46f71011069b5ffe0f | 9b2b90720a5be6ab1c46783110a09a855201a6c6 | refs/heads/master | 2020-08-01T07:37:41.541660 | 2019-09-25T19:36:29 | 2019-09-25T19:36:29 | 210,916,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | #!C:\Users\robfa\PycharmProjects\SparkZeitgeist\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
)
| [
"[email protected]"
] | |
c5e76a9cc1641319109e8b0d11e62b3cc0fcd669 | 302c5257598ef7e66606672bc73183b0f41d97fd | /wsgi.py | f07be02fec44860e7144109cd42a9e91c05c52d3 | [
"MIT"
] | permissive | jjmartens/mmq | 87955c3d2437a757e5adf6f2e9ba394d46e12b5d | 6fbff737e7c4ece100fcb4ab2844859d05b59dd2 | refs/heads/master | 2021-06-06T08:51:07.822333 | 2016-11-03T10:24:03 | 2016-11-03T10:24:03 | 33,864,801 | 5 | 1 | MIT | 2023-01-19T14:45:27 | 2015-04-13T11:35:55 | Python | UTF-8 | Python | false | false | 304 | py | from werkzeug.contrib.fixers import ProxyFix
from app import app, db
app.config['SQLALCHEMY_DATABASE_URI'] ='mysql://root:@localhost/mmq'
app.wsgi_app = ProxyFix(app.wsgi_app)
@app.errorhandler(500)
def internal_error(error):
db.session.rollback()
if __name__ == '__main__':
app.run(debug=True) | [
"[email protected]"
] | |
519e6292e702c8402b2db2212eabdbe6b3b18ae9 | e7ebcf6603d134180f5b793643795c10a888aeca | th23.py | ccb54cac015e7aa561cfb3b6c9913253ef39384e | [] | no_license | tranvanquan1999/k-thu-t-l-p-tr-nh | c7469e78423e293062920ee11973d44c9c802857 | 5c9c5fff4b0d49a6eb62f041ac9bed8d4d6954e0 | refs/heads/master | 2020-04-28T02:57:20.890902 | 2019-06-01T05:40:06 | 2019-06-01T05:40:06 | 174,917,785 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | # program that checks whether a number is even or odd and prints the result
n = int(input("enter the number to check ---->"))
if n % 2 == 0:
    print("even number")
else:
    print("odd number") | [
"[email protected]"
] | |
a475483643b30fae89103e4f7a12d3e27b64a13f | 452c6ab91b09d4bec46cf49188c03cd6c7d4225c | /cifar100.py | d9de5d2a74dfd3097bb85f2e03ce8f39239f99ee | [] | no_license | bahar-j/computer-vision | a66197db8a75b2572e311a6c0daa530012e72adb | 5703028e427f716a58036605ee99297b974fe54e | refs/heads/master | 2022-12-24T21:17:21.639830 | 2020-09-06T12:21:19 | 2020-09-06T12:21:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,436 | py | import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras import layers, datasets
import numpy as np
# Hyperparameter Tuning
num_classes = 100
input_shape = (32, 32, 3)
# Prepare Dataset
(train_x, train_y), (test_x, test_y) = datasets.cifar100.load_data()
print(train_x.shape, train_y.shape, test_x.shape, test_y.shape)
train_y = tf.keras.utils.to_categorical(train_y, 100)
test_y = tf.keras.utils.to_categorical(test_y, 100)
train_x = train_x / 255
test_x = test_x / 255
print(train_y.shape, test_y.shape)
# Build Model
inputs = layers.Input(input_shape)
# Feature Extraction
net = layers.Conv2D(32, 3, strides=(1, 1), padding="SAME")(inputs)
net = layers.Activation('relu')(net)
net = layers.Conv2D(32, 3, strides=(1, 1), padding="SAME")(net)
net = layers.Activation('relu')(net)
net = layers.MaxPool2D((2, 2))(net)
net = layers.Dropout(0.5)(net)
net = layers.Conv2D(64, 3, strides=(1, 1), padding="SAME")(inputs)
net = layers.Activation('relu')(net)
net = layers.Conv2D(64, 3, strides=(1, 1), padding="SAME")(net)
net = layers.Activation('relu')(net)
net = layers.MaxPool2D((2, 2))(net)
net = layers.Dropout(0.5)(net)
# Fully Connected(Classification)
net = layers.Flatten()(net)
net = layers.Dense(512)(net) # 8*8*64 = 4096 flattened nodes -> 512 nodes
net = layers.Activation('relu')(net)
net = layers.Dropout(0.5)(net)
net = layers.Dense(num_classes)(net)
net = layers.Activation('softmax')(net)
model = tf.keras.Model(inputs = inputs, outputs = net, name='basic_cnn')
# Optimization Setting
model.compile(loss=tf.keras.losses.categorical_crossentropy,
optimizer = tf.keras.optimizers.Adam(),
metrics=['accuracy'])
# Training Model
hist = model.fit(train_x, train_y,
batch_size = 32,
shuffle = True,
epochs = 20)
# Evaluate
histories = hist.history
print(histories.keys())
plt.figure(figsize=(10, 5))
plt.subplot(121)
plt.plot(histories['loss'])
plt.title("Loss Curve")
plt.subplot(122)
plt.plot(histories['accuracy'])
plt.ylim(0, 1)
plt.title('Accuracy Curve')
plt.show()
logits = model.predict(test_x)
print(logits.shape)
print(np.argmax(logits[0]))
print(np.max(logits[0]))
plt.imshow(test_x[0])
plt.title(np.argmax(logits[0]))
plt.show()
preds = np.argmax(logits, -1)
print(preds.shape)
print(preds[0])
plt.hist(preds)
plt.hist(np.argmax(test_y, -1), color= 'red', alpha = 0.5)
plt.show()
| [
"[email protected]"
] | |
1b44382e8d09fb8bedca579dabdd3e87fc880555 | 6cdd96cf0a7f3aab3dbd9f870d6b1bf3fea214c0 | /models/crnn.py | e08de010f001d7661fe16244ca9d28bc40d98a88 | [
"MIT"
] | permissive | yanhongyu111/crnn.pytorch | 40492ea2a0e9f59afd9e35d0addbd293fd79543c | 143c28fc5abc585fc97c19e2e57090d39a7140c5 | refs/heads/master | 2020-06-20T01:40:53.034038 | 2019-07-14T06:50:22 | 2019-07-14T06:50:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,916 | py | # -*- coding: utf-8 -*-
# @Time : 18-11-16 下午5:46
# @Author : zhoujun
import torch
import torchvision
from torchvision.models.densenet import _DenseBlock
from torch import nn
class VGG(nn.Module):
def __init__(self, in_channels):
super(VGG, self).__init__()
        self.features = nn.Sequential( # first conv layer
nn.Conv2d(in_channels=in_channels, out_channels=64, kernel_size=(3, 3), padding=(1, 1)),
nn.ReLU(),
nn.BatchNorm2d(64, momentum=0.9),
nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)),
# second conv layer
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(3, 3), padding=(1, 1)),
nn.ReLU(),
nn.BatchNorm2d(128, momentum=0.9),
nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)),
# third conv layer
nn.Conv2d(in_channels=128, out_channels=256, kernel_size=(3, 3), padding=(1, 1)),
nn.ReLU(),
nn.BatchNorm2d(256, momentum=0.9),
# fourth conv layer
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=(3, 3), padding=(1, 1)),
nn.ReLU(),
nn.BatchNorm2d(256, momentum=0.9),
nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 1), padding=(0, 1)),
# fifth conv layer
nn.Conv2d(in_channels=256, out_channels=512, kernel_size=(3, 3), padding=(1, 1)),
nn.ReLU(),
nn.BatchNorm2d(512, momentum=0.9),
# sixth conv layer
nn.Conv2d(in_channels=512, out_channels=512, kernel_size=(3, 3), padding=(1, 1)),
nn.ReLU(),
nn.BatchNorm2d(512, momentum=0.9),
nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 1), padding=(0, 1)),
            # seventh conv layer
nn.Conv2d(in_channels=512, out_channels=512, kernel_size=(2, 2)),
nn.ReLU(),
nn.BatchNorm2d(512, momentum=0.9)
)
def forward(self, x):
return self.features(x)
class BasicBlockV2(nn.Module):
r"""BasicBlock V2 from
`"Identity Mappings in Deep Residual Networks"
<https://arxiv.org/abs/1603.05027>`_ paper.
This is used for ResNet V2 for 18, 34 layers.
Parameters
----------
channels : int
Number of output channels.
stride : int
Stride size.
downsample : bool, default False
Whether to downsample the input.
in_channels : int, default 0
Number of input channels. Default is 0, to infer from the graph.
"""
def __init__(self, in_channels, out_channels, stride, downsample=False):
super(BasicBlockV2, self).__init__()
self.bn1 = nn.BatchNorm2d(in_channels, momentum=0.9)
self.relu1 = nn.ReLU()
self.conv = nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=1,
bias=False),
nn.BatchNorm2d(out_channels, momentum=0.9),
nn.ReLU(),
nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1,
bias=False)
)
if downsample:
self.downsample = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1,
stride=stride, bias=False)
else:
self.downsample = None
def forward(self, x):
residual = x
x = self.bn1(x)
x = self.relu1(x)
if self.downsample:
residual = self.downsample(x)
x = self.conv(x)
return x + residual
class ResNet(nn.Module):
def __init__(self, in_channels):
super(ResNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=64, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(64, momentum=0.9),
nn.ReLU(),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=2, stride=2, bias=False),
nn.BatchNorm2d(64, momentum=0.9),
nn.ReLU(),
BasicBlockV2(in_channels=64, out_channels=64, stride=1, downsample=True),
BasicBlockV2(in_channels=64, out_channels=128, stride=1, downsample=True),
nn.Dropout(0.2),
BasicBlockV2(in_channels=128, out_channels=128, stride=2, downsample=True),
BasicBlockV2(in_channels=128, out_channels=256, stride=1, downsample=True),
nn.Dropout(0.2),
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=2, stride=(2, 1), padding=(0, 1), bias=False),
BasicBlockV2(in_channels=256, out_channels=512, stride=1, downsample=True),
nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=3, padding=0, bias=False),
nn.BatchNorm2d(1024, momentum=0.9),
nn.ReLU(),
nn.Conv2d(in_channels=1024, out_channels=2048, kernel_size=2, padding=(0, 1), bias=False),
nn.BatchNorm2d(2048, momentum=0.9),
nn.ReLU()
)
def forward(self, x):
return self.features(x)
class _Transition(nn.Sequential):
def __init__(self, in_channels, out_channels, pool_stride, pool_pad, dropout):
super(_Transition, self).__init__()
self.add_module('norm', nn.BatchNorm2d(in_channels, momentum=0.9))
self.add_module('relu', nn.ReLU(inplace=True))
self.add_module('conv', nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False))
if dropout:
self.add_module('dropout', nn.Dropout(dropout))
self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=pool_stride, padding=pool_pad))
class DenseNet(nn.Module):
def __init__(self, in_channels):
super(DenseNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=64, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(64, momentum=0.9),
nn.ReLU(),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=2, stride=2, bias=False),
_DenseBlock(num_input_features=64, num_layers=8, bn_size=4, growth_rate=8, drop_rate=0),
_Transition(in_channels=128, out_channels=128, pool_stride=2, pool_pad=0, dropout=0.2),
_DenseBlock(num_input_features=128, num_layers=8, bn_size=4, growth_rate=8, drop_rate=0),
_Transition(in_channels=192, out_channels=192, pool_stride=(2, 1), pool_pad=(0, 1), dropout=0.2),
_DenseBlock(num_input_features=192, num_layers=8, bn_size=4, growth_rate=8, drop_rate=0),
nn.BatchNorm2d(256, momentum=0.9),
nn.ReLU(),
nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, padding=0, bias=False),
nn.BatchNorm2d(512, momentum=0.9),
nn.ReLU(),
nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=2, padding=(0, 1), bias=False),
nn.BatchNorm2d(1024, momentum=0.9),
nn.ReLU()
)
def forward(self, x):
return self.features(x)
class BidirectionalLSTM(nn.Module):
def __init__(self, in_channels, hidden_size, num_layers):
super(BidirectionalLSTM, self).__init__()
self.rnn = nn.LSTM(input_size=in_channels, hidden_size=hidden_size, num_layers=num_layers, bidirectional=True)
def forward(self, x):
x, _ = self.rnn(x)
# x = self.fc(x) # [T * b, nOut]
return x
class Encoder(nn.Module):
def __init__(self, in_channels, encoder_name):
super(Encoder, self).__init__()
encoder_dict = {'VGG': VGG, 'ResNet': ResNet, 'DenseNet': DenseNet}
self.cnn = encoder_dict[encoder_name](in_channels=in_channels)
def forward(self, x):
return self.cnn(x)
class RNNDecoder(nn.Module):
def __init__(self, in_channels, n_class, hidden_size, num_layers=1):
super(RNNDecoder, self).__init__()
self.rnn = nn.Sequential(
BidirectionalLSTM(in_channels=in_channels, hidden_size=hidden_size, num_layers=num_layers),
BidirectionalLSTM(in_channels=hidden_size * 2, hidden_size=hidden_size, num_layers=num_layers))
self.fc = nn.Linear(hidden_size * 2, n_class)
def forward(self, x):
# remove the dim which is 1
x = x.squeeze(dim=2)
# from (batch,chanal,w) to (w,batch,channel)
x = x.permute(2, 0, 1) # [w, b, c]
x = self.rnn(x)
x = self.fc(x)
# print(x.shape)
return x
class CNNDecoder(nn.Module):
def __init__(self, in_channels, n_class):
super(CNNDecoder, self).__init__()
self.cnn_decoder = nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=256, kernel_size=3, padding=1, stride=(2, 1), bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1, stride=(2, 1), bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1, stride=(2, 1), bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1, stride=(2, 1), bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
)
self.fc = nn.Linear(256, n_class)
def forward(self, x):
x = self.cnn_decoder(x)
x = x.squeeze(dim=2)
x = x.permute(2, 0, 1)
x = self.fc(x)
# print(x.shape)
return x
class CRNN(nn.Module):
def __init__(self, in_channels, n_class, model_config):
super(CRNN, self).__init__()
encoder_out_dict = {'VGG': 512, 'ResNet': 2048, 'DenseNet': 1024}
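        # output channel width of each backbone's final feature map; used to size the decoder input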
if model_config['encoder'] not in encoder_out_dict:
raise NotImplementedError
self.encoder = Encoder(in_channels, model_config['encoder'])
if model_config['decoder']['type'] == 'rnn':
self.decoder = RNNDecoder(encoder_out_dict[model_config['encoder']], n_class,
model_config['decoder']['rnn']['nh'])
elif model_config['decoder']['type'] == 'cnn':
self.decoder = CNNDecoder(encoder_out_dict[model_config['encoder']], n_class)
else:
raise NotImplementedError
def forward(self, x):
x = self.encoder(x)
x = self.decoder(x)
return x
if __name__ == '__main__':
model = {
'encoder': 'VGG', # VGG ,ResNet or DenseNet
'decoder': {
'type': 'rnn', # cnn or rnn
'rnn': {
'nh': 100
}
}
}
device = torch.device('cpu')
a = torch.zeros((2, 3, 32, 320)).to(device)
net = CRNN(3, 10, model)
net.eval()
net.to(device)
b = net(a)
print(b.size())
| [
"[email protected]"
] | |
48ca5b34d8e4d69c03c18a6308499de393e3a28a | 1f187ab1fea83acec04fed50aeb4e406670124cc | /django_events/users/managers.py | dbdfa46906009c03b8e4771962a5765283e338e2 | [
"MIT"
] | permissive | chrisBrookes93/django-events-management | b231e4f7ff275a0910640e8138c06256b525f1fe | 93886448a7bb85c8758324977ff67bcacc80bbec | refs/heads/master | 2022-11-14T03:04:13.807371 | 2020-07-07T23:11:18 | 2020-07-07T23:11:18 | 275,236,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,412 | py | from django.contrib.auth.base_user import BaseUserManager
from django.utils.translation import ugettext_lazy as _lazy
class UserManager(BaseUserManager):
def create_user(self, email, password, **other_fields):
"""
Creates a standard user
:param email: Email Address (username)
:type email: str
:param password: Password
:type password: str
:param other_fields: Any other fields
:type other_fields: dict
:return: The user created
"""
if not email:
raise ValueError(_lazy('Missing Email Address'))
email = self.normalize_email(email)
user = self.model(email=email, **other_fields)
user.set_password(password)
user.save()
return user
def create_superuser(self, email, password, **other_fields):
"""
Creates a super user
:param email: Email Address (username)
:type email: str
:param password: Password
:type password: str
:param other_fields: Any other fields
:type other_fields: dict
:return: The user created
"""
other_fields.setdefault('is_staff', True)
other_fields.setdefault('is_superuser', True)
other_fields.setdefault('is_active', True)
return self.create_user(email, password, **other_fields)
| [
"[email protected]"
] | |
ec88518eb53c3c0dded01a3e0c3b84a151e70586 | 3abf30632aae0d19510f206e8fb477b36e5873f1 | /dijkstra's_algorithm_queue.py | f93edc2517f7d7caf9e5ec5d848629d2623e885d | [] | no_license | ankita0204/DSA | 2df8e7fda1b2c08dc6945bb0136a2bdacc194c78 | 03ad051476e97a02b20d3ded4163fe178128536c | refs/heads/main | 2023-02-15T02:09:41.065637 | 2021-01-08T17:58:50 | 2021-01-08T17:58:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,256 | py | """
Created on Fri Sep 11 23:10:36 2021
@author: Ankita Dasgupta
Dijkstra's two-stack expression-evaluation algorithm, implemented here with
FIFO queues (note: FIFO order only handles flat, fully parenthesized
expressions; nested expressions require the LIFO stacks of the original)
"""
import numpy as np
class queue:
size = None
tail = None # End of the Queue: Elements enter from here
head = None # Front of the Queue: Elements are deleted from here
type = None
arr = None
def __init__(self, arrtype):
self.arrtype = arrtype
self.size = 1
self.arr = np.zeros(self.size,arrtype)
self.tail = 0
self.head = 0
def empty(self):
return self.head == self.tail
def dequeue(self):
if self.empty():
print(' Queue Underflow')
return
a = self.arr[self.head]
self.head = self.head + 1
return a
def full(self):
return self.tail == self.size
def enqueue(self, a):
if self.full():
temp = self.arr
self.size *= 2
self.arr = np.zeros(self.size, self.arrtype)
for i in range(len(temp)):
self.arr[i]=temp[i]
self.arr[self.tail] = a
self.tail += 1
# Dijstras Algorithm
valstack = queue(int)
opstack = queue(np.chararray)
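# Example session: entering "( 3 + 4 ) =" (tokens separated by spaces) prints "Result :  7"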
inp = input(' > ' )
token=inp.split()
for tok in token:
if tok== '(':
pass
elif tok== ')':
        # with a FIFO queue the first value dequeued is the left operand
        l = valstack.dequeue()
        r = valstack.dequeue()
op=opstack.dequeue()
if op == '+':
valstack.enqueue(l+r)
elif op == '-':
valstack.enqueue(l-r)
elif op == '*':
valstack.enqueue(l*r)
elif op == '/':
valstack.enqueue(l/r)
elif op == '^':
valstack.enqueue(l**r)
else:
print("ERROR: INVALID OPERATOR")
    elif tok.isdigit():
valstack.enqueue(int(tok))
elif tok in ['+','-','*','/','^']:
opstack.enqueue(tok)
elif tok == '=':
print('Result : ', valstack.dequeue())
assert valstack.empty() == True and opstack.empty() == True
print(opstack.empty())
print(valstack.empty()) | [
"[email protected]"
] | |
1fc7d09608203891efb389abfb3844062c39d487 | b40ce231d42e8291f0150987a2c95a12be5ec021 | /05_Regression/myridge.py | 518c89d2b946a4274a5149955ee78dd3abbc3ec5 | [] | no_license | Chenxu-nmsu/CS487_Applied_Machine_Learning_Python | 43ebd87a97bd7ea088f4f4500d6b9107bd603dbd | 8e9883576fc7592c1f0cd79cb1dc964d7c75b520 | refs/heads/master | 2022-12-16T21:30:49.174678 | 2020-09-28T06:03:41 | 2020-09-28T06:03:41 | 299,200,871 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 80 | py | from sklearn.linear_model import Ridge
ridge = Ridge(alpha=1.0, solver='auto')
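
# Hypothetical usage sketch (X_train, y_train, X_test are placeholders, not defined here):
# ridge.fit(X_train, y_train)
# y_pred = ridge.predict(X_test)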
| [
"[email protected]"
] | |
e91054180f016f14be9cad61fb09ea51c6127794 | 9d57a7c07f25905d393d96412c406754b37d8201 | /CodeChallenge/TestSuites/conftest.py | 88e6b5494737c7c69a1b92aae0b690b0bb05556a | [] | no_license | git4bess/HearQAChallenge | ee1b5b28d5ccb6f732e368c438c760542f29442d | f9312438d4cd1420c27272b36784a59f0326c24c | refs/heads/master | 2022-11-26T20:25:57.280909 | 2020-08-09T16:59:34 | 2020-08-09T16:59:34 | 286,273,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,126 | py | import pathlib
from pathlib import Path
import pytest
from selenium import webdriver
# from selenium.webdriver.support.select import Select
@pytest.fixture()
def setup(request):
base_path = Path.cwd()
file_path = base_path.parent
driver_path = file_path.as_posix() + "/Utilities/allDrivers/chromedriver"
driver = webdriver.Chrome(executable_path=driver_path)
#driver = webdriver.Chrome(executable_path="/home/sonitakooh/Downloads/allDrivers/chromedriver")
driver.get("https://the-internet.herokuapp.com/login")
driver.maximize_window()
request.cls.driver = driver
yield
driver.close()
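
# Each parameter tuple below is (username, password, expected error message)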
@pytest.fixture(params=[("", "", "Your username is invalid"), ("tom", "mess!72", "Your username is invalid"), ("paul", "SuperSecretPassword!", "Your username is invalid"), ("tomsmith", "mypassword", "Your password is invalid")])
def logindata(request):
return request.param
@pytest.fixture()
def chromeBrowserExecPath():
base_path = Path.cwd()
base_path = base_path.parent
driver_path = base_path.as_posix() + "/Utilities/allDrivers/chromedriver"
return [str(driver_path)]
| [
"[email protected]"
] | |
53b23974f6b368076f2691763ddb41b5e9498d13 | c7787b22849ea99e94c030ea16b67fb6c96e370a | /tests/providers/aws/aws_dynamodb_test.py | ec28aa7fc0e96fe1b8415ce3d66b1e6a23335a7e | [
"Apache-2.0",
"GPL-2.0-only",
"LicenseRef-scancode-public-domain",
"Classpath-exception-2.0",
"BSD-3-Clause",
"MIT",
"AGPL-3.0-only"
] | permissive | mengdong/PerfKitBenchmarker | b8ec428f5c1a306d3375d699fafb39922fa0233b | e2b6ac90a4a62c2db4cba7a3b3ce8717c804df7f | refs/heads/master | 2023-05-03T15:21:28.401706 | 2021-06-02T07:06:48 | 2021-06-02T07:07:19 | 264,105,279 | 0 | 0 | Apache-2.0 | 2020-05-15T05:36:32 | 2020-05-15T05:36:31 | null | UTF-8 | Python | false | false | 9,165 | py | """Tests for perfkitbenchmarker.providers.aws.aws_dynamodb."""
import json
import unittest
from absl import flags
from absl.testing import flagsaver
from absl.testing import parameterized
import mock
from perfkitbenchmarker import errors
from perfkitbenchmarker.providers.aws import aws_dynamodb
from perfkitbenchmarker.providers.aws import util
from tests import pkb_common_test_case
FLAGS = flags.FLAGS
_DESCRIBE_TABLE_OUTPUT = """
{
"Table": {
"AttributeDefinitions": [
{
"AttributeName": "test",
"AttributeType": "S"
}
],
"TableName": "test",
"KeySchema": [
{
"AttributeName": "test",
"KeyType": "HASH"
}
],
"TableStatus": "ACTIVE",
"CreationDateTime": 1611605356.518,
"ProvisionedThroughput": {
"NumberOfDecreasesToday": 0,
"ReadCapacityUnits": 5,
"WriteCapacityUnits": 0
},
"TableSizeBytes": 0,
"ItemCount": 0,
"TableArn": "arn:aws:dynamodb:us-east-2:835761027970:table/test",
"TableId": "ecf0a60a-f18d-4666-affc-525ca6e1d207"
}
}
"""
@flagsaver.flagsaver
def GetTestDynamoDBInstance(table_name='test_table'):
FLAGS.zone = ['us-east-1a']
return aws_dynamodb.AwsDynamoDBInstance(table_name)
class AwsDynamodbTest(pkb_common_test_case.PkbCommonTestCase):
def assertArgumentInCommand(self, mock_cmd, arg):
"""Given an AWS command, checks that the argument is present."""
command = ' '.join(mock_cmd.call_args[0][0])
self.assertIn(arg, command)
@flagsaver.flagsaver
def testInitTableName(self):
test_instance = GetTestDynamoDBInstance('dynamo_test_table')
self.assertEqual(test_instance.table_name, 'dynamo_test_table')
@flagsaver.flagsaver
def testInitLocation(self):
FLAGS.zone = ['us-east-1a']
test_instance = aws_dynamodb.AwsDynamoDBInstance('test_table')
self.assertEqual(test_instance.zone, 'us-east-1a')
self.assertEqual(test_instance.region, 'us-east-1')
@flagsaver.flagsaver
def testInitKeysAndAttributes(self):
FLAGS.aws_dynamodb_primarykey = 'test_primary_key'
FLAGS.aws_dynamodb_sortkey = 'test_sort_key'
FLAGS.aws_dynamodb_attributetype = 'test_attribute_type'
test_instance = GetTestDynamoDBInstance()
self.assertEqual(test_instance.primary_key,
'{"AttributeName": "test_primary_key","KeyType": "HASH"}')
self.assertEqual(test_instance.sort_key,
'{"AttributeName": "test_sort_key","KeyType": "RANGE"}')
self.assertEqual(
test_instance.part_attributes,
'{"AttributeName": "test_primary_key","AttributeType": "test_attribute_type"}'
)
self.assertEqual(
test_instance.sort_attributes,
'{"AttributeName": "test_sort_key","AttributeType": "test_attribute_type"}'
)
@flagsaver.flagsaver
def testInitThroughput(self):
FLAGS.aws_dynamodb_read_capacity = 1
FLAGS.aws_dynamodb_write_capacity = 2
test_instance = GetTestDynamoDBInstance()
self.assertEqual(test_instance.throughput,
'ReadCapacityUnits=1,WriteCapacityUnits=2')
@flagsaver.flagsaver
def testGetResourceMetadata(self):
FLAGS.zone = ['us-east-1a']
FLAGS.aws_dynamodb_primarykey = 'test_primary_key'
FLAGS.aws_dynamodb_use_sort = 'test_use_sort'
FLAGS.aws_dynamodb_sortkey = 'test_sortkey'
FLAGS.aws_dynamodb_attributetype = 'test_attribute_type'
FLAGS.aws_dynamodb_read_capacity = 1
FLAGS.aws_dynamodb_write_capacity = 2
FLAGS.aws_dynamodb_lsi_count = 3
FLAGS.aws_dynamodb_gsi_count = 4
FLAGS.aws_dynamodb_ycsb_consistentReads = 5
FLAGS.aws_dynamodb_connectMax = 6
test_instance = aws_dynamodb.AwsDynamoDBInstance('test_table')
actual_metadata = test_instance.GetResourceMetadata()
expected_metadata = {
'aws_dynamodb_primarykey': 'test_primary_key',
'aws_dynamodb_use_sort': 'test_use_sort',
'aws_dynamodb_sortkey': 'test_sortkey',
'aws_dynamodb_attributetype': 'test_attribute_type',
'aws_dynamodb_read_capacity': 1,
'aws_dynamodb_write_capacity': 2,
'aws_dynamodb_lsi_count': 3,
'aws_dynamodb_gsi_count': 4,
'aws_dynamodb_consistentReads': 5,
'aws_dynamodb_connectMax': 6,
}
self.assertEqual(actual_metadata, expected_metadata)
@parameterized.named_parameters({
'testcase_name': 'ValidOutput',
'output': json.loads(_DESCRIBE_TABLE_OUTPUT)['Table'],
'expected': True
}, {
'testcase_name': 'EmptyOutput',
'output': {},
'expected': False
})
def testExists(self, output, expected):
test_instance = GetTestDynamoDBInstance()
self.enter_context(
mock.patch.object(
test_instance,
'_DescribeTable',
return_value=output))
actual = test_instance._Exists()
self.assertEqual(actual, expected)
def testSetThroughput(self):
test_instance = GetTestDynamoDBInstance(table_name='throughput_table')
cmd = self.enter_context(
mock.patch.object(
util,
'IssueRetryableCommand'))
test_instance._SetThroughput(5, 5)
self.assertArgumentInCommand(cmd, '--table-name throughput_table')
self.assertArgumentInCommand(cmd, '--region us-east-1')
self.assertArgumentInCommand(
cmd,
'--provisioned-throughput ReadCapacityUnits=5,WriteCapacityUnits=5')
def testGetThroughput(self):
test_instance = GetTestDynamoDBInstance()
output = json.loads(_DESCRIBE_TABLE_OUTPUT)['Table']
self.enter_context(
mock.patch.object(
test_instance,
'_DescribeTable',
return_value=output))
actual_rcu, actual_wcu = test_instance._GetThroughput()
self.assertEqual(actual_rcu, 5)
self.assertEqual(actual_wcu, 0)
def testTagResourceFailsWithNonExistentResource(self):
test_instance = GetTestDynamoDBInstance()
# Mark instance as non-existing.
self.enter_context(
mock.patch.object(test_instance, '_Exists', return_value=False))
with self.assertRaises(errors.Resource.CreationError):
test_instance._GetTagResourceCommand(['test', 'tag'])
def testUpdateWithDefaultTags(self):
test_instance = GetTestDynamoDBInstance()
test_instance.resource_arn = 'test_arn'
cmd = self.enter_context(mock.patch.object(util, 'IssueRetryableCommand'))
# Mark instance as existing.
self.enter_context(
mock.patch.object(test_instance, '_Exists', return_value=True))
test_instance.UpdateWithDefaultTags()
self.assertArgumentInCommand(cmd, '--region us-east-1')
self.assertArgumentInCommand(cmd, '--resource-arn test_arn')
def testUpdateTimeout(self):
test_instance = GetTestDynamoDBInstance()
test_instance.resource_arn = 'test_arn'
# Mock the aws util tags function.
self.enter_context(
mock.patch.object(
util,
'MakeDefaultTags',
autospec=True,
return_value={'timeout_utc': 60}))
# Mock the actual call to the CLI
cmd = self.enter_context(mock.patch.object(util, 'IssueRetryableCommand'))
# Mark instance as existing.
self.enter_context(
mock.patch.object(test_instance, '_Exists', return_value=True))
test_instance.UpdateTimeout(timeout_minutes=60)
self.assertArgumentInCommand(cmd, '--tags Key=timeout_utc,Value=60')
@parameterized.named_parameters(
{
'testcase_name': 'OnlyRcu',
'rcu': 5,
'wcu': 500,
}, {
'testcase_name': 'OnlyWcu',
'rcu': 500,
'wcu': 5,
}, {
'testcase_name': 'Both',
'rcu': 500,
'wcu': 500,
})
def testFreezeLowersThroughputToFreeTier(self, rcu, wcu):
test_instance = GetTestDynamoDBInstance()
self.enter_context(
mock.patch.object(
test_instance, '_GetThroughput', return_value=(rcu, wcu)))
mock_set_throughput = self.enter_context(
mock.patch.object(test_instance, '_SetThroughput', autospec=True))
test_instance._Freeze()
mock_set_throughput.assert_called_once_with(
rcu=aws_dynamodb._FREE_TIER_RCU, wcu=aws_dynamodb._FREE_TIER_WCU)
def testFreezeDoesNotLowerThroughputIfAlreadyAtFreeTier(self):
test_instance = GetTestDynamoDBInstance()
self.enter_context(
mock.patch.object(test_instance, '_GetThroughput', return_value=(5, 5)))
mock_set_throughput = self.enter_context(
mock.patch.object(test_instance, '_SetThroughput', autospec=True))
test_instance._Freeze()
mock_set_throughput.assert_not_called()
def testRestoreSetsThroughputBackToOriginalLevels(self):
test_instance = GetTestDynamoDBInstance()
test_instance.rcu = 5000
test_instance.wcu = 1000
mock_set_throughput = self.enter_context(
mock.patch.object(test_instance, '_SetThroughput', autospec=True))
test_instance._Restore()
mock_set_throughput.assert_called_once_with(
rcu=5000, wcu=1000)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
77f36774bc76d5d058496c9255280c51d8b36293 | 9ba1b921dd0c2e68345b3793131ad13aad671e20 | /cnn_class/benchmark.py | 5de59462bb63dfa48498c99a0b7ceb3854b3b81e | [] | no_license | racingicemen/DLCNNITT | 3ceda12e3d70e80764465112aca8a97415c94ca5 | 4f3c3b8b017279ab0502f7c3bc715befa6f115d6 | refs/heads/master | 2020-06-10T12:39:14.561287 | 2016-12-06T02:45:33 | 2016-12-06T02:45:33 | 75,960,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,372 | py | import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from scipy.io import loadmat
from sklearn.utils import shuffle
from datetime import datetime
K = 10 # num classes
def y2indicator(y):
N = len(y)
ind = np.zeros((N, K))
for i in range(N):
ind[i, y[i]] = 1
return ind
def error_rate(p, t):
return np.mean(p != t)
def flatten(X):
# X.shape = (32,32,3,N) N 32 x 32 color images
# flat.shape = (N, 32*32*3)
N = X.shape[-1]
flat = np.zeros((N, 3072))
for i in range(N):
flat[i] = X[:,:,:,i].reshape(3072)
return flat
def main():
train = loadmat('/home/rkk/kaggle/SVHN/train_32x32.mat')
test = loadmat('/home/rkk/kaggle/SVHN/test_32x32.mat')
Xtrain = flatten(train['X'].astype(np.float32) / 255)
Ytrain = train['y'].flatten() - 1
Xtrain, Ytrain = shuffle(Xtrain, Ytrain)
Ytrain_ind = y2indicator(Ytrain)
Xtest = flatten(test['X'].astype(np.float32) / 255)
Ytest = test['y'].flatten() - 1
Ytest_ind = y2indicator(Ytest)
max_iter = 20
print_period = 10
N, D = Xtrain.shape
batch_sz = 500
n_batches = int(N / batch_sz)
M1 = 1000
M2 = 500
W1_init = np.random.randn(D, M1) / np.sqrt(D + M1)
b1_init = np.zeros(M1)
W2_init = np.random.randn(M1, M2) / np.sqrt(M1 + M2)
b2_init = np.zeros(M2)
W3_init = np.random.randn(M2, K) / np.sqrt(M2 + K)
b3_init = np.zeros(K)
X = tf.placeholder(tf.float32, shape=(None, D), name='X')
T = tf.placeholder(tf.float32, shape=(None, K), name='T')
W1 = tf.Variable(W1_init.astype(np.float32))
b1 = tf.Variable(b1_init.astype(np.float32))
W2 = tf.Variable(W2_init.astype(np.float32))
b2 = tf.Variable(b2_init.astype(np.float32))
W3 = tf.Variable(W3_init.astype(np.float32))
b3 = tf.Variable(b3_init.astype(np.float32))
Z1 = tf.nn.relu(tf.matmul(X, W1) + b1)
Z2 = tf.nn.relu(tf.matmul(Z1, W2) + b2)
Yish = tf.matmul(Z2, W3) + b3
cost = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(Yish, T))
train_op = tf.train.RMSPropOptimizer(0.0001, decay=0.99, momentum=0.9).minimize(cost)
'''
Think of the dimension argument of tf.argmax as the axis across which you reduce. tf.argmax(arr, 0)
reduces across dimension 0, i.e. the rows. Reducing across rows means that you will get the argmax of
each individual column.
'''
predict_op = tf.argmax(Yish, 1)
t0 = datetime.now()
LL = []
init = tf.initialize_all_variables()
with tf.Session() as session:
session.run(init)
for i in range(max_iter):
for j in range(n_batches):
Xbatch = Xtrain[j*batch_sz:(j+1)*batch_sz, :]
Ybatch = Ytrain_ind[j*batch_sz:(j+1)*batch_sz, :]
session.run(train_op, feed_dict={X:Xbatch, T:Ybatch})
if j % print_period == 0:
test_cost = session.run(cost, feed_dict={X: Xtest, T: Ytest_ind})
prediction = session.run(predict_op, feed_dict={X: Xtest})
err = error_rate(prediction, Ytest)
print("Cost / err at iteration i=%d, j=%d: %.3f / %.3f" % (i, j, test_cost, err))
LL.append(test_cost)
print("Elapsed time:", (datetime.now() - t0))
plt.plot(LL)
plt.show()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
3fa5bea453f85362291bf14b95b767b1c41d51f4 | 4203f7d65fb93fc5907821907be0e57dff3a9bf6 | /monitor/migrations/0004_auto_20190329_1514.py | 2acb231f88b78e876edaa49b1a487c8047fbacaa | [] | no_license | Wesly-Yu/Xmonitor | 48afdd28ff24e5db7980b70b5f35ccf3b060d6ba | b8fc34adc3925fbd556fb94b302ad7ebede33528 | refs/heads/master | 2020-05-02T10:42:26.203476 | 2019-04-15T07:49:41 | 2019-04-15T07:49:41 | 177,904,853 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | # Generated by Django 2.1.5 on 2019-03-29 07:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('monitor', '0003_auto_20190329_1513'),
]
operations = [
migrations.AlterField(
model_name='serviceindex',
name='index_name',
field=models.CharField(max_length=64),
),
]
| [
"[email protected]"
] | |
f6e138d073472eb92edd8d8cdf87d9e963f1c4be | d6d1192e6817f5c37386b671328a238326611c04 | /day6.py | a4c79f50464e0af8a0e75217bf3dc35e38768e49 | [] | no_license | 12cheny/Leetcode | 0a3fdc8e2f8c8e2ccfacedec7446089508af7988 | e1b90cb2a3dfd03590ce7511d56a9f3be02c60ff | refs/heads/master | 2020-03-15T00:36:10.268271 | 2018-06-09T05:01:15 | 2018-06-09T05:01:15 | 131,872,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | #Add the two numbers and return it as a linked list.
'''
class ListNode(object):
def __init__(self,x):
self.val=x
self.next=None
'''
def num(l):
k=0
i=0
while l.next!=None:
k+=l.val*pow(10,i)
i+=1
l=l.next
k+=l.val*pow(10,i)
return k
class Solution(object):
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
number=num(l1)+num(l2)
out=str(number)[::-1]
ans=ListNode(int(out[0]))
l=ans
for i in range(1,len(out)):
p=ListNode(int(out[i]))
ans.next=p
ans=ans.next
return l
| [
"[email protected]"
] | |
d3f3761b4a237bbd0387788cc97266cc727c4f45 | e9d93dd6b3b5ca9e11add097f700d6248ddff7ae | /fizzbuzz-test.py | d62aadb16cc15ace89bfdf8999b6e780d733ffd6 | [] | no_license | ANDINDAE/Daytwochallenge | 9fe203444bcedacc91c2c1e7920e5d7fd995d687 | 1a7cc03e3cbe5a5190eef0adf168cadcf554a507 | refs/heads/master | 2020-04-05T17:00:22.870745 | 2018-11-26T17:14:56 | 2018-11-26T17:14:56 | 157,039,489 | 0 | 0 | null | 2018-11-11T02:37:44 | 2018-11-11T02:10:51 | null | UTF-8 | Python | false | false | 815 | py | import unittest
from fizzbuzz.fizzbuzz import fizzbuzz
class TestFizzBuzz(unittest.TestCase):
def test_non_list_inputs(self):
self.assertEqual(fizzbuzz(5, 8), "Invalid input")
def test_non_list_input(self):
self.assertEqual(fizzbuzz([2, 3, 4], 7), "Invalid input")
def test_fizz(self):
self.assertEqual(fizzbuzz([1, 3, 4], ['a', 'b', 'c']), "fizz")
def test_buzz(self):
self.assertEqual(fizzbuzz([4, 5, 4], [3, 4]), "buzz")
def test_fizz_buzz(self):
self.assertEqual(fizzbuzz([1, 2, 3, 3, 4, 5, 5, 5, 5, 6],
[7, 7, 4, 5, 9]), "fizzbuzz")
def test_other_total(self):
self.assertEqual(fizzbuzz([4, 5, 6], [3]), 4)
def test_both_empty(self):
self.assertEqual(fizzbuzz([1, 3, 5], []), "fizz") | [
"[email protected]"
] | |
c178f9017052339821c9a6ed7d596670dc281221 | c2704596fa9e8def1731bfbda3299b6d19a4721b | /nse.py | 310071d51dba565e4135f30ab102212c042e6e30 | [] | no_license | say-paul/finblitz | 391485f5b8da9cbe6af734b8f1f38dda34f6ef50 | e3305fc2dac549ae6ffad4c6c5789bcc24cdd319 | refs/heads/master | 2022-10-19T00:52:37.835403 | 2020-06-11T22:28:28 | 2020-06-11T22:28:28 | 270,297,074 | 0 | 0 | null | 2020-06-12T05:48:07 | 2020-06-07T12:18:48 | Python | UTF-8 | Python | false | false | 5,405 | py | import urllib.request
import json
import time
import os
from queue import Queue
from threading import Thread
from utils import combiner
import datetime
def import_web(ticker):
"""
:param ticker: Takes the company ticker
:return: Returns the HTML of the page
"""
url = 'https://www.nseindia.com/live_market/dynaContent/live_watch/get_quote/GetQuote.jsp?symbol='+ticker
req = urllib.request.Request(url, headers={'User-Agent': "PostmanRuntime/7.25.0"})
fp = urllib.request.urlopen(req, timeout=4)
mybytes = fp.read()
mystr = mybytes.decode("utf8")
fp.close()
return mystr
def get_quote(ticker):
    """
    :param ticker: Takes the company ticker
    :return: the filtered JSON string, or None if the fetch failed
    """
    ticker = ticker.upper()
    string_html = None
    try:
        print("fetching data for {}".format(ticker))
        string_html = filter_data(import_web(ticker))
    except Exception as e:
        print("{} error for {}".format(e, ticker))
        retry_list.append(ticker)
    return string_html
def filter_data(string_html):
    searchString = '<div id="responseDiv" style="display:none">'
    # opening tag that marks where the embedded JSON payload starts
    searchString2 = '</div>'
    # closing tag that marks where the payload ends
    sta = string_html.find(searchString)
    # find() returns the lowest index of the substring, or -1 if not found
    data = string_html[sta + 43:]
    # skip the 43 characters of the opening tag itself
    end = data.find(searchString2)
    fdata = data[:end]
    # keep everything up to the closing tag and trim surrounding whitespace
    stripped = fdata.strip()
    return stripped
def intraday_price_data(stripped):
js = json.loads(stripped)
datajs = js['data'][0]
subdictionary = {}
subdictionary['ltp'] = datajs['lastPrice']
subdictionary['open'] = datajs['open']
subdictionary['high'] = datajs['dayHigh']
subdictionary['low'] = datajs['dayLow']
subdictionary['close'] = datajs['lastPrice']
subdictionary['volume'] = datajs['totalTradedVolume']
subdictionary['pChange'] = datajs['pChange']
return {js['lastUpdateTime']: subdictionary}
def buyer_seller(stripped):
js = json.loads(stripped)
datajs = js['data'][0]
subdictionary = {}
for keys in datajs:
if (keys.__contains__("buyPrice") or keys.__contains__("sellPrice") or
keys.__contains__("buyQuantity") or keys.__contains__("sellQuantity")):
subdictionary[keys] = datajs[keys]
subdictionary['deliveryToTradedQuantity'] = datajs['deliveryToTradedQuantity']
return {js['lastUpdateTime'] : subdictionary}
def runner(ticker):
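    # fetch one ticker, then append its buyer/seller volume and intraday
    # price snapshots to the per-ticker history files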
try:
print("Starting get_quote for ", ticker)
filtered_data = get_quote(ticker)
volume_data = buyer_seller(filtered_data)
with open(volume_path+"/"+ticker, 'a+') as f:
f.write(str(volume_data))
price_intraday = intraday_price_data(filtered_data)
with open(intraday_path+"/"+ticker, 'a+') as f:
f.write(str(price_intraday))
except Exception as e:
print(e)
def threader(q):
while True:
ticker = q.get(timeout=12)
print("Ticker {}".format(ticker))
runner(ticker)
combiner(volume_path, [ticker])
combiner(intraday_path, [ticker])
q.task_done()
def main():
global retry_list
retry_list = []
if not os.path.exists('historical_data'):
os.makedirs('historical_data')
os.makedirs('historical_data/buyer_seller_volume')
os.makedirs('historical_data/intraday')
with open(script_names,"r") as f:
data = f.read()
t_list = data.split("\n")
for i in range(workers):
worker = Thread(target=threader, args=(q,))
worker.setDaemon(True)
worker.start()
for ticker in t_list:
q.put(ticker)
q.join()
# for x in range(1):
# if (0 < len(retry_list)):
# print("error found {}....".format(str(len(retry_list))))
# print("retrying {} times....".format(x))
# for i in range(workers):
# worker = Thread(target=threader, args=(q,))
# worker.setDaemon(True)
# worker.start()
# for ticker in retry_list:
# q.put(ticker)
# q.join()
# combiner(volume_path, t_list)
# combiner(intraday_path, t_list)
print("Total scripts: {}".format(str(len(t_list))))
print("Count of scripts failed: {}".format(str(len(retry_list))))
return retry_list
q = Queue(maxsize=0)
workers = 13
script_names = "data/stock"
volume_path = "historical_data/buyer_seller_volume"
intraday_path = "historical_data/intraday"
retry_list = []
while(True):
current = datetime.datetime.now()
with open('report', 'a+') as f:
f.write("Run at {} \n".format(current))
f.close()
failed_list = main()
with open('report', 'a+') as f:
f.write("Time taken: {} sec \n".format((datetime.datetime.now()-current).seconds))
f.write("Could not fetch {} scripts \n\n".format(str(len(failed_list))))
f.close()
print("Sleeping for 30 sec")
time.sleep(30)
| [
"[email protected]"
] | |
7b840d26e327da2dc003c0647989f1e088499f46 | 2b23e732ae616f6a3c07866906e14a1e3883a693 | /Pandas Practice/Data Frames/dataframes.py | b4206591b7430020d600f928f3c67aa56f13b9cd | [] | no_license | JitenKumar/Data-Analysis-Visualization-With-Python | 7d045e49b16ad30fb5e2b558a3e4f69457bf4697 | d53d3738c10efc6882757692215d381321ee20a3 | refs/heads/master | 2020-03-18T12:28:40.146060 | 2018-05-28T18:28:12 | 2018-05-28T18:28:12 | 134,728,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,267 | py | import numpy as np
import pandas as pd
from pandas import Series,DataFrame
# importing browser
# import webbrowser
# website_url = 'https://en.wikipedia.org/wiki/List_of_all-time_NFL_win%E2%80%93loss_records'
# webbrowser.open(website_url)
# copying a data from clipboard
nfl_data = pd.read_clipboard()
print(nfl_data)
#getting all the columns name
print(nfl_data.columns)
# getting a particular column (only single words)
print(nfl_data.Rank)
# getting multiple words columsn
print(nfl_data['Total Games'])
#using the dataframs
DataFrame(nfl_data,columns=['Rank','Team','Total Games','Division'])
#getting few rows
print(nfl_data.head(3))
#getting the last frames
print(nfl_data.tail(4))
# getting the rows on particular index
print(nfl_data.ix[4])
#Assigning the values
nfl_data['Rank'] = 34
print(nfl_data)
# using arrays
nfl_data['Rank'] = np.arange(16)
print(nfl_data)
# importing the series into the data frame
ins_data = Series(["Jiten","Palsra"],index=[4,9])
nfl_data['Team'] = ins_data
print(nfl_data)
# Deleting a particular columns
del nfl_data['Rank']
print(nfl_data)
# constructing a dataframe from a dictionary
new_dict = {'Name':['Jiten','Palsra','Kumar'],'Rank':[1,2,3]}
new_data_frame = DataFrame(new_dict)
print(new_data_frame) | [
"[email protected]"
] | |
b4d3b672bf6814c0dd201d85c23b36d481f41e82 | ec74eaaadc95dad2bc6f43a17687744c8e605787 | /vsbuy_backend/products/models/products.py | 75822f362644c7ba987b568f2468f3fd668c7cd3 | [
"MIT"
] | permissive | leis94/vsbuy_backend | 115c65eb3d6ccc3678f2956a0a7edfb64b4515bb | 16dd21b7da9a454187fe8605d4c5d075a151adb7 | refs/heads/master | 2022-12-22T11:34:37.956384 | 2020-10-05T04:48:39 | 2020-10-05T04:48:39 | 296,889,339 | 0 | 1 | MIT | 2020-10-05T04:48:40 | 2020-09-19T14:43:42 | Python | UTF-8 | Python | false | false | 437 | py | """Products mode. """
# Django
from django.db import models
# Utilities
from vsbuy_backend.utils.models import VSbuyModel
class Product(VSbuyModel):
"""Products model."""
name = models.CharField(
max_length=20,
)
is_active = models.BooleanField(
default=True,
help_text='Boolean field to get active a product'
)
def __str__(self):
"""Return Name"""
return self.name
| [
"[email protected]"
] | |
32b375c749e72bc27f65d0e72ccc6839db65e9d6 | 06516db27e5c3f4467ef34b79c12256f986e2f7b | /remove_nth_node/remove_nth_node.py | 0579e7bc57fe66c5933e941433f30827fea80b4b | [] | no_license | ryandavis3/leetcode | c4c6dc5a1606d204a2cf1beebb5b7f0e0cff3053 | 07f096c592053efd51bc2efbd3261380c0980b77 | refs/heads/master | 2023-04-06T17:44:15.036258 | 2021-04-09T16:17:48 | 2021-04-09T16:17:48 | 186,537,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,022 | py | # https://leetcode.com/problems/remove-nth-node-from-end-of-list/
class ListNode:
"""
Node in singly linked list.
"""
def __init__(self, x):
self.val = x
self.next = None
def removeNthFromEnd(head: ListNode, n: int) -> ListNode:
    """
    Remove the nth node from the end of a linked list. Solve in one pass.
    """
    # A dummy node in front of head lets us remove the head itself
    # (n equal to the list length) without a special case.
    dummy = ListNode(0)
    dummy.next = head
    # Maintain two pointers. Move the front pointer n+1 nodes ahead
    # of the dummy node.
    node_front = dummy
    for i in range(n + 1):
        node_front = node_front.next
    # Second pointer starts behind, at the dummy.
    node_back = dummy
    # Move the two pointers forward at the same rate.
    while node_front:
        node_front = node_front.next
        node_back = node_back.next
    # Update the next field in the back node to "skip" the removed node.
    node_back.next = node_back.next.next
    return dummy.next
class Solution:
def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:
return removeNthFromEnd(head, n)
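
# Hypothetical quick check (values are illustrative):
# head = ListNode(1); head.next = ListNode(2); head.next.next = ListNode(3)
# new_head = Solution().removeNthFromEnd(head, 3)  # removes the old head, leaving 2 -> 3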
| [
"[email protected]"
] | |
80ec25164dfa6d31167df667c3d4783afccf3b86 | a8212baae786a140242d1de0d5b13aa925649661 | /information_gain_ratio.py | 1a45cc78130602283f99cda7cfa90fb2b699fb46 | [] | no_license | wenyu-z/py_programming | 8e218c82f176f5b22fe35b11855cd43d523786f2 | 2d832f9babc9e9e24e910009a4ef7e002b4b59a5 | refs/heads/master | 2021-01-23T10:54:46.539501 | 2018-07-14T00:23:10 | 2018-07-14T00:23:10 | 93,103,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,587 | py | # Reference
# http://www.ke.tu-darmstadt.de/lehre/archiv/ws0809/mldm/dt.pdf
import numpy as np
import pandas as pd
from functools import reduce

def calc_var_entropy(df, var):
    # Shannon entropy (base 2) of a single categorical variable
    total_count = len(df)
    per_level_count = df.groupby(by=var).agg('count').values[:, 0]
    prob = [x / float(total_count) for x in per_level_count]
    neglog = [-x * np.log2(x) for x in prob]
    entropy = reduce(lambda x, y: x + y, neglog)
    return entropy

def gain_ratio(df, features, target):
    total_count = len(df)
    target_entropy = calc_var_entropy(df, target)
    gain_ratio_df = pd.DataFrame(columns=['feature', 'gain_ratio'])
    for feat in features:
        target_entropy_per_feat_level = df.groupby(by=feat).apply(
            lambda dfnow: calc_var_entropy(dfnow, target)).values
        per_level_count = df.groupby(by=feat).agg('count').values[:, 0]
        prob = [x / float(total_count) for x in per_level_count]
        neglog = [-prob_this_level * target_entropy_this_level
                  for prob_this_level, target_entropy_this_level
                  in zip(prob, target_entropy_per_feat_level)]
        gain = target_entropy + np.sum(neglog)
        split_info = calc_var_entropy(df, feat)
        gain_ratio = gain / float(split_info)
        gain_ratio_df = pd.concat((gain_ratio_df,
                                   pd.DataFrame(columns=['feature', 'gain_ratio'],
                                                data=[(feat, gain_ratio)])))
    gain_ratio_df.sort_values(by=['gain_ratio'], ascending=False, inplace=True)
    gain_ratio_df.reset_index(inplace=True, drop=True)
    return gain_ratio_df
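
# Hypothetical usage sketch ('outlook'/'humidity'/'play' are illustrative column names):
# df = pd.read_csv('weather.csv')
# print(gain_ratio(df, ['outlook', 'humidity'], 'play'))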
| [
"[email protected]"
] | |
b19e7f283ced51c9136bab66a234faa7a1349d94 | b1c4a3fc1605e1b1da113fd1a2b97f7432866d9b | /Wm_UI_TEST/test_case/test_sellershop.py | 47ddc3b5c7f17da33af2faa8aa780dc3c2b7c24c | [] | no_license | AdilSky/PyWork | f1b46bb1bea849621f8226029ecbe668e60052bb | fcd7e8a8d69c1a70ae874ce94c20f518d961ef6a | refs/heads/master | 2021-08-24T14:09:21.819356 | 2017-12-10T05:04:42 | 2017-12-10T05:04:42 | 113,413,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,106 | py | #-*-coding:utf-8-*-
# automated UI test cases for the mall's single-store features
import sys
reload(sys)
sys.setdefaultencoding('utf8')
from selenium import webdriver
import time
import unittest
from selenium.webdriver.common.action_chains import ActionChains
import baseinfo
#from Wm_UI import baseinfo
class sellershop(unittest.TestCase):
    '''Single-store feature tests'''
    #### log in to the system
@ classmethod
def setUpClass(self):
self.base_url = baseinfo.base_url
self.username = baseinfo.sellerName
self.password = baseinfo.sellerPassword
self.driver = webdriver.Firefox()
time.sleep(1)
self.driver.maximize_window()
self.driver.get("%s/user/tologin" % self.base_url)
double_click = self.driver.find_element_by_id("username")
ActionChains(self.driver).double_click(double_click).perform()
self.driver.find_element_by_id("username").send_keys("%s" % self.username)
double_click = self.driver.find_element_by_id("password")
ActionChains(self.driver).double_click(double_click).perform()
self.driver.find_element_by_id("password").send_keys("%s" % self.password)
self.driver.find_element_by_id("sign_btn").click()
print(u"登录成功")
@ classmethod
def tearDownClass(self):
time.sleep(1)
self.driver.quit()
def test_recommend(self):
        '''Recommend a product in the single-store shop'''
        try:
            # enter the seller center
time.sleep(1)
self.driver.find_element_by_xpath(".//*[@id='slide_wrap']/ul/li[2]").click()
time.sleep(1)
            # click shop management
self.driver.find_element_by_xpath('/html/body/div[4]/div[2]/div/div/h3[3]').click()
time.sleep(1)
            # click the product-recommendation page in the left sidebar
self.driver.find_element_by_xpath('/html/body/div[4]/div[2]/div/div/div[3]/a[3]').click()
time.sleep(1)
            # recommend the product
self.driver.find_element_by_xpath('/html/body/div[4]/div[1]/div/form/table/tbody/tr[1]/td[7]/button').click()
            # click Confirm in the pop-up confirmation dialog
self.driver.find_element_by_xpath('/html/body/div[7]/div[1]/div[3]/button[1]').click()
            # open the single-store page
self.driver.find_element_by_xpath('/html/body/div[4]/div[1]/div/form/table/tbody/tr[1]/td[2]/a/span/img').click()
            # get the window handle of the current (search) page
recommend_windows = self.driver.current_window_handle
            # get the handles of all currently open windows
all_handles = self.driver.window_handles
            # switch to the single-store detail page
for shop_handle in all_handles:
if shop_handle != recommend_windows:
self.driver.switch_to.window(shop_handle)
                    # click the home page
self.driver.find_element_by_xpath('/html/body/div[4]/div[2]/div/ul/li[1]/a').click()
time.sleep(1)
recommend_text = self.driver.find_element_by_xpath('/html/body/div[5]/div/div[1]/ul/li').text
if recommend_text != "":
print(u"店铺推荐商品成功")
recommend_text = 1
else:
print(u"店铺推荐商品失败")
recommend_text = 0
self.assertEqual(recommend_text, 1, msg=None)
                    # the steps below cancel the product recommendation
                    time.sleep(1)
                    # enter the seller center
self.driver.find_element_by_xpath('/html/body/div[2]/ul/li[2]').click()
time.sleep(1)
                    # click shop management
self.driver.find_element_by_xpath('/html/body/div[4]/div[2]/div/div/h3[3]').click()
time.sleep(1)
                    # open the product-recommendation page
self.driver.find_element_by_xpath('/html/body/div[4]/div[2]/div/div/div[3]/a[3]').click()
time.sleep(1)
                    # cancel the product recommendation
self.driver.find_element_by_xpath(".//*[@id='productList']/table/tbody/tr[1]/td[7]/button").click()
time.sleep(1)
                    # click Confirm in the pop-up confirmation dialog
self.driver.find_element_by_xpath('/html/body/div[7]/div[1]/div[3]/button[1]').click()
print(U"取消推荐商品成功")
except BaseException as msg:
print(U"因未找到对应元素,测试用例未正常执行!")
print msg
self.assertIsNone(msg, msg=None)
def test_recommends(self):
        '''Batch-recommend products'''
        try:
            # click the seller-center home page
time.sleep(1)
self.driver.find_element_by_xpath(".//*[@id='float']/a").click()
time.sleep(1)
            # click shop management
self.driver.find_element_by_xpath('/html/body/div[4]/div[2]/div/div/h3[3]').click()
time.sleep(1)
            # click the product-recommendation page in the left sidebar
self.driver.find_element_by_xpath('/html/body/div[4]/div[2]/div/div/div[3]/a[3]').click()
time.sleep(1)
            # select all (every item on the current page)
self.driver.find_element_by_xpath('/html/body/div[4]/div[1]/div/div[1]/span[1]/label/input').click()
            # uncheck the last checkbox on the page
self.driver.find_elements_by_css_selector('input[type=checkbox]').pop().click()
print(U"可推荐19个商品")
            # click the batch-recommend button
time.sleep(1)
self.driver.find_element_by_xpath('/html/body/div[4]/div[1]/div/span/input[1]').click()
            # click Confirm in the pop-up confirmation dialog
time.sleep(1)
self.driver.find_element_by_xpath('/html/body/div[7]/div[1]/div[3]/button[1]').click()
#text_text = u"未推荐"
recommend_text = self.driver.find_element_by_xpath('/html/body/div[4]/div[1]/div/form/table/tbody/tr[1]/td[6]').text
            #self.assertEqual(recommend_text, text_text, msg="batch recommendation failed, element not found!")
            if recommend_text == "未推荐":  # "未推荐" is the site's "not recommended" label
print(u"批量推荐成功")
recommend_text = 1
else:
print(u"批量推荐失败")
recommend_text = 0
self.assertEqual(recommend_text, 1, msg=None)
            # the steps below cancel the batch recommendation
            time.sleep(1)
            # select all (every item on the current page)
self.driver.find_element_by_xpath('/html/body/div[4]/div[1]/div/div[1]/span[1]/label/input').click()
            # click the batch cancel-recommendation button
time.sleep(1)
self.driver.find_element_by_xpath('/html/body/div[4]/div[1]/div/span/input[2]').click()
            # click Confirm in the pop-up confirmation dialog
time.sleep(1)
self.driver.find_element_by_xpath('/html/body/div[7]/div[1]/div[3]/button[1]').click()
except BaseException as msg:
print(U"因未找到对应元素,测试用例未正常执行!")
print msg
self.assertIsNone(msg, msg=None)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
04619a2702ef2cecb1bc087adfc35c077a819f38 | ea0223cda77710734c34408d86b54a2cc989bd45 | /tests/metadata/mtl/test_ls7_definitive.py | db442ada15fd81ea2359ce8f7619c244231e7c6d | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | omad/eo-datasets | 2b2d74707e57d4fb90606177450c5f445482410e | 8a32120aa2cdf68dcb7003e1d61fa3a62fcf6b79 | refs/heads/develop | 2021-07-07T23:20:33.833426 | 2019-06-21T00:25:19 | 2019-06-25T02:01:35 | 197,543,844 | 0 | 0 | Apache-2.0 | 2019-07-18T08:18:26 | 2019-07-18T08:18:25 | null | UTF-8 | Python | false | false | 3,620 | py | # coding=utf-8
from __future__ import absolute_import
import unittest
import uuid
import datetime
import os
from pathlib import Path, PosixPath
import eodatasets.type as ptype
from tests.metadata.mtl import assert_expected_mtl
FILENAME = 'ls7_definitive_mtl.txt'
EXPECTED_OUT = ptype.DatasetMetadata(
id_=uuid.UUID('3ff71eb0-d5c5-11e4-aebb-1040f381a756'),
product_level='L1G',
creation_dt=datetime.datetime(2015, 4, 7, 1, 58, 25),
platform=ptype.PlatformMetadata(
code='LANDSAT_7'
),
instrument=ptype.InstrumentMetadata(
name='ETM',
operation_mode='SAM'
),
format_=ptype.FormatMetadata(
name='GeoTIFF'
),
acquisition=ptype.AcquisitionMetadata(
groundstation=ptype.GroundstationMetadata(
code='ASA'
)
),
usgs=ptype.UsgsMetadata(
scene_id='LE71140732005007ASA00'
),
extent=ptype.ExtentMetadata(
coord=ptype.CoordPolygon(
ul=ptype.Coord(
lat=-17.82157,
lon=115.58472
),
ur=ptype.Coord(
lat=-17.82497,
lon=117.82111
),
ll=ptype.Coord(
lat=-19.72798,
lon=115.56872
),
lr=ptype.Coord(
lat=-19.73177,
lon=117.83040
)
),
# TODO: python dt is one digit less precise than mtl (02:03:36.9270519Z). Does this matter?
center_dt=datetime.datetime(2005, 1, 7, 2, 3, 36, 927051)
),
grid_spatial=ptype.GridSpatialMetadata(
projection=ptype.ProjectionMetadata(
geo_ref_points=ptype.PointPolygon(
ul=ptype.Point(
x=350012.500,
y=8028987.500
),
ur=ptype.Point(
x=587012.500,
y=8028987.500
),
ll=ptype.Point(
x=350012.500,
y=7817987.500
),
lr=ptype.Point(
x=587012.500,
y=7817987.500
)
),
datum='GDA94',
ellipsoid='GRS80',
map_projection='UTM',
orientation='NORTH_UP',
resampling_option='CUBIC_CONVOLUTION',
zone=-50
)
),
image=ptype.ImageMetadata(
satellite_ref_point_start=ptype.Point(x=114, y=73),
cloud_cover_percentage=0.0,
sun_azimuth=102.37071009,
sun_elevation=58.08261077,
# sun_earth_distance=0.998137,
# ground_control_points_version=2,
# ground_control_points_model=47,
# geometric_rmse_model=4.582,
# geometric_rmse_model_x=3.370,
# geometric_rmse_model_y=3.104,
bands={}
),
lineage=ptype.LineageMetadata(
algorithm=ptype.AlgorithmMetadata(
name='LPGS',
version='12.5.0',
parameters={}
),
ancillary_quality='DEFINITIVE',
ancillary={
'cpf': ptype.AncillaryMetadata(
name='L7CPF20050101_20050331.09'
),
# We have the properties (quality) of the ancillary but not the file.
'ephemeris': ptype.AncillaryMetadata(
properties={'type': 'DEFINITIVE'}
)
}
)
)
class TestMtlRead(unittest.TestCase):
def test_ls7_equivalence(self):
assert_expected_mtl(
Path(os.path.join(os.path.dirname(__file__), FILENAME)),
EXPECTED_OUT
)
| [
"[email protected]"
] | |
ae7548d0538a443f1d20f3522313b3a6250ecaa7 | b6c6b3e631984e80ae18e2665aa7bc5dd9e2750c | /modules/jobs.py | d3d53a671fdc2be2ef442848e3a568fe14f0bb51 | [] | no_license | larryklean/soag | 41d6edd4a00504867d1efa5e34e56e6b4670a5ed | 4736fce81bacf558f7671e7d32425a14b6e0f58d | refs/heads/master | 2020-03-25T05:07:07.695537 | 2013-09-09T21:13:18 | 2013-09-09T21:13:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92,619 | py | # -*- coding: utf-8 -*-
from datetime import date
import utilities as utl
import datetime, traceback, os
import ColunasEntidades
def lrecl(db, entidade_id):
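    # computes the record length (LRECL) in bytes of an entity's layout:
    # +1 null-indicator byte per nullable column, packed-decimal sizing
    # (len/2 + 1) for numeric '9' columns, and +2 length-prefix bytes
    # for NCHAR/NVARCHAR/VARCHAR columns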
retColunasEntidades = ColunasEntidades.ColunasEntidades(db).\
selectColunasEntidadesResolvidasByCodigoEntidade(entidade_id)[1]
length = 0
for regcampo in retColunasEntidades:
if not regcampo.colunasEntidades.ehNotNull:
length += 1
if regcampo.datatypes.picture_cobol == '9':
length += ((regcampo.colunas.tamanhoColuna / 2) + 1)
else:
length += regcampo.colunas.tamanhoColuna
if regcampo.datatypes.descricao == 'NCHAR' or \
regcampo.datatypes.descricao == 'NVARCHAR' or \
regcampo.datatypes.descricao == 'VARCHAR':
length += 2
return length
def startLength(db, entidade, idcampo):
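    # returns the start position, length and DFSORT field type (CH for
    # character, PD for packed decimal) of one column in the entity layout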
regcampos = ColunasEntidades.ColunasEntidades(db).\
selectColunasEntidadesResolvidasByCodigoEntidade(entidade)[1]
start = 0
length = 0
tipocampo = 'CH'
for regcampo in regcampos:
cpo = regcampo.colunas.id
if cpo == idcampo:
start += 1
length = 0
if regcampo.datatypes.picture_cobol == '9':
length += ((regcampo.colunas.tamanhoColuna / 2) + 1)
tipocampo = 'PD'
else:
length += regcampo.colunas.tamanhoColuna
tipocampo = 'CH'
if regcampo.datatypes.datatype == '-9' or \
regcampo.datatypes.datatype == '-15' or \
regcampo.datatypes.datatype == '12':
length += 2
break
if regcampo.datatypes.picture_cobol == '9':
start += ((regcampo.colunas.tamanhoColuna / 2) + 1)
else:
start += regcampo.colunas.tamanhoColuna
if str(regcampo.datatypes.datatype) == '-9' or \
str(regcampo.datatypes.datatype) == '-15' or \
str(regcampo.datatypes.datatype) == '12':
start += 2
return ['%s,%s%s' % (start, length, ',%s' % tipocampo), tipocampo]
def lreclBook(db, table, book, book_id):
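    # computes the record length of a COBOL book, skipping REDEFINES
    # branches and multiplying each field by its OCCURS count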
regcampos = db(db[table][book]==book_id).select()
length = 0
nivel = 0
for regcampo in regcampos:
if regcampo.redefines:
nivel = regcampo.nivel
continue
if nivel and regcampo.nivel > nivel:
continue
nivel = 0
length += (regcampo.bytes * (regcampo.occurs \
if regcampo.occurs else 1))
return length
def startLengthBook(db, table, book, book_id, idcampo):
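    # returns the start position, length and DFSORT type (CH/ZD/BI/PD)
    # of one field in a COBOL book layout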
regcampos = db(db[table][book]==book_id).select()
start = 0
length = 0
tipocampo = 'CH'
for regcampo in regcampos:
cpo = regcampo.id
if cpo == idcampo:
start = regcampo.posicao * (regcampo.occurs \
if regcampo.occurs else 1)
length = regcampo.bytes
if regcampo.natureza == '9':
if regcampo.tipo.find('COMP-3') > -1:
tipocampo = 'PD'
elif regcampo.tipo.find('COMP') > -1:
tipocampo = 'BI'
else:
tipocampo = 'ZD'
else:
tipocampo = 'CH'
break
return ['%s,%s%s' % (start, length, ',%s' % tipocampo), tipocampo]
def booksImport(ids, db, folder, aplicacao, user):
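    # imports the selected copybook text files (status 2 = pending) of the
    # given application into the books/booksCampos tables, marking each
    # processed file with status 3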
if not ids:
        return {'retorno': False, 'flash': 'No book selected', \
                'labelErrors': '', 'msgsErrors': {}}
bookTxts = db(db.booksTxt.id.belongs((int(x) for x in ids))).select()
procs = 0
    flash = 'Processing completed'
    labelErrors = 'Processing result'
msgsErrors = {}
for bookTxt in bookTxts:
if bookTxt.codigoAplicacao <> aplicacao or \
bookTxt.status <> 2:
continue
procs += 1
book = bookTxt.nome
rowbook = db(db.books.nome==book).select().first()
if not rowbook:
db(db.books.insert(codigoAplicacao=aplicacao, \
nome=book, \
descricao='Book ' + book, \
usuarioConfirmacao=user, \
dataConfirmacao=datetime.datetime.\
today()))
else:
db(db.books.id==rowbook.id).update(nome=book, \
descricao='Book %s' % \
book, \
usuarioConfirmacao=\
user, \
dataConfirmacao=\
datetime.datetime.today())
db(db.booksCampos.book==rowbook.id).delete()
rowbook = db(db.books.nome==book).select().first()
if bookTxt.nomeExtenso:
arqFile = bookTxt.nomeExtenso
else:
arqFile = os.path.join( folder
, 'uploads'
, str(bookTxt.arquivo))
ib = importBook(db, arqFile, rowbook, user, folder)
if not ib['retorno']:
return {'retorno': False, \
'flash': ib['flash'], \
'labelErrors': ib['labelErrors'], \
'msgsErrors': ib['msgsErrors']}
db(db.booksTxt.id==bookTxt.id).update(status=3, \
                        mensagem='Load completed in the system')
if bookTxts:
if procs:
            msgsErrors[0] = 'Book import. Done.'
else:
msgsErrors[0] = \
                'No pending book found for processing'
return {'retorno': True, 'flash': flash, 'labelErrors': labelErrors, \
'msgsErrors': msgsErrors, 'id': rowbook.id}
def importBook(db, arqFile, rowbook, user, folder):
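    # parses a COBOL copybook file, expanding nested COPY statements, and
    # stores each field's level, picture, size and position in booksCampos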
    flash = 'book imported'
    labelErrors = 'Import result'
msgsErrors = {}
try:
linFile = file(arqFile).readlines()
except:
erros = traceback.format_exc()
if erros:
idx = 0
for erro in erros.split('\n'):
if len(erro) > 1:
idx += 1
msgsErrors[idx] = erro
return {'retorno': False, \
                'flash': 'Error reading the file', \
'labelErrors': arqFile, 'msgsErrors': msgsErrors}
linhas = []
for arq in linFile:
arq = arq.replace('\r', '').replace('\n', '')
if arq <> '' and arq[6: 7] <> '*':
linhas.append(arq.upper())
while True:
copy = False
for li in linhas:
if li.find('COPY') > -1:
copy = True
break
if not copy:
break
idx = -1
for li in linhas:
idx += 1
if li.find('COPY') > -1:
cols = li.split()
book = cols[1].replace("'",'').replace('.','')
rbook = db(db.books.nome==book).select().first()
if not rbook:
                msgsErrors[idx] = 'COPY not imported into the tool.'
return {'retorno': False, \
                        'flash': 'Error reading the COPY', \
'labelErrors': 'COPY: %s' % book, \
'msgsErrors': msgsErrors}
bookTxt = db(db.booksTxt.nome==book).select().first()
if bookTxt.nomeExtenso:
arqFile = bookTxt.nomeExtenso
else:
arqFile = os.path.join( folder
, 'uploads'
, str(bookTxt.arquivo))
try:
linFile = file(arqFile).readlines()
except:
erros = traceback.format_exc()
if erros:
idx = 0
for erro in erros.split('\n'):
if len(erro) > 1:
idx += 1
msgsErrors[idx] = erro
return {'retorno': False, \
'flash': 'Erro na leitura do arquivo', \
'labelErrors': arqFile, 'msgsErrors': msgsErrors}
linhas.pop(idx)
idx2 = 0
for arq in linFile:
if arq[6: 7] <> '*':
linhas.insert(idx+idx2, arq.upper())
idx2 += 1
break
linhaf = ''
redefines = ''
posdic = {}
idxFiller = 1
idx = 0
posicao = 1
itemGrupo = []
nivel = 0
occurs = 0
niveloccurs = 0
idl = 0
for li in linhas:
if li.find('.') < 0:
linhaf += li.upper()
continue
linhaf += li.upper()
linha = linhaf.split()
idl += 1
nivel = int(linha[0])
campo = linha[1].replace('.','')
if campo == 'FILLER':
cpo = '%s_%s' % (campo, '{:>02}'.format(idxFiller))
idxFiller += 1
else:
cpo = campo
itemGrupo.append([nivel, cpo])
cpox = ''
for idx in xrange(idl-1, 0, -1):
if itemGrupo[idx][0] < nivel:
cpox = itemGrupo[idx][1]
break
if linhaf.find('REDEFINES ') > -1 and linhaf.find('PIC ') < 0:
redefines = linha[3].replace('.','')
posdic[cpo] = posicao = posdic[redefines]
db(db.booksCampos.insert(book=rowbook.id, nome=cpo, \
nivel=nivel, natureza='', posicao=posdic[cpo], \
picture='', tamanho='', inteiro=0, decimais=0, \
tipo='', bytes=0, redefines=redefines, \
itemGrupo=cpox, occurs=occurs, \
usuarioConfirmacao=user, \
dataConfirmacao=datetime.datetime.\
today()))
linhaf = ''
continue
if linhaf.find('REDEFINES ') > -1 and \
linhaf.find('PIC ') > -1:
picture = linha[5]
picts = picture.replace('.','')
if len(linha) > 6:
if len(linha) == 7:
picture += (' ' + linha[6])
else:
for idx in xrange(6, len(linha)-1):
if linha[idx].upper() <> 'VALUE':
picture += (' ' + linha[idx])
picture = picture.replace('S','')
pict = ''
inteiro = 0
decimais = 0
tam = ''
dec = ''
idx = 0
while picture[idx:idx+1] <> '(' and \
picture[idx:idx+1] <> ' ' and \
picture[idx:idx+1] <> '.':
pict += picture[idx:idx+1]
idx += 1
idx += 1
while picture[idx:idx+1] == ' ':
idx += 1
while picture[idx:idx+1] <> ')' and \
picture[idx:idx+1] <> ' ' and \
picture[idx:idx+1] <> '.':
tam += picture[idx:idx+1]
idx += 1
idx += 1
while picture[idx:idx+1] == ' ':
idx += 1
if picture[idx:idx+1] == 'V':
dec = 'V'
idx += 1
while picture[idx:idx+1] == '9' or \
picture[idx:idx+1] == '(' or \
picture[idx:idx+1] == ' ' or \
picture[idx:idx+1] == '.':
idx += 1
if picture[idx:idx+1] <> 'U' and \
picture[idx:idx+1] <> '.':
while picture[idx:idx+1] <> ')' and \
picture[idx:idx+1] <> ' ' and \
picture[idx:idx+1] <> '.':
dec += picture[idx:idx+1]
idx += 1
if dec == 'V': dec = ''
if dec <> '' and (dec[1:] >= '0' and dec[1:] <= '9'):
inteiro = int(tam)
decimais = int(dec[1:])
tam = str(int(tam) + int(dec[1:]))
else:
inteiro = int(tam)
decimais = 0
tam += dec
tipo = ''
idxtipo = picture.find(' COMP')
if idxtipo > -1:
idxtipo += 1
while picture[idxtipo:idxtipo+1] <> ' ' and \
picture[idxtipo:idxtipo+1] <> '.':
tipo += picture[idxtipo:idxtipo+1]
idxtipo += 1
redefines = linha[3].replace('.','')
posdic[cpo] = posicao = posdic[redefines] + 1
db(db.booksCampos.insert(book=rowbook.id, nome=cpo, \
nivel=nivel, natureza=pict, posicao=posdic[cpo], \
picture=picts, tamanho=tam, inteiro=inteiro, \
decimais=decimais if decimais else 0, \
tipo=tipo, bytes=0, redefines=redefines, \
itemGrupo=cpox, occurs=occurs, \
usuarioConfirmacao=user, \
dataConfirmacao=datetime.\
datetime.today()))
linhaf = ''
continue
if linhaf.find('OCCURS ') > -1:
            if len(linha) <= 5:
                occurs = int(linha[3].replace('.',''))
            else:
                occurs = int(linha[5].replace('.',''))
niveloccurs = nivel
posdic[cpo] = posicao
db(db.booksCampos.insert(book=rowbook.id, nome=cpo, \
nivel=nivel, natureza='', posicao=posdic[cpo], \
picture='', tamanho='', inteiro=0, decimais=0, \
tipo='', bytes=0, redefines='', \
itemGrupo=cpox, occurs=occurs, \
usuarioConfirmacao=user, \
dataConfirmacao=datetime.datetime.\
today()))
linhaf = ''
continue
if linhaf.find('PIC ') < 0:
posdic[cpo] = posicao
db(db.booksCampos.insert(book=rowbook.id, nome=cpo, \
nivel=nivel, natureza='', posicao=posdic[cpo], \
picture='', tamanho='', inteiro=0, decimais=0, \
tipo='', bytes=0, redefines='', \
itemGrupo=cpox, occurs=occurs, \
usuarioConfirmacao=user, \
dataConfirmacao=datetime.datetime.\
today()))
linhaf = ''
continue
if occurs:
if nivel <= niveloccurs:
occurs = 0
niveloccurs = 0
redefines = ''
linhaf = ''
picture = linha[3]
picts = picture.replace('.','')
if len(linha) > 4:
if len(linha) == 5:
picture += (' ' + linha[4])
else:
for idx in xrange(4, len(linha)-1):
if linha[idx].upper() <> 'VALUE':
picture += (' ' + linha[idx])
picture = picture.replace('S','')
pict = ''
inteiro = 0
decimais = 0
tam = ''
dec = ''
idx = 0
while picture[idx:idx+1] <> '(' and \
picture[idx:idx+1] <> ' ' and \
picture[idx:idx+1] <> '.':
pict += picture[idx:idx+1]
idx += 1
idx += 1
while picture[idx:idx+1] == ' ':
idx += 1
while picture[idx:idx+1] <> ')' and \
picture[idx:idx+1] <> ' ' and \
picture[idx:idx+1] <> '.':
tam += picture[idx:idx+1]
idx += 1
idx += 1
while picture[idx:idx+1] == ' ':
idx += 1
qt9 = 0
if picture[idx:idx+1] == 'V':
dec = 'V'
idx += 1
qt9 = 0
while picture[idx:idx+1] == '9' or \
picture[idx:idx+1] == '(' or \
picture[idx:idx+1] == ' ' or \
picture[idx:idx+1] == '.':
if picture[idx:idx+1] == '9':
qt9 += 1
idx += 1
if picture[idx:idx+1] <> 'U' and \
picture[idx:idx+1] <> '.':
while picture[idx:idx+1] <> ')' and \
picture[idx:idx+1] <> ' ' and \
picture[idx:idx+1] <> '' and \
picture[idx:idx+1] <> '.':
dec += picture[idx:idx+1]
idx += 1
if dec == 'V': dec = ''
if dec <> '' and (dec[1:] >= '0' and dec[1:] <= '9'):
inteiro = int(tam)
decimais = int(dec[1:])
tam = str(int(tam) + int(dec[1:]))
else:
inteiro = int(tam)
decimais = qt9
tam += dec
tipo = ''
idxtipo = picture.find(' COMP')
if idxtipo > -1:
idxtipo += 1
while picture[idxtipo:idxtipo+1] <> ' ' and \
picture[idxtipo:idxtipo+1] <> '.':
tipo += picture[idxtipo:idxtipo+1]
idxtipo += 1
if redefines:
posdic[cpo] = posicao = posdic[redefines] + 1
else:
posdic[cpo] = posicao
nbytes = 0
if not redefines:
if pict == '9':
if tipo.find('COMP-3') > -1:
nbytes = (((inteiro + decimais) / 2) + 1)
posicao += nbytes
elif tipo.find('COMP') > -1:
if inteiro < 5:
posicao += 2
nbytes = 2
elif inteiro < 10:
posicao += 4
nbytes = 4
else:
posicao += 8
nbytes = 8
else:
nbytes = inteiro + decimais
posicao += nbytes
else:
nbytes = inteiro
posicao += nbytes
db(db.booksCampos.insert(book=rowbook.id, nome=cpo, \
nivel=nivel, natureza=pict, posicao=posdic[cpo], \
picture=picts, tamanho=tam, inteiro=inteiro, \
decimais=decimais if decimais else 0, tipo=tipo, \
bytes=nbytes, redefines=redefines, \
itemGrupo=cpox, occurs=occurs, \
usuarioConfirmacao=user, \
dataConfirmacao=datetime.datetime.\
today()))
return {'retorno': True, 'flash': flash, 'labelErrors': labelErrors, \
'msgsErrors': msgsErrors}
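# imgtb: simple template expansion - copies the template file replacing each
# @TAG with its value; returns a traceback string on failure, None on success.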
def imgtb(argv1, argv2, argv3):
try:
with open(argv3, 'w') as f1:
with open(argv1) as f2:
for line in f2:
for tb in argv2:
line = line.replace(tb[0], tb[1])
f1.write(line)
except:
return traceback.format_exc()
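# gerarimgtb: generates the COBOL "table image" copybook for an entity -
# one PIC clause per column, a 1-byte -NULL flag for nullable columns and
# -LEN/-TEXT pairs for VARCHAR-like datatypes - writes the .cpy, imports it
# via booksImport (to compute its LRECL) and then expands the
# IMAGEM_TABELA template with the collected substitutions.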
def gerarimgtb(db, idimgtb, folder, user):
parms = db(db.parametros.id==1).select().first()
regimgtb = db(db.imgtbs.id==idimgtb).select().first()
aplicacao = db(db.aplicacoes.id==regimgtb.codigoAplicacao).\
select().first()
nomeaplicacao = aplicacao.aplicacao
sistema = nomeaplicacao + ' - ' + aplicacao.descricao
author = aplicacao.analista
regempresa = db(db.empresa.id==aplicacao.empresa).select().\
first()
regentidade = db(db.entidades.id==regimgtb.codigoEntidade).\
select().first()
regcolunasEntidades = ColunasEntidades.ColunasEntidades(db).\
selectColunasEntidadesResolvidasByCodigoEntidade(
regimgtb.codigoEntidade)[1]
templates = os.path.join( '\\\\'
, '127.0.0.1'
, 'c$'
, parms.web2py
, 'applications'
, parms.soag
, 'Template'
, 'IMGTB') + os.sep
gerimgtb = os.path.join( '\\\\'
, '127.0.0.1'
, 'c$'
, parms.raiz
, regempresa.nome
, nomeaplicacao
, 'GERADOS'
, 'IMGTB') + os.sep
gercpy = os.path.join( '\\\\'
, '127.0.0.1'
, 'c$'
, parms.raiz
, regempresa.nome
, nomeaplicacao
, 'GERADOS'
, 'CPY') + os.sep
try:
os.makedirs(gerimgtb)
except:
pass
try:
os.makedirs(gercpy)
except:
pass
descrs = '* '
idx = 0
for regto in regcolunasEntidades:
if idx < 1:
descrs += '{:<28}'.format(regimgtb.bookName + '-' + \
regto.colunas.columnName.replace('_','-')) + \
' = ' + \
'{:<31}'.format(utl.txtAbrev(r'%s' % \
regto.colunas.attributeName, 31).upper()) + ' *'
else:
descrs += '\n' + ' * ' + '{:<28}'.\
format(regimgtb.bookName + '-' + \
regto.colunas.columnName.replace('_','-')) + \
' = ' + '{:<31}'.format(utl.txtAbrev(r'%s' % \
regto.colunas.attributeName, 31).upper()) + ' *'
idx += 1
imagems = ''
for regto in regcolunasEntidades:
if not regto.colunasEntidades.ehNotNull:
texcoluna = regimgtb.bookName + '-' + regto.colunas.columnName.\
replace('_','-')
if (len(texcoluna) + 5) > 30:
if texcoluna[24:25] == '-':
texcoluna = texcoluna[0:24] + '-NULL'
else:
texcoluna = texcoluna[0:25] + '-NULL'
else:
texcoluna = texcoluna + '-NULL'
tamcoluna = len(texcoluna)
if tamcoluna > 28:
imagems += ('\n' + utl.repeat(' ', 14) if imagems else '') + \
'10 ' + texcoluna
imagems += '\n' + utl.repeat(' ', 45) + \
' PIC X(0001).'
else:
imagems += ('\n' + utl.repeat(' ', 14) if imagems else '') + \
'10 ' + \
'{:<28}'.format(texcoluna) + \
' PIC X(0001).'
if regto.datatypes.picture_cobol == '9':
picture = 'S9(' + '{:>04}'.format(regto.colunas.tamanhoColuna -
regto.colunas.decimais) + ')'
if regto.colunas.decimais == 0:
picture += ' COMP-3.'
else:
picture += 'V9(' + '{:>02}'.format(regto.colunas.decimais) + \
')' + ' COMP-3.'
else:
picture = ' X(' + '{:>04}'.format(regto.colunas.tamanhoColuna) + ').'
if regto.datatypes.datatype == '-9' or \
regto.datatypes.datatype == '-15' or \
regto.datatypes.datatype == '12':
texcolunal = regimgtb.bookName + '-' + \
regto.colunas.columnName.replace('_','-')
if (len(texcolunal) + 4) > 30:
if texcolunal[25:26] == '-':
texcolunal = texcolunal[0:25] + '-LEN'
else:
texcolunal = texcolunal[0:26] + '-LEN'
else:
texcolunal = texcolunal + '-LEN'
texcolunat = regimgtb.bookName + '-' + regto.colunas.columnName.\
replace('_','-')
if (len(texcolunat) + 5) > 30:
if texcolunat[24:25] == '-':
texcolunat = texcolunat[0:24] + '-TEXT'
else:
texcolunat = texcolunat[0:25] + '-TEXT'
else:
texcolunat = texcolunat + '-TEXT'
tamcoluna = len(texcolunat)
imagems += '\n' + utl.repeat(' ', 14) + '10 ' + \
regimgtb.bookName + '-' + \
regto.colunas.columnName.replace('_','-') + '.'
if tamcoluna > 25:
imagems += ('\n' + utl.repeat(' ', 17) if imagems else '') + \
' 15 ' + texcolunal
imagems += '\n' + utl.repeat(' ', 45) + \
' PIC S9(0004) COMP.'
imagems += '\n' + utl.repeat(' ', 17) + \
' 15 ' + texcolunat
imagems += '\n' + utl.repeat(' ', 45) + \
' PIC ' + picture
else:
imagems += ('\n' + utl.repeat(' ', 17) if imagems else '') + \
' 15 ' + \
'{:<25}'.format(texcolunal) + \
' PIC S9(0004) COMP.'
imagems += '\n' + utl.repeat(' ', 17) + \
' 15 ' + \
'{:<25}'.format(texcolunat) + \
' PIC ' + picture
else:
texcoluna = regimgtb.bookName + '-' + \
regto.colunas.columnName.replace('_','-')
tamcoluna = len(texcoluna)
if tamcoluna > 28:
imagems += ('\n' + utl.repeat(' ', 14) if imagems else '') + \
'10 ' + texcoluna
imagems += '\n' + utl.repeat(' ', 45) + \
' PIC ' + picture
else:
imagems += ('\n' + utl.repeat(' ', 14) if imagems else '') + \
'10 ' + \
'{:<28}'.format(texcoluna) + \
' PIC ' + picture
now = date.today().strftime('%d/%m/%Y')
db(db.imgtbs.id==idimgtb).update(usuarioGeracao=user, \
dataGeracao=datetime.datetime.today())
txt = '%s%s.cpy' % (gercpy, regimgtb.bookName)
erros = gerarcpy(txt, imagems)
if erros:
flash = 'Falha na Execução'
labelErrors = 'Resultado do processamento Imagem Tabela'
msgsErrors = {}
idx = 0
for erro in erros.split('\n'):
if len(erro) > 1:
idx += 1
msgsErrors[idx] = erro
return {'retorno': False, 'flash': flash, \
'labelErrors': labelErrors, 'msgsErrors': msgsErrors}
db.booksTxt.insert(codigoAplicacao=regimgtb.codigoAplicacao, \
nome=regimgtb.bookName, \
arquivo=txt, \
nomeExtenso=txt, status=2, \
mensagem='Pendente de Processamento.', \
usuarioConfirmacao=user,
dataConfirmacao=datetime.datetime.today())
flash = 'Imagem da Tabela gerado com sucesso.'
labelErrors = 'Resultado da geracao da Imagem da Tabela'
msgsErrors = {}
idbook = db((db.booksTxt.nomeExtenso==txt) &
(db.booksTxt.status==2)).select().first()
if idbook:
imp = booksImport([idbook.id], db, folder, \
regimgtb.codigoAplicacao, user)
if imp['retorno']:
msgsErrors[0] = 'Book %s - Done.' % regimgtb.bookName
length = lreclBook(db, 'booksCampos', 'book', imp['id'])
tpl = '%sIMAGEM_TABELA.txt' % templates
tb = [['@BOOKSNAME', regimgtb.bookName],
['@BOOKNAME', '{:<49}'.format(regimgtb.bookName)],
['@TABLENAME', '{:<32}'.format('(%s) %s' % \
(regentidade.nomeExterno, regentidade.nomeFisico))],
['@MES', '{:<02}'.format(now[3:5])],
['@ANO', '{:<44}'.format(now[6:])],
['@EMPRESA', '{:<49}'.format(regempresa.descricao.\
upper())],
['@AUTHOR', '{:<49}'.format(author.upper())],
['@SISTEMA', '{:<49}'.format(utl.\
remover_acentos(sistema).upper())],
['@LENGTH', '{:>04}'.format(length)],
['@CAMPO_DESCRICAO', descrs],
['@IMAGEMS', imagems]]
txt = '%s%s.cpy' % (gerimgtb, regimgtb.bookName)
erros = imgtb(tpl, tb, txt)
if erros:
flash = 'Falha na Execução'
labelErrors = 'Resultado do processamento Imagem Tabela'
msgsErrors = {}
idx = 0
for erro in erros.split('\n'):
if len(erro) > 1:
idx += 1
msgsErrors[idx] = erro
return {'retorno': False, 'flash': flash, \
'labelErrors': labelErrors, \
'msgsErrors': msgsErrors}
else:
msgsErrors[0] = 'Book %s - Done.' % regimgtb.bookName
msgsErrors[1] = \
'Book %s - Erro na importacao para booksTxt.' % regimgtb.bookName
return {'retorno': True, 'flash': flash, \
'labelErrors': labelErrors, \
'msgsErrors': msgsErrors}
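# gerarcpy: writes the generated copybook lines to disk, one per line.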
def gerarcpy(txt, lines):
try:
with open(txt, 'w') as f1:
idx = 0
for line in lines.split('\n'):
idx += 1
f1.write(('\n' if idx > 1 else '') + line)
except:
return traceback.format_exc()
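# hpu, progckrs, prognens, sort1s and sortnens below are the same template
# expander as imgtb; with job=True they skip writing the standalone JCL file
# and only return the expanded lines, so gerarJob can embed them as steps.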
def hpu(argv1, argv2, argv3, job=False, step=''):
li = ''
try:
if job:
with open(argv1) as f2:
for line in f2:
for tb in argv2:
line = line.replace(tb[0], tb[1])
li += line
return {'erros': '', 'linhas': li}
else:
with open(argv3, 'w') as f1:
with open(argv1) as f2:
for line in f2:
for tb in argv2:
line = line.replace(tb[0], tb[1])
f1.write(line)
li += line
return {'erros': '', 'linhas': li}
except:
return {'erros': traceback.format_exc(), 'linhas': ''}
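# gerarhpu: builds the JCL for a DB2 HPU (High Performance Unload) step,
# assembling the SYSIN either inline (UNLOAD TABLESPACE ... FORMAT DSNTIAUL)
# or as a DSN reference to a pre-existing SYSIN card dataset.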
def gerarhpu(db, idhpu, user, job=False, step=''):
parms = db(db.parametros.id==1).select().first()
reghpu = db(db.hpus.id==idhpu).select().first()
if not reghpu:
flash = 'Falha na Execução'
labelErrors = 'Resultado do processamento HPU'
if step:
msgsErrors = {1: '%s do HPU nao definido.' % step}
else:
msgsErrors = {1: 'HPU nao definido.'}
return {'retorno': False, 'flash': flash, \
'labelErrors': labelErrors, \
'msgsErrors': msgsErrors, \
'linhas': ''}
aplicacao = db(db.aplicacoes.id==reghpu.codigoAplicacao).\
select().first()
nomeaplicacao = aplicacao.aplicacao
regempresa = db(db.empresa.id==aplicacao.empresa).select().first()
regentidade = db(db.entidades.id==reghpu.codigoEntidade).\
select().first()
templates = os.path.join( '\\\\'
, '127.0.0.1'
, 'c$'
, parms.web2py
, 'applications'
, parms.soag
, 'Template'
, 'JCL') + os.sep
gerhpu = os.path.join( '\\\\'
, '127.0.0.1'
, 'c$'
, parms.raiz
, regempresa.nome
, nomeaplicacao
, 'GERADOS'
, 'HPU') + os.sep
try:
os.makedirs(gerhpu)
except:
pass
regsysin = db(db.sysin.hpus==idhpu).select().first()
sysin = ''
    sqx = '' if not (regsysin and regsysin.sql) else regsysin.sql.replace('\r', '')
if regsysin:
if not regsysin.nome1:
sysin += '//SYSIN DD *'
sysin += '\nUNLOAD TABLESPACE @TABLEHEADER.@TABLESPACE'
sysin += '\nDB2 NO'
sysin += '\nQUIESCE YES'
sysin += '\n@SELECT'
sysin += '\nOUTDDN (SYSREC01)'
sysin += '\nFORMAT DSNTIAUL'
sysin += '\nLOADDDN SYSPUNCH'
else:
if regsysin.nome1 == '*':
sysin += 'DSN=%s,' % (regsysin.nome1 + '.' + \
regsysin.nome2 + '.' + \
regsysin.nome3)
else:
nome2 = (reghpu.jobRotine + 'S' + '{:>02}'.\
format(step.split('STEP')[1]))\
if step else regsysin.nome2
sysin += 'DSN=AD.C87.%s.%s%s,' % (regsysin.nome1.upper(), \
nome2, \
('.' + regsysin.nome3.upper()) \
if regsysin.nome3 else '')
sysin += '\n// DISP=SHR'
else:
sysin += '//SYSIN DD *'
sysin += '\nUNLOAD TABLESPACE @TABLEHEADER.@TABLESPACE'
sysin += '\nDB2 NO'
sysin += '\nQUIESCE YES'
sysin += '\n@SELECT'
sysin += '\nOUTDDN (SYSREC01)'
sysin += '\nFORMAT DSNTIAUL'
sysin += '\nLOADDDN SYSPUNCH'
    stp = (regsysin.nome2 if regsysin else '') if not step \
          else ('S' + '{:>02}'.format(step.split('STEP')[1]))
rot = reghpu.jobRotine + (stp if stp else ('S' +
'{:>02}'.format(reghpu.jobStep.split('STEP')[1])))
tb = [['@JOBNAME', reghpu.jobName.upper()],
['@ROTINA', rot],
['@APPLID', nomeaplicacao[0:5].upper()],
['@USER', reghpu.jobUser.upper()],
['@STEP', step if step else reghpu.jobStep \
if reghpu.jobStep else 'STEP1'],
['@SYSIN', sysin],
['@SELECT', sqx],
['@TABLEUID', regentidade.nomeExterno[0:4]+'A000'],
['@TABLEHEADER', regentidade.nomeExterno[0:4]+'D000'],
['@TABLESPACE', regentidade.nomeExterno.replace('B','S')],
['@TABLENAME', regentidade.nomeExterno],
['@TABLE', regentidade.nomeFisico]]
txt = '%s%s.jcl' % (gerhpu, reghpu.jobName)
if job:
tpl = '%sJOB_HPU_SYSIN_CARTAO.txt' % templates
else:
tpl = '%sHPU_SYSIN_CARTAO.txt' % templates
ghpu = hpu(tpl, tb, txt, job, step)
if ghpu['erros']:
flash = 'Falha na Execução'
        labelErrors = 'Resultado do processamento HPU%s' % \
                      ((' - %s' % step) if step else '')
msgsErrors = {}
idx = 0
for erro in ghpu['erros'].split('\n'):
if len(erro) > 1:
idx += 1
msgsErrors[idx] = erro
return {'retorno': False, 'flash': flash, \
'labelErrors': labelErrors, \
'msgsErrors': msgsErrors, \
'linhas': ghpu['linhas']}
db(db.hpus.id==idhpu).update(usuarioGeracao=user, \
dataGeracao=datetime.datetime.today())
flash = 'HPU gerado com sucesso.'
labelErrors = 'Resultado da geracao do HPU'
msgsErrors = {}
msgsErrors[0] = 'JOB %s - Done.' % reghpu.jobName
return {'retorno': True, 'flash': flash, \
'labelErrors': labelErrors, \
'msgsErrors': msgsErrors, \
'linhas': ghpu['linhas']}
def progckrs(argv1, argv2, argv3, job=False, step=''):
li = ''
try:
if job:
with open(argv1) as f2:
for line in f2:
for tb in argv2:
line = line.replace(tb[0], tb[1])
li += line
return {'erros': '', 'linhas': li}
else:
with open(argv3, 'w') as f1:
with open(argv1) as f2:
for line in f2:
for tb in argv2:
line = line.replace(tb[0], tb[1])
f1.write(line)
li += line
return {'erros': '', 'linhas': li}
except:
return {'erros': traceback.format_exc(), 'linhas': ''}
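# gerarckrs: builds the JCL for a "Programa (CKRS)" step - comment block,
# input DD cards (DISP=SHR) and output DD cards whose LRECL comes from the
# book attached to each output.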
def gerarckrs(db, idprog, user, job=False, step=''):
parms = db(db.parametros.id==1).select().first()
regprog = db(db.progckrs.id==idprog).select().first()
if not regprog:
flash = 'Falha na Execução'
labelErrors = 'Resultado do processamento Programa (CKRS)'
if step:
msgsErrors = {1: '%s do Programa nao definido.' % step}
else:
msgsErrors = {1: 'Programa nao definido.'}
return {'retorno': False, 'flash': flash, \
'labelErrors': labelErrors, \
'msgsErrors': msgsErrors, \
'linhas': ''}
aplicacao = db(db.aplicacoes.id==regprog.codigoAplicacao).\
select().first()
nomeaplicacao = aplicacao.aplicacao
regempresa = db(db.empresa.id==aplicacao.empresa).select().first()
    # Comments
regprogckrs2 = db(db.progckrs2.progckrs==idprog).select()
if not regprogckrs2:
        flash = 'Comentarios nao definidos%s' % \
                ((' - %s' % step) if step else '')
return {'retorno': False, 'flash': flash, \
'labelErrors': '', 'msgsErrors': {}}
    # Inputs
regprogckrs3 = db(db.progckrs3.progckrs==idprog).select()
if not regprogckrs3:
        flash = 'Entradas nao definidas%s' % ((' - %s' % step) if step else '')
return {'retorno': False, 'flash': flash, \
'labelErrors': '', 'msgsErrors': {}}
    # Outputs
regprogckrs4 = db(db.progckrs4.progckrs==idprog).select()
if not regprogckrs4:
        flash = 'Saidas nao definidas%s' % ((' - %s' % step) if step else '')
return {'retorno': False, 'flash': flash, \
'labelErrors': '', 'msgsErrors': {}}
templates = os.path.join( '\\\\'
, '127.0.0.1'
, 'c$'
, parms.web2py
, 'applications'
, parms.soag
, 'Template'
, 'JCL') + os.sep
gerprog = os.path.join( '\\\\'
, '127.0.0.1'
, 'c$'
, parms.raiz
, regempresa.nome
, nomeaplicacao
, 'GERADOS'
, 'PROGCKRS') + os.sep
try:
os.makedirs(gerprog)
except:
pass
comentario = ''
entrada = ''
saida = ''
for regsort2 in regprogckrs2:
comentario += regsort2.comentario.replace('\n','').upper()
for regsort3 in regprogckrs3:
if regsort3.nome2 == '*':
entrada += '%s//%s DD DSN=%s,' % ('\n//*\n' if entrada else '', \
regsort3.nome1.upper(), \
regsort3.nome2 + '.' + \
regsort3.nome3 + '.' + \
regsort3.nome4)
else:
if regsort3.nome2:
nome2 = regsort3.nome2
else:
nome2 = (regprog.jobRotine + 'S' + '{:>02}'.\
format(step.split('STEP')[1]))\
if step else regsort3.nome2
entrada += '%s//%s DD DSN=AD.C87.%s.%s%s,' % ('\n//*\n' \
if entrada else '', regsort3.nome1.upper(), \
nome2, regsort3.nome3.upper(), \
('.' + regsort3.nome4.upper()) \
if regsort3.nome4 else '')
entrada += '\n// DISP=SHR'
for regsort4 in regprogckrs4:
lrecl = lreclBook(db, db.booksCampos, 'book', \
regsort4.book)
nome2 = (regprog.jobRotine + 'S' + '{:>02}'.\
format(step.split('STEP')[1]))\
if step else regsort4.nome2
saida += '%s//%s DD DSN=AD.C87.%s.%s%s,' % ('\n//*\n' \
if saida else '', regsort4.nome1.upper(), \
nome2, regsort4.nome3.upper(), \
('.' + regsort4.nome4.upper()) \
if regsort4.nome4 else '')
saida += '\n// DISP=(,CATLG,DELETE),'
saida += '\n// UNIT=(DISCO,04),'
saida += '\n// SPACE=(TRK,(005000,1000),RLSE),'
saida += '\n// DCB=(AD.A,LRECL=%s,RECFM=FB)' % \
'{:>04}'.format(lrecl)
if not step:
stp = 'S' + '{:>02}'.format(regprog.jobStep.split('STEP')[1])
else:
stp = 'S' + '{:>02}'.format(step.split('STEP')[1])
rot = regprog.jobRotine + stp
comentario = utl.remover_acentos(comentario)
tb = [['@JOBNAME', regprog.jobName.upper()],
['@ROTINA', rot],
['@APPLID', regprog.jobRotine[0:5].upper()],
['@USER', regprog.jobUser.upper()],
['@STEP', step if step else regprog.jobStep \
if regprog.jobStep else 'STEP1'],
['@PROGRAMA', regprog.jobPrograma.upper()],
['@COMENTARIO', utl.stringList(comentario, 61, \
'//* *** ')],
['@ENTRADA', entrada],
['@SAIDA', saida]]
txt = '%s%s.jcl' % (gerprog, regprog.jobName)
if job:
tpl = '%sJOB_PROGCKRS.txt' % templates
else:
tpl = '%sPROGCKRS.txt' % templates
gckrs = progckrs(tpl, tb, txt, job)
if gckrs['erros']:
flash = 'Falha na Execução'
        labelErrors = 'Resultado do processamento Programa (CKRS)%s' % \
                      ((' - %s' % step) if step else '')
msgsErrors = {}
idx = 0
for erro in gckrs['erros'].split('\n'):
if len(erro) > 1:
idx += 1
msgsErrors[idx] = erro
return {'retorno': False, 'flash': flash, \
'labelErrors': labelErrors, \
'msgsErrors': msgsErrors, \
'linhas': gckrs['linhas']}
db(db.progckrs.id==idprog).update(usuarioGeracao=user, \
dataGeracao=datetime.datetime.today())
flash = 'Programa (CKRS) gerado com sucesso.'
labelErrors = 'Resultado da geracao do JCL'
msgsErrors = {}
msgsErrors[0] = 'JCL %s - Done.' % regprog.jobName
return {'retorno': True, 'flash': flash, \
'labelErrors': labelErrors, \
'msgsErrors': msgsErrors, \
'linhas': gckrs['linhas']}
def prognens(argv1, argv2, argv3, job=False, step=''):
li = ''
try:
if job:
with open(argv1) as f2:
for line in f2:
for tb in argv2:
line = line.replace(tb[0], tb[1])
li += line
return {'erros': '', 'linhas': li}
else:
with open(argv3, 'w') as f1:
with open(argv1) as f2:
for line in f2:
for tb in argv2:
line = line.replace(tb[0], tb[1])
f1.write(line)
li += line
return {'erros': '', 'linhas': li}
except:
return {'erros': traceback.format_exc(), 'linhas': ''}
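# gerarnens: same structure as gerarckrs, but for plain "Programa" steps
# (prognens* tables); output datasets take an extra qualifier (nome4/nome5).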
def gerarnens(db, idprog, user, job=False, step=''):
parms = db(db.parametros.id==1).select().first()
regprog = db(db.prognens.id==idprog).select().first()
if not regprog:
flash = 'Falha na Execução'
labelErrors = 'Resultado do processamento Programa'
if step:
msgsErrors = {1: '%s do Programa nao definido.' % step}
else:
msgsErrors = {1: 'Programa nao definido.'}
return {'retorno': False, 'flash': flash, \
'labelErrors': labelErrors, \
'msgsErrors': msgsErrors, \
'linhas': ''}
aplicacao = db(db.aplicacoes.id==regprog.codigoAplicacao).\
select().first()
nomeaplicacao = aplicacao.aplicacao
regempresa = db(db.empresa.id==aplicacao.empresa).select().first()
    # Comments
regprognens2 = db(db.prognens2.prognens==idprog).select()
if not regprognens2:
        flash = 'Comentarios nao definidos%s' % \
                ((' - %s' % step) if step else '')
return {'retorno': False, 'flash': flash, \
'labelErrors': '', 'msgsErrors': {}}
    # Inputs
regprognens3 = db(db.prognens3.prognens==idprog).select()
if not regprognens3:
        flash = 'Entradas nao definidas%s' % \
                ((' - %s' % step) if step else '')
return {'retorno': False, 'flash': flash, \
'labelErrors': '', 'msgsErrors': {}}
    # Outputs
regprognens4 = db(db.prognens4.prognens==idprog).select()
if not regprognens4:
        flash = 'Saidas nao definidas%s' % ((' - %s' % step) if step else '')
return {'retorno': False, 'flash': flash, \
'labelErrors': '', 'msgsErrors': {}}
templates = os.path.join( '\\\\'
, '127.0.0.1'
, 'c$'
, parms.web2py
, 'applications'
, parms.soag
, 'Template'
, 'JCL') + os.sep
gerprog = os.path.join( '\\\\'
, '127.0.0.1'
, 'c$'
, parms.raiz
, regempresa.nome
, nomeaplicacao
, 'GERADOS'
, 'PROGNENS') + os.sep
try:
os.makedirs(gerprog)
except:
pass
comentario = ''
entrada = ''
saida = ''
for regsort2 in regprognens2:
comentario += regsort2.comentario.replace('\n','').upper()
for regsort3 in regprognens3:
if regsort3.nome2 == '*':
entrada += '%s//%s DD DSN=%s,' % \
('\n//*\n' if entrada else '', \
regsort3.nome1.upper(), \
regsort3.nome2 + '.' + \
regsort3.nome3 + '.' + \
regsort3.nome4)
else:
if regsort3.nome2:
nome2 = regsort3.nome2
else:
nome2 = (regprog.jobRotine + 'S' + '{:>02}'.\
format(step.split('STEP')[1]))\
if step else regsort3.nome2
entrada += '%s//%s DD DSN=AD.C87.%s.%s%s,' % ('\n//*\n' \
if entrada else '', regsort3.nome1.upper(), \
nome2, regsort3.nome3.upper(), \
('.' + regsort3.nome4.upper()) \
if regsort3.nome4 else '')
entrada += '\n// DISP=SHR'
for regsort4 in regprognens4:
lrecl = lreclBook(db, db.booksCampos, 'book', regsort4.book)
nome2 = (regprog.jobRotine + 'S' + '{:>02}'.\
format(step.split('STEP')[1]))\
if step else regsort4.nome2
saida += '%s//%s DD DSN=AD.C87.%s.%s.%s%s,' % ('\n//*\n' \
if saida else '', regsort4.nome1.upper(), \
nome2, regsort4.nome3.upper(), \
regsort4.nome4.upper(), \
('.' + regsort4.nome5.upper()) \
if regsort4.nome5 else '')
saida += '\n// DISP=(,CATLG,DELETE),'
saida += '\n// UNIT=(DISCO,04),'
saida += '\n// SPACE=(TRK,(005000,1000),RLSE),'
saida += '\n// DCB=(AD.A,LRECL=%s,RECFM=FB)' % \
'{:>04}'.format(lrecl)
if not step:
stp = 'S' + '{:>02}'.format(regprog.jobStep.split('STEP')[1])
else:
stp = 'S' + '{:>02}'.format(step.split('STEP')[1])
rot = regprog.jobRotine + stp
comentario = utl.remover_acentos(comentario)
tb = [['@JOBNAME', regprog.jobName.upper()],
['@ROTINA', rot],
['@APPLID', regprog.jobRotine[0:5].upper()],
['@USER', regprog.jobUser.upper()],
['@STEP', step if step else regprog.jobStep \
if regprog.jobStep else 'STEP1'],
['@PROGRAMA', regprog.jobPrograma.upper()],
['@COMENTARIO', utl.stringList(comentario, 61, \
'//* *** ')],
['@ENTRADA', entrada],
['@SAIDA', saida]]
txt = '%s%s.jcl' % (gerprog, regprog.jobName)
if job:
tpl = '%sJOB_PROGNENS.txt' % templates
else:
tpl = '%sPROGNENS.txt' % templates
gnens = prognens(tpl, tb, txt, job, step)
if gnens['erros']:
flash = 'Falha na Execução'
        labelErrors = 'Resultado do processamento Programa%s' % \
                      ((' - %s' % step) if step else '')
msgsErrors = {}
idx = 0
for erro in gnens['erros'].split('\n'):
if len(erro) > 1:
idx += 1
msgsErrors[idx] = erro
return {'retorno': False, 'flash': flash, \
'labelErrors': labelErrors, \
'msgsErrors': msgsErrors, \
'linhas': gnens['linhas']}
db(db.prognens.id==idprog).update(usuarioGeracao=user, \
dataGeracao=datetime.datetime.today())
flash = 'Programa gerado com sucesso.'
labelErrors = 'Resultado da geracao do JCL'
msgsErrors = {}
msgsErrors[0] = 'JCL %s - Done.' % regprog.jobName
return {'retorno': True, 'flash': flash, \
'labelErrors': labelErrors, \
'msgsErrors': msgsErrors, \
'linhas': gnens['linhas']}
def sort1s(argv1, argv2, argv3, job=False, step=''):
li = ''
try:
if job:
with open(argv1) as f2:
for line in f2:
for tb in argv2:
line = line.replace(tb[0], tb[1])
li += line
return {'erros': '', 'linhas': li}
else:
with open(argv3, 'w') as f1:
with open(argv1) as f2:
for line in f2:
for tb in argv2:
line = line.replace(tb[0], tb[1])
f1.write(line)
li += line
return {'erros': '', 'linhas': li}
except:
return {'erros': traceback.format_exc(), 'linhas': ''}
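# gerarsort1s: builds a SORT step over an unloaded table - the SORT FIELDS
# are resolved through startLength on the entity's columns; when every key
# shares the same format, the per-field format is factored out into FORMAT=.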
def gerarsort1s(db, idsrt, user, job=False, step=''):
parms = db(db.parametros.id==1).select().first()
regsrt = db(db.sort1s.id==idsrt).select().first()
if not regsrt:
flash = 'Falha na Execução'
labelErrors = 'Resultado do processamento SORT1S'
if step:
msgsErrors = {1: '%s do Sort nao definido.' % step}
else:
msgsErrors = {1: 'Sort nao definido.'}
return {'retorno': False, 'flash': flash, \
'labelErrors': labelErrors, \
'msgsErrors': msgsErrors, \
'linhas': ''}
aplicacao = db(db.aplicacoes.id==regsrt.codigoAplicacao).\
select().first()
nomeaplicacao = aplicacao.aplicacao
regempresa = db(db.empresa.id==aplicacao.empresa).\
select().first()
regentidade = db(db.entidades.id==regsrt.codigoEntidade).\
select().first()
regsort2s = db(db.sort2s.codigoSort1s==idsrt).select()
if not regsort2s:
        flash = 'Fields nao selecionados%s' % \
                ((' - %s' % step) if step else '')
return {'retorno': False, 'flash': flash, \
'labelErrors': '', 'msgsErrors': {}}
regsort3s = db(db.sort3s.codigoSort1s==idsrt).select()
templates = os.path.join( '\\\\'
, '127.0.0.1'
, 'c$'
, parms.web2py
, 'applications'
, parms.soag
, 'Template'
, 'JCL') + os.sep
gersrt = os.path.join( '\\\\'
, '127.0.0.1'
, 'c$'
, parms.raiz
, regempresa.nome
, nomeaplicacao
, 'GERADOS'
, 'SORT1S') + os.sep
try:
os.makedirs(gersrt)
except:
pass
classificacao = ''
fields = ''
formato = ''
iguais = True
slant = ''
include = ''
lreclc = lrecl(db, regsrt.codigoEntidade)
idx = 0
for regsort2 in regsort2s:
regcampo = db(db.colunas.id==regsort2.campo).select().first()
sl = startLength(db, regsrt.codigoEntidade, regsort2.campo)
if sl[1] <> slant and slant and iguais:
iguais = False
else:
slant = sl[1]
if idx < 1:
fields += '%s,%s' % (sl[0], 'A' \
if regsort2.ordem=='ASCENDENTE' else 'D')
classificacao += regcampo.columnName
else:
fields += ',' + '%s,%s' % (sl[0], 'A' \
if regsort2.ordem=='ASCENDENTE' else 'D')
classificacao += ', ' + regcampo.columnName
idx += 1
if iguais:
fields = fields.replace(',%s' % slant, '') + ')'
formato = ',FORMAT=%s' % slant
else:
fields += ')' if fields else ''
idxin = 0
for regsort3 in regsort3s:
regcampo = db(db.colunas.id==regsort3.campo).select().first()
sl = startLength(db, regsrt.codigoEntidade, regsort3.campo)
        if regsort3.tipo == 'INCLUDE':
            # successive INCLUDE conditions are comma-separated, matching
            # the SORTNENS variant below
            include += '%s%s,%s,%s' % (',' if idxin else '', sl[0], \
                                       regsort3.operacao, regsort3.valor)
            idxin += 1
if include:
include = 'OUTFIL INCLUDE=' + \
utl.stringList('(%s),FNAMES=SORTOUT' % \
include, 56, ' ')
if not step:
stp = 'S' + '{:>02}'.format(regsrt.jobStep.split('STEP')[1])
else:
stp = 'S' + '{:>02}'.format(step.split('STEP')[1])
rot = regsrt.jobRotine[0:5] + stp
tb = [['@JOBNAME', regsrt.jobName.upper()],
['@ROTINA', rot],
['@APPLID', nomeaplicacao[0:5]],
['@USER', regsrt.jobUser],
['@STEP', step if step else regsrt.jobStep \
if regsrt.jobStep else 'STEP1'],
['@ARQNAME', regsrt.jobArqName],
['@CLASSIFICACAO', utl.stringList(classificacao, 61, \
'//* *** ')],
['@TABLESPACE', regentidade.nomeExterno[0:4]+'S000'],
['@TABLENAME', regentidade.nomeExterno],
['@TABLE', regentidade.nomeFisico],
['@FIELDS', utl.stringList(fields + formato, 59, \
' ')],
['@INCLUDE', ('\n ' if include else '') + include],
['@LRECL', '{:>04}'.format(lreclc)]]
txt = '%s%s.jcl' % (gersrt, regsrt.jobName)
if job:
tpl = '%sJOB_SORT1S.txt' % templates
else:
tpl = '%sSORT1S.txt' % templates
gs1s = sort1s(tpl, tb, txt, job, step)
if gs1s['erros']:
flash = 'Falha na Execução'
        labelErrors = 'Resultado do processamento SORT1S%s' % \
                      ((' - %s' % step) if step else '')
msgsErrors = {}
idx = 0
for erro in gs1s['erros'].split('\n'):
if len(erro) > 1:
idx += 1
msgsErrors[idx] = erro
return {'retorno': False, 'flash': flash, \
'labelErrors': labelErrors, \
'msgsErrors': msgsErrors, \
'linhas': gs1s['linhas']}
db(db.sort1s.id==idsrt).update(usuarioGeracao=user, \
dataGeracao=datetime.datetime.today())
flash = 'SORT1S gerado com sucesso.'
labelErrors = 'Resultado da geracao do SORT'
msgsErrors = {}
msgsErrors[0] = 'JOB %s - Done.' % regsrt.jobName
return {'retorno': True, 'flash': flash, \
'labelErrors': labelErrors, \
'msgsErrors': msgsErrors, \
'linhas': gs1s['linhas']}
def sortnens(argv1, argv2, argv3, job=False, step=''):
li = ''
try:
if job:
with open(argv1) as f2:
for line in f2:
for tb in argv2:
line = line.replace(tb[0], tb[1])
li += line
return {'erros': '', 'linhas': li}
else:
with open(argv3, 'w') as f1:
with open(argv1) as f2:
for line in f2:
for tb in argv2:
line = line.replace(tb[0], tb[1])
f1.write(line)
li += line
return {'erros': '', 'linhas': li}
except:
return {'erros': traceback.format_exc(), 'linhas': ''}
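# gerarsortnens: builds a SORT step over flat files described by a book -
# supports multiple SORTIN concatenations, and multiple outputs through
# OUTFIL INCLUDE= (each extra SYSOUTnn must have its own INCLUDE condition).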
def gerarsortnens(db, idsrt, user, job=False, step=''):
parms = db(db.parametros.id==1).select().first()
regsrt = db(db.sortnens.id==idsrt).select().first()
if not regsrt:
flash = 'Falha na Execução'
labelErrors = 'Resultado do processamento SORTNENS'
if step:
msgsErrors = {1: '%s do Sort nao definido.' % step}
else:
msgsErrors = {1: 'Sort nao definido.'}
return {'retorno': False, 'flash': flash, \
'labelErrors': labelErrors, \
'msgsErrors': msgsErrors, \
'linhas': ''}
aplicacao = db(db.aplicacoes.id==regsrt.codigoAplicacao).\
select().first()
nomeaplicacao = aplicacao.aplicacao
regempresa = db(db.empresa.id==aplicacao.empresa).select().first()
regsortnens2 = db(db.sortnens2.sortnens==idsrt).select() # Fields
if not regsortnens2:
        flash = 'Fields nao definidos%s' % ((' - %s' % step) if step else '')
return {'retorno': False, 'flash': flash, \
'labelErrors': '', 'msgsErrors': {}}
regsortnens3 = db(db.sortnens3.sortnens==idsrt).select() # Sortin
if not regsortnens3:
        flash = 'Sortin nao definido%s' % ((' - %s' % step) if step else '')
return {'retorno': False, 'flash': flash, \
'labelErrors': '', 'msgsErrors': {}}
regsortnens4 = db(db.sortnens4.sortnens==idsrt).select() # Sortout
if not regsortnens4:
        flash = 'Sortout nao definido%s' % ((' - %s' % step) if step else '')
return {'retorno': False, 'flash': flash, \
'labelErrors': '', 'msgsErrors': {}}
qtdSortout = db(db.sortnens4.sortnens==idsrt).count()
if qtdSortout > 1:
temInclude = True
sortout = ''
for regto in regsortnens4:
reginclude = db(db.sortnens5.sortnens4==regto.id).select()
if not reginclude:
                sortout = regto.nome1 + ' - ' + (regto.nome2 or '')
temInclude = False
break
if not temInclude:
flash = 'Include nao definido para o Sortout %s%s' % (sortout , \
(' - %s' % step) if step else '')
return {'retorno': False, 'flash': flash, \
'labelErrors': '', 'msgsErrors': {}}
templates = os.path.join( '\\\\'
, '127.0.0.1'
, 'c$'
, parms.web2py
, 'applications'
, parms.soag
, 'Template'
, 'JCL') + os.sep
gersrt = os.path.join( '\\\\'
, '127.0.0.1'
, 'c$'
, parms.raiz
, regempresa.nome
, nomeaplicacao
, 'GERADOS'
, 'SORTNENS') + os.sep
try:
os.makedirs(gersrt)
except:
pass
classificacao = ''
sortin = ''
sysout = ''
fields = ''
formato = ''
iguais = True
slant = ''
include = ''
incl = ''
lrecl = lreclBook(db, db.booksCampos, 'book', regsrt.book)
idx = 0
for regsort2 in regsortnens2:
regcampo = db(db.booksCampos.id==regsort2.campo).select().first()
sl = startLengthBook(db, db.booksCampos, 'book', \
regsrt.book, regsort2.campo)
if sl[1] <> slant and slant and iguais:
iguais = False
else:
slant = sl[1]
if idx < 1:
fields += '%s,%s' % (sl[0], 'A' \
if regsort2.ordem=='ASCENDENTE' else 'D')
classificacao += regcampo.nome
else:
fields += ',' + '%s,%s' % (sl[0], 'A' \
if regsort2.ordem=='ASCENDENTE' else 'D')
classificacao += ', ' + regcampo.nome
idx += 1
if iguais:
fields = fields.replace(',%s' % slant, '') + ')'
formato = ',FORMAT=%s' % slant
else:
fields += ')' if fields else ''
idxin = 0
for regsort3 in regsortnens3:
if regsort3.nome1 == '*':
nome1 = (regsort3.nome1 + '.' + regsort3.nome2 + '.' + \
regsort3.nome3)
else:
if regsort3.nome1:
nome1 = regsort3.nome1
else:
nome1 = (regsrt.jobRotine + 'S' + '{:>02}'.
format(step.split('STEP')[1]
if step else regsrt.jobStep.split('STEP')[1]))
if not idxin:
if regsort3.nome1 == '*':
sortin += '//SORTIN DD DSN=%s,' % nome1
else:
sortin += '//SORTIN DD DSN=AD.C87.%s.%s.%s%s,' % \
(nome1, regsort3.nome2, regsort3.nome3, \
('.' + regsort3.nome4) \
if regsort3.nome4 else '')
else:
if regsort3.nome1 == '*':
sortin += '\n// DD DSN=%s,' % nome1
else:
sortin += '\n// DD DSN=AD.C87.%s.%s.%s%s,' % \
(nome1, regsort3.nome2, regsort3.nome3, \
('.' + regsort3.nome4) \
if regsort3.nome4 else '')
sortin += '\n// DISP=SHR'
idxin += 1
if len(regsortnens4) == 1:
nome1 = (regsrt.jobRotine + 'S' + '{:>02}'.format(step.split('STEP')[1]
if step else regsrt.jobStep.split('STEP')[1]))
sysout += '//SORTOUT DD DSN=AD.C87.%s.SORT.%s%s,' % \
(nome1, regsortnens4[0].nome1, \
('.' + regsortnens4[0].nome2) \
if regsortnens4[0].nome2 else '')
sysout += '\n// DISP=(,CATLG,DELETE),'
sysout += '\n// UNIT=(DISCO,04),'
sysout += '\n// SPACE=(TRK,(005000,1000),RLSE),'
sysout += '\n// DCB=(AD.A,LRECL=@LRECL,RECFM=FB)'
else:
idxout = 1
nome1 = (regsrt.jobRotine + 'S' + '{:>02}'.
format(step.split('STEP')[1]
if step else regsrt.jobStep.split('STEP')[1]))
for regsort4 in regsortnens4:
sysout += '%s//SYSOUT%s DD DSN=AD.C87.%s.SORT.%s%s,' % \
('\n//*\n' if sysout else '', \
'{:>02}'.format(idxout), \
nome1, regsort4.nome1, \
('.' + regsort4.nome2) \
if regsort4.nome2 else '')
sysout += '\n// DISP=(,CATLG,DELETE),'
sysout += '\n// UNIT=(DISCO,04),'
sysout += '\n// SPACE=(TRK,(005000,1000),RLSE),'
sysout += '\n// DCB=(AD.A,LRECL=@LRECL,RECFM=FB)'
idxout += 1
bn = ''
idxout = 1
for regsort4 in regsortnens4:
regsort5s = db(db.sortnens5.sortnens4==regsort4.id).select()
if not regsort5s:
continue
incl = ''
for regsort5 in regsort5s:
regcampo = db(db.booksCampos.id==regsort5.campo).select().first()
sl = startLengthBook(db, db.booksCampos, 'book', \
regsrt.book, regsort5.campo)
if regsort5.tipo == 'INCLUDE':
incl += '%s%s,%s,%s' % (',' if incl else '', sl[0], \
regsort5.operacao, regsort5.valor)
include += ('%sOUTFIL INCLUDE=' % bn) + \
utl.stringList('(%s),FNAMES=%s%s' % \
(incl, 'SYSOUT' if qtdSortout > 1 else 'SORTOUT', \
'{:>02}'.format(idxout) \
if qtdSortout > 1 else ''), \
56, ' ')
idxout += 1
bn = '\n '
if not step:
stp = 'S' + '{:>02}'.format(regsrt.jobStep.split('STEP')[1])
else:
stp = 'S' + '{:>02}'.format(step.split('STEP')[1])
rot = regsrt.jobRotine + stp
arqname = utl.remover_acentos(utl.stringList(\
'CLASSIFICA O ARQUIVO %s POR:' % \
regsrt.jobArqName, 61, '//* *** ')).upper()
classificacao = utl.remover_acentos(classificacao).upper()
tb = [['@JOBNAME', regsrt.jobName.upper()],
['@ROTINA', rot],
['@APPLID', nomeaplicacao[0:5]],
['@USER', regsrt.jobUser],
['@STEP', step if step else regsrt.jobStep \
if regsrt.jobStep else 'STEP1'],
['@ARQNAME', arqname],
['@CLASSIFICACAO', utl.stringList(classificacao, 61, \
'//* *** ')],
['@SORTIN', sortin],
['@SYSOUT', sysout],
['@FIELDS', utl.stringList(fields + \
formato, 59, ' ')],
['@INCLUDE', ('\n ' if include else '') + include],
['@LRECL', '{:>04}'.format(lrecl)]]
txt = '%s%s.jcl' % (gersrt, regsrt.jobName)
if job:
tpl = '%sJOB_SORTNENS.txt' % templates
else:
tpl = '%sSORTNENS.txt' % templates
gsns = sortnens(tpl, tb, txt, job, step)
if gsns['erros']:
flash = 'Falha na Execução'
        labelErrors = 'Resultado do processamento SORTNENS%s' % \
                      ((' - %s' % step) if step else '')
msgsErrors = {}
idx = 0
for erro in gsns['erros'].split('\n'):
if len(erro) > 1:
idx += 1
msgsErrors[idx] = erro
return {'retorno': False, 'flash': flash, \
'labelErrors': labelErrors, \
'msgsErrors': msgsErrors, \
'linhas': gsns['linhas']}
db(db.sortnens.id==idsrt).update(usuarioGeracao=user, \
dataGeracao=datetime.datetime.today())
flash = 'SORTNENS gerado com sucesso.'
labelErrors = 'Resultado da geracao do SORT'
msgsErrors = {}
msgsErrors[0] = 'JOB %s - Done.' % regsrt.jobName
return {'retorno': True, 'flash': flash, \
'labelErrors': labelErrors, \
'msgsErrors': msgsErrors, \
'linhas': gsns['linhas']}
def job(argv1, argv2, argv3):
try:
with open(argv3, 'w') as f1:
with open(argv1) as f2:
for line in f2:
for tb in argv2:
line = line.replace(tb[0], tb[1])
f1.write(line)
except:
return traceback.format_exc()
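# gerarJob: assembles a complete multi-step JOB by re-running each step's
# generator with job=True and concatenating the expanded step bodies into
# the @STEPS slot of the JOB template.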
def gerarJob(db, idjob, user):
parms = db(db.parametros.id==1).select().first()
regjob = db(db.jobs.id==idjob).select().first()
regsteps = db(db.jobsteps.job==idjob).select(orderby='sequencia')
aplicacao = db(db.aplicacoes.id==regjob.codigoAplicacao).\
select().first()
nomeaplicacao = aplicacao.aplicacao
regempresa = db(db.empresa.id==aplicacao.empresa).select().first()
if not regsteps:
flash = 'Steps nao definidos'
return {'retorno': False, 'flash': flash, 'labelErrors': '', \
'msgsErrors': {}}
templates = os.path.join( '\\\\'
, '127.0.0.1'
, 'c$'
, parms.web2py
, 'applications'
, parms.soag
, 'Template'
, 'JCL') + os.sep
gerjob = os.path.join( '\\\\'
, '127.0.0.1'
, 'c$'
, parms.raiz
, regempresa.nome
, nomeaplicacao
, 'GERADOS'
, 'JOBS') + os.sep
try:
os.makedirs(gerjob)
except:
pass
jobs = ''
for regstep in regsteps:
if regstep.objeto == 'HPU':
obj = gerarhpu(db, regstep.idObjeto, user, job=True, \
step=regstep.step)
if not obj['retorno']:
return {'retorno': False, 'flash': obj['flash'], \
'labelErrors': obj['labelErrors'], \
'msgsErrors': obj['msgsErrors']}
jobs += obj['linhas']
continue
if regstep.objeto == 'Programa':
obj = gerarnens(db, regstep.idObjeto, user, job=True, \
step=regstep.step)
if not obj['retorno']:
return {'retorno': False, 'flash': obj['flash'], \
'labelErrors': obj['labelErrors'], \
'msgsErrors': obj['msgsErrors']}
jobs += obj['linhas']
continue
if regstep.objeto == 'Programa (CKRS)':
obj = gerarckrs(db, regstep.idObjeto, user, job=True, \
step=regstep.step)
if not obj['retorno']:
return {'retorno': False, 'flash': obj['flash'], \
'labelErrors': obj['labelErrors'], \
'msgsErrors': obj['msgsErrors']}
jobs += obj['linhas']
continue
if regstep.objeto == 'Sort Tabela':
obj = gerarsort1s(db, regstep.idObjeto, user, job=True, \
step=regstep.step)
if not obj['retorno']:
return {'retorno': False, 'flash': obj['flash'], \
'labelErrors': obj['labelErrors'], \
'msgsErrors': obj['msgsErrors']}
jobs += obj['linhas']
continue
if regstep.objeto == 'Sort Arquivo':
obj = gerarsortnens(db, regstep.idObjeto, user, job=True, \
step=regstep.step)
if not obj['retorno']:
return {'retorno': False, 'flash': obj['flash'], \
'labelErrors': obj['labelErrors'], \
'msgsErrors': obj['msgsErrors']}
jobs += obj['linhas']
continue
tb = [['@JOBNAME', regjob.name], \
['@ROTINA', regjob.rotine], \
['@APPLID', regjob.rotine[0:4]], \
['@USER', regjob.usuario],
['@STEPS', jobs]]
txt = '%s%s.jcl' % (gerjob, regjob.name)
tpl = '%sJOB.txt' % templates
erros = job(tpl, tb, txt)
if erros:
flash = 'Falha na Execução'
labelErrors = 'Resultado do processamento JOB'
msgsErrors = {}
idx = 0
for erro in erros.split('\n'):
if len(erro) > 1:
idx += 1
msgsErrors[idx] = erro
return {'retorno': False, 'flash': flash, \
'labelErrors': labelErrors, \
'msgsErrors': msgsErrors}
db(db.jobs.id==idjob).update(usuarioGeracao=user, \
dataGeracao=datetime.datetime.today())
flash = 'JOB gerado com sucesso.'
labelErrors = 'Resultado da geracao do JOB'
msgsErrors = {}
msgsErrors[0] = 'JOB %s - Done.' % regjob.name
return {'retorno': True, 'flash': flash, \
'labelErrors': labelErrors, \
'msgsErrors': msgsErrors}
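# referback: renders a nested HTML <ul> tree of every prior job/step output
# dataset so the user can click one and have its DSN qualifiers copied into
# the current form fields (refer-back); steps at or after the step being
# edited in the current job are skipped.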
def referback(db, idaplicacao, idstep, idcontroller, idobjeto):
regstp = db(db.jobsteps.id==idstep).select().first()
if regstp.objeto == 'HPU':
nome1 = 'nome1'
nome2 = 'nome2'
nome3 = 'nome3'
elif regstp.objeto == 'Programa':
nome1 = 'nome2'
nome2 = 'nome3'
nome3 = 'nome4'
elif regstp.objeto == 'Programa (CKRS)':
nome1 = 'nome2'
nome2 = 'nome3'
nome3 = 'nome4'
elif regstp.objeto == 'Sort Tabela':
nome1 = 'nome2'
nome2 = 'nome3'
nome3 = 'nome4'
else:
nome1 = 'nome1'
nome2 = 'nome2'
nome3 = 'nome3'
regsteps = db(db.jobsteps).select(orderby='job,step,sequencia')
items = ''
rotina = ''
job = 0
step = ''
ocor = 0
qtde = len(regsteps)
for regstep in regsteps:
ocor += 1
regjob = db(db.jobs.id==regstep.job).select().first()
if regjob.codigoAplicacao <> idaplicacao:
continue
if regstep.job == int(session.idjob or 0) and (regstep.step == session.get('step', '') or regstep.sequencia > int(session.sequencia or 0)):
continue
if regjob.rotine <> rotina:
items += ('</ul></ul></ul><ul>' if items else '<ul>') + '<li>Rotina: %s</li>' % regjob.rotine
rotina = regjob.rotine
items += '<ul><li>Job: %s</li>' % regjob.name
job = regstep.job
items += '<ul><li>%s - %s %s</li>' % (regstep.step, regstep.objeto, regstep.dsObjeto)
step = regstep.step
else:
if regstep.job <> job:
items += ('</ul><ul>' if items else '<ul>') + '<li>Job: %s</li>' % regjob.name
job = regstep.job
items += '<ul><li>%s - %s %s</li>' % (regstep.step, regstep.objeto, regstep.dsObjeto)
step = regstep.step
else:
if regstep.step <> step:
items += '%s<ul><li>%s - %s %s</li>' % ('<ul>' if ocor==qtde else '', regstep.step, regstep.objeto, regstep.dsObjeto)
step = regstep.step
if regstep.objeto == 'HPU':
reghpu = db(db.hpus.id==regstep.idObjeto).select().first()
if not reghpu:
items += '<ul><li>Nenhuma saida definida</li></ul>'
else:
regent = db(db.tabelas.id==reghpu.codigoEntidade).select().first()
items += '<ul>'
dsname = 'SYSREC01 - AD.C87.%s.%s' % (regjob.rotine + 'S%s.HPU.SYSREC01' % regstep.sequencia, regent.nome)
if regstep.job == int(session.idjob or 0):
vlr1 = '*'
vlr2 = step
vlr3 = 'SYSREC01'
vlr4 = ''
else:
vlr1 = regjob.rotine + 'S%s' % '{:>02}'.format(regstep.sequencia)
vlr2 = 'SYSREC01'
vlr3 = regent.nome
vlr4 = ''
items += '<li>' + str(A(dsname, _style=XML('cursor: pointer'),
_onclick=XML("jQuery('#%s_%s').attr('value','%s');" %
(idcontroller, nome1, vlr1) +
"jQuery('#%s_%s').attr('value','%s');" %
(idcontroller, nome2, vlr2) +
"jQuery('#%s_%s').attr('value','%s');" %
(idcontroller, nome3, vlr3) +
("jQuery('#%s_nome4').attr('value','%s');" %
(idcontroller, vlr4)
if regstp.objeto == 'Sort Arquivo' else '') +
"jQuery('#referback_%s').text('');" % idobjeto)))
items += '</li>'
items += '</ul>'
if regstep.objeto == 'Programa':
regnens = db(db.prognens.id==regstep.idObjeto).select().first()
if not regnens:
items += '<ul><li>Nenhuma saida definida</li></ul>'
else:
idx = 0
regnens4s = db(db.prognens4.prognens==regnens.id).select()
for regnens4 in regnens4s:
idx += 1
items += '<ul>'
dsname = '%s - AD.C87.%s%s%s%s' % \
(regnens4.nome1, regjob.rotine + 'S%s' % '{:>02}'.format(regstep.sequencia),
('.' + regnens4.nome3) if regnens4.nome3 else '',
('.' + regnens4.nome4) if regnens4.nome4 else '',
('.' + regnens4.nome5) if regnens4.nome5 else '')
if regstep.job == int(session.idjob or 0):
vlr1 = '*'
vlr2 = step
vlr3 = regnens4.nome1
vlr4 = ''
else:
vlr1 = regjob.rotine + 'S%s' % '{:>02}'.format(regstep.sequencia)
vlr2 = (regnens4.nome3) if regnens4.nome3 else ''
vlr3 = (regnens4.nome4) if regnens4.nome4 else ''
vlr4 = (regnens4.nome5) if regnens4.nome5 else ''
items += '<li>' + str(A(dsname, _style=XML('cursor: pointer'),\
_onclick=XML("jQuery('#%s_%s').attr('value','%s');" %
(idcontroller, nome1, vlr1) +
"jQuery('#%s_%s').attr('value','%s');" %
(idcontroller, nome2, vlr2) +
"jQuery('#%s_%s').attr('value','%s');" %
(idcontroller, nome3, vlr3) +
("jQuery('#%s_nome4').attr('value','%s');" %
(idcontroller, vlr4)
if regstp.objeto == 'Sort Arquivo' else '') +
"jQuery('#referback_%s').text('');" % idobjeto)))
items += '</li>'
items += '</ul>'
if not idx:
items += '<ul><li>Nenhuma Saida definida</li></ul>'
if regstep.objeto == 'Programa (CKRS)':
regckrs = db(db.progckrs.id==regstep.idObjeto).select().first()
if not regckrs:
items += '<ul><li>Nenhuma saida definida</li></ul>'
else:
idx = 0
regckrs4s = db(db.progckrs4.progckrs==regckrs.id).select()
for regckrs4 in regckrs4s:
idx += 1
items += '<ul>'
dsname = '%s - AD.C87.%s%s%s%s' % \
                        (regckrs4.nome1, regjob.rotine + 'S%s' % '{:>02}'.format(regstep.sequencia),
('.' + regckrs4.nome3) if regckrs4.nome3 else '',
('.' + regckrs4.nome4) if regckrs4.nome4 else '',
('.' + regckrs4.nome5) if regckrs4.nome5 else '')
if regstep.job == int(session.idjob or 0):
vlr1 = '*'
vlr2 = step
vlr3 = regckrs4.nome1
vlr4 = ''
else:
vlr1 = regjob.rotine + 'S%s' % '{:>02}'.format(regstep.sequencia)
vlr2 = (regckrs4.nome3) if regckrs4.nome3 else ''
vlr3 = (regckrs4.nome4) if regckrs4.nome4 else ''
vlr4 = (regckrs4.nome5) if regckrs4.nome5 else ''
items += '<li>' + str(A(dsname, _style=XML('cursor: pointer'),
_onclick=XML("jQuery('#%s_%s').attr('value','%s');" %
(idcontroller, nome1, vlr1) +
"jQuery('#%s_%s').attr('value','%s');" %
(idcontroller, nome2, vlr2) +
"jQuery('#%s_%s').attr('value','%s');" %
(idcontroller, nome3, vlr3) +
("jQuery('#%s_nome4').attr('value','%s');" %
(idcontroller, vlr4)
if regstp.objeto == 'Sort Arquivo' else '') +
"jQuery('#referback_%s').text('');" % idobjeto)))
items += '</li>'
items += '</ul>'
if not idx:
                    items += '<ul><li>Nenhuma Saida definida</li></ul>'
if regstep.objeto == 'Sort Tabela':
reg1s = db(db.sort1s.id==regstep.idObjeto).select().first()
if not reg1s:
items += '<ul><li>Nenhuma saida definida</li></ul>'
else:
regent = db(db.tabelas.id==reg1s.codigoEntidade).select().first()
items += '<ul>'
dsname = 'SORTOUT - AD.C87.%s%s%s' % (regjob.rotine + 'S%s.SORT' % '{:>02}'.format(regstep.sequencia), '.' + regent.nome, '.' + reg1s.jobArqName)
if regstep.job == int(session.idjob or 0):
vlr1 = '*'
vlr2 = step
vlr3 = 'SORTOUT'
vlr4 = ''
else:
vlr1 = regjob.rotine
vlr2 = 'S%s.SORT' % '{:>02}'.format(regstep.sequencia)
vlr3 = regent.nome
vlr4 = reg1s.jobArqName
items += '<li>' + str(A(dsname, _style=XML('cursor: pointer'),
_onclick=XML("jQuery('#%s_%s').attr('value','%s');" %
(idcontroller, nome1, vlr1) +
"jQuery('#%s_%s').attr('value','%s');" %
(idcontroller, nome2, vlr2) +
"jQuery('#%s_%s').attr('value','%s');" %
(idcontroller, nome3, vlr3) +
("jQuery('#%s_nome4').attr('value','%s');" %
(idcontroller, vlr4)
if regstp.objeto == 'Sort Arquivo' else '') +
"jQuery('#referback_%s').text('');" % idobjeto)))
items += '</li>'
items += '</ul>'
if regstep.objeto == 'Sort Arquivo':
regnens = db(db.sortnens.id==regstep.idObjeto).select().first()
if not regnens:
items += '<ul><li>Nenhuma saida definida</li></ul>'
else:
regnens4s = db(db.sortnens4.sortnens==regnens.id).select()
idx = 0
for regnens4 in regnens4s:
idx += 1
items += '<ul>'
origem = 'SYSOUT%s' % '{:>02}'.format(idx)
dsname = 'SYSOUT%s - AD.C87.%s%s%s' % ('{:>02}'.format(idx), regjob.rotine + 'S%s.SORT' % '{:>02}'.format(regstep.sequencia), '.' + regnens4.nome1, ('.' + regnens4.nome2) if regnens4.nome2 else '')
if regstep.job == int(session.idjob or 0):
vlr1 = '*'
vlr2 = step
vlr3 = origem
vlr4 = ''
else:
vlr1 = regjob.rotine + 'S%s' % '{:>02}'.format(regstep.sequencia)
vlr2 = 'SORT'
vlr3 = regnens4.nome1
vlr4 = (regnens4.nome2) if regnens4.nome2 else ''
items += '<li>' + str(A(dsname, _style=XML('cursor: pointer'),
_onclick=XML("jQuery('#%s_%s').attr('value','%s');" %
(idcontroller, nome1, vlr1) +
"jQuery('#%s_%s').attr('value','%s');" %
(idcontroller, nome2, vlr2) +
"jQuery('#%s_%s').attr('value','%s');" %
(idcontroller, nome3, vlr3) +
("jQuery('#%s_nome4').attr('value','%s');" %
(idcontroller, vlr4)
if regstp.objeto == 'Sort Arquivo' else '') +
"jQuery('#referback_%s').text('');" % idobjeto)))
items += '</li>'
items += '</ul>'
if not idx:
items += '<ul><li>Nenhuma Saida definida</li></ul>'
items += '</ul>'
return XML(items)
# vim: ft=python
| [
"[email protected]"
] | |
f37580c24968dd1c773b2fadeb70ecf90b5e8744 | 44791c21090478681c4d0b83154b7bebc6f398f0 | /server/mongoModel.py | 748fc29ac2663e9676842a26a0701a4ed5a22ecf | [] | no_license | prestondoris/OBP_AuthServer | ad916c29af0e956c0c42414abd59e39263275918 | a4caf57592e09c74d5400484ef40fe63285fa43c | refs/heads/master | 2022-12-27T18:23:01.935357 | 2019-06-27T23:16:41 | 2019-06-27T23:16:41 | 192,974,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,183 | py | import json
from bson.objectid import ObjectId
from bson.json_util import dumps, loads
from flask_pymongo import PyMongo
builtin_list = list
mongo = None
def _id(id):
if not isinstance(id, ObjectId):
return ObjectId(id)
return id
def init_app(app):
global mongo
mongo = PyMongo(app)
mongo.init_app(app)
# [START read]
def read(email):
try:
result = mongo.db.users.find_one({'email': email})
return result
except:
return json.dumps({'error': 'Error Connecting to DB'})
# [END read]
# [START create]
def create(data):
try:
user = read(data["email"])
if not user:
try:
user = mongo.db.users.insert_one(data)
                return read(data["email"])  # read() looks up by email, not by ObjectId
except:
return json.dumps({'error': 'Error Connecting to DB'})
return None
except:
return json.dumps({'error': 'Error Connecting to DB'})
# [END create]
# [START update]
def update(data, email):
mongo.db.users.replace_one({'email': email}, data)
return read(email)
# [END update]
def delete(email):
mongo.db.users.delete_one({'email': email}) | [
"[email protected]"
] | |
0c36af70f72e559ca91174c96852a456071cc657 | 424717bb2008a93b374e958b730f4062c20314c0 | /desginer_pattern_state.py | fbc9a8c374a73bf8fe7914418df4507e2ef084ad | [] | no_license | renatolaq/Python-Design-Paterns | 5dcfe7a6c014c946efe6ba63bc0bae248180e45f | 31237df5d74eab3bb60fb5f3c70d785ea4a459d5 | refs/heads/master | 2020-08-05T04:25:29.595037 | 2019-10-04T12:29:47 | 2019-10-04T12:29:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,223 | py | class ComputerState(object):
name = "state"
allowed = []
def switch(self, state):
if state.name in self.allowed:
print('Current:', self, ' => switched to new state', state.name)
self.__class__ = state
else:
print('Current:', self, ' => switching to', state.name, 'not possible.')
def __str__(self):
return self.name
class Off(ComputerState):
name = "off"
allowed = ['on']
class On(ComputerState):
name = "on"
allowed = ['off', 'suspend', 'hibernate']
class Suspend(ComputerState):
name = "suspend"
allowed = ['on']
class Hibernate(ComputerState):
name = "hibernate"
allowed = ['on']
class Computer(object):
def __init__(self, model='HP'):
self.model = model
self.state = Off()
def change(self, state):
self.state.switch(state)
if __name__ == '__main__':
comp = Computer()
# Switch on
comp.change(On)
# Switch off
comp.change(Off)
# Switch on again
comp.change(On)
# Suspend
comp.change(Suspend)
# Try to hibernate - cannot!
comp.change(Hibernate)
# switch on back
comp.change(On)
# Finally off
comp.change(Off) | [
"[email protected]"
] | |
8d1410c3b1419e23f80b80067b065a269fdcbe9f | 5267ce24d7c8c5b5d742bffc22f9f8f3b80eb05f | /code/replot.py | 67c70c0248cb20c210dd11d4df942a6a31dfad60 | [
"MIT"
] | permissive | go-smart/glossia-quickstart | b6d31abd2a8dde964c3020d6c3539d43301bee41 | b4245f29b184f7e8e7d643d1af8a95719af43954 | refs/heads/master | 2016-09-14T14:10:05.529552 | 2016-04-26T11:32:56 | 2016-04-26T11:32:56 | 56,546,747 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 716 | py | from __future__ import print_function
import numpy as N
import matplotlib.pyplot as P
import sys
import variables as v
if len(sys.argv) < 2:
print("Must have a filename!")
exit(1)
filename = sys.argv[1]
# Output cumulative coverage curves as CSV
output_arrays = N.loadtxt(filename)
output_arrays = zip(*output_arrays)
# Plot the coverage curves
for pair, a in zip(v.electrode_pairs, output_arrays[1:]):
P.plot(output_arrays[0], a, label="%d - %d" % pair)
# Draw the plot
P.draw()
P.xlabel(r"Threshold level of $|E|$ ($\mathrm{J}$)")
P.xlim([0, 1000])
P.ylabel(r"Fraction of tumour beneath level")
P.ylim([0, 1])
# Show a legend for the plot
P.legend(loc=3)
# Display the plot
P.show(block=True)
| [
"[email protected]"
] | |
8298fe4b188526c579e1d765177c96f7516d529e | c3bf6dd16fe96246baa6ee122202ae380d2eb91b | /ch06a/ex0602.py | a2edb01dc6dd041a204625c00a0ebaf94f085ff5 | [] | no_license | flyupwards/python3_source_code | f944c0965259a1848026d9794db63d9fffcbac94 | 2736c995f99c0db8c9ecd4021b1c06e1204bac26 | refs/heads/master | 2023-02-24T12:00:17.092864 | 2021-01-27T15:38:32 | 2021-01-27T15:38:32 | 333,469,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | '''
计算圆的面积s
'''
def getcirclearea(r):
print("圆的面积s={:8.2f}".format(3.14 * r * r))
return
getcirclearea(3)
getcirclearea # 函数名变量在内存中的地址
print(getcirclearea(3)) # return语句无返回值时,返回None
| [
"[email protected]"
] | |
34ecf6296f533149652bcc9bdfc768e47934f237 | 7bb2ce3785d3aecd357be03651f3a7ee8b0d7ade | /ssd/evaluate/evaluate_net.py | b365975bb3c488193e98ee3574c2adc9c5a7cf5c | [] | no_license | yangfly/additions_mxnet | db930f781d991ae8c744d4cee7e51852011fd12c | 8ed564a9e8fd4a50079baeb22aa5f2fdefc178fc | refs/heads/master | 2020-05-29T22:41:11.270796 | 2017-12-06T08:08:33 | 2017-12-06T08:08:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,991 | py | from __future__ import print_function
import os
import sys
import importlib
import mxnet as mx
from dataset.iterator import DetRecordIter
from config.config import cfg
from evaluate.eval_metric import MApMetric, VOC07MApMetric
import logging
from symbol.symbol_factory import get_symbol
from tools.do_nms import do_nms
def evaluate_net(net, path_imgrec, num_classes, mean_pixels, data_shape,
model_prefix, epoch, ctx=mx.cpu(), batch_size=1,
path_imglist="", nms_thresh=0.45, force_nms=False,
ovp_thresh=0.5, use_difficult=False, class_names=None,
voc07_metric=False):
"""
    Evaluate a network given a validation record file.
Parameters:
----------
net : str or None
Network name or use None to load from json without modifying
path_imgrec : str
path to the record validation file
path_imglist : str
path to the list file to replace labels in record file, optional
num_classes : int
number of classes, not including background
mean_pixels : tuple
(mean_r, mean_g, mean_b)
data_shape : tuple or int
(3, height, width) or height/width
model_prefix : str
model prefix of saved checkpoint
epoch : int
load model epoch
ctx : mx.ctx
mx.gpu() or mx.cpu()
batch_size : int
validation batch size
nms_thresh : float
non-maximum suppression threshold
force_nms : boolean
        whether to suppress objects of different classes
ovp_thresh : float
        AP overlap threshold for true/false positives
use_difficult : boolean
whether to use difficult objects in evaluation if applicable
class_names : comma separated str
class names in string, must correspond to num_classes if set
voc07_metric : boolean
        whether to use 11-point evaluation as in the VOC07 competition
"""
# set up logger
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# args
if isinstance(data_shape, int):
data_shape = (3, data_shape, data_shape)
assert len(data_shape) == 3 and data_shape[0] == 3
model_prefix += '_' + str(data_shape[1])
# iterator
eval_iter = DetRecordIter(path_imgrec, batch_size, data_shape,
path_imglist=path_imglist, **cfg.valid)
# model params
load_net, args, auxs = mx.model.load_checkpoint(model_prefix, epoch)
# network
if net is None:
net = load_net
else:
net = get_symbol(net, data_shape[1], num_classes=num_classes,
nms_thresh=nms_thresh, force_suppress=force_nms)
    if 'label' not in net.list_arguments():
label = mx.sym.Variable(name='label')
net = mx.sym.Group([net, label])
# init module
mod = mx.mod.Module(net, label_names=('label',), logger=logger, context=ctx,
fixed_param_names=net.list_arguments())
mod.bind(data_shapes=eval_iter.provide_data, label_shapes=eval_iter.provide_label)
mod.set_params(args, auxs, allow_missing=False, force_init=True)
# run evaluation
if voc07_metric:
metric = VOC07MApMetric(ovp_thresh, use_difficult, class_names)
else:
metric = MApMetric(ovp_thresh, use_difficult, class_names)
for i, datum in enumerate(eval_iter):
# mod.reshape(data_shapes=datum.provide_data, label_shapes=datum.provide_label)
mod.forward(datum)
preds = mod.get_outputs()
for j in range(preds[0].shape[0]):
det0 = preds[0][j].asnumpy() # (n_anchor, 6)
det0 = do_nms(det0, n_class=20, th_nms=nms_thresh)
preds[0][j] = mx.nd.array(det0, ctx=preds[0][j].context)
metric.update(datum.label, preds)
if i % 10 == 0:
print('processed {} batches.'.format(i))
results = metric.get_name_value()
# results = mod.score(eval_iter, metric, num_batch=None)
for k, v in results:
print("{}: {}".format(k, v))
| [
"[email protected]"
] | |
772bc6013f219ad1c46eadc9d8058b9393df3313 | 883ddb559c7b54af22202685b279d899af48f497 | /examen/models.py | 28aa9e8af52fcbb70211e30c61652ff3364aa9ec | [] | no_license | kikega/oposiciones | 3bd64e8b955b33ca5be0b24faf17de4e7c235310 | 65a33c47c95645e5499942982f2e6855ef81ee8b | refs/heads/master | 2020-06-09T10:39:28.241903 | 2019-08-22T22:40:30 | 2019-08-22T22:40:30 | 103,585,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | from django.db import models
from django.contrib.postgres.fields import ArrayField
# Create your models here.
class Examen(models.Model):
fecha = models.DateField(auto_now=True)
p_falladas = ArrayField(models.IntegerField())
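    # Note (added): ArrayField is PostgreSQL-only; this model assumes the
    # django.db.backends.postgresql database backend.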
p_acertadas = models.IntegerField()
fallos = models.IntegerField()
nota = models.FloatField()
class Meta:
verbose_name_plural = "Examenes"
def __str__(self):
return str(self.fecha)
| [
"[email protected]"
] | |
94d04dee1f8e390d9c5217e7c519fccc0f5fc0de | a82be1ed4ec980c45ee963ccbe15fa3c574d6c78 | /Bike Rent Count.py | 4c5b187fa709de4e61d9014f0a392347bcc440b2 | [] | no_license | ytnvj2/bikerent | 82326e25be66078a22a327512f00ff8e06277dd4 | 711f466f8e03fe545546928cf5bd0d9dd7569125 | refs/heads/master | 2020-04-08T18:51:16.164408 | 2018-12-01T03:47:26 | 2018-12-01T03:47:26 | 159,627,917 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,762 | py | import os
import pandas as pd
import numpy as np
import fancyimpute
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.metrics import (explained_variance_score, mean_absolute_error, mean_squared_error)
from sklearn.metrics import r2_score
def load_data():
# define path of csv
raw_data_path=os.path.join('day.csv')
# import the dataset
df=pd.read_csv(raw_data_path)
df.columns=['Instant', 'Date', 'Season', 'Year', 'Month', 'Holiday', 'Weekday',
'WorkingDay', 'WeatherSituation', 'Temp', 'ActualTemp', 'Humidity', 'WindSpeed',
'Casual', 'Registered', 'Count']
# Convert the categorical columns to object
cat_cols=['Instant','Season', 'Year', 'Month', 'Holiday', 'Weekday',
'WorkingDay', 'WeatherSituation']
for i in cat_cols:
df[i] = df[i].astype(object)
num_cols=[]
for i in df.columns:
if(df[i].dtype==np.dtype('int64') or df[i].dtype==np.dtype('float64')):
num_cols.append(i)
df[i] = df[i].astype(np.float64)
# viewing the dataframe's info
df.info()
return df,num_cols,cat_cols
def outlier_imputer(df_o,num_cols):
# Outlier Analysis
while True:
for i in num_cols:
            lower_fence = (df_o[i].quantile(0.25)-1.5*(df_o[i].quantile(0.75)-df_o[i].quantile(0.25)))
            upper_fence = (df_o[i].quantile(0.75)+1.5*(df_o[i].quantile(0.75)-df_o[i].quantile(0.25)))
            df_o.loc[df_o[i]<lower_fence,i] = np.nan
            df_o.loc[df_o[i]>upper_fence,i] = np.nan
missing_val = df_o.isnull().sum()
print(missing_val)
if(missing_val.sum()>0):
df_o_knn=pd.DataFrame(fancyimpute.KNN(k = 3).complete(df_o[num_cols]), columns = num_cols)
df_o_knn.head()
df_o.iloc[:,9:15]=df_o_knn.iloc[:,:]
else:
break
return df_o
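# Worked example of the Tukey fence used in outlier_imputer (illustrative):
# with Q1 = 10 and Q3 = 30 the IQR is 20, so values outside
# [10 - 1.5*20, 30 + 1.5*20] = [-20, 60] are set to NaN and re-imputed by KNN.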
def feature_selection(df):
#Set the width and hieght of the plot
f, ax = plt.subplots(figsize=(7, 5))
#Generate correlation matrix
corr = df.iloc[:,9:].corr()
#Plot using seaborn library
sns.heatmap(corr, mask=np.zeros_like(corr, dtype=np.bool), cmap=sns.diverging_palette(220, 10, as_cmap=True),
square=True, ax=ax)
return df.drop(['Instant','Date','Holiday','Temp','Registered'],axis=1,inplace=False)
def split_dataset(df):
X=df.iloc[:,:-1].values
y=df.iloc[:,-1].values
return train_test_split(X,y,test_size=0.2,random_state=123)
def feature_scaling(X_train,X_test):
standardScaler=StandardScaler()
X_train[:,6:]=standardScaler.fit_transform(X_train[:,6:])
X_test[:,6:]=standardScaler.transform(X_test[:,6:])
return X_train,X_test,standardScaler
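# Note (added): the scaler is fit on the training split only and reused to
# transform the test split, keeping test-set statistics out of training.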
def train_lm(X_train,y_train):
lr_model=LinearRegression()
lr_model.fit(X_train,y_train)
return lr_model
def train_dt(X_train,y_train):
dtr_model=DecisionTreeRegressor(random_state=123)
dtr_model.fit(X_train,y_train)
return dtr_model
def train_rf(X_train,y_train):
rf_model=RandomForestRegressor(n_estimators=50,random_state=123)
rf_model.fit(X_train,y_train)
return rf_model
def train_knn(X_train,y_train):
    knn_model=KNeighborsRegressor(n_neighbors=5)
    knn_model.fit(X_train,y_train)
    return knn_model
def predict_vals(model,X_test,y_test):
print(model.score(X_test,y_test))
preds=model.predict(X_test)
return preds
def evaluate_model(y_test,y_pred):
print('R-Square',r2_score(y_test,y_pred))
print('MSE',mean_squared_error(y_test,y_pred))
print('MAE',mean_absolute_error(y_test,y_pred))
print('Explained Variance',explained_variance_score(y_test,y_pred))
def predict(model,X):
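    # Note (added): relies on the module-level `standardScaler` fitted in
    # feature_scaling(); run that first or the transform below will fail.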
df=X
df.columns=['Instant', 'Date', 'Season', 'Year', 'Month', 'Holiday', 'Weekday',
'WorkingDay', 'WeatherSituation', 'Temp', 'ActualTemp', 'Humidity', 'WindSpeed',
'Casual', 'Registered']
# Convert the categorical columns to object
cat_cols=['Season', 'Year', 'Month', 'Holiday', 'Weekday',
'WorkingDay', 'WeatherSituation']
for i in cat_cols:
df[i] = df[i].astype(object)
num_cols=[]
for i in df.columns:
if(df[i].dtype==np.dtype('int64') or df[i].dtype==np.dtype('float64')):
num_cols.append(i)
df[i] = df[i].astype(np.float64)
X.drop(['Instant','Date','Holiday','Temp','Registered'],axis=1,inplace=True)
X=X.values
X[:,6:]=standardScaler.transform(X[:,6:])
return model.predict(X)
df,num_cols,cat_cols=load_data()
df=outlier_imputer(df,num_cols)
df=feature_selection(df)
X_train,X_test,y_train,y_test=split_dataset(df)
X_train_scaled,X_test_scaled,standardScaler=feature_scaling(X_train,X_test)
lr_model=train_lm(X_train,y_train)
dt_model=train_dt(X_train,y_train)
rf_model=train_rf(X_train,y_train)
knn_model=train_knn(X_train,y_train)
print('Linear Model')
y_pred=predict_vals(lr_model,X_test,y_test)
evaluate_model(y_test,y_pred)
print('Decision Tree Model')
y_pred=predict_vals(dt_model,X_test,y_test)
evaluate_model(y_test,y_pred)
print('Random Forest Model')
y_pred=predict_vals(rf_model,X_test,y_test)
evaluate_model(y_test,y_pred)
print('K Nearest Neighbors(k=5)')
y_pred=predict_vals(knn_model,X_test,y_test)
evaluate_model(y_test,y_pred)
# Sample Input Creation
a=np.array([[399, '2012-02-03', 1, 1, 2, 0, 5, 1, 1, 0.313333, 0.309346,
0.526667, 0.17849600000000002, 310, 3841]], dtype=object)
s=pd.DataFrame(a)
print(s)
# Output for sample input
print("The Model's Prediction for the input is ",predict(rf_model,s)) | [
"[email protected]"
] | |
98529b4cb54decdf4e7df6ee449955d6f8fe354c | 91cdc8cf41dff4d179f9e8319bf9e5b19da64b1f | /backend/home/migrations/0003_hgfdjfd.py | d8c1aae2adf8e892905df603dde5c9609a4539fc | [] | no_license | crowdbotics-apps/mobile-1666-dev-7710 | 8b752505addfe8ae946f061d6fb57445f2eca1a5 | 4b2fba070ce3d95eb9b3c153a16af9d527079d03 | refs/heads/master | 2022-11-21T13:33:33.758230 | 2020-07-17T14:18:12 | 2020-07-17T14:18:12 | 280,130,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | # Generated by Django 2.2.14 on 2020-07-17 07:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("home", "0002_load_initial_data"),
]
operations = [
migrations.CreateModel(
name="Hgfdjfd",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("jhjhjh", models.BigIntegerField()),
],
),
]
| [
"[email protected]"
] | |
879e9eec961a0ae978e683b4e0a3c9e98b728095 | 203d26cb680c9ab4fc45990123ae9b17574a4908 | /tools/grammar.py | 430ebedab3d86d9658a7c731fe4fc1f1c439c089 | [] | no_license | jprimeau/DE1-JP80 | b608870a63c5a56063fb24bdd1f60ae653f399de | 08d2150bd2153e540aa6e987c10abeef135b5492 | refs/heads/master | 2016-09-06T10:09:29.129759 | 2015-01-03T03:40:48 | 2015-01-03T03:40:48 | 26,215,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,699 | py | grammar = {
'ACI':['D', 2, 'CE'],
'ADC':['R', 1, {'A':'8F','B':'88','C':'89','D':'8A','E':'8B','H':'8C','L':'8D','M':'8E'}],
'ADD':['R', 1, {'A':'87','B':'80','C':'81','D':'82','E':'83','H':'84','L':'85','M':'86'}],
'ADI':['D', 2, 'C6'],
'ANA':['R', 1, {'A':'A7','B':'A0','C':'A1','D':'A2','E':'A3','H':'A4','L':'A5','M':'A6'}],
'ANI':['D', 2, 'E6'],
'CALL':['A', 3, 'CD'],
'CC':['A', 3, 'DC'],
'CNC':['A', 3, 'D4'],
'CP':['A', 3, 'F4'],
'CM':['A', 3, 'FC'],
'CPE':['A', 3, 'EC'],
'CPO':['A', 3, 'E4'],
'CZ':['A', 3, 'CC'],
'CNZ':['A', 3, 'C4'],
'CMA':['', 1, '2F'],
'CMC':['', 1, '3F'],
'CMP':['R', 1, {'A':'BF','B':'B8','C':'B9','D':'BA','E':'BB','H':'BC','L':'BD','M':'BE'}],
'CPI':['D', 2, 'FE'],
'DAA':['', 1, '27'],
'DAD':['Rp', 1, {'B':'09','D':'19','H':'29','SP':'39'}],
'DCR':['R', 1, {'A':'3D','B':'05','C':'0D','D':'15','E':'1D','H':'25','L':'2D','M':'35'}],
'DCX':['Rp', 1, {'B':'0B','D':'1B','H':'2B','SP':'3B'}],
'DI':['', 1, 'F3'],
'EI':['', 1, 'FB'],
'HLT':['', 1, '76'],
'IN':['D', 2, 'DB'],
'INR':['R', 1, {'A':'3C','B':'04','C':'0C','D':'14','E':'1C','H':'24','L':'2C','M':'34'}],
'INX':['Rp', 1, {'B':'03','D':'13','H':'23','SP':'33'}],
'JMP':['A', 3, 'C3'],
'JC':['A', 3, 'DA'],
'JNC':['A', 3, 'D2'],
'JP':['A', 3, 'F2'],
'JM':['A', 3, 'FA'],
'JPE':['A', 3, 'EA'],
'JPO':['A', 3, 'E2'],
'JZ':['A', 3, 'CA'],
'JNZ':['A', 3, 'C2'],
'LDA':['A', 3, '3A'],
    'LDAX':['Rp', 1, {'B':'0A','D':'1A'}],
    'LHLD':['A', 3, '2A'],
'LXI':['Rp,DD', 3, {'B':'01','D':'11','H':'21','SP':'31'}],
'MOV':['R,R', 1, {'A,A':'7F','A,B':'78','A,C':'79','A,D':'7A','A,E':'7B','A,H':'7C','A,L':'7D','A,M':'7E',
'B,A':'47','B,B':'40','B,C':'41','B,D':'42','B,E':'43','B,H':'44','B,L':'45','B,M':'46',
            'C,A':'4F','C,B':'48','C,C':'49','C,D':'4A','C,E':'4B','C,H':'4C','C,L':'4D','C,M':'4E',
'D,A':'57','D,B':'50','D,C':'51','D,D':'52','D,E':'53','D,H':'54','D,L':'55','D,M':'56',
'E,A':'5F','E,B':'58','E,C':'59','E,D':'5A','E,E':'5B','E,H':'5C','E,L':'5D','E,M':'5E',
'H,A':'67','H,B':'60','H,C':'61','H,D':'62','H,E':'63','H,H':'64','H,L':'65','H,M':'66',
'L,A':'6F','L,B':'68','L,C':'69','L,D':'6A','L,E':'6B','L,H':'6C','L,L':'6D','L,M':'6E',
'M,A':'77','M,B':'70','M,C':'71','M,D':'72','M,E':'73','M,H':'74','M,L':'75'}],
'MVI':['R,D', 2, {'A':'3E','B':'06','C':'0E','D':'16','E':'1E','H':'26','L':'2E','M':'36'}],
'NOP':['', 1, '00'],
'ORA':['R', 1, {'A':'B7','B':'B0','C':'B1','D':'B2','E':'B3','H':'B4','L':'B5','M':'B6'}],
'ORI':['D', 2, 'F6'],
'OUT':['D', 2, 'D3'],
'PCHL':[ '', 1, 'E9'],
'POP':['Rp', 1, {'B':'C1','D':'D1','H':'E1','PSW':'F1'}],
'PUSH':['Rp', 1, {'B':'C5','D':'D5','H':'E5','PSW':'F5'}],
'RAL':['', 1, '17'],
'RAR':['', 1, '1F'],
    'RLC':['', 1, '07'],
'RRC':['', 1, '0F'],
'RET':['', 1, 'C9'],
'RC':['', 1, 'D8'],
'RNC':['', 1, 'D0'],
'RP':['', 1, 'F0'],
'RM':['', 1, 'F8'],
'RPE':['', 1, 'E8'],
'RPO':['', 1, 'E0'],
'RZ':['', 1, 'C8'],
'RNZ':['', 1, 'C0'],
'RIM':['', 1, '20'],
'RST':['N', 1, ['C7','CF','D7','DF','E7','EF','F7','FF']],
'SBB':['R', 1, {'A':'9F','B':'98','C':'99','D':'9A','E':'9B','H':'9C','L':'9D','M':'9E'}],
'SBI':['D', 2, 'DE'],
'SHLD':['A', 3, '22'],
'SIM':['', 1, '30'],
'SPHL':['', 1, 'F9'],
'STA':['A', 3, '32'],
'STAX':['Rp', 1, {'B':'02','D':'12'}],
'STC':['', 1, '37'],
'SUB':['R', 1, {'A':'97','B':'90','C':'91','D':'92','E':'93','H':'94','L':'95','M':'96'}],
'SUI':['D', 2, 'D6'],
'XCHG':['', 1, 'EB'],
'XRA':['R', 1, {'A':'AF','B':'A8','C':'A9','D':'AA','E':'AB','H':'AC','L':'AD','M':'AE'}],
'XRI':['D', 2, 'EE'],
'XTHL':['', 1, 'E3'],
'defw':['AD', 0, ''],
'defm':['AD', 0, ''],
'org':['AD', 0, ''],
'equ':['AD', 0, ''],
}
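# Illustrative lookups (added): each entry maps a mnemonic to
# [operand pattern, length in bytes, opcode or opcode table].
# grammar['MOV'][2]['A,B']  # -> '78' (1-byte register move)
# grammar['MVI'][2]['A']    # -> '3E' (2 bytes: opcode + immediate data)
# grammar['JMP'][2]         # -> 'C3' (3 bytes: opcode + 16-bit address)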
| [
"[email protected]"
] | |
973987e590757c2014e474ea2435574e52da4589 | bdc59a03b8bc589eeda1f4c5a97d7e283f5cc81c | /Add_Project_GUI.py | b6e20a76544d2da6013c881b4955612d13b67999 | [] | no_license | simplesports/ProjectManager | 111e8f6a55afeaad7d0754008a329e99bcbbea9f | bf8ccb584bd25a4651e8be602cd9cb83fac5d046 | refs/heads/master | 2020-12-30T12:34:47.809607 | 2017-07-07T22:53:08 | 2017-07-07T22:53:08 | 91,388,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,615 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Add_Project_GUI.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Add_Projects(object):
def setupUi(self, Add_Projects):
Add_Projects.setObjectName("Add_Projects")
Add_Projects.resize(592, 724)
self.gridLayout = QtWidgets.QGridLayout(Add_Projects)
self.gridLayout.setObjectName("gridLayout")
self.text_Additional_Comments = QtWidgets.QLabel(Add_Projects)
self.text_Additional_Comments.setObjectName("text_Additional_Comments")
self.gridLayout.addWidget(self.text_Additional_Comments, 7, 1, 1, 1)
self.textEdit_Additional_Comments = QtWidgets.QTextEdit(Add_Projects)
self.textEdit_Additional_Comments.setObjectName("textEdit_Additional_Comments")
self.gridLayout.addWidget(self.textEdit_Additional_Comments, 8, 1, 1, 1)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.text_Project_Number = QtWidgets.QLabel(Add_Projects)
self.text_Project_Number.setObjectName("text_Project_Number")
self.horizontalLayout.addWidget(self.text_Project_Number)
self.UserInput_Project_Number = QtWidgets.QLineEdit(Add_Projects)
self.UserInput_Project_Number.setObjectName("UserInput_Project_Number")
self.horizontalLayout.addWidget(self.UserInput_Project_Number)
self.text_Project_Name = QtWidgets.QLabel(Add_Projects)
self.text_Project_Name.setObjectName("text_Project_Name")
self.horizontalLayout.addWidget(self.text_Project_Name)
self.UserInput_Project_Name = QtWidgets.QLineEdit(Add_Projects)
self.UserInput_Project_Name.setObjectName("UserInput_Project_Name")
self.horizontalLayout.addWidget(self.UserInput_Project_Name)
self.gridLayout.addLayout(self.horizontalLayout, 0, 1, 1, 1)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(spacerItem)
self.Button_Add_Project = QtWidgets.QPushButton(Add_Projects)
self.Button_Add_Project.setObjectName("Button_Add_Project")
self.horizontalLayout_5.addWidget(self.Button_Add_Project)
self.Button_Cancel = QtWidgets.QPushButton(Add_Projects)
self.Button_Cancel.setObjectName("Button_Cancel")
self.horizontalLayout_5.addWidget(self.Button_Cancel)
self.gridLayout.addLayout(self.horizontalLayout_5, 10, 1, 1, 1)
self.groupBox_DueDates = QtWidgets.QGroupBox(Add_Projects)
self.groupBox_DueDates.setObjectName("groupBox_DueDates")
self.gridLayout_3 = QtWidgets.QGridLayout(self.groupBox_DueDates)
self.gridLayout_3.setObjectName("gridLayout_3")
self.gridLayout_2 = QtWidgets.QGridLayout()
self.gridLayout_2.setObjectName("gridLayout_2")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.text_Description = QtWidgets.QLabel(self.groupBox_DueDates)
self.text_Description.setObjectName("text_Description")
self.horizontalLayout_4.addWidget(self.text_Description)
self.UserInput_Description = QtWidgets.QLineEdit(self.groupBox_DueDates)
self.UserInput_Description.setObjectName("UserInput_Description")
self.horizontalLayout_4.addWidget(self.UserInput_Description)
self.text_date = QtWidgets.QLabel(self.groupBox_DueDates)
self.text_date.setObjectName("text_date")
self.horizontalLayout_4.addWidget(self.text_date)
self.dateEdit = QtWidgets.QDateEdit(self.groupBox_DueDates)
self.dateEdit.setObjectName("dateEdit")
self.horizontalLayout_4.addWidget(self.dateEdit)
self.gridLayout_2.addLayout(self.horizontalLayout_4, 1, 0, 1, 1)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem1)
self.Button_Add_To_Table_Due_Dates = QtWidgets.QPushButton(self.groupBox_DueDates)
self.Button_Add_To_Table_Due_Dates.setObjectName("Button_Add_To_Table_Due_Dates")
self.horizontalLayout_2.addWidget(self.Button_Add_To_Table_Due_Dates)
self.Button_Dates_Update_Table = QtWidgets.QPushButton(self.groupBox_DueDates)
self.Button_Dates_Update_Table.setObjectName("Button_Dates_Update_Table")
self.horizontalLayout_2.addWidget(self.Button_Dates_Update_Table)
self.Button_Dates_Remove = QtWidgets.QPushButton(self.groupBox_DueDates)
self.Button_Dates_Remove.setObjectName("Button_Dates_Remove")
self.horizontalLayout_2.addWidget(self.Button_Dates_Remove)
self.gridLayout_2.addLayout(self.horizontalLayout_2, 2, 0, 1, 1)
self.gridLayout_3.addLayout(self.gridLayout_2, 0, 0, 1, 1)
self.tableWidget_Dates = QtWidgets.QTableWidget(self.groupBox_DueDates)
self.tableWidget_Dates.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.tableWidget_Dates.setDragDropOverwriteMode(False)
self.tableWidget_Dates.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.tableWidget_Dates.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.tableWidget_Dates.setObjectName("tableWidget_Dates")
self.tableWidget_Dates.setColumnCount(2)
self.tableWidget_Dates.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_Dates.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_Dates.setHorizontalHeaderItem(1, item)
self.gridLayout_3.addWidget(self.tableWidget_Dates, 1, 0, 1, 1)
self.gridLayout.addWidget(self.groupBox_DueDates, 5, 1, 1, 1)
self.groupBox = QtWidgets.QGroupBox(Add_Projects)
self.groupBox.setObjectName("groupBox")
self.gridLayout_4 = QtWidgets.QGridLayout(self.groupBox)
self.gridLayout_4.setObjectName("gridLayout_4")
self.text_Contact_name = QtWidgets.QLabel(self.groupBox)
self.text_Contact_name.setObjectName("text_Contact_name")
self.gridLayout_4.addWidget(self.text_Contact_name, 0, 0, 1, 1)
self.checkBox = QtWidgets.QCheckBox(self.groupBox)
self.checkBox.setObjectName("checkBox")
self.gridLayout_4.addWidget(self.checkBox, 0, 7, 1, 1)
self.text_Contact_PhoneNumber = QtWidgets.QLabel(self.groupBox)
self.text_Contact_PhoneNumber.setObjectName("text_Contact_PhoneNumber")
self.gridLayout_4.addWidget(self.text_Contact_PhoneNumber, 0, 2, 1, 1)
self.UserInput_Main_Contact_Name = QtWidgets.QLineEdit(self.groupBox)
self.UserInput_Main_Contact_Name.setObjectName("UserInput_Main_Contact_Name")
self.gridLayout_4.addWidget(self.UserInput_Main_Contact_Name, 0, 1, 1, 1)
self.text_Email = QtWidgets.QLabel(self.groupBox)
self.text_Email.setObjectName("text_Email")
self.gridLayout_4.addWidget(self.text_Email, 3, 0, 1, 1)
self.label = QtWidgets.QLabel(self.groupBox)
self.label.setObjectName("label")
self.gridLayout_4.addWidget(self.label, 1, 0, 1, 1)
self.label_2 = QtWidgets.QLabel(self.groupBox)
self.label_2.setObjectName("label_2")
self.gridLayout_4.addWidget(self.label_2, 1, 2, 1, 1)
self.UserInput_Title = QtWidgets.QLineEdit(self.groupBox)
self.UserInput_Title.setObjectName("UserInput_Title")
self.gridLayout_4.addWidget(self.UserInput_Title, 1, 1, 1, 1)
self.tableWidget_Contacts = QtWidgets.QTableWidget(self.groupBox)
self.tableWidget_Contacts.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.tableWidget_Contacts.setDragDropOverwriteMode(False)
self.tableWidget_Contacts.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.tableWidget_Contacts.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.tableWidget_Contacts.setObjectName("tableWidget_Contacts")
self.tableWidget_Contacts.setColumnCount(6)
self.tableWidget_Contacts.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_Contacts.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_Contacts.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_Contacts.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_Contacts.setHorizontalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_Contacts.setHorizontalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_Contacts.setHorizontalHeaderItem(5, item)
self.gridLayout_4.addWidget(self.tableWidget_Contacts, 7, 1, 1, 6)
self.UserInput_Company = QtWidgets.QLineEdit(self.groupBox)
self.UserInput_Company.setObjectName("UserInput_Company")
self.gridLayout_4.addWidget(self.UserInput_Company, 1, 3, 1, 4)
self.UserInput_Email = QtWidgets.QLineEdit(self.groupBox)
self.UserInput_Email.setObjectName("UserInput_Email")
self.gridLayout_4.addWidget(self.UserInput_Email, 3, 1, 1, 6)
self.UserInput_Main_Contact_PhoneNumber = QtWidgets.QLineEdit(self.groupBox)
self.UserInput_Main_Contact_PhoneNumber.setObjectName("UserInput_Main_Contact_PhoneNumber")
self.gridLayout_4.addWidget(self.UserInput_Main_Contact_PhoneNumber, 0, 3, 1, 4)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_6.addItem(spacerItem2)
self.Button_Add_To_Table_Contacts = QtWidgets.QPushButton(self.groupBox)
self.Button_Add_To_Table_Contacts.setObjectName("Button_Add_To_Table_Contacts")
self.horizontalLayout_6.addWidget(self.Button_Add_To_Table_Contacts)
self.Button_Contacts_Update_Table = QtWidgets.QPushButton(self.groupBox)
self.Button_Contacts_Update_Table.setObjectName("Button_Contacts_Update_Table")
self.horizontalLayout_6.addWidget(self.Button_Contacts_Update_Table)
self.Button_Contacts_Remove = QtWidgets.QPushButton(self.groupBox)
self.Button_Contacts_Remove.setObjectName("Button_Contacts_Remove")
self.horizontalLayout_6.addWidget(self.Button_Contacts_Remove)
self.gridLayout_4.addLayout(self.horizontalLayout_6, 6, 0, 1, 8)
self.gridLayout.addWidget(self.groupBox, 4, 1, 1, 1)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.text_Project_Folder = QtWidgets.QLabel(Add_Projects)
self.text_Project_Folder.setObjectName("text_Project_Folder")
self.horizontalLayout_3.addWidget(self.text_Project_Folder)
self.UserInput_Project_Folder = QtWidgets.QLineEdit(Add_Projects)
self.UserInput_Project_Folder.setObjectName("UserInput_Project_Folder")
self.horizontalLayout_3.addWidget(self.UserInput_Project_Folder)
self.Button_Browse = QtWidgets.QPushButton(Add_Projects)
self.Button_Browse.setObjectName("Button_Browse")
self.horizontalLayout_3.addWidget(self.Button_Browse)
self.gridLayout.addLayout(self.horizontalLayout_3, 6, 1, 1, 1)
self.retranslateUi(Add_Projects)
QtCore.QMetaObject.connectSlotsByName(Add_Projects)
def retranslateUi(self, Add_Projects):
_translate = QtCore.QCoreApplication.translate
Add_Projects.setWindowTitle(_translate("Add_Projects", "Add a New Project :-)"))
self.text_Additional_Comments.setText(_translate("Add_Projects", "Addtional Comments:"))
self.text_Project_Number.setText(_translate("Add_Projects", "Project Number"))
self.text_Project_Name.setText(_translate("Add_Projects", "Project Name"))
self.Button_Add_Project.setText(_translate("Add_Projects", "Add Project"))
self.Button_Cancel.setText(_translate("Add_Projects", "Cancel"))
self.groupBox_DueDates.setTitle(_translate("Add_Projects", "Due Dates"))
self.text_Description.setText(_translate("Add_Projects", "Description:"))
self.text_date.setText(_translate("Add_Projects", "Date:"))
self.Button_Add_To_Table_Due_Dates.setText(_translate("Add_Projects", "Add to Table"))
self.Button_Dates_Update_Table.setText(_translate("Add_Projects", "Update Table"))
self.Button_Dates_Remove.setText(_translate("Add_Projects", "Remove"))
item = self.tableWidget_Dates.horizontalHeaderItem(0)
item.setText(_translate("Add_Projects", "Description"))
item = self.tableWidget_Dates.horizontalHeaderItem(1)
item.setText(_translate("Add_Projects", "Due Date"))
self.groupBox.setTitle(_translate("Add_Projects", "Contacts"))
self.text_Contact_name.setText(_translate("Add_Projects", "Contact Name:"))
self.checkBox.setText(_translate("Add_Projects", "Main?"))
self.text_Contact_PhoneNumber.setText(_translate("Add_Projects", "Contact Phone Number:"))
self.text_Email.setText(_translate("Add_Projects", "Email:"))
self.label.setText(_translate("Add_Projects", "Title:"))
self.label_2.setText(_translate("Add_Projects", "Company:"))
item = self.tableWidget_Contacts.horizontalHeaderItem(0)
item.setText(_translate("Add_Projects", "Contact Name:"))
item = self.tableWidget_Contacts.horizontalHeaderItem(1)
item.setText(_translate("Add_Projects", "Contact Phone Number:"))
item = self.tableWidget_Contacts.horizontalHeaderItem(2)
item.setText(_translate("Add_Projects", "Title:"))
item = self.tableWidget_Contacts.horizontalHeaderItem(3)
item.setText(_translate("Add_Projects", "Company"))
item = self.tableWidget_Contacts.horizontalHeaderItem(4)
item.setText(_translate("Add_Projects", "Email"))
item = self.tableWidget_Contacts.horizontalHeaderItem(5)
item.setText(_translate("Add_Projects", "Main Contact"))
self.Button_Add_To_Table_Contacts.setText(_translate("Add_Projects", "Add to Table"))
self.Button_Contacts_Update_Table.setText(_translate("Add_Projects", "Update Table"))
self.Button_Contacts_Remove.setText(_translate("Add_Projects", "Remove"))
self.text_Project_Folder.setText(_translate("Add_Projects", "Project Folder:"))
self.Button_Browse.setText(_translate("Add_Projects", "Browse"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Add_Projects = QtWidgets.QWidget()
ui = Ui_Add_Projects()
ui.setupUi(Add_Projects)
Add_Projects.show()
sys.exit(app.exec_())
| [
"[email protected]"
] | |
1a3d25ee2ab2d6274bd90c30781999f1a956cbdb | 61afd923551491846ae827821f55c4fb5fd04c98 | /packages/levylab_lib_lockin_multichannel/levylab_lib_lockin_multichannel-2.14.0.59.spec | d493c1230e5f712ce6a5b1e7090368ee412a0a98 | [
"BSD-3-Clause"
] | permissive | laserengineer/levylabpitt.github.io | b74b711aff2a5eb1b46f880a1071ac0873f1a9ac | cdf9aeb6faaf136211291ce2232c239229d85bbe | refs/heads/master | 2023-04-29T02:36:48.736236 | 2021-05-14T19:20:40 | 2021-05-14T19:20:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37,044 | spec | [Package]
Name="levylab_lib_lockin_multichannel"
Version="2.14.0.59"
Release=""
ID=3e298bc47804f097168f0d0fcf0f1965
File Format="vip"
Format Version="2017"
Display Name="Multichannel Lockin"
[Description]
Description="Multichannel Lockin for National Instruments' Dynamic Signal Acquisition hardware (4431, 4461, 4462). This version is configured to handle multiple cards (up to 16) for simultaneous, synchronized AI/AO. You can configure a number of analog outputs (up to 32) to output sine, square, sawtooth, or triangle functions with DC offsets. Each of the analog inputs (up to 32) can be demodulated at multiple frequencies."
Summary=""
License="BSD-3"
Copyright="Copyright (c) 2021, LevyLab"
Distribution=""
Vendor="LevyLab"
URL=""
Packager="Patrick Irvin"
Demo="FALSE"
Release Notes="[2.14.0]\0D\0A[beta build]\0A- Updated AO Channels, AI Channels, Reference Channels datatypes: Array of Clusters instead of Cluster of Arrays\0A- Updated Lockin Results datatyp: Dictionary; APIs are provided for parsing the dicrtionary\0A- Each reference channel has independent control of time constant (Maximum of 4 reference channels)\\\0A- Individual choice for demodulating each AI Channel (e.g., demodulate each AI with choice of Reference channel, no demodulation, or all demodulation channels)\0A- Reset phase option: align AO and Reference phase without resetting DAQ tasks\0A- UI improvements"
System Package="FALSE"
Sub Package="FALSE"
License Agreement="TRUE"
[LabVIEW]
close labview before install="FALSE"
restart labview after install="FALSE"
skip mass compile after install="FALSE"
[Platform]
Exclusive_LabVIEW_Version="LabVIEW>=16.0"
Exclusive_LabVIEW_System="ALL"
Exclusive_OS="ALL"
[Script VIs]
PreInstall=""
PostInstall=""
PreUninstall=""
PostUninstall=""
Verify=""
PreBuild=""
PostBuild=""
[Dependencies]
AutoReqProv=FALSE
Requires="jdp_science_postgresql>=0.1.1.10,jki_lib_json_serialization>=1.1.10.37,jki_lib_serialization>=1.0.1.14,jki_lib_state_machine>=2018.0.7.45,jki_lib_unicode>=1.0.0.7,jki_statemachineobjects>=1.3.0.56,labview-zmq>=3.5.1.109,lava_lib_ui_tools>=1.4.1.74,levylab_lib_control_vi>=1.3.0.11,levylab_lib_fileutilities>=1.2.6.13,levylab_lib_graph_utilities>=2.1.6.9,levylab_lib_lvtoitx>=3.0.6.14,levylab_lib_postgresql>=1.3.3.21,levylab_lib_voltage_update>=1.0.3.5,levylab_lib_xy_utilities>=1.4.0.17,mgi_lib_1d_array>=1.0.2.3,mgi_lib_application_control>=1.1.1.10,mgi_lib_cluster>=1.1.0.1,mgi_lib_error_handling>=1.1.1.3,mgi_lib_error_reporter>=1.0.2.5,mgi_lib_file>=1.1.0.4,mgi_lib_numeric>=1.1.0.2,mgi_lib_picture_&_image>=1.0.2.1,mgi_lib_read_write_anything>=2.1.4.4,mgi_lib_string>=1.1.1.5,mgi_lib_timing>=1.1.0.2,national_instruments_lib_guid_generator>=1.0.2.3,ni_lib_stm>=3.1.0.9,oglib_appcontrol>=4.1.0.7,oglib_array>=4.1.1.14,oglib_dictionary>=4.0.0.4,oglib_error>=4.2.0.23,oglib_file>=4.0.1.22,oglib_lvdata>=5.0.0.27,oglib_lvzip>=4.0.1,oglib_numeric>=4.1.0.8,oglib_string>=5.0.0.25,oglib_time>=4.0.1.3,oglib_variantconfig>=4.0.0.5,wireflow_ab_lib_wf_progressbar>=1.0.2.56"
Conflicts=""
[Activation]
License File=""
Licensed Library=""
[Files]
Num File Groups="3"
Sub-Packages=""
Namespaces=""
[File Group 0]
Target Dir="<application>"
Replace Mode="Always"
Num Files=359
File 0="user.lib/LevyLab/Lockin-Multichannel/.dragon"
File 1="user.lib/LevyLab/Lockin-Multichannel/Multichannel Lockin.lvproj"
File 2="user.lib/LevyLab/Lockin-Multichannel/Multichannel Lockin.vi"
File 3="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/4461-SIMULATEDDEVICES.NCE"
File 4="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Lockin.DAQ.lvclass"
File 5="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Lockin.DAQ.ResetPhase.vi"
File 6="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Lockin.DAQ.setAI.vi"
File 7="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Process.vi"
File 8="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Waveguide Model/Waveguide Model--Cluster.ctl"
File 9="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Waveguide Model/Waveguide Model.vi"
File 10="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/446xAI-enum.ctl"
File 11="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/446xAO-enum.ctl"
File 12="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/AI Prefilter-cluster.ctl"
File 13="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/AI Simulation-enum.ctl"
File 14="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/AI-cluster.ctl"
File 15="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/AI.config(UI)-array.ctl"
File 16="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/AI.config(UI)-cluster.ctl"
File 17="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/AI.config-array.ctl"
File 18="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/AI.config-cluster.ctl"
File 19="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/AI.coupling-enum.ctl"
File 20="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/AmplifierType-enum.ctl"
File 21="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/AO-cluster.ctl"
File 22="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/AO.config(UI)-array.ctl"
File 23="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/AO.config(UI)-cluster.ctl"
File 24="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/AO.config-array.ctl"
File 25="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/AO.config-cluster.ctl"
File 26="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/AO_Channel-Numeric.ctl"
File 27="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/Cal Info - Cluster.ctl"
File 28="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/DAQType-enum.ctl"
File 29="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/Front Panel Cluster.ctl"
File 30="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/Function+DC-enum.ctl"
File 31="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/Function-enum.ctl"
File 32="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/INI-enum.ctl"
File 33="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/PrivateEvents--Cluster.ctl"
File 34="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/PrivateEvents--Lockin.DAQ.autoContigureDAQ.ctl"
File 35="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/PrivateEvents--Lockin.DAQ.measureOffset.ctl"
File 36="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/PrivateEvents--Lockin.DAQ.Reset Phase.ctl"
File 37="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/PrivateEvents--Lockin.DAQ.setAI.ctl"
File 38="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/PrivateEvents--Lockin.DAQ.setAIconfig.ctl"
File 39="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/PrivateEvents--Lockin.DAQ.setAO.ctl"
File 40="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/PrivateEvents--Lockin.DAQ.setAOconfig.ctl"
File 41="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/PrivateEvents--Lockin.DAQ.setInputGain.ctl"
File 42="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/PrivateEvents--Lockin.DAQ.setOutputGain.ctl"
File 43="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/PrivateEvents--Lockin.DAQ.setPreFilter.ctl"
File 44="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/PrivateEvents--Lockin.DAQ.setREF.ctl"
File 45="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/PrivateEvents--Lockin.DAQ.setSampling.ctl"
File 46="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/PrivateEvents--Lockin.DAQ.setState.ctl"
File 47="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/PrivateEvents--Lockin.DAQ.setSweep.ctl"
File 48="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/PrivateEvents--Lockin.DAQ.zeroOffset.ctl"
File 49="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/PrivateEvents--LockinDAQ.Calibrate.ctl"
File 50="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/PrivateEvents--LockinDAQ.setCalibration.ctl"
File 51="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/PublicEvents--Cluster.ctl"
File 52="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/PublicEvents--Lockin.DAQ.getAI.ctl"
File 53="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/PublicEvents--Lockin.DAQ.getAO.ctl"
File 54="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/PublicEvents--Lockin.DAQ.getAOChannels.ctl"
File 55="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/PublicEvents--Lockin.DAQ.getCalibration.ctl"
File 56="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/PublicEvents--Lockin.DAQ.getDAQconfig.ctl"
File 57="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/PublicEvents--Lockin.DAQ.getDAQState.ctl"
File 58="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/PublicEvents--Lockin.DAQ.getREF.ctl"
File 59="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/PublicEvents--Lockin.DAQ.getREFChannels.ctl"
File 60="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/PublicEvents--Lockin.DAQ.getResults.ctl"
File 61="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/PublicEvents--Lockin.DAQ.getSweepResults.ctl"
File 62="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/PublicEvents--Lockin.DAQ.getSweepWaveforms.ctl"
File 63="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/PublicEvents--LockinDAQ.Calibrate.ctl"
File 64="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/REF-cluster.ctl"
File 65="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/REF_Channel-Numeric.ctl"
File 66="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/Results--Cluster.ctl"
File 67="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/Sample Mode--Ring.ctl"
File 68="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/Sampling.ctl"
File 69="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/Set State-enum.ctl"
File 70="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/State-enum.ctl"
File 71="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/Sweep Configuration--Cluster.ctl"
File 72="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/Sweep Pattern--Enum.ctl"
File 73="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/SweepResults--Cluster.ctl"
File 74="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Typedefs/XP Style VISA Control.ctl"
File 75="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Tests/Test LockinEngine.vi"
File 76="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Tests/Test PLL.vi"
File 77="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Tests/Test Sweep.vi"
File 78="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Sweep/AddParametersToSweepResults.vi"
File 79="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Sweep/Append Multiple Waveforms.vi"
File 80="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Sweep/Append Waveforms Sweep.vi"
File 81="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Sweep/Calcualte N_total and Updates.vi"
File 82="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Sweep/ConcatenateSweepResults.vi"
File 83="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Sweep/Create DC Waveforms.vi"
File 84="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Sweep/Generate Sweep Pattern.vi"
File 85="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Sweep/Generator DC Sweep Manager.vi"
File 86="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Sweep/Get Sweep Pattern Subset.vi"
File 87="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Sweep/Sweep Back and Forth.vi"
File 88="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Sweep/Test New Sweep.vi"
File 89="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/private/Lockin.DAQ.getAI.vi"
File 90="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/private/Lockin.DAQ.getAO.vi"
File 91="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/private/Lockin.DAQ.getAOChannels.vi"
File 92="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/private/Lockin.DAQ.getCalibration.vi"
File 93="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/private/Lockin.DAQ.getDAQconfig.vi"
File 94="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/private/Lockin.DAQ.getDAQState.vi"
File 95="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/private/Lockin.DAQ.getIVSweepResults.vi"
File 96="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/private/Lockin.DAQ.GetPrivateEvents.vi"
File 97="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/private/Lockin.DAQ.getREF.vi"
File 98="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/private/Lockin.DAQ.getREFChannels.vi"
File 99="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/private/Lockin.DAQ.getResults.vi"
File 100="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/private/Lockin.DAQ.getSweepResults.vi"
File 101="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/private/Tree.vi"
File 102="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/private/Wait Until SMO Started.vi"
File 103="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Methods (overrides)/CreatePrivateEvents.vi"
File 104="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Methods (overrides)/CreatePublicEvents.vi"
File 105="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Methods (overrides)/DestroyPrivateEvents.vi"
File 106="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/Methods (overrides)/DestroyPublicEvents.vi"
File 107="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/4461 - Create AO DC waveforms.vi"
File 108="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/4461 - Create AO waveforms.vi"
File 109="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/4461 - Create Reference Waveforms.vi"
File 110="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/4461 Autophase.vi"
File 111="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/4461 Export Triggers.vi"
File 112="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/4461 Replace AO Ch.vi"
File 113="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/44xx AI Min and Max.vi"
File 114="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/44xx AO Min and Max.vi"
File 115="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/44xx Ref Clk.vi"
File 116="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Auto Configure DAQ.vi"
File 117="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Example-Waveform Circular Buffer.vi"
File 118="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Frequency Mixer.vi"
File 119="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Get Dev Product Type.vi"
File 120="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Get Master and Slave Tasks.vi"
File 121="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Get Terminal Name with Device Prefix.vi"
File 122="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Get Waveform AI Channel.vi"
File 123="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Get Waveform Quadrature.vi"
File 124="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Get Waveform Reference Channel.vi"
File 125="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Lockin Engine.vi"
File 126="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Lockin.DAQ.Calibrate.vi"
File 127="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Lockin.DAQ.Clear.vi"
File 128="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Lockin.DAQ.Create.AI Task.vi"
File 129="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Lockin.DAQ.Create.AO Task.vi"
File 130="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Lockin.DAQ.Create.Sample Clock.vi"
File 131="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Lockin.DAQ.Create.vi"
File 132="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Lockin.DAQ.DAQmx Read.vi"
File 133="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Lockin.DAQ.DAQmx Write.vi"
File 134="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Lockin.DAQ.ErrorHandler.vi"
File 135="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Lockin.DAQ.EventRegistration.vi"
File 136="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Lockin.DAQ.Generate.Waveforms.vi"
File 137="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Lockin.DAQ.Get Calibration.vi"
File 138="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Lockin.DAQ.Read AI and Write AO.vi"
File 139="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Lockin.DAQ.Read AI.vi"
File 140="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Lockin.DAQ.Simulate Noisy AI.vi"
File 141="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Lockin.DAQ.Simulate Waveguide.vi"
File 142="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Lockin.DAQ.Start.AI.vi"
File 143="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Lockin.DAQ.Start.AO.vi"
File 144="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Lockin.DAQ.Start.vi"
File 145="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Lockin.DAQ.Stop.vi"
File 146="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Lockin.DAQ.TimedLoop.vi"
File 147="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Lockin.DAQ.Trigger.vi"
File 148="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Lockin.DAQ.Write AO.vi"
File 149="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Low Pass Filter subVI.vi"
File 150="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Low Pass Filter.vi"
File 151="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Multitone Eval.vi"
File 152="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/PLL_PID.vi"
File 153="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Set Waveform AI Channel.vi"
File 154="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Set Waveform Quadrature.vi"
File 155="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Set Waveform Reference Channel.vi"
File 156="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Test Multitone.vi"
File 157="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/Waveform Circular Buffer.vi"
File 158="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/support/Clip AO Waveforms.vi"
File 159="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/support/decimate_waveforms.vi"
File 160="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/support/Divide Input Gain.vi"
File 161="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/support/Divide Output Gain.vi"
File 162="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/support/FPDAQtoDAQstrings.vi"
File 163="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/support/Fs In Range.vi"
File 164="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/support/Fs_to_FilterDelay.vi"
File 165="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/support/getTimeWithError.vi"
File 166="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/support/lockin - add phase element.vi"
File 167="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/support/Lockin - Format Results.vi"
File 168="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/support/lockin - replace AO DC.vi"
File 169="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/support/lockin - replace AO Sweep Indicator.vi"
File 170="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/support/lockin - Restore Saved AO.vi"
File 171="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/support/lockin - Save AO.vi"
File 172="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/support/lockin - Set REF f equal AO f.vi"
File 173="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/support/lockin - Set REF phase equals zero.vi"
File 174="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/support/lockin - zero amplitude.vi"
File 175="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/support/ms to Hz.vi"
File 176="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/support/Multiply Input Gain.vi"
File 177="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/support/Multiply Output Gain.vi"
File 178="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/support/Prefilter_60Hz_Notch.vi"
File 179="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/support/Prefilter_LP.vi"
File 180="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/support/read_lockin_FP_DSC.vi"
File 181="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/support/Replace timestamp.vi"
File 182="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/support/SI prefix.vi"
File 183="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/support/Simple PID Array.vi"
File 184="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/support/Split 1D ARRAY in half.vi"
File 185="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/support/Subtract DAQ Offset.vi"
File 186="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/support/update_rate_and_time.vi"
File 187="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/DAQ/support/write_lockin_FP_DSC.vi"
File 188="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/API (Class)/Write AI Configuration.vi"
File 189="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/API (Class)/Write AO Configuration.vi"
File 190="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/API (Class)/Write buffer size (S).vi"
File 191="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/API (Class)/Write Channels_ AO.vi"
File 192="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/API (Class)/Write Channels_ Reference.vi"
File 193="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/API (Class)/Write Gain.vi"
File 194="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/API (Class)/Write Ref TC and Order.vi"
File 195="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/API (Class)/Write sample mode.vi"
File 196="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/API (Class)/Write sampling.vi"
File 197="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/API/Lockin.DAQ.autoConfigureDAQ.vi"
File 198="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/API/Lockin.DAQ.GetPublicEvents.vi"
File 199="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/API/Lockin.DAQ.measureOffset.vi"
File 200="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/API/Lockin.DAQ.setAIconfig.vi"
File 201="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/API/Lockin.DAQ.setAO.vi"
File 202="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/API/Lockin.DAQ.setAOconfig.vi"
File 203="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/API/Lockin.DAQ.setCalibration.vi"
File 204="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/API/Lockin.DAQ.setInputGain.vi"
File 205="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/API/Lockin.DAQ.setOutputGain.vi"
File 206="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/API/Lockin.DAQ.setPreFilter.vi"
File 207="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/API/Lockin.DAQ.setREF.vi"
File 208="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/API/Lockin.DAQ.setSampling.vi"
File 209="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/API/Lockin.DAQ.setstate.vi"
File 210="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/API/Lockin.DAQ.setSweep.vi"
File 211="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/API/Lockin.DAQ.TestLauncher.vi"
File 212="user.lib/LevyLab/Lockin-Multichannel/Lockin.DAQ/API/Lockin.DAQ.zeroOffset.vi"
File 213="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockinl.UI/Instrument.Lockin.UI.lvclass"
File 214="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockinl.UI/Instrument.Lockin.UI.TestLauncher.vi"
File 215="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockinl.UI/Process.vi"
File 216="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockinl.UI/Typedefs/AI Channel--Cluster.ctl"
File 217="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockinl.UI/Typedefs/AO Channel--Cluster.ctl"
File 218="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockinl.UI/Typedefs/Device Configuration--Cluster.ctl"
File 219="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockinl.UI/Typedefs/Display-Enum.ctl"
File 220="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockinl.UI/Typedefs/Lockin.Commands--Enum.ctl"
File 221="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockinl.UI/Typedefs/REF Channel--Cluster.ctl"
File 222="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockinl.UI/Typedefs/Sweep Channel--Cluster.ctl"
File 223="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockinl.UI/Typedefs/Tiny Ring.ctl"
File 224="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockinl.UI/Typedefs/X Axis-Enum.ctl"
File 225="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockinl.UI/Typedefs/Y Axis Input-Enum.ctl"
File 226="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockinl.UI/Typedefs/Y Axis Output-Enum.ctl"
File 227="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockinl.UI/private/X Axis WFM Graph.vi"
File 228="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockinl.UI/private/Y Axis Output WFM Graph.vi"
File 229="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/Instrument.Lockin.lvclass"
File 230="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/Instrument.Lockin.TestLauncher.vi"
File 231="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/Process.vi"
File 232="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/Typedefs/configSweepFile--cluster.ctl"
File 233="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/Typedefs/Lockin.Configuration--Cluster.ctl"
File 234="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/Typedefs/Lockin.getAll--Cluster.ctl"
File 235="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/Typedefs/Offset Mode--Enum.ctl"
File 236="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/Typedefs/Reference--Array of Cluster.ctl"
File 237="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/Typedefs/Results Type--Enum.ctl"
File 238="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/Typedefs/Sample Mode--Enum.ctl"
File 239="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/Private/Coerce Timing.vi"
File 240="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/Private/formatSweepResults.vi"
File 241="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/Private/Lockin.Client.vi"
File 242="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/Private/Lockin.Command Enum to String.vi"
File 243="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/Private/Lockin.Configuration Window.vi"
File 244="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/Private/Lockin.Read Configuration File.vi"
File 245="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/Private/Lockin.Read Configuration.vi"
File 246="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/Private/Lockin.Write Configuration File.vi"
File 247="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/Private/Lockin.Write Configuration.vi"
File 248="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/Private/Parse Result Key.vi"
File 249="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/Private/Retry Timeout.vi"
File 250="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/Private/Update AO Parameter.vi"
File 251="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/Private/Update REF Parameter.vi"
File 252="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/Overrides/Configure Instrument.vi"
File 253="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/Overrides/Get SMO Name.vi"
File 254="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/Overrides/Get SMO Port.vi"
File 255="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/Overrides/Get SMO Public API.vi"
File 256="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/Overrides/getAll.vi"
File 257="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/Overrides/Handle Command.vi"
File 258="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-public/AI Array Add or Remove.vi"
File 259="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-public/AI config not UI to UI.vi"
File 260="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-public/AI config UI to not UI.vi"
File 261="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-public/AO Array Add or Remove.vi"
File 262="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-public/AO config not UI to UI.vi"
File 263="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-public/AO config UI to not UI.vi"
File 264="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-public/config not UI to UI (AO and AI).vi"
File 265="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-public/config UI to not UI (AO and AI).vi"
File 266="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-public/Initialize REF Channels.vi"
File 267="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-public/REF Array Add or Remove.vi"
File 268="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-private/Channel String to Number of Channels.vi"
File 269="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-private/decimate_waveforms.vi"
File 270="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-private/Display n or dt.vi"
File 271="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-private/Divide by Gain.vi"
File 272="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-private/FloatApprox.vi"
File 273="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-private/FloatApproxPoint1Percent.vi"
File 274="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-private/format display.vi"
File 275="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-private/Fs and Ns for integer periods.vi"
File 276="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-private/Get AI REF (f) StringsAndValues.vi"
File 277="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-private/Get AI REF StringsAndValues.vi"
File 278="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-private/Get AI StringsAndValues.vi"
File 279="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-private/Get AO StringsAndValues.vi"
File 280="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-private/Get REF StringsAndValues.vi"
File 281="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-private/Initialize AI Channels.vi"
File 282="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-private/Initialize AO Channels.vi"
File 283="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-private/Initialize Input Gain.vi"
File 284="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-private/Initialize Ouput Gain.vi"
File 285="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-private/LegendWaveformChart.vi"
File 286="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-private/LegendWaveformGraph.vi"
File 287="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-private/LegendWDTWaveformGraph.vi"
File 288="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-private/LegendXYGraph.vi"
File 289="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-private/Limit AO Amplitude.vi"
File 290="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-private/PS Chart.vi"
File 291="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-private/Set all f same.vi"
File 292="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-private/Spinner.vi"
File 293="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/DAQ-private/State History.vi"
File 294="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API (Class)/Write AI Configuration.vi"
File 295="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API (Class)/Write AI WFM.vi"
File 296="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API (Class)/Write AO Configuration.vi"
File 297="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API (Class)/Write AO WFM.vi"
File 298="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API (Class)/Write Channels_ AO.vi"
File 299="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API (Class)/Write Channels_ REF.vi"
File 300="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API (Class)/Write Input Gain.vi"
File 301="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API (Class)/Write Output Gain.vi"
File 302="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API (Class)/Write REF WFM.vi"
File 303="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API (Class)/Write Results.vi"
File 304="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API (Class)/Write sampling.vi"
File 305="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/getAI.vi"
File 306="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/getAO.vi"
File 307="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/getAOconfig.vi"
File 308="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/getAutoSampling.vi"
File 309="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/getAUX.vi"
File 310="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/getInputGain.vi"
File 311="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/getIVmode.vi"
File 312="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/getOutputGain.vi"
File 313="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/getREFconfig.vi"
File 314="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/getRefreshTime.vi"
File 315="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/getResults.vi"
File 316="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/getSampling.vi"
File 317="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/getStatus.vi"
File 318="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/getSweepResults.vi"
File 319="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/getSweepWaveforms.vi"
File 320="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/getTrigger.vi"
File 321="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/Offset.vi"
File 322="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/Open.vi"
File 323="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/Parse Results (all).vi"
File 324="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/Parse Results (single).vi"
File 325="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/reset.vi"
File 326="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/setAO.vi"
File 327="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/setAO_Amp.vi"
File 328="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/setAO_DC.vi"
File 329="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/setAO_f.vi"
File 330="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/setAO_function.vi"
File 331="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/setAO_phi.vi"
File 332="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/setAutoSampling.vi"
File 333="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/setAUX.vi"
File 334="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/setDAQ.vi"
File 335="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/setInputGain.vi"
File 336="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/setIVmodeConfig.vi"
File 337="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/setOutputGain.vi"
File 338="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/setREF.vi"
File 339="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/setREF_f.vi"
File 340="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/setREF_phi.vi"
File 341="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/setSampling.vi"
File 342="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/setSweepConfiguration.vi"
File 343="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/startIVAndWait.vi"
File 344="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/startIVSweepAndWait.vi"
File 345="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/startSweep.vi"
File 346="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/startSweepAndWait.vi"
File 347="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/stopSweep.vi"
File 348="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/trigger.vi"
File 349="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/waitForNewResults.vi"
File 350="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/waitForNewWaveforms.vi"
File 351="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/waitStatusCreated.vi"
File 352="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/waitStatusIdle.vi"
File 353="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/waitStatusStarted.vi"
File 354="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/waitStatusStopped.vi"
File 355="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/waitStatusSweepingStarted.vi"
File 356="user.lib/LevyLab/Lockin-Multichannel/Instrument.Lockin/API/waitStatusSweepingStopped.vi"
File 357="examples/LevyLab/Multichannel Lockin/Examples/Example_IV_Curves.vi"
File 358="examples/LevyLab/Multichannel Lockin/Examples/Example_Lockin.vi"
[File Group 1]
Target Dir="<menus>/Categories/LevyLab"
Replace Mode="Always"
Num Files=2
File 0="_functions_levylab_lib_lockin_multichannel_1.mnu"
File 1="functions_LevyLab_lib_Lockin_Multichannel.mnu"
[File Group 2]
Target Dir="<menus>/Categories/LevyLab"
Replace Mode="If Newer"
Num Files=1
File 0="dir.mnu"
# ===========================================================================
# File: v01/sheets03.py  (repo: ernbilen/POS, no license declared)
# ===========================================================================
import tkinter as tk
from tkinter import filedialog
import csv
from generate_pdf import *
#-----------LOADS THE OUTPUT PATH-------------#
def loadConfig():
    with open('config.csv', 'r') as file:
        reader = csv.reader(file)
        for row in reader:
            # skip blank rows before indexing into them
            if row and row[0]:
                return row[1]
    return ""
#########################################################################################################
# GLOBAL VARIABLES #
#########################################################################################################
entries = [['01','01 NOV 19','02 NOV 19','PEAR','$50',2,'1234567',100,20,120],
['02','03 NOV 19','04 NOV 19','CLOGGY','$50',2,'1234568',100,20,120],
['03','05 NOV 19','06 NOV 19','PYTHON','$50',2,'1234569',100,20,120],
['04','07 NOV 19','08 NOV 19','WEB','$50',2,'1234570',100,20,120],
['05','09 NOV 19','10 NOV 19','GLOBB','$50',2,'1234571',100,20,120]]
output_path = loadConfig()
extra_lines = []
gselected = None
PO_number = None
##########################################################################################################
captions = ['ID','Start date','End date', 'Company', 'Rate','Days', 'Invoice No','Sum','VAT','Total']
#########################################################################################################
# DICTIONARY VALUES #
#########################################################################################################
dd = {'lm':100,
'rm' : 100,
'bh' : 100,
'rad' : 10,
'cap_font' : 'Helvetica 10',
'cap_font_bold' : 'Helvetica 10 bold',
'txt_font' : 'Helvetica 12 bold',
'ttl_font' : 'Helvetica 20 bold',
'bttn_font' : 'Helvetica 12 bold',
'ttl_clr' : '#fa4300',
'cap_row' : 10,
'cap_color' : 'gray80',
'cap_high' : 'white',
'cap_ttl' : 'gray70',
'bgclr' : '#2e343d',
'txt_row' : 75,
'txt_clr' : 'gray40',
'rect_bg' : 'white',
'rect_int' : 'gray95',
'rect_high' : '#fac000',
'rect_mover' : '#ffe89c',
'nw': 25,
'b_high': '#41bffa',
'tk_icon' : 'invoices.ico'}
#########################################################################################################
# INITIATE TK WINDOW #
#########################################################################################################
root = tk.Tk()
root.geometry('1400x800+100+100')
root.iconbitmap(dd['tk_icon'])
root.title("Invoice creator")
canvas = tk.Canvas(root,width = 1400, height = 1200, bg=dd['bgclr'])
canvas.pack()
#########################################################################################################
# BUTTON METHODS #
#########################################################################################################
#---------------------------------------CHANGES THE OUTPUT PATH-----------------------------------------#
def change_path():
global output_path
output_path = filedialog.askdirectory(initialdir=output_path)
file = open('config.csv')
lines = file.read().splitlines()
lines[0] = 'Output path,' + output_path
file.close()
file = open('config.csv', 'w')
file.write('\n'.join(lines))
file.close()
#---------------------------------------SPLASH SCREEN---------------------------------------------------#
def splash(message, fontsize = 20):
warning = tk.Toplevel(root, border = 0)
s_w,s_h = 800,200
parent_geo = [int(dim) for dim in root.winfo_geometry().replace('x',' ').replace('+',' ').split()]
w,h = parent_geo[0],parent_geo[1]
x,y = parent_geo[2],parent_geo[3]
warning.geometry('{}x{}+{}+{}'.format(s_w,s_h,x + w//2-s_w//2,y + h//2 - s_h//2))
warning.overrideredirect(True)
warning.resizable(width=False, height=False)
warning_canvas = tk.Canvas(warning,width=1400, height=1200, bg=dd['bgclr'])
warning_canvas.pack()
warning_canvas.bind('<Button-1>', lambda x :warning.destroy())
    x, y = warning_canvas.canvasx(400), warning_canvas.canvasy(100)  # canvasy (not canvasx) for the y coordinate
warning_canvas.create_text(x,y, text = message, font = 'Helvetica {} bold'.format(fontsize), fill = 'white')
warning_canvas.create_text(x,y+85, text = 'Click to close', font = 'Helvetica 8', fill = 'white')
#---------------------------------------LOADS ADDRESSES FROM A CSV FILE--------------------------------#
def loadAddresses():  # Loads addresses from the csv
    addresses = {}
    with open('addresses.csv', 'r') as file:
        reader = csv.reader(file)
        for row in reader:
            # keep only rows that carry both a company name and an address
            if row and len(row) > 1:
                addresses[row[0]] = row[1:]
    return addresses
#---------------------------------------EVENT MANAGER FOR CANVAS---------------------------------------#
def event_manager(event, list_of_objects):
x,y = event.x, event.y
if event.type[0] == '6':
for object in list_of_objects:
bbox = object.bbox
object.on_mouse_off()
x1,y1,x2,y2 = bbox[0],bbox[1],bbox[2],bbox[3],
if x1<=x<=x2 and y1<=y<=y2:
object.on_mouse_over()
return
if event.type[0] == '4':
for object in list_of_objects:
bbox = object.bbox
object.on_mouse_off()
x1,y1,x2,y2 = bbox[0],bbox[1],bbox[2],bbox[3],
if x1<=x<=x2 and y1<=y<=y2:
object.on_click()
return
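# Note on the event.type checks above: tkinter's EventType is a string enum,
# so event.type[0] is '6' for <Motion> and '4' for <ButtonPress>. A clearer,
# equivalent dispatch sketch (assuming a modern tkinter with tk.EventType):
#
#   if event.type == tk.EventType.Motion:         # hover -> highlight
#       ...
#   elif event.type == tk.EventType.ButtonPress:  # click -> callback
#       ...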
#---------------------------------------CREATES A NICE INPUT BOX---------------------------------------#
def add_input_box(p_canvas, left, top, width, height, just='left'):
item_entry = tk.Entry(p_canvas, font='Helvetica 18 bold', background=dd['bgclr'], borderwidth=0, foreground='white', insertbackground='white', justify=just) #dd['bgclr']
item_window = p_canvas.create_window(left+5, top+5, anchor='nw', window=item_entry, width=width-10, height=height-10)
round_rectangle(p_canvas, left, top, left + width, top+height, fill=dd['bgclr'], outline='', width=0)
return item_entry
#---------------------------------------CREATES A LINE OF INPUT BOXES----------------------------------#
def add_input_line(p_canvas, left, top):
input_line = []
round_rectangle(p_canvas,left-5,top-5,780,top+45, fill = '#556070')
input_line.append(add_input_box(p_canvas, left, top, 300, 40))
input_line.append(add_input_box(p_canvas, left+520, top, 50, 40,'right'))
p_canvas.create_text(left + 610,top+22, font = 'Helvetica 18 bold', fill = 'white', text = 'X')
input_line.append(add_input_box(p_canvas, left + 650, top, 100, 40,'right'))
return input_line
#---------------------------------------ADD EXTRA LINES SCREEN-----------------------------------------#
def add_extra_lines_screen():
add_extra = tk.Toplevel(root)
add_extra.iconbitmap(dd['tk_icon'])
add_extra.title("Add extra items")
s_w, s_h = 800, 600
parent_geo = [int(dim) for dim in root.winfo_geometry().replace('x', ' ').replace('+', ' ').split()]
w, h = parent_geo[0], parent_geo[1]
x, y = parent_geo[2], parent_geo[3]
add_extra.geometry('{}x{}+{}+{}'.format(s_w, s_h, x + w // 2 - s_w // 2, y + h // 2 - s_h // 2))
add_extra_canvas = tk.Canvas(add_extra, width=800, height=600, bg=dd['bgclr'])
add_extra_canvas.pack()
add_extra_canvas_objects = []
x,y = add_extra_canvas.canvasx(0),add_extra_canvas.canvasy(0)
inputs = []
    def get_lines(inputs_lst):  # READS THE LINES, DROPS EMPTY OR INCOMPLETE ONES
        read = [[box.get() for box in line] for line in inputs_lst]
        # require all three fields so the int()/float() conversions can't fail
        read = [line for line in read if all(line)]
        read = [(line[0], int(line[1]), float(line[2])) for line in read]
global extra_lines
extra_lines = read
fontsize = 20-len(read)
splash('Items added: \n' + '\n'.join(['{} {} x {}'.format(x,y,z) for x,y,z in read]),fontsize)
add_extra.destroy()
done_button = {'text': 'Done', 'bg': '', 'fcl': 'white', 'size': 40, 'font': 'Helvetica 12 bold',
'callback': lambda: get_lines(inputs), 'hl': 'white', 'hlc': dd['b_high'], 'fhl': 'Helvetica 14 bold', 'fhlc': 'white', 'ftxt': 'Done'}
add_extra_canvas_objects.append(canvas_button(add_extra_canvas,(x+300,y+500,x+500,y+550),done_button))
add_extra_canvas.create_text(x+25,y+25, anchor = 'nw', fill = dd['cap_ttl'], font = 'Helvetica 14 bold', text = 'Items')
add_extra_canvas.create_text(x + 25 + 520, y + 25, anchor='nw', fill=dd['cap_ttl'], font='Helvetica 14 bold', text='Units')
add_extra_canvas.create_text(x + 25 + 650, y + 25, anchor='nw', fill=dd['cap_ttl'], font='Helvetica 14 bold', text='Price/Unit')
#add_extra_canvas.create_text(x + 25 + 650, y + 400, anchor='nw', fill=dd['cap_ttl'], font='Helvetica 14 bold', text='Discount')
for i in range(5):
inputs.append(add_input_line(add_extra_canvas,x+25,y+75+(i*60)))
#inputs.append(add_input_line(add_extra_canvas,x+25,y+75+(6*60)))
add_extra_canvas.bind('<Button-1>', lambda event: event_manager(event, add_extra_canvas_objects) )
add_extra_canvas.bind('<Motion>', lambda event: event_manager(event, add_extra_canvas_objects))
return extra_lines
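    # NOTE on the return above: the Toplevel does not block, so the value
    # handed back is the module-level extra_lines as it stood before the
    # window opened; get_lines() updates the global later via the Done button.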
#----------------SCREEN THAT APPEARS AFTER GENERATE INVOICE BUTTON IS PRESSED--------------------------#
def generate_invoice_screen():
global PO_number
PO_number = None
if gselected == None: # IF NO INVOICE HAS BEEN SELECTED EXIT
splash('Please select an invoice')
return
#####################################################################################
invoice_canvas_items = []
invoice_info = tk.Toplevel(root)
invoice_info.iconbitmap(dd['tk_icon'])
invoice_info.title("Check the info before you proceed")
invoice_info.geometry('1400x800+100+100')
invoice_canvas = tk.Canvas(invoice_info,width=1400, height=1200, bg=dd['bgclr'])
invoice_canvas.pack()
sel = entries[gselected]
dd['nw']=50
header = entry_box(captions, sel, invoice_canvas)
addresses = loadAddresses()
#------------------------------------FUNCTIONS--------------------------------------------------#
def close_invoice_info(): # when back button is pressed
invoice_info.destroy()
    def generate_invoice_call(): # CALLS the generate_invoice() function from generate_pdf.py and closes the window
addresses = loadAddresses()
sel = entries[gselected]
if sel[3] not in addresses.keys():
splash('Please add the company address to the database')
else:
splash('Generating invoice')
generate_invoice(sel,PO_number,extra_lines,output_path)
invoice_canvas.delete('all')
invoice_info.destroy()
def add_address(address): # adds address of a company to the csv file
if address:
selected = entries[gselected]
file = open('addresses.csv', 'a')
file.write("\n{},{}".format(selected[3], address))
file.close()
            splash('Address for {} added'.format(selected[3]))
def addPO(number): # assigns a PO number
global PO_number
if number !='PO NUMBER':
PO_number = number
splash('Purchase number {} added'.format(number))
else: splash('Please enter a PO number')
def add_extra_lines(): # when add custom lines is pressed
add_extra_lines_screen()
invoice_canvas.create_line(700,200,700,700, fill='gray50', width=1, capstyle = 'round') # Separator
########################################################################################################
# ADDRESS SECTION
########################################################################################################
left = 300
invoice_canvas.create_text(left, 250, text='Address', font=dd['ttl_font'], fill = 'gray50', anchor='sw')
invoice_canvas.create_text(left, 325, text=sel[3], font=dd['ttl_font'], fill='White', anchor='sw')
    address_entry = tk.Text(invoice_canvas, font='Helvetica 14 bold', background=dd['bgclr'], borderwidth=0, foreground='white', insertbackground='white')
    # show the stored address when we already have one, otherwise prompt for it
    if sel[3] in addresses:
        address_entry.insert('1.0', '\n'.join(addresses[sel[3]]))
    else:
        address_entry.insert('1.0', 'No address \nfor this company found')
address_window = invoice_canvas.create_window(left+5,355,anchor = 'nw', window = address_entry, width = 295, height=200)
round_rectangle(invoice_canvas, left, 350, left+300, 555, fill='', outline='white', width=1)
add_address_button = {'text': 'Add address', 'bg': '', 'fcl': 'white', 'size': 40, 'font': 'Helvetica 12 bold',
'callback': lambda : add_address(address_entry.get('1.0','end-1c')), 'hl': 'white', 'hlc': dd['b_high'], 'fhl': 'Helvetica 14 bold', 'fhlc': 'white', 'ftxt': 'Add address'}
invoice_canvas_items.append(canvas_button(invoice_canvas,(left,575,left+300,675),add_address_button))
########################################################################################################
# ADD PO SECTION
########################################################################################################
right = 800 # Alignment value
# SECTION TITLE
    invoice_canvas.create_text(right, 320, text='PO number:', font=dd['ttl_font'], fill='gray50', anchor='sw')
    # ENTRY BOX (tk.Entry has no real 'text' option -- Tk silently reads it as
    # an abbreviation of 'textvariable' -- so it is dropped here; the
    # placeholder inserted below must match the 'PO NUMBER' guard in addPO)
    PO_entry = tk.Entry(invoice_canvas, font='Helvetica 22 bold', background=dd['bgclr'], borderwidth=0, foreground='white', justify='center')
    PO_entry.delete(0, 'end')
    PO_entry.insert(0, 'PO NUMBER')
PO_window = invoice_canvas.create_window(right + 5, 355, anchor='nw', window=PO_entry, width=290, height=50)
round_rectangle(invoice_canvas, right, 350, right + 300, 410, fill='', outline='white', width=1)
#
# ADD PO BUTTON
add_po_button = {'text' : 'Add PO', 'bg' :'', 'fcl':'white', 'size' : 40,'font' : 'Helvetica 12 bold',
'callback' : lambda: addPO(PO_entry.get()) , 'hl' : 'white', 'hlc' : dd['b_high'], 'fhl' : 'Helvetica 14 bold', 'fhlc' : 'white', 'ftxt' : 'Add PO'}
invoice_canvas_items.append(canvas_button(invoice_canvas, (right, 575, right+300, 675), add_po_button))
# ADD EXTRA LINES
add_line_button = {'text': 'Add custom lines', 'bg': '', 'fcl': 'white', 'size': 40, 'font': 'Helvetica 12 bold',
'callback': add_extra_lines, 'hl': 'white', 'hlc': dd['b_high'], 'fhl': 'Helvetica 14 bold', 'fhlc': 'white', 'ftxt': 'Add custom lines'}
invoice_canvas_items.append(canvas_button(invoice_canvas, (left,700,right+300,750),add_line_button))
#
########################################################################################################
# BACK AND NEXT BUTTONS
########################################################################################################
next_button = {'text': 'Next', 'bg': '', 'fcl': 'white', 'size': 40, 'font': 'Helvetica 12 bold',
'callback': generate_invoice_call, 'hl': 'white', 'hlc': dd['b_high'], 'fhl': 'Helvetica 14 bold', 'fhlc': 'white', 'ftxt': 'Next'}
invoice_canvas_items.append(canvas_button(invoice_canvas,(right+400,350,right+500,400),next_button))
back_button = {'text': 'Back', 'bg': '', 'fcl': 'white', 'size': 40, 'font': 'Helvetica 12 bold',
'callback': close_invoice_info, 'hl': 'white', 'hlc': dd['b_high'], 'fhl': 'Helvetica 14 bold', 'fhlc': 'white', 'ftxt': 'Back'}
invoice_canvas_items.append(canvas_button(invoice_canvas, (100, 350, 200, 400), back_button))
#########################################################################################################
# BINDINGS
#########################################################################################################
invoice_canvas.bind('<Motion>', lambda event: event_manager(event,invoice_canvas_items))
invoice_canvas.bind('<Button-1>', lambda event: event_manager(event, invoice_canvas_items))
#--------------------------------DRAWS A ROUNDED RECTANGLE-------------------------------------#
def round_rectangle(p_canvas,x1, y1, x2, y2, radius=10, **kwargs):
points = [x1+radius, y1,x1+radius, y1,x2-radius, y1,x2-radius, y1,x2, y1,x2, y1+radius,x2, y1+radius,x2, y2-radius,x2, y2-radius,x2, y2,
x2 - radius, y2,x2-radius, y2,x2-radius, y2,x2-radius, y2,x1+radius, y2,x1+radius, y2,x1, y2,x1, y2-radius,x1, y2-radius,x1, y1+radius,x1, y1+radius,x1, y1]
return p_canvas.create_polygon(points, **kwargs, smooth=True)
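# With smooth=True, Tk fits splines through the duplicated corner points, so
# the polygon above renders as a rectangle whose corners are rounded by
# roughly `radius` pixels. Usage sketch (arbitrary coordinates):
#
#   round_rectangle(canvas, 10, 10, 210, 60, radius=15, fill='white')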
def select(index):
global selected
selected = index
#############################################################################################################
#
# CLASSES GFX ELEMENTS
#
#############################################################################################################
#############################################################################################################
# THIS ONE DISPLAYS INVOICE DATA ON THE FIRST PAGE
#############################################################################################################
class entry_box:
def __init__(self,captions,entry,p_canvas):
f = ['{}']*7 + ['${:.0f}']*3
self.canvas = p_canvas
self.col = [dd['lm']+20,dd['lm']+75,dd['lm']+200,dd['lm']+325,dd['lm']+525,dd['lm']+600,dd['lm']+700,dd['lm']+825,dd['lm']+950,dd['lm']+1075]
self.cap_size = [dd['cap_font']] *9 + [dd['cap_font_bold']]
self.cap_clr = [dd['cap_color']] * 9 + [dd['cap_ttl']]
self.clr = [dd['txt_clr']] * 9 + [dd['ttl_clr']]
self.size = [dd['txt_font']] * 9 + [dd['ttl_font']]
self.static_background = round_rectangle(p_canvas,dd['lm'], dd['nw'], 1300, dd['nw'] + 100, fill=dd['rect_int'])
self.dynamic_background = p_canvas.create_rectangle(self.col[1] - 15, dd['nw'], self.col[-1] - 25, dd['nw'] + 100, fill=dd['rect_bg'], width=0)
self.cap_list = [p_canvas.create_text(self.col[i], dd['nw'] + dd['cap_row'], anchor='nw', font=self.cap_size[i], text=cap,
fill=self.cap_clr[i]) for i, cap in enumerate(captions)]
self.data_list = [
p_canvas.create_text(self.col[i], dd['nw'] + dd['txt_row'], anchor='sw', font=self.size[i], text=f[i].format(data), fill=self.clr[i]) for
i, data in enumerate(entry)]
self.bbox = (dd['lm'], dd['nw'], 1300, dd['nw'] + 100)
self.callback = select
def cap_style(self, color, font):
for cap in self.cap_list[1:-1]:
self.canvas.itemconfig(cap, fill = color, font = font)
def entry_highlight(self, color):
self.canvas.itemconfig(self.dynamic_background, fill = color)
def remove_highlight(self):
self.entry_highlight(dd['rect_bg'])
self.cap_style(dd['cap_color'], dd['cap_font'])
def mover(self, color):
self.canvas.itemconfig(self.dynamic_background, fill = color)
# BUTTON DESCRIPTIONS
buttons = [{'text' : 'Invoices saved in: {}'.format(output_path), 'bg' :'#2e343d', 'fcl':'white', 'size' : 40,'font' : 'Helvetica 12 bold',
'callback' : change_path , 'hl' : 'white', 'hlc' : dd['rect_mover'], 'fhl' : 'Helvetica 14 bold', 'fhlc' : '#ff9500', 'ftxt' : 'Change output path'},
{'text' : 'Generate invoice', 'bg' : '#2e343d', 'fcl':'white', 'size' : 60,'font' : 'Helvetica 20 bold',
'callback' : generate_invoice_screen , 'hl' : '#aee887', 'hlc' : '#aee887','fhl' : 'Helvetica 22 bold', 'fhlc' : 'white','ftxt' : 'Generate invoice'}]
#############################################################################################################
# THIS ONE DISPLAYS A BUTTON ON THE CANVAS
#############################################################################################################
class canvas_button:
def __init__(self,p_canvas,bbox, setup):
self.canvas = p_canvas
self.bgcolor = {'norm' : setup ['bg'], 'high' : setup ['hlc']}
self.txtcolor = {'norm' : setup['fcl'], 'high' : setup['fhlc']}
self.font = {'norm' : setup['font'],'high' : setup['fhl']}
self.message = {'norm' : setup['text'], 'high':setup['ftxt']}
x,y,xx,yy = bbox[0],bbox[1],bbox[2],bbox[3]
self.dynamic_background = round_rectangle(p_canvas,x,y, xx, yy, fill=self.bgcolor['norm'], width=1, outline='white' )
self.cap_list = [p_canvas.create_text((x+xx)/2,(y+yy)/2, font=self.font['norm'], text=self.message['norm'], fill=self.txtcolor['norm'])]
self.bbox = bbox
        # the supplied callback is stored as an instance attribute and acts as
        # the click handler directly (it shadows any class-level on_click)
        self.on_click = setup['callback']
def on_mouse_over(self):
self.canvas.itemconfig(self.dynamic_background, fill=self.bgcolor['high'])
self.canvas.itemconfig(self.cap_list[0], fill=self.txtcolor['high'], text=self.message['high'], font=self.font['high'])
def on_mouse_off(self):
self.canvas.itemconfig(self.dynamic_background, fill = self.bgcolor['norm'])
self.canvas.itemconfig(self.cap_list[0], fill=self.txtcolor['norm'], text = self.message['norm'],font = self.font['norm'])
#############################################################################################################
# THIS ONE DISPLAYS A BUTTON THAT UPDATES ITS TEXT
#############################################################################################################
class active_button:
def __init__(self,p_canvas,title):
self.canvas = p_canvas
self.bgcolor = {'norm' : title['bg'], 'high' : title['hlc']}
self.txtcolor = {'norm' : title['fcl'], 'high' : title['fhlc']}
self.font = {'norm' : title['font'],'high' : title['fhl']}
self.message = {'norm' : title['text'], 'high':title['ftxt']}
self.dynamic_background = round_rectangle(p_canvas,dd['lm'], dd['nw'], 1300, dd['nw'] + title['size'], fill=self.bgcolor['norm'], width=1, outline='white' )
self.cap_list = [p_canvas.create_text(650,dd['nw'] + title['size']/2, font=self.font['norm'], text=self.message['norm'], fill=self.txtcolor['norm'])]
self.bbox = (dd['lm'], dd['nw'], 1300, dd['nw'] + title['size'])
dd['nw'] += title['size']
self.callback = title['callback']
self.update = False
def cap_style(self, color, font):
for cap in self.cap_list[1:-1]:
self.canvas.itemconfig(cap, fill = color, font = font)
def entry_highlight(self, color):
self.canvas.itemconfig(self.dynamic_background, fill = color)
def update_f(self):
self.message['norm'] = 'Invoices saved in: {}'.format(output_path)
self.canvas.itemconfig(self.cap_list[0], fill=self.txtcolor['norm'], text=self.message['norm'], font=self.font['norm'])
def on_click(self):
self.callback()
if self.update:
self.update_f()
def mover(self,color):
self.canvas.itemconfig(self.dynamic_background, fill=self.bgcolor['high'])
self.canvas.itemconfig(self.cap_list[0], fill=self.txtcolor['high'], text=self.message['high'], font=self.font['high'])
def remove_highlight(self):
self.canvas.itemconfig(self.dynamic_background, fill = self.bgcolor['norm'])
self.canvas.itemconfig(self.cap_list[0], fill=self.txtcolor['norm'], text = self.message['norm'],font = self.font['norm'])
#############################################################################################################
# DRAWS THE FIRST PAGE
#############################################################################################################
def draw_all_entries(entries, buttons):
entries_list = []
for entry in entries:
entries_list.append(entry_box(captions,entry,canvas))
dd['nw'] += 125
dd['nw'] += 10
for button in buttons:
entries_list.append(active_button(canvas,button))
dd['nw'] += 10
entries_list[-2].update = True
return entries_list
#############################################################################################################
# EVENT MANAGER FOR THE FIRST PAGE HAS EXTRA OPTION FOR SELECTION
#############################################################################################################
def register_click(event, display_list):
x,y = event.x,event.y
global gselected
selected = None
for index,object in enumerate(display_list):
x1,y1,x2,y2 = object.bbox[0],object.bbox[1],object.bbox[2],object.bbox[3]
if x1 <= x <= x2 and y1 <= y <= y2:
if gselected == index:
object.remove_highlight()
gselected = None
return
selected = index
if index in range(5): gselected = selected
break
if selected in range(5):
for object in display_list[0:5]:
object.entry_highlight(dd['rect_bg'])
object.cap_style(dd['cap_color'], dd['cap_font'])
object = display_list[selected]
object.entry_highlight(dd['rect_high'])
object.cap_style(dd['cap_high'], dd['cap_font_bold'])
elif selected != None:
for object in display_list[5::]:
object.remove_highlight()
object = display_list[selected]
object.entry_highlight(dd['rect_high'])
        object.on_click()
object.remove_highlight()
#############################################################################################################
# EVENT MANAGER FOR THE FIRST PAGE JUST FOR MOUSE MOVEMENT AND HIGHLIGHTS
#############################################################################################################
def register_mover(event,display_list):
x, y = event.x, event.y
for index,object in enumerate(display_list):
if index == gselected: continue
object.remove_highlight()
for index,object in enumerate(display_list):
x1, y1, x2, y2 = object.bbox[0], object.bbox[1], object.bbox[2], object.bbox[3]
if x1 <= x <= x2 and y1 <= y <= y2:
if index == gselected: continue
object.mover(dd['rect_mover'])
#############################################################################################################
# HOLDS ALL OBJECTS OF THE MAIN PAGE
#############################################################################################################
display_list = draw_all_entries(entries, buttons) #+ draw_button(buttons)
#############################################################################################################
# BINDINGS FOR THE FIRST PAGE
#############################################################################################################
canvas.bind('<Button-1>', lambda event: register_click(event,display_list))
canvas.bind('<Motion>', lambda event: register_mover(event,display_list))
tk.mainloop()
# ===========================================================================
# File: rapidz/core.py  (repo: xpdAcq/rapidz, BSD-3-Clause)
# ===========================================================================
from __future__ import absolute_import, division, print_function
from collections.abc import Sequence, Hashable
from collections import deque
import copy
from datetime import timedelta
import functools
import logging
import six
import sys
import threading
from time import time
import weakref
import toolz
from tornado import gen
from tornado.locks import Condition
from tornado.ioloop import IOLoop
from tornado.queues import Queue
from collections.abc import Iterable
from .compatibility import get_thread_identity
from .orderedweakset import OrderedWeakrefSet
no_default = "--no-default--"
_global_sinks = set()
_html_update_streams = set()
thread_state = threading.local()
logger = logging.getLogger(__name__)
_io_loops = []
def _weakref_node(x):
if isinstance(x, Stream):
return weakref.ref(x)
return x
def _deref_weakref(x):
if isinstance(x, weakref.ref):
return x()
return x
def apply(func, args, args2=None, kwargs=None):
if not isinstance(args, Sequence) or isinstance(args, str):
args = (args,)
if args2:
args = args + args2
if kwargs:
return func(*args, **kwargs)
else:
return func(*args)
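# apply() first normalizes a scalar into a 1-tuple, then concatenates args2,
# e.g. (illustrative): apply(f, 1, (2, 3), {'k': 4}) calls f(1, 2, 3, k=4).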
def move_to_first(node, f=True):
"""Promote current node to first in the execution order
Parameters
----------
node : rapidz instance
Node to be promoted
f : bool or Sequence of rapidz
The upstream node(s) to promote this node for. If True, promote all
upstream nodes. Defaults to True
Notes
-----
This is often used for saving data, since saving data before the rest of
the data is processed makes sure that all the data that can be saved
(before an exception is hit) is saved.
"""
if f is True:
f = node.upstreams
if not isinstance(f, Sequence):
f = (f,)
for upstream in f:
for n in upstream.downstreams.data:
if n() is node:
break
upstream.downstreams.data._od.move_to_end(n, last=False)
del n
return node
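# Usage sketch (save_raw is a hypothetical sink function):
#
#   source = Stream()
#   saver = source.sink(save_raw)
#   move_to_first(saver)   # raw data is now saved before other downstreams run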
def args_kwargs(cls):
def _(func):
@functools.wraps(func)
def wraps(*args, **kwargs):
self = args[0]
obj = func(*args, **kwargs)
# TODO: decide if we want to capture self in args
self._init_args = tuple([_weakref_node(a) for a in args[1:]])
self._init_kwargs = {
k: _weakref_node(v) for k, v in kwargs.items()
}
return obj
return wraps
cls.__init__ = _(cls.__init__)
return cls
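# args_kwargs records each node's construction arguments on the instance
# (_init_args/_init_kwargs), holding Stream arguments as weak references so
# the captured arguments don't create strong reference cycles between nodes.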
def get_io_loop(asynchronous=None):
if asynchronous:
return IOLoop.current()
if not _io_loops:
loop = IOLoop()
thread = threading.Thread(target=loop.start)
thread.daemon = True
thread.start()
_io_loops.append(loop)
return _io_loops[-1]
def identity(x):
return x
class Stream(object):
""" A Stream is an infinite sequence of data
Streams subscribe to each other passing and transforming data between them.
A Stream object listens for updates from upstream, reacts to these updates,
and then emits more data to flow downstream to all Stream objects that
subscribe to it. Downstream Stream objects may connect at any point of a
Stream graph to get a full view of the data coming off of that point to do
with as they will.
Parameters
----------
asynchronous: boolean or None
Whether or not this stream will be used in asynchronous functions or
normal Python functions. Leave as None if you don't know.
True will cause operations like emit to return awaitable Futures
False will use an Event loop in another thread (starts it if necessary)
ensure_io_loop: boolean
Ensure that some IOLoop will be created. If asynchronous is None or
False then this will be in a separate thread, otherwise it will be
IOLoop.current
Examples
--------
>>> def inc(x):
... return x + 1
>>> source = Stream() # Create a stream object
>>> s = source.map(inc).map(str) # Subscribe to make new streams
>>> s.sink(print) # take an action whenever an element reaches the end
>>> L = list()
>>> s.sink(L.append) # or take multiple actions (streams can branch)
>>> for i in range(5):
... source.emit(i) # push data in at the source
'1'
'2'
'3'
'4'
'5'
>>> L # and the actions happen at the sinks
['1', '2', '3', '4', '5']
"""
_graphviz_shape = "ellipse"
_graphviz_style = "rounded,filled"
_graphviz_fillcolor = "white"
_graphviz_orientation = 0
str_list = ["func", "predicate", "n", "interval"]
def __init__(
self,
upstream=None,
upstreams=None,
stream_name=None,
loop=None,
asynchronous=None,
ensure_io_loop=False,
):
self.downstreams = OrderedWeakrefSet()
if upstreams is not None:
self.upstreams = list(upstreams)
else:
self.upstreams = [upstream]
self._set_asynchronous(asynchronous)
self._set_loop(loop)
if ensure_io_loop and not self.loop:
self._set_asynchronous(False)
if self.loop is None and self.asynchronous is not None:
self._set_loop(get_io_loop(self.asynchronous))
for upstream in self.upstreams:
if upstream:
upstream.downstreams.add(self)
self.name = stream_name
def _set_loop(self, loop):
self.loop = None
if loop is not None:
self._inform_loop(loop)
else:
for upstream in self.upstreams:
if upstream and upstream.loop:
self.loop = upstream.loop
break
def _inform_loop(self, loop):
"""
Percolate information about an event loop to the rest of the stream
"""
if self.loop is not None:
if self.loop is not loop:
raise ValueError("Two different event loops active")
else:
self.loop = loop
for upstream in self.upstreams:
if upstream:
upstream._inform_loop(loop)
for downstream in self.downstreams:
if downstream:
downstream._inform_loop(loop)
def _set_asynchronous(self, asynchronous):
self.asynchronous = None
if asynchronous is not None:
self._inform_asynchronous(asynchronous)
else:
for upstream in self.upstreams:
if upstream and upstream.asynchronous:
self.asynchronous = upstream.asynchronous
break
def _inform_asynchronous(self, asynchronous):
"""
Percolate information about an event loop to the rest of the stream
"""
if self.asynchronous is not None:
if self.asynchronous is not asynchronous:
raise ValueError(
"Stream has both asynchronous and synchronous elements"
)
else:
self.asynchronous = asynchronous
for upstream in self.upstreams:
if upstream:
upstream._inform_asynchronous(asynchronous)
for downstream in self.downstreams:
if downstream:
downstream._inform_asynchronous(asynchronous)
@classmethod
def register_api(cls, modifier=identity):
""" Add callable to Stream API
This allows you to register a new method onto this class. You can use
it as a decorator.::
>>> @Stream.register_api()
... class foo(Stream):
... ...
>>> Stream().foo(...) # this works now
It attaches the callable as a normal attribute to the class object. In
        doing so it respects inheritance (all subclasses of Stream will also
get the foo attribute).
By default callables are assumed to be instance methods. If you like
you can include modifiers to apply before attaching to the class as in
the following case where we construct a ``staticmethod``.
>>> @Stream.register_api(staticmethod)
... class foo(Stream):
... ...
>>> Stream.foo(...) # Foo operates as a static method
"""
def _(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
return func(*args, **kwargs)
setattr(cls, func.__name__, modifier(wrapped))
return func
return _
def start(self):
""" Start any upstream sources """
for upstream in self.upstreams:
upstream.start()
def __str__(self):
s_list = []
if getattr(self, "name", False):
s_list.append("{}; {}".format(self.name, self.__class__.__name__))
else:
s_list.append(self.__class__.__name__)
for m in self.str_list:
s = ""
at = getattr(self, m, None)
if at:
if not callable(at):
s = str(at)
elif hasattr(at, "__name__"):
s = getattr(self, m).__name__
elif hasattr(at.__class__, "__name__"):
s = getattr(self, m).__class__.__name__
else:
s = None
if s:
s_list.append("{}={}".format(m, s))
if len(s_list) <= 2:
s_list = [term.split("=")[-1] for term in s_list]
text = "<"
text += s_list[0]
if len(s_list) > 1:
text += ": "
text += ", ".join(s_list[1:])
text += ">"
return text
__repr__ = __str__
def _ipython_display_(self, **kwargs):
try:
from ipywidgets import Output
import IPython
except ImportError:
return self._repr_html_()
output = Output(_view_count=0)
output_ref = weakref.ref(output)
def update_cell(val):
output = output_ref()
if output is None:
return
with output:
IPython.display.clear_output(wait=True)
IPython.display.display(val)
s = self.map(update_cell)
_html_update_streams.add(s)
self.output_ref = output_ref
s_ref = weakref.ref(s)
def remove_stream(change):
output = output_ref()
if output is None:
return
if output._view_count == 0:
ss = s_ref()
ss.destroy()
_html_update_streams.remove(ss) # trigger gc
output.observe(remove_stream, "_view_count")
return output._ipython_display_(**kwargs)
def _emit(self, x):
result = []
for downstream in list(self.downstreams):
r = downstream.update(x, who=self)
if type(r) is list:
result.extend(r)
else:
result.append(r)
return [element for element in result if element is not None]
def emit(self, x, asynchronous=False):
""" Push data into the stream at this point
        This is typically done only at source Streams but can theoretically be
done at any point
"""
ts_async = getattr(thread_state, "asynchronous", False)
if self.loop is None or asynchronous or self.asynchronous or ts_async:
if not ts_async:
thread_state.asynchronous = True
try:
result = self._emit(x)
if self.loop:
return gen.convert_yielded(result)
finally:
thread_state.asynchronous = ts_async
else:
@gen.coroutine
def _():
thread_state.asynchronous = True
try:
result = yield self._emit(x)
finally:
del thread_state.asynchronous
raise gen.Return(result)
sync(self.loop, _)
def update(self, x, who=None):
self._emit(x)
def gather(self):
""" This is a no-op for core rapidz
This allows gather to be used in both dask and core streams
"""
return self
def connect(self, downstream):
""" Connect this stream to a downstream element.
Parameters
----------
downstream: Stream
The downstream stream to connect to
"""
self.downstreams.add(downstream)
if downstream.upstreams == [None]:
downstream.upstreams = [self]
else:
downstream.upstreams.append(self)
def disconnect(self, downstream):
""" Disconnect this stream to a downstream element.
Parameters
----------
downstream: Stream
The downstream stream to disconnect from
"""
self.downstreams.remove(downstream)
downstream.upstreams.remove(self)
@property
def upstream(self):
if len(self.upstreams) != 1:
raise ValueError("Stream has multiple upstreams")
else:
return self.upstreams[0]
def destroy(self, streams=None):
"""
Disconnect this stream from any upstream sources
"""
if streams is None:
streams = self.upstreams
for upstream in list(streams):
upstream.downstreams.remove(self)
self.upstreams.remove(upstream)
if self in _global_sinks:
_global_sinks.remove(self)
def scatter(self, **kwargs):
from .parallel import scatter
return scatter(self, **kwargs)
def remove(self, predicate):
""" Only pass through elements for which the predicate returns False """
return self.filter(lambda x: not predicate(x))
@property
def scan(self):
return self.accumulate
@property
def concat(self):
return self.flatten
def sink_to_list(self):
""" Append all elements of a stream to a list as they come in
Examples
--------
>>> source = Stream()
>>> L = source.map(lambda x: 10 * x).sink_to_list()
>>> for i in range(5):
... source.emit(i)
>>> L
[0, 10, 20, 30, 40]
"""
L = []
self.sink(L.append)
return L
def frequencies(self, **kwargs):
""" Count occurrences of elements """
def update_frequencies(last, x):
return toolz.assoc(last, x, last.get(x, 0) + 1)
return self.scan(update_frequencies, start={}, **kwargs)
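    # frequencies() is just accumulate() with a dict state: emitting
    # 'a', 'b', 'a' yields {'a': 1}, then {'a': 1, 'b': 1}, then {'a': 2, 'b': 1}.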
def visualize(self, filename="mystream.png", **kwargs):
"""Render the computation of this object's task graph using graphviz.
Requires ``graphviz`` to be installed.
Parameters
----------
filename : str, optional
The name of the file to write to disk.
kwargs:
Graph attributes to pass to graphviz like ``rankdir="LR"``
"""
from .graph import visualize
return visualize(self, filename, **kwargs)
def to_dataframe(self, example):
""" Convert a stream of Pandas dataframes to a DataFrame
Examples
--------
>>> source = Stream()
        >>> sdf = source.to_dataframe(example=pd.DataFrame({'x': [], 'y': []}))  # doctest: +SKIP
>>> L = sdf.groupby(sdf.x).y.mean().stream.sink_to_list()
>>> source.emit(pd.DataFrame(...)) # doctest: +SKIP
>>> source.emit(pd.DataFrame(...)) # doctest: +SKIP
>>> source.emit(pd.DataFrame(...)) # doctest: +SKIP
"""
from .dataframe import DataFrame
return DataFrame(stream=self, example=example)
def to_batch(self, **kwargs):
""" Convert a stream of lists to a Batch
All elements of the stream are assumed to be lists or tuples
Examples
--------
>>> source = Stream()
>>> batches = source.to_batch()
>>> L = batches.pluck('value').map(inc).sum().stream.sink_to_list()
>>> source.emit([{'name': 'Alice', 'value': 1},
... {'name': 'Bob', 'value': 2},
... {'name': 'Charlie', 'value': 3}])
>>> source.emit([{'name': 'Alice', 'value': 4},
... {'name': 'Bob', 'value': 5},
... {'name': 'Charlie', 'value': 6}])
"""
from .batch import Batch
return Batch(stream=self, **kwargs)
@args_kwargs
@Stream.register_api()
class sink(Stream):
""" Apply a function on every element
Examples
--------
>>> source = Stream()
>>> L = list()
>>> source.sink(L.append)
>>> source.sink(print)
>>> source.sink(print)
>>> source.emit(123)
123
123
>>> L
[123]
See Also
--------
map
Stream.sink_to_list
"""
_graphviz_shape = "trapezium"
def __init__(self, upstream, func, *args, **kwargs):
self.func = func
# take the stream specific kwargs out
stream_name = kwargs.pop("stream_name", None)
self.kwargs = kwargs
self.args = args
Stream.__init__(self, upstream, stream_name=stream_name)
_global_sinks.add(self)
def update(self, x, who=None):
result = self.func(x, *self.args, **self.kwargs)
if gen.isawaitable(result):
return result
else:
return []
@args_kwargs
@Stream.register_api()
class map(Stream):
""" Apply a function to every element in the stream
Parameters
----------
func: callable
*args :
The arguments to pass to the function.
**kwargs:
Keyword arguments to pass to func
Examples
--------
>>> source = Stream()
>>> source.map(lambda x: 2*x).sink(print)
>>> for i in range(5):
... source.emit(i)
0
2
4
6
8
"""
def __init__(self, upstream, func, *args, **kwargs):
self.func = func
# this is one of a few stream specific kwargs
stream_name = kwargs.pop("stream_name", None)
self.kwargs = kwargs
self.args = args
Stream.__init__(self, upstream, stream_name=stream_name)
def update(self, x, who=None):
try:
result = self.func(x, *self.args, **self.kwargs)
except Exception as e:
logger.exception(e)
raise
else:
return self._emit(result)
@args_kwargs
@Stream.register_api()
class starmap(Stream):
""" Apply a function to every element in the stream, splayed out
See ``itertools.starmap``
Parameters
----------
func: callable
*args :
The arguments to pass to the function.
**kwargs:
Keyword arguments to pass to func
Examples
--------
>>> source = Stream()
>>> source.starmap(lambda a, b: a + b).sink(print)
>>> for i in range(5):
... source.emit((i, i))
0
2
4
6
8
"""
def __init__(self, upstream, func, *args, **kwargs):
self.func = func
# this is one of a few stream specific kwargs
stream_name = kwargs.pop("stream_name", None)
self.kwargs = kwargs
self.args = args
Stream.__init__(self, upstream, stream_name=stream_name)
def update(self, x, who=None):
y = x + self.args
try:
result = self.func(*y, **self.kwargs)
except Exception as e:
logger.exception(e)
raise
else:
return self._emit(result)
def _truthy(x):
return not not x
@args_kwargs
@Stream.register_api()
class filter(Stream):
""" Only pass through elements that satisfy the predicate
Parameters
----------
predicate : function
The predicate. Should return True or False, where
True means that the predicate is satisfied.
Examples
--------
>>> source = Stream()
>>> source.filter(lambda x: x % 2 == 0).sink(print)
>>> for i in range(5):
... source.emit(i)
0
2
4
"""
def __init__(self, upstream, predicate, *args, **kwargs):
if predicate is None:
predicate = _truthy
self.predicate = predicate
stream_name = kwargs.pop("stream_name", None)
self.kwargs = kwargs
self.args = args
Stream.__init__(self, upstream, stream_name=stream_name)
def update(self, x, who=None):
if self.predicate(x, *self.args, **self.kwargs):
return self._emit(x)
@args_kwargs
@Stream.register_api()
class accumulate(Stream):
""" Accumulate results with previous state
This performs running or cumulative reductions, applying the function
to the previous total and the new element. The function should take
two arguments, the previous accumulated state and the next element and
it should return a new accumulated state.
Parameters
----------
func: callable
start: object
Initial value. Defaults to the first submitted element
returns_state: boolean, optional
If true then func should return both the state and the value to emit
If false then both values are the same, and func returns one value
reset_stream : Stream instance or None, optional
If not None, when the ``reset_stream`` stream emits the accumulate node's
state will revert to the initial state (set by ``start``), defaults
to None
**kwargs:
Keyword arguments to pass to func
Examples
--------
>>> source = Stream()
>>> source.accumulate(lambda acc, x: acc + x).sink(print)
>>> for i in range(5):
... source.emit(i)
    0
    1
    3
    6
    10
"""
_graphviz_shape = "box"
def __init__(
self, upstream, func, start=no_default, returns_state=False, reset_stream=None,
**kwargs
):
self.func = func
self.kwargs = kwargs
self.state = start
# XXX: maybe don't need deepcopy?
self._initial_state = copy.deepcopy(start)
self.returns_state = returns_state
self.reset_node = reset_stream
# this is one of a few stream specific kwargs
stream_name = kwargs.pop("stream_name", None)
if reset_stream:
Stream.__init__(self, upstreams=[upstream, reset_stream],
stream_name=stream_name)
else:
Stream.__init__(self, upstream, stream_name=stream_name)
def update(self, x, who=None):
if who is self.reset_node:
self.state = copy.deepcopy(self._initial_state)
return
if self.state is no_default:
self.state = x
return self._emit(x)
else:
try:
result = self.func(self.state, x, **self.kwargs)
except Exception as e:
logger.exception(e)
raise
if self.returns_state:
state, result = result
else:
state = result
self.state = state
return self._emit(result)
@args_kwargs
@Stream.register_api()
class partition(Stream):
""" Partition stream into tuples of equal size
Examples
--------
>>> source = Stream()
>>> source.partition(3).sink(print)
>>> for i in range(10):
... source.emit(i)
(0, 1, 2)
(3, 4, 5)
(6, 7, 8)
"""
_graphviz_shape = "diamond"
def __init__(self, upstream, n, **kwargs):
self.n = n
self.buffer = []
Stream.__init__(self, upstream, **kwargs)
def update(self, x, who=None):
self.buffer.append(x)
if len(self.buffer) == self.n:
result, self.buffer = self.buffer, []
return self._emit(tuple(result))
else:
return []
@args_kwargs
@Stream.register_api()
class sliding_window(Stream):
""" Produce overlapping tuples of size n
Examples
--------
>>> source = Stream()
>>> source.sliding_window(3).sink(print)
>>> for i in range(8):
... source.emit(i)
(0, 1, 2)
(1, 2, 3)
(2, 3, 4)
(3, 4, 5)
(4, 5, 6)
(5, 6, 7)
"""
_graphviz_shape = "diamond"
def __init__(self, upstream, n, **kwargs):
self.n = n
self.buffer = deque(maxlen=n)
Stream.__init__(self, upstream, **kwargs)
def update(self, x, who=None):
self.buffer.append(x)
if len(self.buffer) == self.n:
return self._emit(tuple(self.buffer))
else:
return []
def convert_interval(interval):
if isinstance(interval, str):
import pandas as pd
interval = pd.Timedelta(interval).total_seconds()
return interval
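# Illustrative conversions (string intervals require pandas):
#   convert_interval('500ms') -> 0.5
#   convert_interval(2)       -> 2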
@args_kwargs
@Stream.register_api()
class timed_window(Stream):
""" Emit a tuple of collected results every interval
Every ``interval`` seconds this emits a tuple of all of the results
seen so far. This can help to batch data coming off of a high-volume
stream.
"""
_graphviz_shape = "octagon"
def __init__(self, upstream, interval, **kwargs):
self.interval = convert_interval(interval)
self.buffer = []
self.last = gen.moment
Stream.__init__(self, upstream, ensure_io_loop=True, **kwargs)
self.loop.add_callback(self.cb)
def update(self, x, who=None):
self.buffer.append(x)
return self.last
@gen.coroutine
def cb(self):
while True:
L, self.buffer = self.buffer, []
self.last = self._emit(L)
yield self.last
yield gen.sleep(self.interval)
@args_kwargs
@Stream.register_api()
class delay(Stream):
""" Add a time delay to results """
_graphviz_shape = "octagon"
def __init__(self, upstream, interval, **kwargs):
self.interval = convert_interval(interval)
self.queue = Queue()
Stream.__init__(self, upstream, ensure_io_loop=True, **kwargs)
self.loop.add_callback(self.cb)
@gen.coroutine
def cb(self):
while True:
last = time()
x = yield self.queue.get()
yield self._emit(x)
duration = self.interval - (time() - last)
if duration > 0:
yield gen.sleep(duration)
def update(self, x, who=None):
return self.queue.put(x)
@args_kwargs
@Stream.register_api()
class rate_limit(Stream):
""" Limit the flow of data
This stops two elements of streaming through in an interval shorter
than the provided value.
Parameters
----------
interval: float
Time in seconds
"""
_graphviz_shape = "octagon"
def __init__(self, upstream, interval, **kwargs):
self.interval = convert_interval(interval)
self.next = 0
Stream.__init__(self, upstream, ensure_io_loop=True, **kwargs)
@gen.coroutine
def update(self, x, who=None):
now = time()
old_next = self.next
self.next = max(now, self.next) + self.interval
if now < old_next:
yield gen.sleep(old_next - now)
yield self._emit(x)
@args_kwargs
@Stream.register_api()
class buffer(Stream):
""" Allow results to pile up at this point in the stream
This allows results to buffer in place at various points in the stream.
This can help to smooth flow through the system when backpressure is
applied.
"""
_graphviz_shape = "diamond"
def __init__(self, upstream, n, **kwargs):
self.queue = Queue(maxsize=n)
Stream.__init__(self, upstream, ensure_io_loop=True, **kwargs)
self.loop.add_callback(self.cb)
def update(self, x, who=None):
return self.queue.put(x)
@gen.coroutine
def cb(self):
while True:
x = yield self.queue.get()
yield self._emit(x)
@args_kwargs
@Stream.register_api()
class zip(Stream):
""" Combine streams together into a stream of tuples
    We emit a new tuple once all streams have produced a new element.
See also
--------
combine_latest
zip_latest
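    Examples
    --------
    A minimal example (stream names are illustrative):

    >>> a = Stream()
    >>> b = Stream()
    >>> a.zip(b).sink(print)
    >>> a.emit(1)
    >>> b.emit('x')
    (1, 'x')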
"""
_graphviz_orientation = 270
_graphviz_shape = "triangle"
def __init__(self, *upstreams, **kwargs):
self.maxsize = kwargs.pop("maxsize", 10)
first = kwargs.pop("first", None)
self.condition = Condition()
self.literals = [
(i, val)
for i, val in enumerate(upstreams)
if not isinstance(val, Stream)
]
self.buffers = {
upstream: deque()
for upstream in upstreams
if isinstance(upstream, Stream)
}
upstreams2 = [
upstream for upstream in upstreams if isinstance(upstream, Stream)
]
Stream.__init__(self, upstreams=upstreams2, **kwargs)
if first:
move_to_first(self, first)
def pack_literals(self, tup):
""" Fill buffers for literals whenever we empty them """
inp = list(tup)[::-1]
out = []
for i, val in self.literals:
while len(out) < i:
out.append(inp.pop())
out.append(val)
while inp:
out.append(inp.pop())
return tuple(out)
def update(self, x, who=None):
L = self.buffers[who] # get buffer for stream
L.append(x)
if len(L) == 1 and all(self.buffers.values()):
tup = tuple(self.buffers[up][0] for up in self.upstreams)
for buf in self.buffers.values():
buf.popleft()
self.condition.notify_all()
if self.literals:
tup = self.pack_literals(tup)
return self._emit(tup)
elif len(L) > self.maxsize:
return self.condition.wait()
@args_kwargs
@Stream.register_api()
class combine_latest(Stream):
""" Combine multiple streams together to a stream of tuples
This will emit a new tuple of all of the most recent elements seen from
any stream.
Parameters
----------
emit_on : stream or list of streams or None
only emit upon update of the streams listed.
If None, emit on update from any stream
See Also
--------
zip
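    Examples
    --------
    A minimal example (stream names are illustrative):

    >>> a = Stream()
    >>> b = Stream()
    >>> a.combine_latest(b).sink(print)
    >>> a.emit(1)
    >>> b.emit('x')
    (1, 'x')
    >>> a.emit(2)
    (2, 'x')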
"""
_graphviz_orientation = 270
_graphviz_shape = "triangle"
def __init__(self, *upstreams, **kwargs):
emit_on = kwargs.pop("emit_on", None)
first = kwargs.pop("first", None)
self.last = [None for _ in upstreams]
self.missing = set(upstreams)
if emit_on is not None:
if not isinstance(emit_on, Iterable):
emit_on = (emit_on,)
emit_on = tuple(
upstreams[x] if isinstance(x, int) else x for x in emit_on
)
self.emit_on = emit_on
else:
self.emit_on = upstreams
Stream.__init__(self, upstreams=upstreams, **kwargs)
self._graphviz_edge_types = {
u: {"style": "solid"} for u in self.upstreams
}
self._graphviz_edge_types.update(
{u: {"style": "dashed"} for u in self.emit_on}
)
if first:
move_to_first(self, first)
def update(self, x, who=None):
if self.missing and who in self.missing:
self.missing.remove(who)
self.last[self.upstreams.index(who)] = x
if not self.missing and who in self.emit_on:
tup = tuple(self.last)
return self._emit(tup)
@args_kwargs
@Stream.register_api()
class flatten(Stream):
""" Flatten streams of lists or iterables into a stream of elements
Examples
--------
>>> source = Stream()
>>> source.flatten().sink(print)
>>> for x in [[1, 2, 3], [4, 5], [6, 7, 7]]:
... source.emit(x)
1
2
3
4
5
6
7
See Also
--------
partition
"""
def update(self, x, who=None):
L = []
for item in x:
y = self._emit(item)
if type(y) is list:
L.extend(y)
else:
L.append(y)
return L
@args_kwargs
@Stream.register_api()
class unique(Stream):
""" Avoid sending through repeated elements
This deduplicates a stream so that only new elements pass through.
You can control how much of a history is stored with the ``history=``
parameter. For example setting ``history=1`` avoids sending through
elements when one is repeated right after the other.
Examples
--------
>>> source = Stream()
>>> source.unique(history=1).sink(print)
>>> for x in [1, 1, 2, 2, 2, 1, 3]:
... source.emit(x)
1
2
1
3
"""
def __init__(self, upstream, history=None, key=identity, **kwargs):
self.seen = None
self.key = key
self.history = history
Stream.__init__(self, upstream, **kwargs)
def update(self, x, who=None):
y = self.key(x)
        # If this is the first piece of data, create the cache
if self.seen is None:
if isinstance(y, Hashable):
self.seen = dict()
if self.history:
# if it is hashable use LRU cache
if isinstance(y, Hashable):
from zict import LRU
self.seen = LRU(self.history, self.seen)
                # if it is not hashable, use a deque (since it doesn't need a hash)
else:
self.seen = deque(maxlen=self.history)
if isinstance(y, Hashable):
if y not in self.seen:
self.seen[y] = 1
return self._emit(x)
        # If y is not hashable (e.g. a dict) we can't use the LRU cache;
        # fall back to the FIFO deque instead
else:
if y not in self.seen:
self.seen.append(y)
return self._emit(x)
@args_kwargs
@Stream.register_api()
class union(Stream):
""" Combine multiple streams into one
    Every element from any of the upstream streams will immediately flow
into the output stream. They will not be combined with elements from
other streams.
See also
--------
Stream.zip
Stream.combine_latest
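    Examples
    --------
    A minimal example (stream names are illustrative):

    >>> a = Stream()
    >>> b = Stream()
    >>> a.union(b).sink(print)
    >>> a.emit(1)
    1
    >>> b.emit(2)
    2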
"""
def __init__(self, *upstreams, **kwargs):
super(union, self).__init__(upstreams=upstreams, **kwargs)
def update(self, x, who=None):
return self._emit(x)
@args_kwargs
@Stream.register_api()
class pluck(Stream):
""" Select elements from elements in the stream.
Parameters
----------
    pick : object, list
The element(s) to pick from the incoming element in the stream
If an instance of list, will pick multiple elements.
Examples
--------
>>> source = Stream()
>>> source.pluck([0, 3]).sink(print)
>>> for x in [[1, 2, 3, 4], [4, 5, 6, 7], [8, 9, 10, 11]]:
... source.emit(x)
(1, 4)
(4, 7)
(8, 11)
>>> source = Stream()
>>> source.pluck('name').sink(print)
>>> for x in [{'name': 'Alice', 'x': 123}, {'name': 'Bob', 'x': 456}]:
... source.emit(x)
'Alice'
'Bob'
"""
def __init__(self, upstream, pick, **kwargs):
self.pick = pick
super(pluck, self).__init__(upstream, **kwargs)
def update(self, x, who=None):
if isinstance(self.pick, list):
return self._emit(tuple([x[ind] for ind in self.pick]))
else:
return self._emit(x[self.pick])
@args_kwargs
@Stream.register_api()
class collect(Stream):
"""
Hold elements in a cache and emit them as a collection when flushed.
Examples
--------
>>> source1 = Stream()
>>> source2 = Stream()
>>> collector = collect(source1)
>>> collector.sink(print)
>>> source2.sink(collector.flush)
>>> source1.emit(1)
>>> source1.emit(2)
>>> source2.emit('anything') # flushes collector
...
[1, 2]
"""
def __init__(self, upstream, cache=None, **kwargs):
if cache is None:
cache = deque()
self.cache = cache
Stream.__init__(self, upstream, **kwargs)
def update(self, x, who=None):
self.cache.append(x)
def flush(self, _=None):
out = tuple(self.cache)
self._emit(out)
self.cache.clear()
@args_kwargs
@Stream.register_api()
class zip_latest(Stream):
"""Combine multiple streams together to a stream of tuples
The stream which this is called from is lossless. All elements from
    the lossless stream are emitted regardless of when they came in.
This will emit a new tuple consisting of an element from the lossless
stream paired with the latest elements from the other streams.
    Elements are only emitted when an element on the lossless stream is
received, similar to ``combine_latest`` with the ``emit_on`` flag.
See Also
--------
Stream.combine_latest
Stream.zip
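    Examples
    --------
    A minimal example (stream names are illustrative; ``a`` is the
    lossless stream):

    >>> a = Stream()
    >>> b = Stream()
    >>> a.zip_latest(b).sink(print)
    >>> a.emit(1)
    >>> b.emit('x')
    (1, 'x')
    >>> a.emit(2)
    (2, 'x')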
"""
def __init__(self, lossless, *upstreams, **kwargs):
first = kwargs.pop("first", None)
upstreams = (lossless,) + upstreams
self.last = [None for _ in upstreams]
self.missing = set(upstreams)
self.lossless = lossless
self.lossless_buffer = deque()
Stream.__init__(self, upstreams=upstreams, **kwargs)
if first:
move_to_first(self, first)
def update(self, x, who=None):
idx = self.upstreams.index(who)
if who is self.lossless:
self.lossless_buffer.append(x)
self.last[idx] = x
if self.missing and who in self.missing:
self.missing.remove(who)
if not self.missing:
L = []
while self.lossless_buffer:
self.last[0] = self.lossless_buffer.popleft()
L.append(self._emit(tuple(self.last)))
return L
@args_kwargs
@Stream.register_api()
class latest(Stream):
""" Drop held-up data and emit the latest result
This allows you to skip intermediate elements in the stream if there is
some back pressure causing a slowdown. Use this when you only care about
the latest elements, and are willing to lose older data.
This passes through values without modification otherwise.
Examples
--------
>>> source.map(f).latest().map(g) # doctest: +SKIP
"""
_graphviz_shape = "octagon"
def __init__(self, upstream, **kwargs):
self.condition = Condition()
self.next = []
Stream.__init__(self, upstream, ensure_io_loop=True, **kwargs)
self.loop.add_callback(self.cb)
def update(self, x, who=None):
self.next = [x]
self.loop.add_callback(self.condition.notify)
@gen.coroutine
def cb(self):
while True:
yield self.condition.wait()
[x] = self.next
yield self._emit(x)
def sync(loop, func, *args, **kwargs):
"""
    Run a coroutine in a loop that is running in a separate thread.
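    Example (a hedged sketch: assumes ``loop`` is a Tornado IOLoop already
    running in another thread and ``coro`` is a coroutine function)::

        result = sync(loop, coro, arg, callback_timeout=5)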
"""
    # This was taken from distributed/utils.py
timeout = kwargs.pop("callback_timeout", None)
def make_coro():
coro = gen.maybe_future(func(*args, **kwargs))
if timeout is None:
return coro
else:
return gen.with_timeout(timedelta(seconds=timeout), coro)
e = threading.Event()
main_tid = get_thread_identity()
result = [None]
error = [False]
@gen.coroutine
def f():
try:
if main_tid == get_thread_identity():
raise RuntimeError("sync() called from thread of running loop")
yield gen.moment
thread_state.asynchronous = True
result[0] = yield make_coro()
except Exception as exc:
logger.exception(exc)
error[0] = sys.exc_info()
finally:
thread_state.asynchronous = False
e.set()
loop.add_callback(f)
while not e.is_set():
e.wait(1000000)
if error[0]:
six.reraise(*error[0])
else:
return result[0]
@Stream.register_api()
class starsink(Stream):
    """ Apply a function on every element, unpacking the element as the
    function's positional arguments
    Examples
    --------
    >>> source = Stream()
    >>> source.starsink(lambda a, b: print(a + b))
    >>> source.emit((1, 2))
    3
See Also
--------
map
Stream.sink_to_list
"""
_graphviz_shape = "trapezium"
def __init__(self, upstream, func, *args, **kwargs):
self.func = func
# take the stream specific kwargs out
stream_name = kwargs.pop("stream_name", None)
self.kwargs = kwargs
self.args = args
Stream.__init__(self, upstream, stream_name=stream_name)
_global_sinks.add(self)
def update(self, x, who=None):
y = x + self.args
result = self.func(*y, **self.kwargs)
if gen.isawaitable(result):
return result
else:
return []
def destroy_pipeline(source_node: Stream):
"""Destroy all the nodes attached to the source
Parameters
----------
source_node : Stream
The source node for the pipeline
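    Examples
    --------
    A hedged sketch (the map and sink callables are illustrative):

    >>> source = Stream()  # doctest: +SKIP
    >>> source.map(lambda x: x + 1).sink(print)  # doctest: +SKIP
    >>> destroy_pipeline(source)  # doctest: +SKIP

    After the call, the downstream map and sink nodes are disconnected
    from ``source``.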
"""
for ds in list(source_node.downstreams):
destroy_pipeline(ds)
if source_node.upstreams:
try:
source_node.destroy()
            # some source nodes are tuples and some are dead weakrefs
except (AttributeError, KeyError):
pass

# === app/extensions.py (repo: Polikarpova/ad-vk-bot, no license) ===
import os
from flask_sqlalchemy import SQLAlchemy
from apscheduler.schedulers.background import BackgroundScheduler
# SQLAlchemy
db = SQLAlchemy()
# APScheduler
scheduler = BackgroundScheduler(daemon=True)
weeklyJobName = 'weekly_job'
playerAlarmName = 'player_alarm'
# Community (group) data
album_id = int(os.environ.get('ALBUM_ID'))  # id of the meme album used by send_blame_message
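# A hedged sketch of how these objects are typically wired together at
# startup (the job function and cron trigger are assumptions, not taken
# from this repository):
#
#     scheduler.add_job(send_weekly_report, 'cron', day_of_week='mon',
#                       id=weeklyJobName)
#     scheduler.start()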

# === src/signer/context_processors.py (repo: joka/signer, no license) ===
# -*- coding: utf-8 -*-
from django.conf import settings
def currentpage(request):
return {
'page_url': request.build_absolute_uri(),
# 'page_url': '%s%s'%(settings.BASE_URL, request.get_full_path()),
}
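# Hedged usage note: to activate this context processor, its dotted path
# (presumably 'signer.context_processors.currentpage') must be added to
# TEMPLATES[0]['OPTIONS']['context_processors'] in settings.py; templates
# can then reference {{ page_url }} directly.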

# === report2.py (repo: aTentori/REST_assignment, no license) ===
import requests
import time
start = time.time()
report = requests.get("http://localhost:21212/report").json()
print('{:<3} {:<20} {:<10} {:<10} {}'.format('ID:', 'Name:', 'Num Loc:', 'Avg value:', 'Categories:'))
for r in report:
    # print(str(r[0]) + "\t" + str(r[1]) + "\t" + str(r[2]) + "\t" + str(r[3]) + "\t" + str(r[4]))  # older tab-separated variant
    print('{:<3} {:<20} {:<10} {:<10} {}'.format(str(r[0]), str(r[1]), str(r[2]), str(r[3]), str(r[4])))
end = time.time()
elapsed = end - start  # avoid shadowing the ``time`` module
final_time = round(elapsed, 2)
print("")
print("Program time is: " + str(final_time) + " seconds")

# === netbox/virtualization/migrations/0013_deterministic_ordering.py (repo: jathanism/netbox, Apache-2.0) ===
# Generated by Django 2.2.8 on 2020-01-15 18:10
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('virtualization', '0012_vm_name_nonunique'),
]
operations = [
migrations.AlterModelOptions(
name='virtualmachine',
options={'ordering': ('name', 'pk')},
),
]

# === streamlit_multiapp.py (repo: NIH-CARD/AlignedUMAP-BiomedicalData, MIT) ===
"""Frameworks for running multiple Streamlit applications as a single app.
"""
import streamlit as st
class MultiApp:
def __init__(self):
self.apps = []
def add_app(self, title, func, params):
self.apps.append({
"title": title,
"function": func,
"params": params
})
def run(self):
st.markdown(
"""<style>
.boxBorder1 {
outline-offset: 20px;
font-size:128px;
}</style>
""", unsafe_allow_html=True)
from st_btn_select import st_btn_select
app = st_btn_select(
self.apps,
format_func=lambda app: '{}'.format(app['title']),
)
        app['function'](**app['params'])
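# A hedged usage sketch (the page functions `home` and `about` are
# illustrative assumptions, not part of this file):
#
#     app = MultiApp()
#     app.add_app('Home', home, {})
#     app.add_app('About', about, {'user': 'demo'})
#     app.run()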

# === myproject/urls.py (repo: liuliansp/Vue_Django_Mysql, no license) ===
"""myproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
# from django.urls import path
from django.conf.urls import url, include
from django.views.generic import TemplateView
import myapp.urls
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api/', include(myapp.urls)),
url(r'^$', TemplateView.as_view(template_name="index.html")),
]

# === release.py (repo: containers-tools/cct, MIT) ===
#!/bin/python
import os
from zipfile import ZipFile
with ZipFile('cct.zip', 'w') as zf:
zf.write('__main__.py')
for root, directory, files in os.walk('cct'):
for f in files:
arc_file = os.path.join(root, f)
zf.write(arc_file)