Dataset schema (each record below is one table row: the metadata columns first, then the file `content`, then the trailing numeric columns; ⌀ marks nullable columns):

| column | type | range |
|---|---|---|
| hexsha | stringlengths | 40 - 40 |
| size | int64 | 5 - 2.06M |
| ext | stringclasses | 10 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 3 - 248 |
| max_stars_repo_name | stringlengths | 5 - 125 |
| max_stars_repo_head_hexsha | stringlengths | 40 - 78 |
| max_stars_repo_licenses | listlengths | 1 - 10 |
| max_stars_count | int64 | 1 - 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 - 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 - 24 ⌀ |
| max_issues_repo_path | stringlengths | 3 - 248 |
| max_issues_repo_name | stringlengths | 5 - 125 |
| max_issues_repo_head_hexsha | stringlengths | 40 - 78 |
| max_issues_repo_licenses | listlengths | 1 - 10 |
| max_issues_count | int64 | 1 - 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 - 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 - 24 ⌀ |
| max_forks_repo_path | stringlengths | 3 - 248 |
| max_forks_repo_name | stringlengths | 5 - 125 |
| max_forks_repo_head_hexsha | stringlengths | 40 - 78 |
| max_forks_repo_licenses | listlengths | 1 - 10 |
| max_forks_count | int64 | 1 - 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 - 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 - 24 ⌀ |
| content | stringlengths | 5 - 2.06M |
| avg_line_length | float64 | 1 - 1.02M |
| max_line_length | int64 | 3 - 1.03M |
| alphanum_fraction | float64 | 0 - 1 |
| count_classes | int64 | 0 - 1.6M |
| score_classes | float64 | 0 - 1 |
| count_generators | int64 | 0 - 651k |
| score_generators | float64 | 0 - 1 |
| count_decorators | int64 | 0 - 990k |
| score_decorators | float64 | 0 - 1 |
| count_async_functions | int64 | 0 - 235k |
| score_async_functions | float64 | 0 - 1 |
| count_documentation | int64 | 0 - 1.04M |
| score_documentation | float64 | 0 - 1 |
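For readability, here is the same schema as a Python dataclass. This is a sketch only: the class name and the concrete Python types are assumptions inferred from the header stats, and the `max_issues_*`/`max_forks_*` triplets (omitted for brevity) mirror the `max_stars_*` layout shown.

```python
from dataclasses import dataclass
from typing import List, Optional

@dataclass
class StackRow:  # hypothetical name for one dataset record
    hexsha: str                       # 40-char git blob hash
    size: int                         # file size in bytes
    ext: str                          # file extension ("py")
    lang: str                         # language ("Python")
    # The max_issues_* and max_forks_* triplets share this layout:
    max_stars_repo_path: str
    max_stars_repo_name: str
    max_stars_repo_head_hexsha: str
    max_stars_repo_licenses: List[str]
    max_stars_count: Optional[int]    # ⌀: nullable
    max_stars_repo_stars_event_min_datetime: Optional[str]
    max_stars_repo_stars_event_max_datetime: Optional[str]
    content: str                      # the source file itself
    avg_line_length: float
    max_line_length: int
    alphanum_fraction: float
    count_classes: int
    score_classes: float
    # ... the remaining count_*/score_* pairs follow the same pattern ...
```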
5f27a02869b20dee1a16b8900aaf7b0000709f7f | 105 | py | Python | discobot/utils.py | pythonology/disco | 82cc0434e584a1053075b51f487a3ac9b03f6f7d | ["MIT"] | 7 | 2016-04-13T23:03:36.000Z | 2016-04-19T22:25:28.000Z | discobot/utils.py | pythonology/disco | 82cc0434e584a1053075b51f487a3ac9b03f6f7d | ["MIT"] | 2 | 2016-06-03T16:12:00.000Z | 2021-03-25T21:40:27.000Z | discobot/utils.py | pythonology/disco | 82cc0434e584a1053075b51f487a3ac9b03f6f7d | ["MIT"] | 1 | 2016-04-13T18:43:19.000Z | 2016-04-13T18:43:19.000Z |
def make_attachment_uri(discriminator, filename):
return 'disco://%s/%s' % (discriminator, filename)
| 35 | 54 | 0.733333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.142857 |
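A quick usage sketch for the helper above (the argument values are invented):

```python
>>> make_attachment_uri('1234', 'cat.png')
'disco://1234/cat.png'
```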
5f289aaa4dfd07380db23d1700f9b70a80d10934 | 5,266 | py | Python | oaipmh/__init__.py | scieloorg/oai-pmh | 9d3044921d2d5cafb18e54f04070e8783f49c06d | ["BSD-2-Clause"] | 2 | 2019-03-16T04:40:29.000Z | 2022-03-10T14:50:21.000Z | oaipmh/__init__.py | DalavanCloud/oai-pmh | 9d3044921d2d5cafb18e54f04070e8783f49c06d | ["BSD-2-Clause"] | 27 | 2017-08-23T17:11:57.000Z | 2021-06-01T21:57:31.000Z | oaipmh/__init__.py | DalavanCloud/oai-pmh | 9d3044921d2d5cafb18e54f04070e8783f49c06d | ["BSD-2-Clause"] | 2 | 2017-06-12T16:18:35.000Z | 2019-03-16T04:40:12.000Z |
import os
import re
from pyramid.config import Configurator
from pyramid.events import NewRequest
from oaipmh import (
repository,
datastores,
sets,
utils,
articlemeta,
entities,
)
from oaipmh.formatters import (
oai_dc,
oai_dc_openaire,
)
METADATA_FORMATS = [
(entities.MetadataFormat(
metadataPrefix='oai_dc',
schema='http://www.openarchives.org/OAI/2.0/oai_dc.xsd',
metadataNamespace='http://www.openarchives.org/OAI/2.0/oai_dc/'),
oai_dc.make_metadata,
lambda x: x),
(entities.MetadataFormat(
metadataPrefix='oai_dc_openaire',
schema='http://www.openarchives.org/OAI/2.0/oai_dc.xsd',
metadataNamespace='http://www.openarchives.org/OAI/2.0/oai_dc/'),
oai_dc_openaire.make_metadata,
oai_dc_openaire.augment_metadata),
]
STATIC_SETS = [
(sets.Set(setSpec='openaire', setName='OpenAIRE'),
datastores.identityview),
]
DEFAULT_SETTINGS = [
('oaipmh.repo.name', 'OAIPMH_REPO_NAME', str,
'SciELO - Scientific Electronic Library Online'),
('oaipmh.repo.baseurl', 'OAIPMH_REPO_BASEURL', str,
'http://www.scielo.br/oai/scielo-oai.php'),
('oaipmh.repo.protocolversion', 'OAIPMH_REPO_PROTOCOLVERSION', str,
'2.0'),
('oaipmh.repo.adminemail', 'OAIPMH_REPO_ADMINEMAIL', str,
'[email protected]'),
('oaipmh.repo.earliestdatestamp', 'OAIPMH_REPO_EARLIESTDATESTAMP',
utils.parse_date, '1998-08-01'),
('oaipmh.repo.deletedrecord', 'OAIPMH_REPO_DELETEDRECORD', str,
'no'),
('oaipmh.repo.granularity', 'OAIPMH_REPO_GRANULARITY', str,
'YYYY-MM-DD'),
('oaipmh.repo.granularity_regex', 'OAIPMH_REPO_GRANULARITY_REGEX',
re.compile, r'^(\d{4})-(\d{2})-(\d{2})$'),
('oaipmh.collection', 'OAIPMH_COLLECTION', str,
'scl'),
('oaipmh.listslen', 'OAIPMH_LISTSLEN', int,
100),
('oaipmh.chunkedresumptiontoken.chunksize',
'OAIPMH_CHUNKEDRESUMPTIONTOKEN_CHUNKSIZE', int, 12),
('oaipmh.articlemeta_uri', 'OAIPMH_ARTICLEMETA_URI', str,
'articlemeta.scielo.org:11621'),
]
def parse_settings(settings):
    """Parse and return the app settings based on the .ini file and the environment.
    Environment variables take precedence over the values
    defined in the .ini file.
"""
parsed = {}
cfg = list(DEFAULT_SETTINGS)
for name, envkey, convert, default in cfg:
value = os.environ.get(envkey, settings.get(name, default))
if convert is not None:
value = convert(value)
parsed[name] = value
return parsed
def get_datastore(settings):
client = articlemeta.get_articlemeta_client(settings['oaipmh.collection'],
domain=settings['oaipmh.articlemeta_uri'])
return articlemeta.ArticleMeta(client)
def get_repository_meta(settings):
repometa = repository.RepositoryMeta(
repositoryName=settings['oaipmh.repo.name'],
baseURL=settings['oaipmh.repo.baseurl'],
protocolVersion=settings['oaipmh.repo.protocolversion'],
adminEmail=settings['oaipmh.repo.adminemail'],
earliestDatestamp=settings['oaipmh.repo.earliestdatestamp'],
deletedRecord=settings['oaipmh.repo.deletedrecord'],
granularity=settings['oaipmh.repo.granularity'])
return repometa
def get_granularity_validator(settings):
def validate(date_time):
return bool(settings['oaipmh.repo.granularity_regex'].fullmatch(
date_time))
return validate
def get_setsregistry(settings):
registry = articlemeta.ArticleMetaSetsRegistry(
datastore=get_datastore(settings))
for metadata, view in STATIC_SETS:
registry.add(metadata, view)
return registry
def get_resultpage_factory(settings):
return repository.ResultPageFactory(ds=get_datastore(settings),
setsreg=get_setsregistry(settings),
listslen=settings['oaipmh.listslen'],
chunk_size=settings['oaipmh.chunkedresumptiontoken.chunksize'],
granularity_validator=get_granularity_validator(settings),
earliest_datestamp=settings['oaipmh.repo.earliestdatestamp'])
def add_oai_repository(event):
settings = event.request.registry.settings
event.request.repository = repository.Repository(
get_repository_meta(settings), get_datastore(settings),
get_granularity_validator(settings),
resultpage_factory=get_resultpage_factory(settings))
for metadata, formatter, augmenter in METADATA_FORMATS:
event.request.repository.add_metadataformat(metadata, formatter,
augmenter)
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
settings.update(parse_settings(settings))
config = Configurator(settings=settings)
config.add_subscriber(add_oai_repository, NewRequest)
# URL patterns
config.add_route('root', '/')
config.scan()
return config.make_wsgi_app()
| 33.329114 | 80 | 0.657615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,664 | 0.31563 |
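The precedence rule documented in `parse_settings` above can be shown in isolation; a minimal standalone sketch (setting names and values are invented):

```python
import os

# Environment variable beats the .ini value, which beats the default.
ini_settings = {'oaipmh.listslen': '50'}   # as if read from the .ini file
os.environ['OAIPMH_LISTSLEN'] = '200'      # environment override

value = os.environ.get('OAIPMH_LISTSLEN',
                       ini_settings.get('oaipmh.listslen', 100))
print(int(value))  # -> 200: the environment wins
```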
5f2a79411bbedc2b8b017ecceddbf86f3d9843cc | 1,613 | py | Python | create_training_data.py | nasi-famnit/HOur-flight | 96a9aeb1cf0f3fa588c587db08ba0b8fa980eac9 | ["MIT"] | 1 | 2016-04-24T10:49:52.000Z | 2016-04-24T10:49:52.000Z | create_training_data.py | nasi-famnit/HOur-flight | 96a9aeb1cf0f3fa588c587db08ba0b8fa980eac9 | ["MIT"] | null | null | null | create_training_data.py | nasi-famnit/HOur-flight | 96a9aeb1cf0f3fa588c587db08ba0b8fa980eac9 | ["MIT"] | null | null | null |
import flightdata
import weatherparser
import airportdata
import pandas as pd
from datetime import datetime
from pathlib import Path
flights = flightdata.read_csv('data/unpacked/flights/On_Time_On_Time_Performance_2016_1.csv')
fname = 'data/processed/training/training{:04}_v1.csv'
prev_time = datetime.now()
df = pd.DataFrame()
current_csv_name = Path(fname.format(1))
for idx, flight in flights.iterrows():
idx = idx+1
if idx%100 == 0:
now_time = datetime.now()
delta = now_time - prev_time
print('Processing file', idx, ',', 100.0/delta.total_seconds(), 'per second')
prev_time = now_time
if idx % 1000 == 0:
ff = fname.format(idx//1000)
current_csv_name = Path(fname.format(1+idx//1000))
print('Writing to', ff)
df.to_csv(ff)
else:
if current_csv_name.exists():
continue
ff = flight[['Year', 'Month', 'DayofMonth', 'DayOfWeek', 'UniqueCarrier', 'Origin', 'Dest', 'CRSDepTime', 'DepDelayMinutes', 'DepDel15', 'CRSArrTime', 'ArrTime', 'ArrDelay', 'ArrDelayMinutes', 'ArrDel15', 'CRSElapsedTime', 'ActualElapsedTime', 'Distance', 'WeatherDelay']]
weather_origin = weatherparser.get_weather_conditions(airportdata.from_faa(ff.Origin), ff.CRSDepTime)
weather_dest = weatherparser.get_weather_conditions(airportdata.from_faa(ff.Dest), ff.CRSArrTime)
if (weather_origin is None) or ( weather_dest is None):
continue
line = pd.DataFrame(pd.concat([ff, weather_origin, weather_dest])).T
if idx%1000==1:
df = line
else:
df = df.append(line)
| 37.511628 | 276 | 0.675139 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 373 | 0.231246 |
5f2bba8707cde7a8ee1262b60500da6ac69cd76b | 5,618 | py | Python | study/python/pyqt/app/signup.py | cheenwe/blog | a866b3ab98aa58e3ed4a7624fbb72c8fd8dee790 | ["MIT"] | 10 | 2016-09-28T03:22:41.000Z | 2020-06-16T08:42:25.000Z | study/python/pyqt/app/signup.py | cheenwe/blog | a866b3ab98aa58e3ed4a7624fbb72c8fd8dee790 | ["MIT"] | 12 | 2017-04-18T08:41:04.000Z | 2020-06-10T02:54:58.000Z | study/python/pyqt/app/signup.py | cheenwe/blog | a866b3ab98aa58e3ed4a7624fbb72c8fd8dee790 | ["MIT"] | 8 | 2016-09-28T03:03:32.000Z | 2019-09-16T04:22:01.000Z |
from PyQt5 import QtCore, QtGui, QtWidgets
from db import Db
class Ui_Signup(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.setFixedSize(638, 441)
# self.label = QtWidgets.QLabel(Dialog)
# self.label.setGeometry(QtCore.QRect(110, 190, 151, 31))
# self.label.setObjectName("label")
self.label_password = QtWidgets.QLabel(Dialog)
self.label_password.setGeometry(QtCore.QRect(110, 260, 151, 31))
self.label_password.setObjectName("label_password")
self.label_password2 = QtWidgets.QLabel(Dialog)
self.label_password2.setGeometry(QtCore.QRect(110, 300, 171, 31))
self.label_password2.setObjectName("label_password2")
self.label_email = QtWidgets.QLabel(Dialog)
self.label_email.setGeometry(QtCore.QRect(110, 230, 161, 31))
self.label_email.setObjectName("label_email")
# self.txtUsername = QtWidgets.QLineEdit(Dialog)
# self.txtUsername.setGeometry(QtCore.QRect(290, 190, 221, 27))
# self.txtUsername.setObjectName("txtUsername")
self.txtEmail = QtWidgets.QLineEdit(Dialog)
self.txtEmail.setGeometry(QtCore.QRect(290, 230, 221, 27))
self.txtEmail.setObjectName("txtEmail")
self.txtPassword = QtWidgets.QLineEdit(Dialog)
################## make the password invisible ############
self.txtPassword.setEchoMode(QtWidgets.QLineEdit.Password)
###########################################################
self.txtPassword.setGeometry(QtCore.QRect(290, 270, 221, 27))
self.txtPassword.setObjectName("txtPassword")
self.txtPassword2 = QtWidgets.QLineEdit(Dialog)
################## make the password2 invisible ############
self.txtPassword2.setEchoMode(QtWidgets.QLineEdit.Password)
###########################################################
self.txtPassword2.setGeometry(QtCore.QRect(290, 310, 221, 27))
self.txtPassword2.setObjectName("txtPassword2")
self.btnRegister = QtWidgets.QPushButton(Dialog)
self.btnRegister.setGeometry(QtCore.QRect(240, 360, 131, 41))
self.btnRegister.setObjectName("btnRegister")
################## register button#########################
self.btnRegister.clicked.connect(self.registerButton)
###########################################################
self.label_Heading = QtWidgets.QLabel(Dialog)
self.label_Heading.setGeometry(QtCore.QRect(120, 30, 431, 61))
self.label_Heading.setObjectName("label_Heading")
self.label_5 = QtWidgets.QLabel(Dialog)
self.label_5.setGeometry(QtCore.QRect(110, 150, 151, 31))
self.label_5.setObjectName("label_5")
        self.label_name = QtWidgets.QLabel(Dialog)
        self.label_name.setGeometry(QtCore.QRect(110, 150, 151, 31))
        self.label_name.setObjectName("label_name")
self.txtName = QtWidgets.QLineEdit(Dialog)
self.txtName.setGeometry(QtCore.QRect(290, 150, 221, 27))
self.txtName.setObjectName("txtName")
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def registerButton(self):
name = self.txtName.text()
email = self.txtEmail.text()
# username = self.txtUsername.text()
password = self.txtPassword.text()
password2 = self.txtPassword2.text()
if self.checkFields(name,email,password):
self.showMessage("Error", "All fields must be filled")
else:
            if self.checkPassword(password, password2):
                insertDb = Db()
                insertDb.insertTable(name, email, password)
                self.showMessage("Success", "Registration successful")
                self.clearField()
            else:
                self.showMessage("Error", "Passwords don't match")
def showMessage(self,title,msg):
msgBox = QtWidgets.QMessageBox()
msgBox.setIcon(QtWidgets.QMessageBox.Information)
#msgBox.setTitle(title)
msgBox.setText(msg)
msgBox.setStandardButtons(QtWidgets.QMessageBox.Ok)
msgBox.exec_()
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", " XXX系统"))
self.label_password.setText(_translate("Dialog", " 密码:"))
self.label_password2.setText(_translate("Dialog", "重复密码:"))
self.label_email.setText(_translate("Dialog", "邮箱:"))
self.btnRegister.setText(_translate("Dialog", "注册"))
self.label_Heading.setText(_translate("Dialog", " 账户注册"))
        self.label_name.setText(_translate("Dialog", "用户名:"))
def loginPage(self):
self.loginWindow = QtWidgets.QDialog()
self.ui = Ui_Signup2()
self.ui.setupUi(self.loginWindow)
self.loginWindow.show()
def checkFields(self,name,email,password):
        # True when any required field is left empty
        return name == "" or email == "" or password == ""
############## check if password1 and password2 matches #############
def checkPassword(self,password, password2):
return password == password2
##################### clear fields ##################
def clearField(self):
self.txtPassword.setText(None)
self.txtName.setText(None)
self.txtEmail.setText(None)
self.txtPassword2.setText(None)
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog = QtWidgets.QDialog()
ui = Ui_Signup()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
| 41.925373 | 73 | 0.619794 | 5,384 | 0.951909 | 0 | 0 | 0 | 0 | 0 | 0 | 1,227 | 0.216938 |
5f2c618b2d6e7fe4b895b468c576b02558bec6fb | 70 | py | Python | gym_android_wechat_jump/env/__init__.py | gooooloo/gym-android-wechat-jump | f7c576316ae07d9701cc467ef271f838418d8695 | ["MIT"] | null | null | null | gym_android_wechat_jump/env/__init__.py | gooooloo/gym-android-wechat-jump | f7c576316ae07d9701cc467ef271f838418d8695 | ["MIT"] | null | null | null | gym_android_wechat_jump/env/__init__.py | gooooloo/gym-android-wechat-jump | f7c576316ae07d9701cc467ef271f838418d8695 | ["MIT"] | null | null | null |
from gym_android_wechat_jump.env.wechat_jump_env import WechatJumpEnv
| 35 | 69 | 0.914286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
5f2c8d2aea105ec6207836197f28f050f9bd7157 | 318 | py | Python | bot/__init__.py | sudomice/crypto-bot | 51dcf66d79612f2ba8bdf5645005b143fbeda343 | ["MIT"] | null | null | null | bot/__init__.py | sudomice/crypto-bot | 51dcf66d79612f2ba8bdf5645005b143fbeda343 | ["MIT"] | null | null | null | bot/__init__.py | sudomice/crypto-bot | 51dcf66d79612f2ba8bdf5645005b143fbeda343 | ["MIT"] | null | null | null |
import requests
from bot.constants import BASE_ENDPOINT
import cli.app
@cli.app.CommandLineApp
def bot(app):
ping_response = requests.get(BASE_ENDPOINT+'api/v3/ping')
print(f'{ping_response}:{ping_response.json()}')
# bot.add_param("-h", "--help", help="HELP me")
if __name__ == '__main__':
bot.run()
| 19.875 | 61 | 0.701258 | 0 | 0 | 0 | 0 | 152 | 0.477987 | 0 | 0 | 111 | 0.349057 |
5f2e54facc35f3d3aca215fe2e3b9ff2dc7350a5 | 4,374 | py | Python | metadrive/config.py | wefindx/metadrive | 576d240065b61b0187afc249819b705c06308d05 | ["Apache-2.0"] | 7 | 2019-02-04T18:31:06.000Z | 2021-12-22T17:08:55.000Z | metadrive/config.py | wefindx/metadrive | 576d240065b61b0187afc249819b705c06308d05 | ["Apache-2.0"] | 11 | 2019-04-30T18:19:33.000Z | 2019-08-15T19:56:37.000Z | metadrive/config.py | wefindx/metadrive | 576d240065b61b0187afc249819b705c06308d05 | ["Apache-2.0"] | 2 | 2019-01-26T03:17:25.000Z | 2019-04-15T18:35:56.000Z |
import os
import imp
from pathlib import Path
import configparser
import requests
import gpgrecord
config = configparser.ConfigParser()
INSTALLED = imp.find_module('metadrive')[1]
HOME = str(Path.home())
DEFAULT_LOCATION = os.path.join(HOME,'.metadrive')
CONFIG_LOCATION = os.path.join(DEFAULT_LOCATION, 'config')
CREDENTIALS_DIR = os.path.join(DEFAULT_LOCATION, '-/+')
SESSIONS_DIR = os.path.join(DEFAULT_LOCATION, 'sessions')
DATA_DIR = os.path.join(DEFAULT_LOCATION, 'data')
SITES_DIR = os.path.join(HOME, 'Sites')
KNOWN_DRIVERS = os.path.join(DEFAULT_LOCATION, 'known_drivers')
SUBTOOLS = [
fn.rsplit('.py')[0]
for fn in os.listdir(INSTALLED)
if fn.startswith('_') and fn.endswith('.py') and not fn == '__init__.py'
]
def ENSURE_SESSIONS():
if not os.path.exists(SESSIONS_DIR):
os.makedirs(SESSIONS_DIR)
for subtool in SUBTOOLS:
subtool_profiles_path = os.path.join(SESSIONS_DIR, subtool)
if not os.path.exists(subtool_profiles_path):
if subtool != '__init__':
os.makedirs(subtool_profiles_path)
ENSURE_SESSIONS()
def ENSURE_DATA():
if not os.path.exists(DATA_DIR):
os.makedirs(DATA_DIR)
ENSURE_DATA()
def ENSURE_SITES():
if not os.path.exists(SITES_DIR):
os.makedirs(SITES_DIR)
ENSURE_SITES()
if not os.path.exists(CONFIG_LOCATION):
username = input("Type your GitHub username: ")
config['GITHUB'] = {'USERNAME': username}
config['PROXIES'] = {'http': '', 'https': ''}
config['DRIVERS'] = {'auto_upgrade': False}
config['SELENIUM'] = {'headless': False}
config['DRIVER_BACKENDS'] = {
'CHROME': '/usr/bin/chromedriver' # e.g., or http://0.0.0.0:4444/wd/hub, etc.
}
with open(CONFIG_LOCATION, 'w') as configfile:
config.write(configfile)
config.read(CONFIG_LOCATION)
GITHUB_USER = config['GITHUB']['USERNAME']
REPO_PATH = os.path.join(DEFAULT_LOCATION, '-')
DRIVERS_PATH = os.path.join(DEFAULT_LOCATION, 'drivers')
CHROME_DRIVER = config['DRIVER_BACKENDS']['CHROME']
SELENIUM = config['SELENIUM']
if str(config['DRIVERS']['auto_upgrade']) == 'False':
AUTO_UPGRADE_DRIVERS = False
elif str(config['DRIVERS']['auto_upgrade']) == 'True':
AUTO_UPGRADE_DRIVERS = True
elif str(config['DRIVERS']['auto_upgrade']) == 'None':
AUTO_UPGRADE_DRIVERS = None
else:
AUTO_UPGRADE_DRIVERS = False
def ENSURE_REPO():
while not requests.get('https://github.com/{}/-'.format(GITHUB_USER)).ok:
input("Please, create repository named `-` on your GitHub. Type [ENTER] to continue... ")
if os.path.exists(REPO_PATH):
# git pull #
os.system('cd {}; git pull'.format(REPO_PATH))
else:
# git clone #
os.system('cd {}; git clone {}'.format(
DEFAULT_LOCATION,
'[email protected]:{}/-.git'.format(GITHUB_USER)))
if not os.path.exists(CREDENTIALS_DIR):
os.makedirs(CREDENTIALS_DIR)
os.system("cd {}; git add .; git commit -m 'credentials (+)'; git push origin master".format(
REPO_PATH
))
def ENSURE_GPG():
config.read(CONFIG_LOCATION)
if 'GPG' in config.keys():
return config['GPG']['KEY']
print('Choose your GPG key for encrypting credentials:')
KEY_LIST = gpgrecord.list_recipients()
for i, key in enumerate(KEY_LIST):
print('{id}. {uid} {fingerprint}'.format(
id=i+1,
uid=key['uids'],
fingerprint=key['fingerprint']
))
i = int(input('Type key order in the list: ')) - 1
GPG_KEY = KEY_LIST[i]['fingerprint']
config['GPG'] = {'KEY': GPG_KEY}
with open(CONFIG_LOCATION, 'w') as configfile:
config.write(configfile)
return GPG_KEY
def ENSURE_PROXIES():
config.read(CONFIG_LOCATION)
if 'PROXIES' in config.keys():
return {key: 'socks5h://'+config['PROXIES'][key] or None
for key in config['PROXIES'] if config['PROXIES'][key]}
SOCKS5 = input('Type-in default socks5 proxy (e.g., 127.0.0.1:9999) (leave emtpy to default to direct connections) [ENTER]: ')
config['PROXIES'] = {
'http': SOCKS5,
'https': SOCKS5
}
with open(CONFIG_LOCATION, 'w') as configfile:
config.write(configfile)
return {key: 'socks5h://'+config['PROXIES'][key] or None
for key in config['PROXIES'] if config['PROXIES'][key]}
| 29.355705 | 131 | 0.645405 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,132 | 0.258802 |
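`ENSURE_PROXIES` above returns a dict like `{'http': 'socks5h://127.0.0.1:9999', 'https': 'socks5h://127.0.0.1:9999'}`. A sketch of how such a dict is typically consumed, assuming `requests` with SOCKS support (`requests[socks]`, i.e. PySocks) is installed; the address and URL are placeholders:

```python
import requests

proxies = {'http': 'socks5h://127.0.0.1:9999',
           'https': 'socks5h://127.0.0.1:9999'}
# socks5h resolves DNS on the proxy side, matching the prefix used above.
response = requests.get('https://example.com', proxies=proxies)
print(response.status_code)
```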
5f2efcc18abcce7bbf0e01ac810dce1793930f16 | 1,272 | py | Python | project/matching/migrations/0001_initial.py | Project-EPIC/emergencypetmatcher | 72c9eec228e33c9592243266e048dc02824d778d | ["MIT"] | null | null | null | project/matching/migrations/0001_initial.py | Project-EPIC/emergencypetmatcher | 72c9eec228e33c9592243266e048dc02824d778d | ["MIT"] | null | null | null | project/matching/migrations/0001_initial.py | Project-EPIC/emergencypetmatcher | 72c9eec228e33c9592243266e048dc02824d778d | ["MIT"] | 1 | 2021-06-24T01:50:06.000Z | 2021-06-24T01:50:06.000Z |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('reporting', '0002_remove_petreport_revision_number'),
('socializing', '__first__'),
]
operations = [
migrations.CreateModel(
name='PetMatch',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('proposed_date', models.DateTimeField(auto_now_add=True)),
('has_failed', models.BooleanField(default=False)),
('down_votes', models.ManyToManyField(related_name='down_votes_related', to='socializing.UserProfile')),
('found_pet', models.ForeignKey(related_name='found_pet_related', default=None, to='reporting.PetReport')),
('lost_pet', models.ForeignKey(related_name='lost_pet_related', default=None, to='reporting.PetReport')),
('proposed_by', models.ForeignKey(related_name='proposed_by_related', to='socializing.UserProfile')),
('up_votes', models.ManyToManyField(related_name='up_votes_related', to='socializing.UserProfile')),
],
),
]
| 43.862069 | 123 | 0.644654 | 1,163 | 0.914308 | 0 | 0 | 0 | 0 | 0 | 0 | 411 | 0.323113 |
5f30dced4b52419281f10b8f50593c7065a03df1 | 3,273 | py | Python | sdk/python/pulumi_aws/get_ip_ranges.py | Charliekenney23/pulumi-aws | 55bd0390160d27350b297834026fee52114a2d41 | ["ECL-2.0", "Apache-2.0"] | null | null | null | sdk/python/pulumi_aws/get_ip_ranges.py | Charliekenney23/pulumi-aws | 55bd0390160d27350b297834026fee52114a2d41 | ["ECL-2.0", "Apache-2.0"] | null | null | null | sdk/python/pulumi_aws/get_ip_ranges.py | Charliekenney23/pulumi-aws | 55bd0390160d27350b297834026fee52114a2d41 | ["ECL-2.0", "Apache-2.0"] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from . import utilities, tables
class GetIpRangesResult:
"""
A collection of values returned by getIpRanges.
"""
def __init__(__self__, cidr_blocks=None, create_date=None, ipv6_cidr_blocks=None, regions=None, services=None, sync_token=None, url=None, id=None):
if cidr_blocks and not isinstance(cidr_blocks, list):
raise TypeError("Expected argument 'cidr_blocks' to be a list")
__self__.cidr_blocks = cidr_blocks
"""
The lexically ordered list of CIDR blocks.
"""
if create_date and not isinstance(create_date, str):
raise TypeError("Expected argument 'create_date' to be a str")
__self__.create_date = create_date
"""
The publication time of the IP ranges (e.g. `2016-08-03-23-46-05`).
"""
if ipv6_cidr_blocks and not isinstance(ipv6_cidr_blocks, list):
raise TypeError("Expected argument 'ipv6_cidr_blocks' to be a list")
__self__.ipv6_cidr_blocks = ipv6_cidr_blocks
"""
The lexically ordered list of IPv6 CIDR blocks.
"""
if regions and not isinstance(regions, list):
raise TypeError("Expected argument 'regions' to be a list")
__self__.regions = regions
if services and not isinstance(services, list):
raise TypeError("Expected argument 'services' to be a list")
__self__.services = services
if sync_token and not isinstance(sync_token, float):
raise TypeError("Expected argument 'sync_token' to be a float")
__self__.sync_token = sync_token
"""
The publication time of the IP ranges, in Unix epoch time format
(e.g. `1470267965`).
"""
if url and not isinstance(url, str):
raise TypeError("Expected argument 'url' to be a str")
__self__.url = url
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
id is the provider-assigned unique ID for this managed resource.
"""
async def get_ip_ranges(regions=None,services=None,url=None,opts=None):
"""
    Use this data source to get the IP ranges of various AWS products and services. For more information about the contents of this data source and required JSON syntax if referencing a custom URL, see the [AWS IP Address Ranges documentation][1].
"""
__args__ = dict()
__args__['regions'] = regions
__args__['services'] = services
__args__['url'] = url
__ret__ = await pulumi.runtime.invoke('aws:index/getIpRanges:getIpRanges', __args__, opts=opts)
return GetIpRangesResult(
cidr_blocks=__ret__.get('cidrBlocks'),
create_date=__ret__.get('createDate'),
ipv6_cidr_blocks=__ret__.get('ipv6CidrBlocks'),
regions=__ret__.get('regions'),
services=__ret__.get('services'),
sync_token=__ret__.get('syncToken'),
url=__ret__.get('url'),
id=__ret__.get('id'))
| 42.506494 | 245 | 0.65689 | 2,066 | 0.631225 | 0 | 0 | 0 | 0 | 922 | 0.281699 | 1,418 | 0.433242 |
5f31588bc153ffcf5b70c1b521bc861fcd11b513 | 5,123 | py | Python | lithops/localhost/local_handler.py | GEizaguirre/lithops | 296451ea3ebf630a5dca2f17248387e6bb1ee5b6 | ["Apache-2.0"] | null | null | null | lithops/localhost/local_handler.py | GEizaguirre/lithops | 296451ea3ebf630a5dca2f17248387e6bb1ee5b6 | ["Apache-2.0"] | null | null | null | lithops/localhost/local_handler.py | GEizaguirre/lithops | 296451ea3ebf630a5dca2f17248387e6bb1ee5b6 | ["Apache-2.0"] | null | null | null |
import os
import sys
import json
import pkgutil
import logging
import uuid
import time
import multiprocessing
from pathlib import Path
from threading import Thread
from types import SimpleNamespace
from multiprocessing import Process, Queue
from lithops.utils import version_str, is_unix_system
from lithops.worker import function_handler
from lithops.config import STORAGE_DIR, JOBS_DONE_DIR
from lithops import __version__
os.makedirs(STORAGE_DIR, exist_ok=True)
os.makedirs(JOBS_DONE_DIR, exist_ok=True)
log_file = os.path.join(STORAGE_DIR, 'local_handler.log')
logging.basicConfig(filename=log_file, level=logging.INFO)
logger = logging.getLogger('handler')
CPU_COUNT = multiprocessing.cpu_count()
def extract_runtime_meta():
runtime_meta = dict()
mods = list(pkgutil.iter_modules())
runtime_meta["preinstalls"] = [entry for entry in sorted([[mod, is_pkg]for _, mod, is_pkg in mods])]
runtime_meta["python_ver"] = version_str(sys.version_info)
print(json.dumps(runtime_meta))
class ShutdownSentinel():
"""Put an instance of this class on the queue to shut it down"""
pass
class LocalhostExecutor:
"""
A wrap-up around Localhost multiprocessing APIs.
"""
def __init__(self, config, executor_id, job_id, log_level):
logging.basicConfig(filename=log_file, level=log_level)
self.log_active = logger.getEffectiveLevel() != logging.WARNING
self.config = config
self.queue = Queue()
self.use_threads = not is_unix_system()
self.num_workers = self.config['lithops'].get('workers', CPU_COUNT)
self.workers = []
sys.stdout = open(log_file, 'a')
sys.stderr = open(log_file, 'a')
if self.use_threads:
for worker_id in range(self.num_workers):
p = Thread(target=self._process_runner, args=(worker_id,))
self.workers.append(p)
p.start()
else:
for worker_id in range(self.num_workers):
p = Process(target=self._process_runner, args=(worker_id,))
self.workers.append(p)
p.start()
        logger.info('ExecutorID {} | JobID {} - Localhost Executor started - {} workers'
                    .format(executor_id, job_id, self.num_workers))
def _process_runner(self, worker_id):
logger.debug('Localhost worker process {} started'.format(worker_id))
while True:
event = self.queue.get(block=True)
if isinstance(event, ShutdownSentinel):
break
act_id = str(uuid.uuid4()).replace('-', '')[:12]
os.environ['__LITHOPS_ACTIVATION_ID'] = act_id
event['extra_env']['__LITHOPS_LOCAL_EXECUTION'] = 'True'
function_handler(event)
def _invoke(self, job, call_id):
payload = {'config': self.config,
'log_level': logging.getLevelName(logger.getEffectiveLevel()),
'func_key': job.func_key,
'data_key': job.data_key,
'extra_env': job.extra_env,
'execution_timeout': job.execution_timeout,
'data_byte_range': job.data_ranges[int(call_id)],
'executor_id': job.executor_id,
'job_id': job.job_id,
'call_id': call_id,
'host_submit_tstamp': time.time(),
'lithops_version': __version__,
'runtime_name': job.runtime_name,
'runtime_memory': job.runtime_memory}
self.queue.put(payload)
def run(self, job_description):
job = SimpleNamespace(**job_description)
for i in range(job.total_calls):
call_id = "{:05d}".format(i)
self._invoke(job, call_id)
for i in self.workers:
self.queue.put(ShutdownSentinel())
def wait(self):
for worker in self.workers:
worker.join()
if __name__ == "__main__":
logger.info('Starting Localhost job handler')
command = sys.argv[1]
logger.info('Received command: {}'.format(command))
if command == 'preinstalls':
extract_runtime_meta()
elif command == 'run':
job_filename = sys.argv[2]
logger.info('Got {} job file'.format(job_filename))
with open(job_filename, 'rb') as jf:
job = SimpleNamespace(**json.load(jf))
logger.info('ExecutorID {} | JobID {} - Starting execution'
.format(job.executor_id, job.job_id))
        localhost_executor = LocalhostExecutor(job.config, job.executor_id,
                                               job.job_id, job.log_level)
        localhost_executor.run(job.job_description)
        localhost_executor.wait()
sentinel = '{}/{}_{}.done'.format(JOBS_DONE_DIR,
job.executor_id.replace('/', '-'),
job.job_id)
Path(sentinel).touch()
logger.info('ExecutorID {} | JobID {} - Execution Finished'
.format(job.executor_id, job.job_id))
| 33.927152 | 104 | 0.610189 | 2,959 | 0.577591 | 0 | 0 | 0 | 0 | 0 | 0 | 793 | 0.154792 |
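The `ShutdownSentinel` idiom used in `local_handler.py` above is worth isolating; a minimal self-contained sketch (the worker body is invented for illustration):

```python
from multiprocessing import Process, Queue

class ShutdownSentinel:
    """Put an instance of this class on the queue to shut a worker down."""

def worker(queue):
    while True:
        item = queue.get(block=True)
        if isinstance(item, ShutdownSentinel):
            break          # clean exit: no magic poison values, no timeouts
        print('processing', item)

if __name__ == '__main__':
    q = Queue()
    p = Process(target=worker, args=(q,))
    p.start()
    for task in range(3):
        q.put(task)
    q.put(ShutdownSentinel())  # one sentinel per worker process
    p.join()
```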
a024addbdbfe31b4c073485e18f6d69dfd1ced29 | 17,878 | py | Python | app/FileViewer/FileServer/misc/mmpython/video/riffinfo.py | benyaboy/sage-graphics | 090640167329ace4b6ad266d47db5bb2b0394232 | ["Unlicense"] | null | null | null | app/FileViewer/FileServer/misc/mmpython/video/riffinfo.py | benyaboy/sage-graphics | 090640167329ace4b6ad266d47db5bb2b0394232 | ["Unlicense"] | null | null | null | app/FileViewer/FileServer/misc/mmpython/video/riffinfo.py | benyaboy/sage-graphics | 090640167329ace4b6ad266d47db5bb2b0394232 | ["Unlicense"] | 1 | 2021-07-02T10:31:03.000Z | 2021-07-02T10:31:03.000Z |
#if 0
# $Id: riffinfo.py,v 1.33 2005/03/15 17:50:45 dischi Exp $
# $Log: riffinfo.py,v $
# Revision 1.33 2005/03/15 17:50:45 dischi
# check for corrupt avi
#
# Revision 1.32 2005/03/04 17:41:29 dischi
# handle broken avi files
#
# Revision 1.31 2004/12/13 10:19:07 dischi
# more debug, support LIST > 20000 (new max is 80000)
#
# Revision 1.30 2004/08/25 16:18:14 dischi
# detect aspect ratio
#
# Revision 1.29 2004/05/24 16:17:09 dischi
# Small changes for future updates
#
# Revision 1.28 2004/01/31 12:23:46 dischi
# remove bad chars from table (e.g. char 0 is True)
#
# Revision 1.27 2003/10/04 14:30:08 dischi
# add audio delay for avi
#
# Revision 1.26 2003/07/10 11:18:11 the_krow
# few more attributes added
#
# Revision 1.25 2003/07/07 21:36:44 dischi
# make fps a float and round it to two digest after the comma
#
# Revision 1.24 2003/07/05 19:36:37 the_krow
# length fixed
# fps introduced
#
# Revision 1.23 2003/07/02 11:17:30 the_krow
# language is now part of the table key
#
# Revision 1.22 2003/07/01 21:06:50 dischi
# no need to import factory (and when, use "from mmpython import factory"
#
# Revision 1.21 2003/06/30 13:17:20 the_krow
# o Refactored mediainfo into factory, synchronizedobject
# o Parsers now register directly at mmpython not at mmpython.mediainfo
# o use mmpython.Factory() instead of mmpython.mediainfo.get_singleton()
# o Bugfix in PNG parser
# o Renamed disc.AudioInfo into disc.AudioDiscInfo
# o Renamed disc.DataInfo into disc.DataDiscInfo
#
# Revision 1.20 2003/06/23 20:48:11 the_krow
# width + height fixes for OGM files
#
# Revision 1.19 2003/06/23 20:38:04 the_krow
# Support for larger LIST chunks because some files did not work.
#
# Revision 1.18 2003/06/20 19:17:22 dischi
# remove filename again and use file.name
#
# Revision 1.17 2003/06/20 19:05:56 dischi
# scan for subtitles
#
# Revision 1.16 2003/06/20 15:29:42 the_krow
# Metadata Mapping
#
# Revision 1.15 2003/06/20 14:43:57 the_krow
# Putting Metadata into MediaInfo from AVIInfo Table
#
# Revision 1.14 2003/06/09 16:10:52 dischi
# error handling
#
# Revision 1.13 2003/06/08 19:53:21 dischi
# also give the filename to init for additional data tests
#
# Revision 1.12 2003/06/08 13:44:58 dischi
# Changed all imports to use the complete mmpython path for mediainfo
#
# Revision 1.11 2003/06/08 13:11:38 dischi
# removed print at the end and moved it into register
#
# Revision 1.10 2003/06/07 23:10:50 the_krow
# Changed mp3 into new format.
#
# Revision 1.9 2003/06/07 22:30:22 the_krow
# added new avinfo structure
#
# Revision 1.8 2003/06/07 21:48:47 the_krow
# Added Copying info
# started changing riffinfo to new AV stuff
#
# Revision 1.7 2003/05/13 12:31:43 the_krow
# + Copyright Notice
#
#
# MMPython - Media Metadata for Python
# Copyright (C) 2003 Thomas Schueppel, Dirk Meyer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MER-
# CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# -----------------------------------------------------------------------
#endif
import re
import struct
import string
import fourcc
# import factory
import mmpython
from mmpython import mediainfo
# List of tags
# http://kibus1.narod.ru/frames_eng.htm?sof/abcavi/infotags.htm
# http://www.divx-digest.com/software/avitags_dll.html
# File Format
# http://www.taenam.co.kr/pds/documents/odmlff2.pdf
_print = mediainfo._debug
AVIINFO_tags = { 'title': 'INAM',
'artist': 'IART',
'product': 'IPRD',
'date': 'ICRD',
'comment': 'ICMT',
'language': 'ILNG',
'keywords': 'IKEY',
'trackno': 'IPRT',
'trackof': 'IFRM',
'producer': 'IPRO',
'writer': 'IWRI',
'genre': 'IGNR',
'copyright': 'ICOP',
'trackno': 'IPRT',
'trackof': 'IFRM',
'comment': 'ICMT',
}
class RiffInfo(mediainfo.AVInfo):
def __init__(self,file):
mediainfo.AVInfo.__init__(self)
# read the header
h = file.read(12)
if h[:4] != "RIFF" and h[:4] != 'SDSS':
self.valid = 0
return
self.valid = 1
self.mime = 'application/x-wave'
self.has_idx = False
self.header = {}
self.junkStart = None
self.infoStart = None
self.type = h[8:12]
self.tag_map = { ('AVIINFO', 'en') : AVIINFO_tags }
if self.type == 'AVI ':
self.mime = 'video/avi'
elif self.type == 'WAVE':
self.mime = 'application/x-wave'
try:
while self.parseRIFFChunk(file):
pass
except IOError:
if mediainfo.DEBUG:
print 'error in file, stop parsing'
self.find_subtitles(file.name)
# Copy Metadata from tables into the main set of attributes
for k in self.tag_map.keys():
map(lambda x:self.setitem(x,self.gettable(k[0],k[1]),self.tag_map[k][x]),
self.tag_map[k].keys())
if not self.has_idx:
_print('WARNING: avi has no index')
self.corrupt = 1
self.keys.append('corrupt')
def _extractHeaderString(self,h,offset,len):
return h[offset:offset+len]
def parseAVIH(self,t):
retval = {}
v = struct.unpack('<IIIIIIIIIIIIII',t[0:56])
( retval['dwMicroSecPerFrame'],
retval['dwMaxBytesPerSec'],
retval['dwPaddingGranularity'],
retval['dwFlags'],
retval['dwTotalFrames'],
retval['dwInitialFrames'],
retval['dwStreams'],
retval['dwSuggestedBufferSize'],
retval['dwWidth'],
retval['dwHeight'],
retval['dwScale'],
retval['dwRate'],
retval['dwStart'],
retval['dwLength'] ) = v
if retval['dwMicroSecPerFrame'] == 0:
_print("ERROR: Corrupt AVI")
self.valid = 0
return {}
return retval
def parseSTRH(self,t):
retval = {}
retval['fccType'] = t[0:4]
_print("parseSTRH(%s) : %d bytes" % ( retval['fccType'], len(t)))
if retval['fccType'] != 'auds':
retval['fccHandler'] = t[4:8]
v = struct.unpack('<IHHIIIIIIIII',t[8:52])
( retval['dwFlags'],
retval['wPriority'],
retval['wLanguage'],
retval['dwInitialFrames'],
retval['dwScale'],
retval['dwRate'],
retval['dwStart'],
retval['dwLength'],
retval['dwSuggestedBufferSize'],
retval['dwQuality'],
retval['dwSampleSize'],
retval['rcFrame'], ) = v
else:
try:
v = struct.unpack('<IHHIIIIIIIII',t[8:52])
( retval['dwFlags'],
retval['wPriority'],
retval['wLanguage'],
retval['dwInitialFrames'],
retval['dwScale'],
retval['dwRate'],
retval['dwStart'],
retval['dwLength'],
retval['dwSuggestedBufferSize'],
retval['dwQuality'],
retval['dwSampleSize'],
retval['rcFrame'], ) = v
self.delay = float(retval['dwStart']) / \
(float(retval['dwRate']) / retval['dwScale'])
except:
pass
return retval
def parseSTRF(self,t,strh):
fccType = strh['fccType']
retval = {}
if fccType == 'auds':
( retval['wFormatTag'],
retval['nChannels'],
retval['nSamplesPerSec'],
retval['nAvgBytesPerSec'],
retval['nBlockAlign'],
retval['nBitsPerSample'],
) = struct.unpack('<HHHHHH',t[0:12])
ai = mediainfo.AudioInfo()
ai.samplerate = retval['nSamplesPerSec']
ai.channels = retval['nChannels']
ai.samplebits = retval['nBitsPerSample']
ai.bitrate = retval['nAvgBytesPerSec'] * 8
# TODO: set code if possible
# http://www.stats.uwa.edu.au/Internal/Specs/DXALL/FileSpec/Languages
# ai.language = strh['wLanguage']
try:
ai.codec = fourcc.RIFFWAVE[retval['wFormatTag']]
except:
ai.codec = "Unknown"
self.audio.append(ai)
elif fccType == 'vids':
v = struct.unpack('<IIIHH',t[0:16])
( retval['biSize'],
retval['biWidth'],
retval['biHeight'],
retval['biPlanes'],
retval['biBitCount'], ) = v
retval['fourcc'] = t[16:20]
v = struct.unpack('IIIII',t[20:40])
( retval['biSizeImage'],
retval['biXPelsPerMeter'],
retval['biYPelsPerMeter'],
retval['biClrUsed'],
retval['biClrImportant'], ) = v
vi = mediainfo.VideoInfo()
try:
vi.codec = fourcc.RIFFCODEC[t[16:20]]
except:
vi.codec = "Unknown"
vi.width = retval['biWidth']
vi.height = retval['biHeight']
vi.bitrate = strh['dwRate']
vi.fps = round(float(strh['dwRate'] * 100) / strh['dwScale']) / 100
vi.length = strh['dwLength'] / vi.fps
self.video.append(vi)
return retval
def parseSTRL(self,t):
retval = {}
size = len(t)
i = 0
key = t[i:i+4]
sz = struct.unpack('<I',t[i+4:i+8])[0]
i+=8
value = t[i:]
if key == 'strh':
retval[key] = self.parseSTRH(value)
i += sz
else:
_print("parseSTRL: Error")
key = t[i:i+4]
sz = struct.unpack('<I',t[i+4:i+8])[0]
i+=8
value = t[i:]
if key == 'strf':
retval[key] = self.parseSTRF(value, retval['strh'])
i += sz
return ( retval, i )
def parseODML(self,t):
retval = {}
size = len(t)
i = 0
key = t[i:i+4]
sz = struct.unpack('<I',t[i+4:i+8])[0]
i += 8
value = t[i:]
if key == 'dmlh':
pass
else:
_print("parseODML: Error")
i += sz - 8
return ( retval, i )
def parseVPRP(self,t):
retval = {}
v = struct.unpack('<IIIIIIIIII',t[:4*10])
( retval['VideoFormat'],
retval['VideoStandard'],
retval['RefreshRate'],
retval['HTotalIn'],
retval['VTotalIn'],
retval['FrameAspectRatio'],
retval['wPixel'],
retval['hPixel'] ) = v[1:-1]
# I need an avi with more informations
# enum {FORMAT_UNKNOWN, FORMAT_PAL_SQUARE, FORMAT_PAL_CCIR_601,
# FORMAT_NTSC_SQUARE, FORMAT_NTSC_CCIR_601,...} VIDEO_FORMAT;
# enum {STANDARD_UNKNOWN, STANDARD_PAL, STANDARD_NTSC, STANDARD_SECAM}
# VIDEO_STANDARD;
#
r = retval['FrameAspectRatio']
r = float(r >> 16) / (r & 0xFFFF)
retval['FrameAspectRatio'] = r
if self.video:
map(lambda v: setattr(v, 'aspect', r), self.video)
return ( retval, v[0] )
def parseLIST(self,t):
retval = {}
i = 0
size = len(t)
while i < size-8:
# skip zero
if ord(t[i]) == 0: i += 1
key = t[i:i+4]
sz = 0
if key == 'LIST':
sz = struct.unpack('<I',t[i+4:i+8])[0]
_print("-> SUBLIST: len: %d, %d" % ( sz, i+4 ))
i+=8
key = "LIST:"+t[i:i+4]
value = self.parseLIST(t[i:i+sz])
_print("<-")
if key == 'strl':
for k in value.keys():
retval[k] = value[k]
else:
retval[key] = value
i+=sz
elif key == 'avih':
_print("SUBAVIH")
sz = struct.unpack('<I',t[i+4:i+8])[0]
i += 8
value = self.parseAVIH(t[i:i+sz])
i += sz
retval[key] = value
elif key == 'strl':
i += 4
(value, sz) = self.parseSTRL(t[i:])
_print("SUBSTRL: len: %d" % sz)
key = value['strh']['fccType']
i += sz
retval[key] = value
elif key == 'odml':
i += 4
(value, sz) = self.parseODML(t[i:])
_print("ODML: len: %d" % sz)
i += sz
elif key == 'vprp':
i += 4
(value, sz) = self.parseVPRP(t[i:])
_print("VPRP: len: %d" % sz)
retval[key] = value
i += sz
elif key == 'JUNK':
sz = struct.unpack('<I',t[i+4:i+8])[0]
i += sz + 8
_print("Skipping %d bytes of Junk" % sz)
else:
sz = struct.unpack('<I',t[i+4:i+8])[0]
_print("Unknown Key: %s, len: %d" % (key,sz))
i+=8
value = self._extractHeaderString(t,i,sz)
value = value.replace('\0', '').lstrip().rstrip()
if value:
retval[key] = value
i+=sz
return retval
def parseRIFFChunk(self,file):
h = file.read(8)
if len(h) < 4:
return False
name = h[:4]
size = struct.unpack('<I',h[4:8])[0]
if name == 'LIST' and size < 80000:
pos = file.tell() - 8
t = file.read(size)
key = t[:4]
_print('parse RIFF LIST: %d bytes' % (size))
value = self.parseLIST(t[4:])
self.header[key] = value
if key == 'INFO':
self.infoStart = pos
self.appendtable( 'AVIINFO', value )
elif key == 'MID ':
self.appendtable( 'AVIMID', value )
elif key in ('hdrl', ):
# no need to add this info to a table
pass
else:
_print('Skipping table info %s' % key)
elif name == 'JUNK':
self.junkStart = file.tell() - 8
self.junkSize = size
file.seek(size, 1)
elif name == 'idx1':
self.has_idx = True
_print('idx1: %s bytes' % size)
# no need to parse this
t = file.seek(size,1)
elif name == 'LIST':
_print('RIFF LIST to long to parse: %s bytes' % size)
# no need to parse this
t = file.seek(size,1)
elif name == 'RIFF':
_print("New RIFF chunk, extended avi [%i]" % size)
type = file.read(4)
if type != 'AVIX':
                _print("Second RIFF chunk is %s, not AVIX, skipping" % type)
file.seek(size-4, 1)
# that's it, no new informations should be in AVIX
return False
elif not name.strip(string.printable + string.whitespace):
# check if name is something usefull at all, maybe it is no
# avi or broken
t = file.seek(size,1)
_print("Skipping %s [%i]" % (name,size))
else:
# bad avi
_print("Bad or broken avi")
return False
return True
def buildTag(self,key,value):
text = value + '\0'
l = len(text)
return struct.pack('<4sI%ds'%l, key[:4], l, text[:l])
def setInfo(self,file,hash):
if self.junkStart == None:
            raise ValueError("junkstart missing")
tags = []
size = 4 # Length of 'INFO'
# Build String List and compute req. size
for key in hash.keys():
tag = self.buildTag( key, hash[key] )
if (len(tag))%2 == 1: tag += '\0'
tags.append(tag)
size += len(tag)
_print("Tag [%i]: %s" % (len(tag),tag))
if self.infoStart != None:
_print("Infostart found. %i" % (self.infoStart))
# Read current info size
file.seek(self.infoStart,0)
s = file.read(12)
(list, oldsize, info) = struct.unpack('<4sI4s',s)
self.junkSize += oldsize + 8
else:
self.infoStart = self.junkStart
_print("Infostart computed. %i" % (self.infoStart))
file.seek(self.infoStart,0)
if ( size > self.junkSize - 8 ):
            raise ValueError("Too large")
file.write( "LIST" + struct.pack('<I',size) + "INFO" )
for tag in tags:
file.write( tag )
_print("Junksize %i" % (self.junkSize-size-8))
file.write( "JUNK" + struct.pack('<I',self.junkSize-size-8) )
mmpython.registertype( 'video/avi', ('avi',), mediainfo.TYPE_AV, RiffInfo )
| 33.107407 | 85 | 0.507663 | 13,208 | 0.738785 | 0 | 0 | 0 | 0 | 0 | 0 | 6,889 | 0.385334 |
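A standalone sketch of the header check `RiffInfo.__init__` performs above (written for Python 3; the file name is a placeholder):

```python
import struct

with open('movie.avi', 'rb') as f:       # placeholder path
    h = f.read(12)

riff_tag = h[:4]                         # b'RIFF' for RIFF containers
size = struct.unpack('<I', h[4:8])[0]    # little-endian chunk size
form_type = h[8:12]                      # b'AVI ' or b'WAVE'
print(riff_tag, size, form_type)
```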
a0282750f46f0e414d25e4e4d34acff48c249677 | 843 | py | Python | h1st_contrib/cli/__init__.py | h1st-ai/h1st-contrib | 38fbb1fff4513bb3433bc12f2b436836e5e51c80 | ["Apache-2.0"] | 1 | 2022-02-19T18:55:43.000Z | 2022-02-19T18:55:43.000Z | h1st_contrib/cli/__init__.py | h1st-ai/h1st-contrib | 38fbb1fff4513bb3433bc12f2b436836e5e51c80 | ["Apache-2.0"] | null | null | null | h1st_contrib/cli/__init__.py | h1st-ai/h1st-contrib | 38fbb1fff4513bb3433bc12f2b436836e5e51c80 | ["Apache-2.0"] | null | null | null |
"""H1st CLI."""
import click
from .pred_maint import h1st_pmfp_cli
@click.group(name='h1st',
cls=click.Group,
commands={
'pmfp': h1st_pmfp_cli,
},
# Command kwargs
context_settings=None,
# callback=None,
# params=None,
help='H1st CLI >>>',
epilog='^^^ H1st CLI',
short_help='H1st CLI',
options_metavar='[OPTIONS]',
add_help_option=True,
no_args_is_help=True,
hidden=False,
deprecated=False,
# Group/MultiCommand kwargs
invoke_without_command=False,
subcommand_metavar='H1ST_SUB_COMMAND',
chain=False,
result_callback=None)
def h1st_cli():
"""H1st CLI."""
| 24.085714 | 51 | 0.498221 | 0 | 0 | 0 | 0 | 770 | 0.913405 | 0 | 0 | 182 | 0.215896 |
a0294cc7bc1ae9063a289d36fbf581ccd346caba | 1,008 | py | Python | SC_projects/recursion/dice_rolls_sum.py | hsiaohan416/stancode | 8920f2e99e184d165fa04551f24a2da8b0975219 | ["MIT"] | null | null | null | SC_projects/recursion/dice_rolls_sum.py | hsiaohan416/stancode | 8920f2e99e184d165fa04551f24a2da8b0975219 | ["MIT"] | null | null | null | SC_projects/recursion/dice_rolls_sum.py | hsiaohan416/stancode | 8920f2e99e184d165fa04551f24a2da8b0975219 | ["MIT"] | null | null | null |
"""
File: dice_rolls_sum.py
Name: Sharon
-----------------------------
This program finds all the dice-roll permutations
that sum up to a constant TOTAL. Students will find
early stopping a good strategy for decreasing the number
of recursive calls.
"""
# This constant controls the sum of dice of our interest
TOTAL = 8
# global variable
run_times = 0
def main():
dice_sum(TOTAL)
print(f'Total run times: {run_times}')
def dice_sum(total):
dice_sum_helper(total, [])
def dice_sum_helper(total, ans):
global run_times
run_times += 1
if sum(ans) == total:
print(ans)
else:
for roll in [1, 2, 3, 4, 5, 6]:
if sum(ans) <= total:
diff = total - sum(ans)
                if diff >= roll:   # >= keeps the roll that lands exactly on the total
# choose
ans.append(roll)
# explore
dice_sum_helper(total, ans)
# un-choose
ans.pop()
if __name__ == '__main__':
main()
| 21 | 56 | 0.545635 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 394 | 0.390873 |
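Because the recursion only appends a roll while it still fits (`diff >= roll`), the program prints every ordered roll sequence that reaches the total. A worked example with `TOTAL = 3`:

```python
# dice_sum(3) prints:
#   [1, 1, 1]
#   [1, 2]
#   [2, 1]
#   [3]
# Total run times: 8   (one helper call per node the pruned search actually visits)
```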
a02a39862663da51e8f5219d5dd8ae0de6edd96f | 909 | py | Python | valid_parentheses.py | KevinLuo41/LeetCodeInPython | 051e1aab9bab17b0d63b4ca73473a7a00899a16a | ["Apache-2.0"] | 19 | 2015-01-19T19:36:09.000Z | 2020-03-18T03:10:12.000Z | valid_parentheses.py | CodingVault/LeetCodeInPython | 051e1aab9bab17b0d63b4ca73473a7a00899a16a | ["Apache-2.0"] | null | null | null | valid_parentheses.py | CodingVault/LeetCodeInPython | 051e1aab9bab17b0d63b4ca73473a7a00899a16a | ["Apache-2.0"] | 12 | 2015-04-25T14:20:38.000Z | 2020-09-27T04:59:59.000Z |
#!/usr/bin/env python
# encoding: utf-8
"""
valid_parentheses.py
Created by Shengwei on 2014-07-24.
"""
# https://oj.leetcode.com/problems/valid-parentheses/
# tags: easy, array, parentheses, stack
"""
Given a string containing just the characters '(', ')', '{', '}', '[' and ']', determine if the input string is valid.
The brackets must close in the correct order, "()" and "()[]{}" are all valid but "(]" and "([)]" are not.
"""
class Solution:
# @return a boolean
def isValid(self, s):
mappings = {')': '(', ']': '[', '}': '{'}
stack = []
for par in s:
if par in mappings.values():
stack.append(par)
elif stack and stack[-1] == mappings[par]:
stack.pop()
else:
return False
# note: remember to check if stack is empty
        return not stack
| 26.735294 | 118 | 0.540154 | 471 | 0.518152 | 0 | 0 | 0 | 0 | 0 | 0 | 508 | 0.558856 |
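A quick usage sketch for the solution above, with inputs taken from the problem statement:

```python
s = Solution()
print(s.isValid("()[]{}"))  # True
print(s.isValid("([)]"))    # False: wrong closing order
print(s.isValid("(("))      # False: unmatched brackets remain on the stack
```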
a0302470addb803e75aa2587a1202d6ec072bcdf | 3,104 | py | Python | doc/terms.py | RaenonX/Jelly-Bot-API | c7da1e91783dce3a2b71b955b3a22b68db9056cf | ["MIT"] | 5 | 2020-08-26T20:12:00.000Z | 2020-12-11T16:39:22.000Z | doc/terms.py | RaenonX/Jelly-Bot | c7da1e91783dce3a2b71b955b3a22b68db9056cf | ["MIT"] | 234 | 2019-12-14T03:45:19.000Z | 2020-08-26T18:55:19.000Z | doc/terms.py | RaenonX/Jelly-Bot-API | c7da1e91783dce3a2b71b955b3a22b68db9056cf | ["MIT"] | 2 | 2019-10-23T15:21:15.000Z | 2020-05-22T09:35:55.000Z |
"""Terms used in the bot."""
from collections import OrderedDict
from dataclasses import dataclass
from typing import List
from django.utils.translation import gettext_lazy as _
from JellyBot.systemconfig import Database
@dataclass
class TermExplanation:
"""An entry for a term and its explanation."""
term: str
description: str
example: str
@dataclass
class TermsCollection:
"""A holder containing multiple terms."""
name: str
terms: List[TermExplanation]
terms_collection = OrderedDict()
terms_collection["Core"] = TermsCollection(
_("Core"),
[TermExplanation(_("Operation"),
_("The system has two ways to control: on the website, using the API. "
"Some actions may only available at a single side."),
_("-"))
]
)
terms_collection["Features"] = TermsCollection(
_("Features"),
[TermExplanation(_("Auto-Reply"),
_("When the system receives/sees a word, it will reply back certain word(s) if it is set."),
_("User A setup an Auto-Reply module, which keyword is **A** and reply is **B**. Then, "
"somebody typed **A** wherever Jelly BOT can see, so Jelly BOT will reply **B** back.")),
TermExplanation(_("Execode"),
_("The users provide partial required information for an operation, then the system will yield a "
"code (Execode) to the users for completing it while holding it for %d hrs.<br>"
"Users will need to use the given Execode with the missing information for completing the "
"operation before it expires.") % (Database.ExecodeExpirySeconds // 3600),
                    _("User B created an Auto-Reply module on the website and chose the issue-an-Execode option. "
                      "Then, he submitted the Execode in the channel, so the Auto-Reply module is registered.")),
TermExplanation(_("Profile System/Permission"),
_("Users can have multiple profiles in the channel for various features use. Profiles will have "
"some permission or their privilege attached.<br>Some profiles may be granted by votes from "
"channel members or assigned by channel manager.<br>"
"This system is similar to the role system of **Discord**."),
_("ChannelA have profiles called **A** with admin privilege and **B** for normal users.<br>"
"Users who have profile **A** assigned will be able to "
"use features that only admins can use.")),
TermExplanation(_("Channel Management"),
                    _("Users will be able to adjust the settings specifically designated to the channel. "
                      "The availability of what can be adjusted will depend on the user's profile."),
_("Eligibility of accessing the pinned auto-reply module, "
"changing the admin/mod of a channel...etc.")),
]
)
| 47.030303 | 119 | 0.61018 | 242 | 0.077964 | 0 | 0 | 264 | 0.085052 | 0 | 0 | 1,830 | 0.589562 |
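The Execode flow described in the table above (issue a code for a partially specified operation, hold it, complete it before expiry) can be sketched as a toy. This is not Jelly Bot's actual implementation; every name here is invented for illustration:

```python
import time
import uuid

EXPIRY_SECONDS = 24 * 3600   # stand-in for Database.ExecodeExpirySeconds
_pending = {}                # execode -> (issued_at, partial_info)

def issue_execode(partial_info):
    code = uuid.uuid4().hex[:10]
    _pending[code] = (time.time(), partial_info)
    return code

def complete_execode(code, missing_info):
    issued_at, partial_info = _pending.pop(code)
    if time.time() - issued_at > EXPIRY_SECONDS:
        raise ValueError('Execode expired')
    # Merge the held partial data with the late-supplied fields.
    return {**partial_info, **missing_info}
```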
a03080839b2f11f2ef3a1cda34e010ada93f1947 | 2,619 | py | Python | tofnet/training/losses.py | victorjoos/tof2net | 068f5f08a241dbfb950251bea52fd9379466bf2f | ["MIT"] | null | null | null | tofnet/training/losses.py | victorjoos/tof2net | 068f5f08a241dbfb950251bea52fd9379466bf2f | ["MIT"] | 8 | 2021-02-02T23:07:37.000Z | 2022-03-12T00:51:26.000Z | tofnet/training/losses.py | victorjoos/tof2net | 068f5f08a241dbfb950251bea52fd9379466bf2f | ["MIT"] | 2 | 2020-10-01T08:23:24.000Z | 2020-11-09T22:01:47.000Z |
import numpy as np
import operator
from itertools import cycle
from torch import nn
import torch.nn.functional as F
import torch
from torch.nn.modules.loss import *
from kornia.losses import *
class Loss:
def compute(self, output, target):
raise NotImplementedError()
def __call__(self, *args, **kwargs):
return self.compute(*args, **kwargs)
def __add__(self, other):
return LossesLambda(operator.add, self, other)
def __radd__(self, other):
return LossesLambda(operator.add, other, self)
def __sub__(self, other):
return LossesLambda(operator.sub, self, other)
def __rsub__(self, other):
return LossesLambda(operator.sub, other, self)
def __mul__(self, other):
return LossesLambda(operator.mul, self, other)
def __rmul__(self, other):
return LossesLambda(operator.mul, other, self)
class LossesLambda(Loss):
def __init__(self, f, *args, **kwargs):
self.function = f
self.args = args
self.kwargs = kwargs
def compute(self, output, target):
materialized = [i.compute(output, target) if isinstance(i, Loss) else i for i in self.args]
materialized_kwargs = {k: (v.compute(output, target) if isinstance(v, Loss) else v) for k, v in self.kwargs.items()}
return self.function(*materialized, **materialized_kwargs)
class Criterion(Loss):
def __init__(self, loss_fn):
self.loss_fn = loss_fn
def compute(self, output, target):
return self.loss_fn(output, target)
class MultiCriterion:
def __init__(self, losses:[Loss], weights=None):
self.losses = losses
self.weights = weights or [1] * len(losses)
def compute(self, outputs, targets):
output_loss = 0.0
for output, target, loss, weight in zip(outputs, targets, cycle(self.losses), cycle(self.weights)):
output_loss += weight * loss(output, target)
return output_loss
def __getitem__(self, idx):
return self.losses[idx % len(self.losses)]
def __call__(self, *args, **kwargs):
return self.compute(*args, **kwargs)
"""
####################
Single-class CNN
####################
"""
class IntBCEWithLogitsLoss(BCEWithLogitsLoss):
    def __init__(self, weight=None, size_average=None, reduce=None, reduction='mean', pos_weight=None):
        # BCEWithLogitsLoss.__init__ registers the weight/pos_weight buffers itself
        super().__init__(weight=weight, size_average=size_average, reduce=reduce,
                         reduction=reduction, pos_weight=pos_weight)
def forward(self, input, target):
return super().forward(input, target.float())
| 30.811765 | 124 | 0.655594 | 2,333 | 0.890798 | 0 | 0 | 0 | 0 | 0 | 0 | 106 | 0.040473 |
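The operator overloads on `Loss` above let weighted combinations be written arithmetically; a usage sketch (the weights and tensors are invented):

```python
import torch
from torch import nn

l1 = Criterion(nn.L1Loss())
mse = Criterion(nn.MSELoss())
combined = 0.5 * l1 + 0.5 * mse   # builds a LossesLambda expression tree

output = torch.randn(4, 3)
target = torch.randn(4, 3)
print(combined(output, target))   # 0.5 * L1 + 0.5 * MSE
```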
a030c83c63bfd7833e9eefaa8a970b32e999331c | 5,111 | py | Python | tests/cli/test_database.py | julienc91/dbtrigger | d06916a019641377bf3d45b2e8e38399643450db | ["MIT"] | null | null | null | tests/cli/test_database.py | julienc91/dbtrigger | d06916a019641377bf3d45b2e8e38399643450db | ["MIT"] | null | null | null | tests/cli/test_database.py | julienc91/dbtrigger | d06916a019641377bf3d45b2e8e38399643450db | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
import uuid
import pytest
from dbtrigger.cli import DatabaseCli, QueryCli, ServerCli
from dbtrigger.config import settings
from dbtrigger.models import Database
@pytest.fixture(autouse=True)
def add_server(server):
ServerCli.add(server.identifier, server.hostname, server.dialect)
def compare_databases(db1, db2):
assert db1.identifier == db2.identifier
assert db1.server.identifier == db2.server.identifier
assert db1.name == db2.name
assert db1.username == db2.username
assert db1.password == db2.password
return True
def test_list(database, server):
DatabaseCli.add(database.identifier, server.identifier, database.name, database.username, database.password)
DatabaseCli.list()
def test_list_no_databasess():
DatabaseCli.list()
def add_database(database, server):
assert len(settings.databases) == 0
DatabaseCli.add(database.identifier, server.identifier, database.name, database.username, database.password)
assert len(settings.databases) == 1
assert compare_databases(database, settings.databases[database.identifier])
def test_add_database_duplicate(database, server):
DatabaseCli.add(database.identifier, server.identifier, database.name, database.username, database.password)
with pytest.raises(ValueError):
DatabaseCli.add(database.identifier, server.identifier, database.name, database.username, database.password)
assert len(settings.databases) == 1
def test_add_database_not_existing_server(database):
with pytest.raises(ValueError):
DatabaseCli.add(database.identifier, str(uuid.uuid4()), database.name, database.username, database.password)
assert len(settings.databases) == 0
def test_delete_database(database, server):
DatabaseCli.add(database.identifier, server.identifier, database.name, database.username, database.password)
DatabaseCli.delete(database.identifier)
assert len(settings.databases) == 0
def test_delete_database_with_queries(database, server, query):
DatabaseCli.add(database.identifier, server.identifier, database.name, database.username, database.password)
QueryCli.add(query.identifier, database.identifier, query.query)
assert len(settings.databases) == 1
assert len(settings.queries) == 1
DatabaseCli.delete(database.identifier)
assert len(settings.databases) == 0
assert len(settings.queries) == 0
def test_delete_database_not_existing(database, server):
DatabaseCli.add(database.identifier, server.identifier, database.name, database.username, database.password)
DatabaseCli.delete(database.identifier)
with pytest.raises(ValueError):
DatabaseCli.delete(database.identifier)
def test_update_database_not_existing(database, server):
with pytest.raises(ValueError):
DatabaseCli.update(database.identifier, server, 'new name', 'new username', 'new password')
def test_update_database_not_existing_server(database, server):
DatabaseCli.add(database.identifier, server.identifier, database.name, database.username, database.password)
with pytest.raises(ValueError):
DatabaseCli.update(database.identifier, str(uuid.uuid4()), 'new name', 'new username', 'new password')
def test_rename_database(database, server):
DatabaseCli.add(database.identifier, server.identifier, database.name, database.username, database.password)
new_identifier = str(uuid.uuid4())
DatabaseCli.rename(database.identifier, new_identifier)
assert len(settings.databases) == 1
renamed_db = settings.databases[new_identifier]
database.identifier = new_identifier
assert compare_databases(database, renamed_db)
def test_rename_database_with_queries(database, server, query):
DatabaseCli.add(database.identifier, server.identifier, database.name, database.username, database.password)
QueryCli.add(query.identifier, database.identifier, query.query)
new_identifier = str(uuid.uuid4())
DatabaseCli.rename(database.identifier, new_identifier)
assert len(settings.databases) == 1
assert len(settings.queries) == 1
assert settings.queries[query.identifier].database.identifier == new_identifier
def test_rename_database_not_existing(database):
new_identifier = str(uuid.uuid4())
with pytest.raises(ValueError):
DatabaseCli.rename(database.identifier, new_identifier)
def test_rename_database_duplicated(database, server):
other_identifier = str(uuid.uuid4())
other_db = Database(other_identifier, server, database.name, database.username, database.password)
DatabaseCli.add(database.identifier, server.identifier, database.name, database.username, database.password)
DatabaseCli.add(other_db.identifier, server.identifier, other_db.name, other_db.username, other_db.password)
assert len(settings.databases) == 2
with pytest.raises(ValueError):
DatabaseCli.rename(database.identifier, other_identifier)
assert compare_databases(database, settings.databases[database.identifier])
assert compare_databases(other_db, settings.databases[other_identifier])
| 38.428571 | 116 | 0.772256 | 0 | 0 | 0 | 0 | 123 | 0.024066 | 0 | 0 | 99 | 0.01937 |
a0316cb627ebde16bd1be493b5901c78bcdf8ad7 | 1,557 | py | Python | reqtest.py | jbreams/nagmq | 3fee4209898c717a02c5a07ba4c335428b9403eb | [
"Apache-2.0"
] | 21 | 2015-02-17T07:45:40.000Z | 2021-03-19T13:12:48.000Z | reqtest.py | jbreams/nagmq | 3fee4209898c717a02c5a07ba4c335428b9403eb | [
"Apache-2.0"
] | 7 | 2015-04-29T22:12:44.000Z | 2018-05-09T15:46:35.000Z | reqtest.py | jbreams/nagmq | 3fee4209898c717a02c5a07ba4c335428b9403eb | [
"Apache-2.0"
] | 9 | 2015-08-01T01:22:28.000Z | 2021-02-24T11:12:41.000Z | import zmq, time, json
context = zmq.Context()
pub = context.socket(zmq.REQ)
# These are random useless keys for testing Curve auth
pubkey = u"7d0:tz+tGVT&*ViD/SzU)dz(3=yIE]aT2TRNrG2$"
privkey = u"FCFo%:3pZTbiQq?MARHYk(<Kp*B-<RpRG7QMUlXr"
serverkey = u"mN>y$-+17hxa6F>r@}sxmL-uX}IM=:wIq}G4y*f["
pub.setsockopt_string(zmq.CURVE_PUBLICKEY, pubkey)
pub.setsockopt_string(zmq.CURVE_SECRETKEY, privkey)
pub.setsockopt_string(zmq.CURVE_SERVERKEY, serverkey)
pub.connect("tcp://localhost:5557")
keys = ['host_name', 'services', 'hosts', 'contacts', 'contact_groups',
'service_description', 'current_state', 'members', 'type', 'name',
'problem_has_been_acknowledged', 'plugin_output' ]
pub.send_json({ "host_name": "localhost", "include_services": True, "include_contacts": True, 'keys': keys })
resp = json.loads(pub.recv())
def status_to_string(val, ishost):
if(ishost):
if(val < 2):
return "UP"
else:
return "DOWN"
else:
if(val == 0):
return "OK"
elif(val == 1):
return "WARNING"
elif(val == 2):
return "CRITICAL"
elif(val == 3):
return "UNKNOWN"
for obj in resp:
if(obj['type'] == 'service'):
print "{0}@{1}: {2} {3}".format(
obj['service_description'], obj['host_name'],
status_to_string(obj['current_state'], 0), obj['plugin_output'])
elif(obj['type'] == 'host'):
print "{0}: {1} {2}".format(
obj['host_name'], status_to_string(obj['current_state'], 1),
obj['plugin_output'])
elif(obj['type'] == 'error'):
print obj['msg']
elif(obj['type'] == 'service_list'):
print obj['services']
| 31.14 | 109 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 678 | 0.435453 |
a032b7d1d6d08d471ad93367224c6b5463ad7672 | 392 | py | Python | extract_emails/email_filters/email_filter_interface.py | trisongz/extract-emails | 22485fd25edac993d448bf8e8af51c551694e5cd | [
"MIT"
] | 1 | 2020-11-22T01:29:41.000Z | 2020-11-22T01:29:41.000Z | extract_emails/email_filters/email_filter_interface.py | chiaminchuang/extract-emails | d7549186549a0776cfa28bc946550fd79b04043e | [
"MIT"
] | null | null | null | extract_emails/email_filters/email_filter_interface.py | chiaminchuang/extract-emails | d7549186549a0776cfa28bc946550fd79b04043e | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
from typing import List
class EmailFilterInterface(ABC):
"""
Interface for email filters
"""
@abstractmethod
def filter(self, emails: List[str]) -> List[str]:
"""
        Filter the given emails according to the filter's parameters
        :param emails: list of emails to filter
        :return: filtered list of emails
"""
pass
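# Minimal illustrative implementation of the interface (hypothetical, not part
# of the original module):
#   class DomainFilter(EmailFilterInterface):
#       def __init__(self, domain: str):
#           self.domain = domain
#       def filter(self, emails: List[str]) -> List[str]:
#           return [e for e in emails if e.endswith('@' + self.domain)]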
| 20.631579 | 62 | 0.614796 | 329 | 0.839286 | 0 | 0 | 243 | 0.619898 | 0 | 0 | 195 | 0.497449 |
a032fe94d351a63b7d04128ecd927fbb6c87a879 | 6,144 | py | Python | service/azservice/tooling1.py | cnective-inc/vscode-azurecli | 0ac34e2214078270b63cf6716423d40a60423834 | [
"MIT"
] | 38 | 2019-06-21T00:26:15.000Z | 2022-03-19T05:23:55.000Z | service/azservice/tooling1.py | cnective-inc/vscode-azurecli | 0ac34e2214078270b63cf6716423d40a60423834 | [
"MIT"
] | 46 | 2017-05-17T09:00:51.000Z | 2019-04-24T10:18:19.000Z | service/azservice/tooling1.py | cnective-inc/vscode-azurecli | 0ac34e2214078270b63cf6716423d40a60423834 | [
"MIT"
] | 27 | 2019-05-19T18:42:42.000Z | 2022-01-18T09:14:26.000Z | """tooling integration"""
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import os
import traceback
from importlib import import_module
from sys import stderr
import pkgutil
import yaml
from six.moves import configparser
from azure.cli.core.application import APPLICATION, Configuration
from azure.cli.core.commands import _update_command_definitions, BLACKLISTED_MODS
from azure.cli.core._profile import _SUBSCRIPTION_NAME, Profile
from azure.cli.core._session import ACCOUNT
from azure.cli.core._environment import get_config_dir as cli_config_dir
from azure.cli.core._config import az_config, GLOBAL_CONFIG_PATH, DEFAULTS_SECTION
from azure.cli.core.help_files import helps
from azure.cli.core.util import CLIError
GLOBAL_ARGUMENTS = {
'verbose': {
'options': ['--verbose'],
'help': 'Increase logging verbosity. Use --debug for full debug logs.'
},
'debug': {
'options': ['--debug'],
'help': 'Increase logging verbosity to show all debug logs.'
},
'output': {
'options': ['--output', '-o'],
'help': 'Output format',
'choices': ['json', 'tsv', 'table', 'jsonc']
},
'help': {
'options': ['--help', '-h'],
'help': 'Get more information about a command'
},
'query': {
'options': ['--query'],
'help': 'JMESPath query string. See http://jmespath.org/ for more information and examples.'
}
}
def initialize():
_load_profile()
def _load_profile():
azure_folder = cli_config_dir()
if not os.path.exists(azure_folder):
os.makedirs(azure_folder)
ACCOUNT.load(os.path.join(azure_folder, 'azureProfile.json'))
def load_command_table():
APPLICATION.initialize(Configuration())
command_table = APPLICATION.configuration.get_command_table()
_install_modules(command_table)
return command_table
def get_arguments(command):
return command.arguments
def arguments_loaded(command_name):
return True
def load_arguments(cmd_table, batch):
return False
def _install_modules(command_table):
for cmd in command_table:
command_table[cmd].load_arguments()
try:
mods_ns_pkg = import_module('azure.cli.command_modules')
installed_command_modules = [modname for _, modname, _ in
pkgutil.iter_modules(mods_ns_pkg.__path__)
if modname not in BLACKLISTED_MODS]
except ImportError:
        installed_command_modules = []
for mod in installed_command_modules:
try:
mod = import_module('azure.cli.command_modules.' + mod)
mod.load_params(mod)
mod.load_commands()
except Exception: # pylint: disable=broad-except
print("Error loading: {}".format(mod), file=stderr)
traceback.print_exc(file=stderr)
_update_command_definitions(command_table)
HELP_CACHE = {}
def get_help(group_or_command):
if group_or_command not in HELP_CACHE and group_or_command in helps:
        # safe_load avoids executing arbitrary YAML tags in the help text
        HELP_CACHE[group_or_command] = yaml.safe_load(helps[group_or_command])
return HELP_CACHE.get(group_or_command)
PROFILE = Profile()
def get_current_subscription():
_load_profile()
try:
return PROFILE.get_subscription()[_SUBSCRIPTION_NAME]
except CLIError:
return None # Not logged in
def get_configured_defaults():
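    """Return the [defaults] section values, e.g. those set with ``az configure --defaults``."""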
_reload_config()
try:
options = az_config.config_parser.options(DEFAULTS_SECTION)
defaults = {}
for opt in options:
value = az_config.get(DEFAULTS_SECTION, opt)
if value:
defaults[opt] = value
return defaults
except configparser.NoSectionError:
return {}
def is_required(argument):
required_tooling = hasattr(argument.type, 'required_tooling') and argument.type.required_tooling is True
return required_tooling and argument.name != 'is_linux'
def get_defaults(arguments):
_reload_config()
return {name: _get_default(argument) for name, argument in arguments.items()}
def _get_default(argument):
configured = _find_configured_default(argument)
return configured or argument.type.settings.get('default')
def run_argument_value_completer(command, argument, cli_arguments):
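    """Call ``argument.completer``, trying 3-argument, 1-argument, then 0-argument signatures."""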
try:
args = _to_argument_object(command, cli_arguments)
_add_defaults(command, args)
return argument.completer('', '', args)
except TypeError:
try:
return argument.completer('')
except TypeError:
try:
return argument.completer()
except TypeError:
return None
def _to_argument_object(command, cli_arguments):
    result = lambda: None  # noqa: E731 -- a function object doubles as a cheap attribute namespace
for argument_name, value in cli_arguments.items():
name, _ = _find_argument(command, argument_name)
setattr(result, name, value)
return result
def _find_argument(command, argument_name):
for name, argument in get_arguments(command).items():
if argument_name in argument.options_list:
return name, argument
return None, None
def _add_defaults(command, arguments):
_reload_config()
for name, argument in get_arguments(command).items():
if not hasattr(arguments, name):
default = _find_configured_default(argument)
if default:
setattr(arguments, name, default)
return arguments
def _reload_config():
az_config.config_parser.read(GLOBAL_CONFIG_PATH)
def _find_configured_default(argument):
if not (hasattr(argument.type, 'default_name_tooling') and argument.type.default_name_tooling):
return None
try:
return az_config.get(DEFAULTS_SECTION, argument.type.default_name_tooling, None)
except configparser.NoSectionError:
return None
| 29.681159 | 108 | 0.662598 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,033 | 0.168132 |
a03374010f5d746041d0825cf88d30f7bf187cff | 57 | py | Python | copy_topic_files.py | FoamyGuy/CircuitPython_Repo_Topics | 9a606e9549bcd663d6290c0648466022c1b964db | [
"MIT"
] | null | null | null | copy_topic_files.py | FoamyGuy/CircuitPython_Repo_Topics | 9a606e9549bcd663d6290c0648466022c1b964db | [
"MIT"
] | null | null | null | copy_topic_files.py | FoamyGuy/CircuitPython_Repo_Topics | 9a606e9549bcd663d6290c0648466022c1b964db | [
"MIT"
] | null | null | null | import os
os.system("cp -r ../../../topic_scripts/* ./") | 19 | 46 | 0.561404 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.614035 |
a0355bef2d29e903faa1a547fedc8fc74c627d30 | 537 | py | Python | metaopt/concurrent/worker/worker.py | cigroup-ol/metaopt | 6dfd5105d3c6eaf00f96670175cae16021069514 | [
"BSD-3-Clause"
] | 8 | 2015-02-02T21:42:23.000Z | 2019-06-30T18:12:43.000Z | metaopt/concurrent/worker/worker.py | cigroup-ol/metaopt | 6dfd5105d3c6eaf00f96670175cae16021069514 | [
"BSD-3-Clause"
] | 4 | 2015-09-24T14:12:38.000Z | 2021-12-08T22:42:52.000Z | metaopt/concurrent/worker/worker.py | cigroup-ol/metaopt | 6dfd5105d3c6eaf00f96670175cae16021069514 | [
"BSD-3-Clause"
] | 6 | 2015-02-27T12:35:33.000Z | 2020-10-15T21:04:02.000Z | # -*- coding: utf-8 -*-
"""
Minimal worker implementation.
"""
# Future
from __future__ import absolute_import, division, print_function, \
unicode_literals, with_statement
# First Party
from metaopt.concurrent.worker.base import BaseWorker
class Worker(BaseWorker):
"""Minimal worker implementation."""
def __init__(self):
super(Worker, self).__init__()
self._worker_id = None
@property
def worker_id(self):
return self._worker_id
def run(self):
raise NotImplementedError()
| 20.653846 | 67 | 0.687151 | 288 | 0.536313 | 0 | 0 | 65 | 0.121043 | 0 | 0 | 118 | 0.219739 |
a036a26ddca3cff8f085b18091bc763c3dc73fd2 | 261 | py | Python | Fundamentos/test-constantes.py | ijchavez/python | bccd94a9bee90125e2be27b0355bdaedb0ae9d19 | [
"Unlicense"
] | null | null | null | Fundamentos/test-constantes.py | ijchavez/python | bccd94a9bee90125e2be27b0355bdaedb0ae9d19 | [
"Unlicense"
] | null | null | null | Fundamentos/test-constantes.py | ijchavez/python | bccd94a9bee90125e2be27b0355bdaedb0ae9d19 | [
"Unlicense"
] | null | null | null | # to import everything: from constantes import *
from constantes import MI_CONSTANTE
from constantes import Matematicas as Mate
print(MI_CONSTANTE)
print(Mate.PI)
MI_CONSTANTE = "nuevo valor"
Mate.PI = "3.14"
print(">>>",MI_CONSTANTE)
print(">>>",Mate.PI)
| 18.642857 | 45 | 0.735632 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 74 | 0.283525 |
a03a7becb2df6dd7e3600ceabbb203ca1e648d2d | 10,131 | py | Python | venv/lib/python3.8/site-packages/hgext/remotefilelog/shallowbundle.py | JesseDavids/mqtta | 389eb4f06242d4473fe1bcff7fc6a22290e0d99c | [
"Apache-2.0"
] | 4 | 2021-02-05T10:57:39.000Z | 2022-02-25T04:43:23.000Z | venv/lib/python3.8/site-packages/hgext/remotefilelog/shallowbundle.py | JesseDavids/mqtta | 389eb4f06242d4473fe1bcff7fc6a22290e0d99c | [
"Apache-2.0"
] | null | null | null | venv/lib/python3.8/site-packages/hgext/remotefilelog/shallowbundle.py | JesseDavids/mqtta | 389eb4f06242d4473fe1bcff7fc6a22290e0d99c | [
"Apache-2.0"
] | null | null | null | # shallowbundle.py - bundle10 implementation for use with shallow repositories
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
from mercurial.i18n import _
from mercurial.node import bin, hex, nullid
from mercurial import (
bundlerepo,
changegroup,
error,
match,
mdiff,
pycompat,
)
from . import (
constants,
remotefilelog,
shallowutil,
)
NoFiles = 0
LocalFiles = 1
AllFiles = 2
def shallowgroup(cls, self, nodelist, rlog, lookup, units=None, reorder=None):
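    """Yield changegroup chunks for nodelist, emitting per-node deltas directly
    when rlog is a remotefilelog (plain revlogs fall back to the parent class).
    """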
if not isinstance(rlog, remotefilelog.remotefilelog):
for c in super(cls, self).group(nodelist, rlog, lookup, units=units):
yield c
return
if len(nodelist) == 0:
yield self.close()
return
nodelist = shallowutil.sortnodes(nodelist, rlog.parents)
# add the parent of the first rev
p = rlog.parents(nodelist[0])[0]
nodelist.insert(0, p)
# build deltas
for i in pycompat.xrange(len(nodelist) - 1):
prev, curr = nodelist[i], nodelist[i + 1]
linknode = lookup(curr)
for c in self.nodechunk(rlog, curr, prev, linknode):
yield c
yield self.close()
class shallowcg1packer(changegroup.cgpacker):
def generate(self, commonrevs, clnodes, fastpathlinkrev, source, **kwargs):
if shallowutil.isenabled(self._repo):
fastpathlinkrev = False
return super(shallowcg1packer, self).generate(
commonrevs, clnodes, fastpathlinkrev, source, **kwargs
)
def group(self, nodelist, rlog, lookup, units=None, reorder=None):
return shallowgroup(
shallowcg1packer, self, nodelist, rlog, lookup, units=units
)
def generatefiles(self, changedfiles, *args):
try:
linknodes, commonrevs, source = args
except ValueError:
commonrevs, source, mfdicts, fastpathlinkrev, fnodes, clrevs = args
if shallowutil.isenabled(self._repo):
repo = self._repo
if isinstance(repo, bundlerepo.bundlerepository):
# If the bundle contains filelogs, we can't pull from it, since
# bundlerepo is heavily tied to revlogs. Instead require that
# the user use unbundle instead.
# Force load the filelog data.
bundlerepo.bundlerepository.file(repo, b'foo')
if repo._cgfilespos:
raise error.Abort(
b"cannot pull from full bundles",
hint=b"use `hg unbundle` instead",
)
return []
filestosend = self.shouldaddfilegroups(source)
if filestosend == NoFiles:
changedfiles = list(
[f for f in changedfiles if not repo.shallowmatch(f)]
)
return super(shallowcg1packer, self).generatefiles(changedfiles, *args)
def shouldaddfilegroups(self, source):
repo = self._repo
if not shallowutil.isenabled(repo):
return AllFiles
if source == b"push" or source == b"bundle":
return AllFiles
caps = self._bundlecaps or []
if source == b"serve" or source == b"pull":
if constants.BUNDLE2_CAPABLITY in caps:
return LocalFiles
else:
# Serving to a full repo requires us to serve everything
repo.ui.warn(_(b"pulling from a shallow repo\n"))
return AllFiles
return NoFiles
def prune(self, rlog, missing, commonrevs):
if not isinstance(rlog, remotefilelog.remotefilelog):
return super(shallowcg1packer, self).prune(
rlog, missing, commonrevs
)
repo = self._repo
results = []
for fnode in missing:
fctx = repo.filectx(rlog.filename, fileid=fnode)
if fctx.linkrev() not in commonrevs:
results.append(fnode)
return results
def nodechunk(self, revlog, node, prevnode, linknode):
prefix = b''
if prevnode == nullid:
delta = revlog.rawdata(node)
prefix = mdiff.trivialdiffheader(len(delta))
else:
# Actually uses remotefilelog.revdiff which works on nodes, not revs
delta = revlog.revdiff(prevnode, node)
p1, p2 = revlog.parents(node)
flags = revlog.flags(node)
meta = self.builddeltaheader(node, p1, p2, prevnode, linknode, flags)
meta += prefix
l = len(meta) + len(delta)
yield changegroup.chunkheader(l)
yield meta
yield delta
def makechangegroup(orig, repo, outgoing, version, source, *args, **kwargs):
if not shallowutil.isenabled(repo):
return orig(repo, outgoing, version, source, *args, **kwargs)
original = repo.shallowmatch
try:
# if serving, only send files the clients has patterns for
if source == b'serve':
bundlecaps = kwargs.get('bundlecaps')
includepattern = None
excludepattern = None
for cap in bundlecaps or []:
if cap.startswith(b"includepattern="):
raw = cap[len(b"includepattern=") :]
if raw:
includepattern = raw.split(b'\0')
elif cap.startswith(b"excludepattern="):
raw = cap[len(b"excludepattern=") :]
if raw:
excludepattern = raw.split(b'\0')
if includepattern or excludepattern:
repo.shallowmatch = match.match(
repo.root, b'', None, includepattern, excludepattern
)
else:
repo.shallowmatch = match.always()
return orig(repo, outgoing, version, source, *args, **kwargs)
finally:
repo.shallowmatch = original
def addchangegroupfiles(orig, repo, source, revmap, trp, expectedfiles, *args):
if not shallowutil.isenabled(repo):
return orig(repo, source, revmap, trp, expectedfiles, *args)
newfiles = 0
visited = set()
revisiondatas = {}
queue = []
# Normal Mercurial processes each file one at a time, adding all
# the new revisions for that file at once. In remotefilelog a file
# revision may depend on a different file's revision (in the case
# of a rename/copy), so we must lay all revisions down across all
# files in topological order.
# read all the file chunks but don't add them
progress = repo.ui.makeprogress(_(b'files'), total=expectedfiles)
while True:
chunkdata = source.filelogheader()
if not chunkdata:
break
f = chunkdata[b"filename"]
repo.ui.debug(b"adding %s revisions\n" % f)
progress.increment()
if not repo.shallowmatch(f):
fl = repo.file(f)
deltas = source.deltaiter()
fl.addgroup(deltas, revmap, trp)
continue
chain = None
while True:
# returns: (node, p1, p2, cs, deltabase, delta, flags) or None
revisiondata = source.deltachunk(chain)
if not revisiondata:
break
chain = revisiondata[0]
revisiondatas[(f, chain)] = revisiondata
queue.append((f, chain))
if f not in visited:
newfiles += 1
visited.add(f)
if chain is None:
raise error.Abort(_(b"received file revlog group is empty"))
processed = set()
def available(f, node, depf, depnode):
if depnode != nullid and (depf, depnode) not in processed:
if not (depf, depnode) in revisiondatas:
# It's not in the changegroup, assume it's already
# in the repo
return True
# re-add self to queue
queue.insert(0, (f, node))
# add dependency in front
queue.insert(0, (depf, depnode))
return False
return True
skipcount = 0
# Prefetch the non-bundled revisions that we will need
prefetchfiles = []
for f, node in queue:
revisiondata = revisiondatas[(f, node)]
# revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
dependents = [revisiondata[1], revisiondata[2], revisiondata[4]]
for dependent in dependents:
if dependent == nullid or (f, dependent) in revisiondatas:
continue
prefetchfiles.append((f, hex(dependent)))
repo.fileservice.prefetch(prefetchfiles)
# Apply the revisions in topological order such that a revision
# is only written once it's deltabase and parents have been written.
while queue:
f, node = queue.pop(0)
if (f, node) in processed:
continue
skipcount += 1
if skipcount > len(queue) + 1:
raise error.Abort(_(b"circular node dependency"))
fl = repo.file(f)
revisiondata = revisiondatas[(f, node)]
# revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
node, p1, p2, linknode, deltabase, delta, flags = revisiondata
if not available(f, node, f, deltabase):
continue
base = fl.rawdata(deltabase)
text = mdiff.patch(base, delta)
if not isinstance(text, bytes):
text = bytes(text)
meta, text = shallowutil.parsemeta(text)
if b'copy' in meta:
copyfrom = meta[b'copy']
copynode = bin(meta[b'copyrev'])
if not available(f, node, copyfrom, copynode):
continue
for p in [p1, p2]:
if p != nullid:
if not available(f, node, f, p):
continue
fl.add(text, meta, trp, linknode, p1, p2)
processed.add((f, node))
skipcount = 0
progress.complete()
return len(revisiondatas), newfiles
| 33.325658 | 80 | 0.583062 | 3,478 | 0.343303 | 1,378 | 0.136018 | 0 | 0 | 0 | 0 | 1,837 | 0.181325 |
a03aa51d99ddd848b1a155bf6b547a0503967011 | 10,507 | py | Python | tests/test_main.py | FixItDad/vault-pwmgr | 8c9edec3786eefbf72f0c13c24f3d4e331ab1562 | [
"MIT"
] | 1 | 2018-01-26T12:45:44.000Z | 2018-01-26T12:45:44.000Z | tests/test_main.py | FixItDad/vault-pwmgr | 8c9edec3786eefbf72f0c13c24f3d4e331ab1562 | [
"MIT"
] | null | null | null | tests/test_main.py | FixItDad/vault-pwmgr | 8c9edec3786eefbf72f0c13c24f3d4e331ab1562 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# Functional tests for the main password manager page
# Currently targeting Firefox
# Depends on the vault configuration provided by the startdev.sh script.
# Depends on pytest-sourceorder to force test case execution order.
# TODO: configure vault data from pretest fixture.
import datetime
import pytest
import time
from pytest_sourceorder import ordered
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import testutils
# Test vault-pwmgr server to point to for tests
PWMGR_URL = "http://127.0.0.1:7080/"
HISTGROUP = testutils.HISTGROUP
# Used to store state information for multi-step tests. Generally tests should
# be independent, but some cases may depend on results returned earlier by the
# program under test. This can be used judiciously rather than duplicating
# test code or having very long test cases that test multiple items.
state = {}
def _login_pw(driver, userid, userpw):
""" Helper routine to log in by password with the supplied credentials. """
loginid = driver.find_element_by_id("loginid")
loginid.clear()
loginid.send_keys(userid)
loginpw = driver.find_element_by_id("loginpw")
loginpw.clear()
loginpw.send_keys(userpw)
loginpw.submit()
@pytest.fixture(scope="module")
def webdriver_module():
# Create a new instance of the browser driver at the module level
driver = webdriver.Firefox()
yield driver
driver.quit()
@pytest.fixture
def driver(webdriver_module):
# Set up the initial webdirver state for functions in this module.
# These functions test post login functionality, so start with a
# fresh login page and enter test user credentials.
webdriver_module.get(PWMGR_URL)
WebDriverWait(webdriver_module, 10).until(EC.title_contains("Vault Password Manager"))
_login_pw(webdriver_module,'user1','user1pw')
WebDriverWait(webdriver_module, 10).until(EC.presence_of_element_located((By.ID,"entrydetails")))
WebDriverWait(webdriver_module, 10).until(EC.presence_of_element_located((By.TAG_NAME,"nav")))
return webdriver_module
def ztest_navigation_visibility(driver):
"""
Requirement: All authorized items should be reachable from the nav tree.
Requirement: Nav tree initially shows only collection names.
Gradually expand tree to reveal all 3 levels: collection, group, item
"""
nav = testutils.NavigationHelper(driver)
# initially only collection names are visible
visible = nav.visiblelist()
assert visible == [('linuxadmin',), ('user1',)]
# open a collection, groups in the collection should be visible
nav.click(["linuxadmin"])
visible = nav.visiblelist()
assert visible == [
('linuxadmin','webservers/'),
('user1',),
]
# open other collection. All groups visible
nav.click(["user1"])
visible = nav.visiblelist()
assert visible == [
('linuxadmin','webservers/'),
('user1','Pauls Stuff/'),
('user1','network/'),
('user1','web/'),
]
# open a group. Group items are visible
nav.click(["user1","web/"])
visible = nav.visiblelist()
assert visible == [
('linuxadmin','webservers/'),
('user1','Pauls Stuff/'),
('user1','network/'),
('user1','web/', 'google'),
('user1','web/', 'netflix'),
]
# Close a group and open another
nav.click(["user1","web/"])
nav.click(["linuxadmin","webservers/"])
visible = nav.visiblelist()
assert visible == [
('linuxadmin','webservers/','LoadBal'),
('linuxadmin','webservers/','extA'),
('linuxadmin','webservers/','extB'),
('user1','Pauls Stuff/'),
('user1','network/'),
('user1','web/'),
]
#open the last group
nav.click(["user1","Pauls Stuff/"])
visible = nav.visiblelist()
assert visible == [
('linuxadmin','webservers/','LoadBal'),
('linuxadmin','webservers/','extA'),
('linuxadmin','webservers/','extB'),
('user1','Pauls Stuff/','$+dream'),
('user1','network/'),
('user1','web/'),
]
# close a collection, all groups and items in the collection are hidden
nav.click(["linuxadmin"])
visible = nav.visiblelist()
assert visible == [
('linuxadmin',),
('user1','Pauls Stuff/','$+dream'),
('user1','network/'),
('user1','web/'),
]
@ordered
class TestAddRemove(object):
"""
"""
def test_add_item_from_initial(s,driver):
""" Requirement: Add an item.
Add from initial screen with blank fields.
"""
nav = testutils.NavigationHelper(driver)
form = testutils.ItemHelper(driver)
# initially only collection names are visible
visible = nav.visiblelist()
assert visible == [('linuxadmin',), ('user1',),]
assert form.fields == {
"collectionid":"user1",
"groupid":"",
"notes":"",
"password":"",
"title":"",
"url":"",
"userid":"",
}
form.fields = {
"collectionid":"user1",
"groupid":"web",
"title":"Facepalm",
"url":"https://facepalm.com",
"userid":"bob",
"password":"bobknows",
"notes":"Forget privacy!",
}
# Should be able to read the values back.
assert form.fields == {
"collectionid":"user1",
"groupid":"web",
"title":"Facepalm",
"url":"https://facepalm.com",
"userid":"bob",
"password":"bobknows",
"notes":"Forget privacy!",
}
form.add_new()
assert form.message == "Added new entry web/Facepalm"
# visible in nav tree?
nav.click(["user1"])
nav.click(["user1","web/"])
assert nav.visible(('user1','web/', 'Facepalm'))
def test_del_item_facepalm(s,driver):
""" Requirements: Items can be deleted. Old items are moved
to an Archive group in the same collection. The item title contains a timestamp.
Form fields are cleared. A delete message is shown.
"""
nav = testutils.NavigationHelper(driver)
nav.click(["user1"])
nav.click(["user1","web/"])
assert nav.visible(('user1','web/', 'Facepalm'))
nav.click(["user1","web/","Facepalm"])
form = testutils.ItemHelper(driver)
assert form.fields == {
"collectionid":"user1",
"groupid":"web",
"notes":"Forget privacy!",
"password":"bobknows",
"title":"Facepalm",
"url":"https://facepalm.com",
"userid":"bob",
}, "Expected item values displayed when selected"
form.delete()
delete_ts = datetime.datetime.utcnow()
WebDriverWait(driver, 5).until(
EC.text_to_be_present_in_element(
(By.ID,"mainmsg"),"Deleted entry web/Facepalm"))
assert form.fields == {
"collectionid":"user1",
"groupid":"",
"notes":"",
"password":"",
"title":"",
"url":"",
"userid":"",
}, "Requirement: Fields are cleared after delete"
assert form.message == "Deleted entry web/Facepalm", "Requirement: delete message displayed"
time.sleep(1) # Nav tree needs extra time to update
visible = nav.visiblelist()
assert visible == [
(u'linuxadmin',),
(u'user1',HISTGROUP),
(u'user1',u'Pauls Stuff/'),
(u'user1',u'network/'),
(u'user1',u'web/', u'google'),
(u'user1',u'web/', u'netflix'),
], "Archive group is visible in nav tree"
nav.click(["user1", HISTGROUP])
title = nav.findarchived(delete_ts, ('user1','web','Facepalm') )
assert title is not None, "Requirement: deleted entry is in archive group."
# Stash title name for later test step
state["test_del_item_facepalm_title"] = title
def test_del_archived_item_facepalm(s,driver):
""" Requirements: Items can be deleted from the archive group.
"""
nav = testutils.NavigationHelper(driver)
form = testutils.ItemHelper(driver)
nav.click(["user1"])
nav.click(["user1", HISTGROUP])
title = state["test_del_item_facepalm_title"]
del state["test_del_item_facepalm_title"]
nav.click(("user1", HISTGROUP, title))
assert form.fields == {
"collectionid":"user1",
"groupid":HISTGROUP[:-1],
"notes":"Forget privacy!",
"password":"bobknows",
"title":title,
"url":"https://facepalm.com",
"userid":"bob",
}, "Archived entry values are as expected."
form.delete()
WebDriverWait(driver, 5).until(
EC.text_to_be_present_in_element(
(By.ID,"mainmsg"),"Deleted entry %s%s" % (HISTGROUP, title)))
assert form.fields == {
"collectionid":"user1",
"groupid":"",
"notes":"",
"password":"",
"title":"",
"url":"",
"userid":"",
}, "Requirement: fields cleared after delete (archived entry)"
assert nav.hidden(('user1', HISTGROUP, title)), "Requirement: item removed from archive"
def ztest_delete_item(driver):
""" """
assert False, 'not implemented'
def ztest_modify_item_group(driver):
""" """
assert False, 'not implemented'
def ztest_modify_item_notes(driver):
""" """
assert False, 'not implemented'
def ztest_modify_item_password(driver):
""" """
assert False, 'not implemented'
def ztest_modify_item_title(driver):
""" """
assert False, 'not implemented'
def ztest_modify_item_url(driver):
""" """
assert False, 'not implemented'
def ztest_modify_item_userid(driver):
""" """
assert False, 'not implemented'
def ztest_clear_item_fields(driver):
""" """
assert False, 'not implemented'
def ztest_shared_item_visibility(driver):
""" """
assert False, 'not implemented'
| 31.743202 | 101 | 0.596745 | 5,051 | 0.480727 | 161 | 0.015323 | 5,900 | 0.56153 | 0 | 0 | 4,760 | 0.453031 |
a03c291ae4978fabfb123a70eed6e3604690f22e | 319 | py | Python | python/cowSum.py | TenType/competition | 1715c79c88992e4603b327f962f44eb5bffcb801 | [
"MIT"
] | 1 | 2022-02-05T02:11:37.000Z | 2022-02-05T02:11:37.000Z | python/cowSum.py | TenType/competition | 1715c79c88992e4603b327f962f44eb5bffcb801 | [
"MIT"
] | null | null | null | python/cowSum.py | TenType/competition | 1715c79c88992e4603b327f962f44eb5bffcb801 | [
"MIT"
] | null | null | null | n, t = map(int, input().split())
f = []
results = [-1, -1, -1, -1]
for i in range(0, n):
f.append(list(map(int, input().split())))
# print(f)
for j in range(0, n):
if t in range(f[j][4], f[j][5]):
for k in range(0, 4):
if f[j][k] > results[k]:
results[k] = f[j][k]
for m in range(0, 4):
print(results[m])
| 19.9375 | 42 | 0.526646 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.031348 |
a03d0e1506063ae02913fe583bcbdc21759f23a9 | 2,031 | py | Python | reacticket/extensions/usersettings.py | i-am-zaidali/Toxic-Cogs | 088cb364f9920c20879751da6b7333118ba1bf41 | [
"MIT"
] | 56 | 2019-03-21T21:03:26.000Z | 2022-03-14T08:26:55.000Z | reacticket/extensions/usersettings.py | i-am-zaidali/Toxic-Cogs | 088cb364f9920c20879751da6b7333118ba1bf41 | [
"MIT"
] | 38 | 2019-08-20T02:18:27.000Z | 2022-02-22T11:19:05.000Z | reacticket/extensions/usersettings.py | i-am-zaidali/Toxic-Cogs | 088cb364f9920c20879751da6b7333118ba1bf41 | [
"MIT"
] | 44 | 2019-07-04T06:17:54.000Z | 2022-03-25T19:18:31.000Z | from typing import Optional
from reacticket.extensions.abc import MixinMeta
from reacticket.extensions.mixin import settings
class ReacTicketUserSettingsMixin(MixinMeta):
@settings.group()
async def userpermissions(self, ctx):
"""Control the permissions that users have with their own tickets"""
pass
@userpermissions.command()
async def usercanclose(self, ctx, yes_or_no: Optional[bool] = None):
"""Set whether users can close their own tickets or not."""
if yes_or_no is None:
yes_or_no = not await self.config.guild(ctx.guild).usercanclose()
await self.config.guild(ctx.guild).usercanclose.set(yes_or_no)
if yes_or_no:
await ctx.send("Users can now close their own tickets.")
else:
await ctx.send("Only administrators can now close tickets.")
@userpermissions.command()
async def usercanmodify(self, ctx, yes_or_no: Optional[bool] = None):
"""Set whether users can add or remove additional users to their ticket."""
if yes_or_no is None:
yes_or_no = not await self.config.guild(ctx.guild).usercanmodify()
await self.config.guild(ctx.guild).usercanmodify.set(yes_or_no)
if yes_or_no:
await ctx.send("Users can now add/remove other users to their own tickets.")
else:
await ctx.send("Only administrators can now add/remove users to tickets.")
@userpermissions.command()
async def usercanname(self, ctx, yes_or_no: Optional[bool] = None):
"""Set whether users can rename their tickets and associated channels."""
if yes_or_no is None:
yes_or_no = not await self.config.guild(ctx.guild).usercanname()
await self.config.guild(ctx.guild).usercanname.set(yes_or_no)
if yes_or_no:
await ctx.send("Users can now rename their tickets and associated channels.")
else:
await ctx.send("Only administrators can now rename tickets and associated channels.")
| 42.3125 | 97 | 0.678484 | 1,902 | 0.936484 | 0 | 0 | 1,834 | 0.903003 | 1,719 | 0.846381 | 607 | 0.298868 |
a040b6c0982b77309717ef74a52f2c7e1c8af890 | 1,276 | py | Python | tests/test_command_line.py | cwithmichael/breakzip | 3c81bd2a081b47f2e57d1b72f2b0fe6b76e613b3 | [
"MIT"
] | 5 | 2020-08-15T11:40:17.000Z | 2021-03-22T15:15:37.000Z | tests/test_command_line.py | cwithmichael/breakzip | 3c81bd2a081b47f2e57d1b72f2b0fe6b76e613b3 | [
"MIT"
] | null | null | null | tests/test_command_line.py | cwithmichael/breakzip | 3c81bd2a081b47f2e57d1b72f2b0fe6b76e613b3 | [
"MIT"
] | null | null | null | from breakzip import command_line
import pytest
import os
@pytest.fixture
def enc_zip(rootdir):
"""Returns an EncryptedZipFile instance with a test zip"""
test_zip = os.path.join(rootdir, "cats.zip")
return test_zip
def test_reading_file(enc_zip, mocker):
mocker.patch.object(command_line.sys, "argv")
mocker.patch.object(command_line.breakzip, "get_info")
mocker.patch.object(command_line.breakzip, "find_password")
command_line.sys.argv = ["", enc_zip, "jpg"]
command_line.main()
    # Mock.called_once() is not an assertion (it is always truthy); use the real check.
    command_line.breakzip.get_info.assert_called_once()
    command_line.breakzip.find_password.assert_called_once()
def test_reading_file_not_found(mocker):
mocker.patch.object(command_line.sys, "argv")
command_line.sys.argv = ["", "", "jpg"]
with pytest.raises(SystemExit):
command_line.main()
def test_reading_ext_not_found(enc_zip, mocker):
mocker.patch.object(command_line.sys, "argv")
command_line.sys.argv = ["", enc_zip, "drwho"]
with pytest.raises(SystemExit):
command_line.main()
def test_insufficient_input(mocker):
mocker.patch.object(command_line.sys, "argv")
command_line.sys.argv = ["", ""]
with pytest.raises(SystemExit) as e:
command_line.main()
assert e.value.code == 2
| 29.674419 | 63 | 0.71395 | 0 | 0 | 0 | 0 | 169 | 0.132445 | 0 | 0 | 146 | 0.11442 |
a04150678757a161b57d09c46ef15266722ed6e3 | 643 | py | Python | tourney/commands/stats_command.py | netromdk/tourney | 192d9dec935ac087969a810870b784e3d626b9d5 | [
"MIT"
] | 1 | 2022-01-11T07:29:49.000Z | 2022-01-11T07:29:49.000Z | tourney/commands/stats_command.py | netromdk/tourney | 192d9dec935ac087969a810870b784e3d626b9d5 | [
"MIT"
] | 7 | 2018-10-26T08:10:23.000Z | 2021-08-17T06:13:27.000Z | tourney/commands/stats_command.py | netromdk/tourney | 192d9dec935ac087969a810870b784e3d626b9d5 | [
"MIT"
] | 2 | 2018-08-24T13:43:02.000Z | 2019-02-20T09:09:19.000Z | from .command import Command
from datetime import datetime
import calendar
from tourney.util import this_season_filter
from tourney.stats import Stats
class StatsCommand(Command):
def __init__(self):
super(StatsCommand, self).__init__("stats")
self.set_ephemeral(False)
def execute(self, lookup=None):
stats = Stats.get()
if not stats.generate(time_filter=this_season_filter):
return "There are no recorded matches to generate statistics from!"
stats.save()
month = calendar.month_name[datetime.today().month]
return "Statistics for the {} season:\n".format(month) + stats.general_response(lookup)
| 26.791667 | 91 | 0.748056 | 488 | 0.758942 | 0 | 0 | 0 | 0 | 0 | 0 | 100 | 0.155521 |
a041cf9593ba00ff0663499073797bf96dd8b200 | 300 | py | Python | agnocomplete/urls.py | mike-perdide/django-agnocomplete | 1ef67a0a808cfe61c6d1ac5ec449ee1a0f0246e8 | [
"MIT"
] | null | null | null | agnocomplete/urls.py | mike-perdide/django-agnocomplete | 1ef67a0a808cfe61c6d1ac5ec449ee1a0f0246e8 | [
"MIT"
] | null | null | null | agnocomplete/urls.py | mike-perdide/django-agnocomplete | 1ef67a0a808cfe61c6d1ac5ec449ee1a0f0246e8 | [
"MIT"
] | 1 | 2022-01-03T16:18:00.000Z | 2022-01-03T16:18:00.000Z | """
Agnostic Autocomplete URLS
"""
from django.conf.urls import url
from .views import AgnocompleteView, CatalogView
urlpatterns = [
url(
r'^(?P<klass>[-_\w]+)/$',
AgnocompleteView.as_view(),
name='agnocomplete'),
url(r'^$', CatalogView.as_view(), name='catalog'),
]
| 21.428571 | 54 | 0.626667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 86 | 0.286667 |
a044f460600112720a002859c5451c1e4332abe6 | 10,037 | py | Python | tem/repo.py | tem-cli/tem | 6974734a000604fe201fcba573b05e8fe50eda72 | [
"MIT"
] | null | null | null | tem/repo.py | tem-cli/tem | 6974734a000604fe201fcba573b05e8fe50eda72 | [
"MIT"
] | null | null | null | tem/repo.py | tem-cli/tem | 6974734a000604fe201fcba573b05e8fe50eda72 | [
"MIT"
] | null | null | null | """Repository operations"""
import os
from tem import config, util
class Repo:
"""A python representation of a repository."""
    def __init__(self, *args):
        if not args or args[0] is None:
            self.path = ""
        elif isinstance(args[0], str):
            self.path = args[0]
        elif isinstance(args[0], Repo):
            self.path = args[0].path
    def __eq__(self, other):
        # Compare repos by real path so spec exclusion and deduplication work.
        if isinstance(other, Repo):
            return self.realpath() == other.realpath()
        return NotImplemented
    def __hash__(self):
        return hash(self.realpath())
    def abspath(self):
        """Get the absolute path of the repo, preserving symlinks."""
        return util.abspath(self.path)
    def realpath(self):
        """Get the real path of the repo."""
        return os.path.realpath(self.path)
def name(self):
"""Get the name of the repo at ``path`` from its configuration.
If the repo has not configured a name, the base name of its directory
is used. This works even if the repository does not exist on the
filesystem.
"""
# TODO put this entry in the local config file
cfg = config.Parser(self.path + "/.tem/repo")
name = cfg["general.name"]
if name:
return name
return util.basename(self.path)
def has_template(self, template):
"""Test if the repo contains `template`."""
return os.path.exists(util.abspath(self.path + "/" + template))
@staticmethod
def named(name):
"""
        Return the repository on the lookup path with the given name if one
        exists; otherwise return an empty Repo.
"""
# TODO decide how to handle ambiguity
for repo in lookup_path:
repo = Repo(repo)
if repo.name() == name:
return repo
return Repo(None)
@staticmethod
def from_id(repo_id):
"""Get a repository with id ``repo_id``.
A repository id is a common term for name and path. To determine if
``repo_id`` is a name or a path, the following strategy is used:
- If it contains a '/', it is resolved as a path
- Otherwise, a repo with the name ``repo_id`` is looked up and if
found, that repo is returned
- If the above fails, ``repo_id`` is resolved as a path. In this case,
the repo need not exist on the filesystem.
"""
if "/" in repo_id:
return Repo(repo_id)
return Repo.named(repo_id)
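    # Illustrative resolutions (hypothetical ids):
    #   Repo.from_id("dotfiles")    -> repo named "dotfiles" on the lookup path
    #   Repo.from_id("./templates") -> repo at the path ./templates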
#: List of lookup paths for tem repositories
lookup_path = [
Repo(line) for line in os.environ.get("REPO_PATH", "").split("\n") if line
]
class RepoSpec:
"""An abstraction for various ways of specifying tem repositories
The following types of specs are supported:
- absolute or relative path to a repository
- name of a repository
- absolute or relative path to a repository that is to be excluded
(when `spec_type` is :attr:`EXCLUDE`)
- all repositories from :data:`path`
You can obtain the list of repositories from a spec by calling
:func:`repos`.
If ``spec_type`` is :attr:`EXCLUDE` then ``pseudopaths`` are
excluded from the final list. If ``spec_type`` is :attr:`FROM_LOOKUP_PATH`
then all the paths from :data:`repo.lookup_path` are included in the spec.
    An empty spec is equivalent to a `FROM_LOOKUP_PATH` spec.
Attributes
----------
paths : str, list, optional
Repository paths or other types of specs
spec_type
Values: `INCLUDE`, `EXCLUDE`, `FROM_LOOKUP_PATH` or a bitwise OR of
these
Constants
---------
INCLUDE
Specified repos or specs will be included in the final list of repos.
EXCLUDE
Specified specs will be excluded from the final list of repos.
FROM_LOOKUP_PATH
Repos from :data:`path` will be:
- included if `INCLUDE` is set
- excluded if `EXCLUDE` is set
Methods
-------
"""
INCLUDE = 1
EXCLUDE = 2
FROM_LOOKUP_PATH = 4
@staticmethod
def of_type(spec_type):
"""
Look at :func:`__init__` for the proper ways of specifying a spec type.
"""
def func(specs=None):
return RepoSpec(specs=specs, spec_type=spec_type)
return func
# Holds the paths/subspecs
_data: list
def __init__(self, specs=None, spec_type=None):
"""Initialize repo spec
In the most basic form, ``specs`` is a string or list of strings
representing repository paths or names. Specs can also contain other
specs. ``spec_type`` is the type of spec and can be a single type or a
tuple containing multiple types. If no `spec_type` is specified, the
spec will be of the ``INCLUDE`` type.
"""
if not spec_type and isinstance(specs, int):
# Constructed with only spec_type as its argument
spec_type = specs
specs = None
elif not spec_type:
# Unspecified spec_type should fall back to INCLUDE
spec_type = RepoSpec.INCLUDE
# Error checking
if not spec_type & (
self.INCLUDE | self.EXCLUDE | self.FROM_LOOKUP_PATH
):
raise ValueError("invalid spec type")
if spec_type & RepoSpec.INCLUDE and spec_type & RepoSpec.EXCLUDE:
raise ValueError(
"spec_type cannot contain both INCLUDE and EXCLUDE"
)
if spec_type & RepoSpec.FROM_LOOKUP_PATH and specs is not None:
raise ValueError("cannot specify specs with FROM_LOOKUP_PATH")
self._data = []
self.spec_type = spec_type
if specs is not None:
self.append(specs)
def append(self, specs):
"""Append specs to the list."""
err = ValueError("specs must be a string, spec, or list of specs")
if isinstance(specs, str):
self._data += [s for s in specs.split("\n") if s]
elif isinstance(specs, RepoSpec):
self._data.append(specs)
elif isinstance(specs, list):
# All items in specs must be strings or RepoSpecs
if all(isinstance(spec, (str, RepoSpec)) for spec in specs):
self._data += specs
else:
raise err
else:
raise err
def _abspaths(self, included):
"""Get a list of paths that are included/excluded by this spec."""
if (included and (self.spec_type & RepoSpec.EXCLUDE)) or (
not included and not (self.spec_type & RepoSpec.EXCLUDE)
):
# Only exclude-type specs can exclude paths, and only other-type
# specs can include
return []
if not self._data:
if included:
return lookup_path
return []
if self.spec_type & RepoSpec.FROM_LOOKUP_PATH:
return lookup_path
result = lookup_path.copy()
# If at least one subspec is not EXCLUDE, initialize empty result
for item in self._data:
if isinstance(item, str) or (
not item.spec_type & RepoSpec.EXCLUDE
):
result = []
break
for item in self._data:
if isinstance(item, str):
result.append(resolve(item))
elif isinstance(item, RepoSpec):
if item.spec_type & RepoSpec.EXCLUDE:
                    # pylint: disable=protected-access
                    excluded = item._abspaths(False)
                    result[:] = [
                        spec for spec in result if spec not in excluded
                    ]
else:
result += item.repos()
else:
raise ValueError(
"Spec list contains invalid types. Please "
"report this as a bug."
)
return list(dict.fromkeys(result)) # Remove duplicates
def repos(self):
"""Return absolute paths of repositores specified by this spec."""
# return self._abspaths(True)
return [Repo(path) for path in self._abspaths(True)]
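# Illustrative usage of RepoSpec (hypothetical paths):
#   spec = RepoSpec(["~/repos/a", "~/repos/b"])
#   spec.append(RepoSpec("~/repos/b", spec_type=RepoSpec.EXCLUDE))
#   [r.path for r in spec.repos()]  # -> the repo at ~/repos/a only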
def is_valid_name(name):
"""Test if ``name`` is a valid repository name."""
return "/" not in name
def resolve(path_or_name):
"""Get the repo identified by ``path_or_name``.
The following strategy is used:
- If the argument is a valid repository name, find a repo in
`repo.lookup_path` with the given name.
- If the argument is a path or the previous step failed to find a repo,
return the absolute path version of the input path.
"""
if not path_or_name:
return Repo()
if is_valid_name(path_or_name):
        return Repo.named(path_or_name)
return Repo(path_or_name)
def find_template(template: str, repos=None, at_most=-1):
"""Return the absolute path of a template, looked up in ``repos``.
Parameters
----------
template : str
Path to template relative to the containing repo.
    repos : list[Repo]
        Repositories to look up. A None value uses :data:`lookup_path`.
    at_most : int
        Return no more than this number of template paths.
Returns
-------
template_paths : list[str]
List of absolute paths to templates under the given repos.
Notes
-----
A template can be a directory tree, e.g. "a/b/c".
"""
if repos is None:
repos = lookup_path
if at_most == 0:
return []
    result_paths = []
    for repo in repos:
        # Stop once enough template paths have been collected.
        if at_most != -1 and len(result_paths) >= at_most:
            break
        template_abspath = repo.abspath() + "/" + template
        if os.path.exists(template_abspath):
            result_paths.append(template_abspath)
    return result_paths
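# Illustrative call (hypothetical template name and repo layout):
#   find_template("licenses/mit", at_most=1)
#   # -> ["/path/to/first/repo/licenses/mit"] if that file exists there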
def remove_from_path(remove_repos):
"""Remove matching repos from REPO_PATH environment variable."""
remove_repo_paths = [r.realpath() for r in remove_repos]
lookup_path[:] = (
repo
for repo in lookup_path
if repo.realpath() not in remove_repo_paths
)
| 31.170807 | 104 | 0.586629 | 7,884 | 0.785494 | 0 | 0 | 1,285 | 0.128026 | 0 | 0 | 4,937 | 0.49188 |
a04797fdee39fb437fee089feeeacafda35a132b | 2,064 | py | Python | experiments/cntJson.py | MadhuNimmo/jalangi2 | bbe8350b8ede5d978c1b3923780f277aacb1d074 | [
"Apache-2.0"
] | null | null | null | experiments/cntJson.py | MadhuNimmo/jalangi2 | bbe8350b8ede5d978c1b3923780f277aacb1d074 | [
"Apache-2.0"
] | null | null | null | experiments/cntJson.py | MadhuNimmo/jalangi2 | bbe8350b8ede5d978c1b3923780f277aacb1d074 | [
"Apache-2.0"
] | null | null | null | import json
import sys
#filename='/home/anon/js-acg-examples-master/Knockout_test_results/StatWala.json'
cnt=0
cnt2=0
#item_dict = json.loads(filename)
#import json
filename1 = sys.argv[1]
#filename2 = sys.argv[2]
#out_key = filename2.read().split('\n')
'''out_key = [line.rstrip('\n') for line in open(filename2)]'''
with open(filename1) as f1:
data = json.load(f1)
#print(len(data))
listy = []
'''with open(filename2) as f2:
data2 = json.load(f2)'''
'''for out in out_key:'''
'''for key,value in data.items():
if ("app.js") in key or ("base.js") in key:
#print(key)
for k,v in value.items():
cnt+=len(v)'''
# Count the total number of entries in the nested mapping
# {outer_key: {inner_key: [items, ...]}} loaded from the JSON file.
for key, value in data.items():
    for k, v in value.items():
        cnt += len(v)
'''for key, value in data.items():
cnt+=len(value)
for item in value:
if(item == "Var(/Users/UserXYZ/Documents/todomvc-master/examples/angularjs/node_modules/angular/angular.js@1633:48390-48494, %ssa_val 16)"):
listy.append(key)
#print(key.find("jquery.js")>-1 & item.find("jquery.js")>-1)
#if((key.find("app.js")>-1 & item.find("base.js")>-1)==True):
#cnt2+=1
listy2=[]
for key, value in data.items():
if(key=="Var(/Users/UserXYZ/Documents/todomvc-master/examples/angularjs/node_modules/angular/angular.js@7750:270474-289480, [childTranscludeFn])"):
for item in value:
if(item in set(listy)):
listy2.append(item)'''
print(cnt)
'''print(cnt2)
print(len(listy))
print(len(listy2))'''
'''for key1,value1 in data1.items():
for key2,value2 in data2.items():
if(key1==key2 or key1 in key2 ):
for k1,v1 in value1.items():
for k2,v2 in value2.items():
if(v1!=v2):
print(key1,value1)'''
#if two json obs are same
'''a, b = json.dumps(data1, sort_keys=True), json.dumps(data2, sort_keys=True)
print(a == b)''' | 34.983051 | 157 | 0.562984 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,811 | 0.877422 |
a047d0ac5a16d636bbec804560bb56282540b1b2 | 13,172 | py | Python | remit/settings.py | naamara/blink | 326c035b2f0ef0feae4cd7aa2d4e73fa4a40171a | [
"Unlicense",
"MIT"
] | null | null | null | remit/settings.py | naamara/blink | 326c035b2f0ef0feae4cd7aa2d4e73fa4a40171a | [
"Unlicense",
"MIT"
] | 10 | 2019-12-26T17:31:31.000Z | 2022-03-21T22:17:33.000Z | remit/settings.py | naamara/blink | 326c035b2f0ef0feae4cd7aa2d4e73fa4a40171a | [
"Unlicense",
"MIT"
] | null | null | null | ''' settings for Django '''
import os
import django.conf.global_settings as DEFAULT_SETTINGS
LOCALHOST = False
DEBUG = False
TEMPLATE_DEBUG = DEBUG
DEBUG_PAYMENTS = DEBUG
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir)) + '/'
LIVE = 1
ADMINS = (
('Madra David', '[email protected]'),
)
APP_EMAILS = {
'contact_us':'[email protected]',
'about_us':'[email protected]',
'info':'[email protected]',
'support':'[email protected]',
}
DEBUG_EMAILS = {
'[email protected]' ,
}
APP_NAME = 'Useremit'
DOMAIN_NAME = 'Remit'
APP_TITLE = 'Remit | Send Money to Mobile Money in Uganda or Kenya | Pay utility bills online'
MANAGERS = ADMINS
USE_JUMIO = True
BASE_URL = 'https://useremit.com/'
BASE_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir)) + '/'
DATABASES = {
'default': {
# Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'ENGINE': 'django.db.backends.postgresql_psycopg2',
# Or path to database file if using sqlite3.
'NAME': 'anenyuoe4',
# The following settings are not used with sqlite3:
'USER': 'dqebbquaa4iba',
'PASSWORD': 'WMm8mq1ZYAOn',
# Empty for localhost through domain sockets or '127.0.0.1' for
# localhost through TCP.
'HOST': 'LOCALHOST',
'PORT': '', # Set to empty string for default.
'OPTIONS': {'autocommit': True, },
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['www.useremit.com', 'http://useremit.com',
'https://useremit.com', 'https://useremit.com']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
#TIME_ZONE = 'Africa/Nairobi'
TIME_ZONE ='UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = BASE_DIR + 'static/uploads/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = BASE_URL + 'static/uploads/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
#GEOIP_PATH = BASE_URL + 'geoip_data/'
geo_dir = os.path.dirname(__file__)
geo_rel_path = "geoip"
GEOIP_PATH = os.path.join(geo_dir, geo_rel_path)
EMAIL_TEMPLATE_DIR = BASE_DIR + 'templates/email/'
AJAX_TEMPLATE_DIR = BASE_DIR + 'templates/ajax/'
SMS_TEMPLATE_DIR = BASE_DIR + 'templates/sms/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'ksx8+lq!5pzx&)xuqp0sc-rdgtd14gmix-eglq(iz%3+7h)f52'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'sslify.middleware.SSLifyMiddleware',
'django.middleware.common.CommonMiddleware',
#'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
#'session_security.middleware.SessionSecurityMiddleware',
# Uncomment the next line for simple clickjacking protection:
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'remit.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'remit.wsgi.application'
TEMPLATE_DIRS = (
BASE_DIR + 'templates',
BASE_DIR + 'remit_admin/templates/',
BASE_DIR + 'remit_admin/templates/admin/',
)
INSTALLED_APPS = (
#background tasks
#'huey.djhuey',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'remit',
'social_widgets',
'accounts',
#'south'
'landingapp',
'coverage',
#'notification',
'nexmo',
'guardian',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
#'django_admin_bootstrapped.bootstrap3',
#'django_admin_bootstrapped',
# Uncomment the next line to enable the admin:
'remit_admin',
'session_security',
'gravatar',
'django_cron',
'django.contrib.humanize',
'django_extensions',
#'django_bitcoin',
'btc',
'rest_framework',
'rest_framework.authtoken',
'api',
'seo',
'payments',
'background_task',
'django.contrib.admin',
'ipn',
'standard',
'crispy_forms',
'tinymce',
#'django_twilio',
)
PAYPAL_RECEIVER_EMAIL = "[email protected]"
# Rest Framework
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAdminUser'
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
#'rest_framework.renderers.BrowsableAPIRenderer',
),
# Use Django's standard `django.contrib.auth` permissions,
'DATETIME_FORMAT': '%Y-%m-%d %H:%M:%S'
}
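# With TokenAuthentication enabled above, API clients authenticate by sending
# a header like (illustrative): Authorization: Token <key>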
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Custom template processors
# YOpay
YOPAY_USERNAME = '100224720137'
YOPAY_PASSWORD = 'jLQF-r1oa-OyIq-0zoQ-544O-7U1F-oGj5-YoyU'
YOPAY_ENDPOINT = 'https://paymentsapi1.yo.co.ug/ybs/task.php'
# Ipay
LIVE = 1
IPAY_CALLBACK_URL = '%stransaction/confirm_payment/' % BASE_URL
IPAY_USER = 'redcore'
IPAY_MERCHANT = 'RedCore'
IPAY_HASH_KEY = '0yiq0zoQ544O'
# uba
UBA_CALLBACK_URL = ''
UBA_MERCHANT_ID = ''
UBA_MERCHANT_KEY = ''
#jumio
JUMIO_URL="https://netverify.com/api/netverify/v2/initiateNetverify/"
JUMIO_TOKEN="fcf1eec3-728d-4f8a-8811-5b8e0e534597"
JUMIO_SECRET="9mnQyVj1ppiyVESYroDHZS23Z9OfQ9GS"
JUMIO_USER_AGENT="MyCompany MyApp/1.0.0"
USE_JUMIO = True
"""
JUMIO_SUCCESS_URL="https://simtransfer.com/jumiopass/"
JUMIO_ERROR_URL="https://simtransfer.com/jumiofail/"
"""
JUMIO_SUCCESS_URL="https://simtransfer.com/idscanned/"
JUMIO_ERROR_URL="https://simtransfer.com/idscanfailed/"
JUMIO_CALLBACK="https://simtransfer.com/jumiodata/"
# Accounts / auth
ANONYMOUS_USER_ID = -1
AUTH_PROFILE_MODULE = 'accounts.Profile'
LOGIN_URL = BASE_URL + 'login/'
SIGNUP_URL = BASE_URL + 'signup/'
LOGOUT_URL = BASE_URL + 'signout/'
AUTHENTICATION_BACKENDS = (
'accounts.backends.EmailVerificationBackend',
'remit.backends.EmailAuthBackend',
'guardian.backends.ObjectPermissionBackend',
)
ACTIVATION_LINK = BASE_URL + 'activate/'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
"""
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
DEFAULT_FROM_EMAIL = ''
DEFAULT_TO_EMAIL = ''
"""
#EMAIL_PORT = 587
ADMIN_USER='admin_key_user'
ADMIN_USER_KEY='user_004_admin'
# Mailgun settings
DEFAULT_FROM_EMAIL = 'Remit.ug <[email protected]>'
#EMAIL_USE_TLS = True
#EMAIL_HOST = 'smtp.mailgun.org'
#EMAIL_HOST_USER = '[email protected]'
#EMAIL_HOST_PASSWORD = '25s0akinnuk8'
#EMAIL_PORT = 25
# Mailgun settings
EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
#EMAIL_TEMPLATE_DIR = '%stemplates/email/' % (BASE_DIR)
# using sandbox account here, change later
"""
MAILGUN_ACCESS_KEY = 'key-159a0akhdauw79rtshe1rw-itl6t-0i6'
MAILGUN_SERVER_NAME = 'remit.ug'
MAILGUN_ACCESS_LINK = 'https://api.mailgun.net/v2/remit.ug/messages'
"""
MAILGUN_ACCESS_KEY = 'key-159a0akhdauw79rtshe1rw-itl6t-0i6'
MAILGUN_SERVER_NAME = 'useremit.com'
MAILGUN_ACCESS_LINK = 'https://api.mailgun.net/v3/useremit.com/messages'
CONTACT_NO = '+256783877133'
# Nexmo
NEXMO_USERNAME = '8cede62f'
NEXMO_PASSWORD = 'd4d43a29'
NEXMO_FROM = 'Remit'
#Nexmo App
NEXMO_API_KEY = '8cede62fSecret'
NEXMO_API_SECRET = 'd4d43a29'
NEXMO_DEFAULT_FROM = 'Remit'
#if set to zero we use twilio
USE_NEXMO = 0
USE_TWILIO = True
USE_SUKUMA = False
USE_AFRICA_SMS = True
TWILIO_ACCOUNT_SID='AC2a0de3ac9808d7bfa5c3d75853c073d6'
TWILIO_AUTH_TOKEN='82b2ab8535255c8fd8d96bad96103ae7'
TWILIO_DEFAULT_CALLERID = 'Remit'
# Session security
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# cron jobs
CRON_CLASSES = [
"remit.cron.UpdateRates",
# ...
]
# Pagination
PAGNATION_LIMIT = 10
# Avatar
GRAVATAR_URL = "https://www.gravatar.com/avatar.php?"
# Bitcoin
#BITCOIND_CONNECTION_STRING = "http://ubuntu:bitwa8bfede82llet@localhost:8332"
BITCOIND_CONNECTION_STRING = "http://redcorebrpc:BKGyjwyNXzHumywcau3FubmyaJ8NypJtd1eSdTYCqSkJ@localhost:8332"
# How many bitcoin network confirmations are required until we consider the transaction
# as received
BITCOIN_MINIMUM_CONFIRMATIONS = 3
# Use Django signals to tell the system when new money has arrived to your
# wallets
BITCOIN_TRANSACTION_SIGNALING = True
from decimal import Decimal
MAIN_ADDRESS = '12oaMnJZZJRx59kWyAshzmogHERo8y54Et'
BITCOIN_PAYMENT_BUFFER_SIZE = 1
BITCOIN_ADDRESS_BUFFER_SIZE = 1
PAYMENT_VALID_HOURS = 1
BITCOIN_PRIVKEY_FEE = Decimal("0.0005")
BITCOIN_TRANSACTION_CACHING = 1
#admin who processed transactions
PROCESSED_BY = 1
#background tasks
#HUEY_CONFIG = {
# 'QUEUE': 'huey.backends.redis_backend.RedisBlockingQueue',
# 'QUEUE_NAME': 'test-queue',
# 'QUEUE_CONNECTION': {
# 'host': 'localhost',
# 'port': 6379,
# },
# 'THREADS': 4,
#}
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
CSRF_FAILURE_VIEW = 'remit.views.csrf_failure_view'
MTN_SDP = '172.25.48.43'
MTN_TEST_BED = 0
MTN_SDP_USERNAME = 'remitug.sp1'
MTN_SDP_PASS = 'Huawei2014'
MTN_SDP_SERVICEID = '2560110001380'
MTN_SDP_URL = 'http://172.25.48.43:8310/'
MTN_VENDOR_CODE = 'REMIT'
REVENUE_SHARE = 2.16
#disable email and sms sending
DISABLE_COMMS = False
#background tasks
MAX_ATTEMPTS = 5
#need this for generating reports from sqlite
IS_SQLITE = False
OTHER_FEES = True
SEND_KYC_SMS = True
# Pesapot
PESAPOT_URL = 'http://pesapot.com/api/'
PESAPOT_TOKEN = ''
PESAPOT_KEY = ''
#paybill
PAYBILL = False
DISABLE_MTN = True
ENABLE_TRADELANCE = True
ENABLE_YO = False
DISABLE_AIRTEL_MONEY = False
DISABLE_MTN_MOBILE_MONEY = False
#force Transaction id
FORCE_TRANSACTION_ID = True
# Localhost settings
# Crispy forms tags settings
CRISPY_TEMPLATE_PACK = 'bootstrap3'
try:
from local_settings import *
except ImportError:
pass
STATIC_ROOT = BASE_DIR + 'static'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = BASE_URL + 'static/'
| 24.994307 | 109 | 0.719708 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8,735 | 0.663149 |
a04c74859e147b481962d7389a2144e8a4b8236e | 1,151 | py | Python | tests/integration/test_issue_1447.py | alexey-tereshenkov-oxb/pex | 2e2d1e50e604fdee48b0d51aea482ca255521ff0 | [
"Apache-2.0"
] | 2,160 | 2015-01-06T17:57:39.000Z | 2022-03-30T19:59:01.000Z | tests/integration/test_issue_1447.py | alexey-tereshenkov-oxb/pex | 2e2d1e50e604fdee48b0d51aea482ca255521ff0 | [
"Apache-2.0"
] | 1,242 | 2015-01-22T14:56:46.000Z | 2022-03-31T18:02:38.000Z | tests/integration/test_issue_1447.py | Satertek/pex | 64de1c4cf031118ef446ac98a8c164c91c23bb9b | [
"Apache-2.0"
] | 248 | 2015-01-15T13:34:50.000Z | 2022-03-26T01:24:18.000Z | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import shutil
import subprocess
import sys
from pex.pex_info import PexInfo
from pex.testing import run_pex_command
from pex.typing import TYPE_CHECKING
from pex.variables import unzip_dir
if TYPE_CHECKING:
from typing import Any
def test_layout_identification(tmpdir):
# type: (Any) -> None
pex_root = os.path.join(str(tmpdir), "pex_root")
pex_file = os.path.join(str(tmpdir), "a.pex")
run_pex_command(
args=["-o", pex_file, "--pex-root", pex_root, "--runtime-pex-root", pex_root]
).assert_success()
pex_hash = PexInfo.from_pex(pex_file).pex_hash
assert pex_hash is not None
expected_unzip_dir = unzip_dir(pex_root, pex_hash)
assert not os.path.exists(expected_unzip_dir)
subprocess.check_call(args=[pex_file, "-c", ""])
assert os.path.isdir(expected_unzip_dir)
shutil.rmtree(expected_unzip_dir)
os.chmod(pex_file, 0o644)
subprocess.check_call(args=[sys.executable, pex_file, "-c", ""])
assert os.path.isdir(expected_unzip_dir)
| 29.512821 | 85 | 0.730669 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 215 | 0.186794 |
a04cf7b68b006d07caae20b361bd4e847b1b78eb | 13,900 | py | Python | tests/gfp.py | mcepl/git-packaging-tools | de705a9ac2efd1752754e4feb093fe85821f9224 | [
"MIT"
] | 8 | 2017-08-15T12:51:34.000Z | 2020-10-07T09:58:34.000Z | tests/gfp.py | mcepl/git-packaging-tools | de705a9ac2efd1752754e4feb093fe85821f9224 | [
"MIT"
] | 5 | 2017-02-04T12:32:16.000Z | 2020-07-01T14:13:19.000Z | tests/gfp.py | mcepl/git-packaging-tools | de705a9ac2efd1752754e4feb093fe85821f9224 | [
"MIT"
] | 6 | 2017-02-07T13:31:21.000Z | 2021-02-10T23:14:03.000Z | #!/usr/bin/python3
#
# Copyright (c) 2017-2020, SUSE LLC
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer. Redistributions
# in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# Neither the name of the SUSE Linux Products GmbH nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Author: Bo Maryniuk <[email protected]>
This tool helps to:
1. Format patches from Git in a way that has minimal impact on
future changes
2. Update patches to the current package source
3. Detect content differences, if the filename is still the same
4. Generate an include message for the .changes logfile
'''
import os
import sys
import re
import argparse
import shutil
ORDERING_FILE = 'patches.orders.txt'
CHANGES_FILE = 'patches.changes.txt'
def remove_order(filename):
'''
Remove order of the patch filename.
Git formats patches: XXXX-filename.patch
This function removes the "XXXX-" part, if any.
'''
ordnum = os.path.basename(filename).split('-')[0]
if ordnum and not re.sub(r'[0-9]', '', ordnum):
filename = os.path.join(os.path.dirname(filename),
filename.split('-', 1)[-1]).lower()
ordnum = int(ordnum)
else:
ordnum = None
return ordnum, filename
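# Added examples (illustrative): remove_order('0001-Fix-build.patch')
# returns (1, 'fix-build.patch'), while remove_order('fix-build.patch'),
# which has no numeric prefix, returns (None, 'fix-build.patch') unchanged.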
def remove_order_from_subject(src_file, dst_file, use_unique=False):
'''
    Remove the subject numbering inside the patch.
    Git formats the subject line inside patches as:
    Subject: [PATCH X/Y] .........
    This function removes the [PATCH X/Y] numbering, if any. With Git
    format-patch one can add the "-N" flag so the subject won't have
    these numbers, but just "[PATCH]". In that case we leave it as is.
'''
if os.path.exists(dst_file) and not use_unique:
raise IOError('the file {0} exists'.format(dst_file))
if os.path.exists(dst_file) and use_unique:
dst_file = unique(dst_file)
dst = open(dst_file, 'w')
for fline in open(src_file).read().split(os.linesep):
fline_tk = re.split(r'\s+\[PATCH \d+/\d+\]\s+', fline)
if len(fline_tk) == 2 and fline_tk[0] == 'Subject:':
fline = ' [PATCH] '.join(fline_tk)
dst.write('{0}\n'.format(fline))
dst.close()
def git_format_patch(tag):
'''
Formats patches from the given tag.
'''
patches = 0
for patch in os.popen(
'git format-patch {0}'.format(tag)).read().split(os.linesep):
if patch.split('.')[-1] == 'patch':
patches += 1
print("Patches fetched: {0}".format(patches))
def get_diff_contents(data):
'''
Get diff contents only.
'''
    # Yes, I know about the library https://github.com/cscorley/whatthepatch,
    # but for now we go ultra-primitive to avoid extra dependencies.
data = '--'.join(data.split("--")[:-1])
contents = []
for chunk in re.split(r'@@.*?@@.*?\n', data)[1:]:
contents.append(chunk.split('diff --git')[0])
return contents
def unique(fname):
'''
Change name to the unique, in case it isn't.
:param fname:
:param use:
:return:
'''
fname = fname.split('.')
if '-' not in fname[0]:
fname[0] = '{0}-{1}'.format(fname[0], 1)
else:
chnk = fname[0].split('-')
try:
fname[0] = '{0}-{1}'.format('-'.join(chnk[:-1]), int(chnk[-1]) + 1)
except ValueError:
# Filename is not in "str-int", but "str-str".
fname[0] = '{0}-{1}'.format(fname[0], 1)
return '.'.join(fname)
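# Added examples (illustrative): unique('0001-fix.patch') yields
# '0001-fix-1.patch'; applied again to that result it yields
# '0001-fix-2.patch', so repeated patch subjects get distinct file names.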
def extract_spec_source_patches(specfile):
'''
Extracts source patches from the .spec file to match existing
comments, according to the
https://en.opensuse.org/openSUSE:Packaging_Patches_guidelines
:param: specfile
:return:
'''
patch_sec_start = False
patch_sec_end = False
head_buff = []
patch_section = []
for spec_line in open(specfile).read().split(os.linesep):
if re.match(r'^[Pp]atch[0-9]+:', spec_line) and not patch_sec_start:
patch_sec_start = True
if not spec_line.startswith('#') and \
not re.match(r'^[Pp]atch[0-9]+:', spec_line) and \
patch_sec_start and \
not patch_sec_end:
patch_sec_end = True
if not patch_sec_start and not patch_sec_end:
head_buff.append(spec_line)
if patch_sec_start and not patch_sec_end:
patch_section.append(spec_line)
first_comment = []
for head_line in reversed(head_buff):
if not head_line:
break
if head_line.startswith('#'):
first_comment.append(head_line)
patch_section.insert(0, os.linesep.join(first_comment))
patchset = {}
curr_key = None
for line in reversed(patch_section):
if re.match(r'^[Pp]atch[0-9]+:', line):
curr_key = re.sub(r'^[Pp]atch[0-9]+:', '', line).strip()
patchset[curr_key] = []
continue
if curr_key and line and line.startswith('#'):
patchset[curr_key].append(line)
return patchset
def do_remix_spec(args):
'''
Remix spec file.
:param args:
:return:
'''
if not os.path.exists(args.spec or ''):
raise IOError('Specfile {0} is not accessible or is somewhere else'.format(args.spec))
if not os.path.exists(args.ordering or ''):
args.ordering = './{0}'.format(ORDERING_FILE)
if not os.path.exists(args.ordering):
raise IOError('Ordering file is expected "./{0}" but is not visible'.format(ORDERING_FILE))
patchset = extract_spec_source_patches(args.spec)
for o_line in open(args.ordering).read().split(os.linesep):
if re.match(r'^[Pp]atch[0-9]+:', o_line):
ref, pname = [_f for _f in o_line.split(' ') if _f]
print(os.linesep.join(patchset.get(pname) or ['# Description N/A']))
print(ref.ljust(15), pname)
def do_create_patches(args):
'''
Create and reformat patches for the package.
'''
current_dir = os.path.abspath('.')
if not args.existing:
if os.listdir(current_dir):
print("Error: this directory has to be empty!")
sys.exit(1)
git_format_patch(args.format)
else:
if not [fname for fname in os.listdir(current_dir) if fname.endswith('.patch')]:
print("Error: can't find a single patch in {0} to work with!".format(current_dir))
sys.exit(1)
ord_fh = open(args.ordering or ORDERING_FILE, 'w')
    ord_fh.write('#\n#\n# These are pre-generated snippets of patch ordering\n#\n')
ord_patches_p = []
patches = 0
for fname in os.listdir(current_dir):
if fname.split('.')[-1] == 'patch':
# Check if we should skip this patch in case subject starts with SKIP_TAG
with open(fname) as patch_file:
if any(re.match(r'^Subject: \[PATCH.*] {}'.format(re.escape(args.skip_tag)), i) for i in patch_file.readlines()):
print("Skipping {}".format(fname))
os.unlink(fname)
continue
print("Preparing {}".format(fname))
order, nfname = remove_order(fname)
if args.index is not None:
order += args.index
remove_order_from_subject(fname, nfname, use_unique=args.increment)
os.unlink(fname)
ord_fh.write('{patch}{fname}\n'.format(patch='Patch{0}:'.format(order).ljust(15), fname=nfname))
ord_patches_p.append(order)
patches += 1
if ord_patches_p:
ord_fh.write('#\n#\n# Patch processing inclusion:\n')
for order in ord_patches_p:
ord_fh.write('%patch{num} -p1\n'.format(num=order))
else:
ord_fh.write('# Nothing here, folks... :-(\n')
ord_fh.close()
print("\nRe-formatted {0} patch{1}".format(patches, patches > 1 and 'es' or ''))
def do_update_patches(args):
'''
Update patches on the target package source.
'''
print("Updating packages from {0} directory".format(args.update))
added = []
removed = []
changed = []
# Gather current patches
current_patches = {}
for fname in os.listdir(os.path.abspath(".")):
if fname.endswith('.patch'):
current_patches[os.path.basename(fname)] = True
for fname in os.listdir(args.update):
if fname.endswith('.patch'):
fname = os.path.join(args.update, fname)
if os.path.isfile(fname):
current_patches[os.path.basename(fname)] = False
n_fname = os.path.basename(fname)
if not os.path.exists(n_fname):
print("Adding {0} patch".format(fname))
shutil.copyfile(fname, os.path.join(os.path.abspath("."), n_fname))
added.append(n_fname)
else:
if get_diff_contents(open(fname).read()) != get_diff_contents(open(n_fname).read()):
if args.changed:
print("Replacing {0} patch".format(n_fname))
os.unlink(n_fname)
shutil.copyfile(fname, os.path.join(os.path.abspath("."), n_fname))
changed.append(n_fname)
else:
print("WARNING: Patches {0} and {1} are different!".format(fname, n_fname))
for fname in sorted([patch_name for patch_name, is_dead in list(current_patches.items()) if is_dead]):
print("Removing {0} patch".format(fname))
os.unlink(fname)
removed.append(fname)
# Generate an include for spec changes
with open(CHANGES_FILE, "w") as changes:
for title, data in [('Changed', changed), ('Added', added),
('Removed', removed)]:
if not data:
continue
print("- {}:".format(title), file=changes)
for fname in sorted(data):
print(" * {}".format(fname), file=changes)
print(file=changes)
    if not removed and not added and not changed:
        print("No files have been changed")
def main():
'''
Main app.
'''
VERSION = '0.2'
parser = argparse.ArgumentParser(description='Git patch formatter for RPM packages')
parser.add_argument('-u', '--update', action='store', const=None,
help='update current patches with the destination path')
parser.add_argument('-f', '--format', action='store', const=None,
help='specify tag or range of commits for patches to be formatted')
parser.add_argument('-o', '--ordering', action='store', const=None,
help='specify ordering spec inclusion file. Default: {0}'.format(ORDERING_FILE))
parser.add_argument('-x', '--index', action='store', const=None,
help='specify start ordering index. Default: 0')
parser.add_argument('-s', '--spec', action='store', const=None,
help='remix spec file and extract sources with their comments to match new patch ordering')
parser.add_argument('-i', '--increment', action='store_const', const=True,
help='use increments for unique names when patch commits repeated')
parser.add_argument('-c', '--changed', action='store_const', const=True,
help='update also changed files with the content')
parser.add_argument('-e', '--existing', action='store_const', const=True,
help='work with already formatted patches from Git')
parser.add_argument('-k', '--skip-tag', action='store', const=None, default='[skip]',
help='skip commits starting with this tag. Default: [skip]')
parser.add_argument('-v', '--version', action='store_const', const=True,
help='show version')
args = parser.parse_args()
try:
if args.index:
try:
args.index = int(args.index)
except ValueError:
raise Exception('Value "{0}" should be a digit'.format(args.index))
if args.version:
print("Version: {0}".format(VERSION))
elif args.spec:
do_remix_spec(args)
elif args.update and not args.format:
do_update_patches(args)
elif (args.format and not args.update) or args.existing:
do_create_patches(args)
else:
parser.print_help()
sys.exit(1)
except Exception as ex:
print("Critical error:", ex, file=sys.stderr)
if __name__ == '__main__':
main()
| 36.197917 | 129 | 0.606475 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,279 | 0.379784 |
a04efbad847960e56a6c9a8e43d4465164fb4801 | 5,455 | py | Python | modules/dispatch.py | kex5n/Vehicles-Dispatch-Simulator | d0cca03fbf56e4b0ceeef8dafc59de105c1d4507 | [
"MIT"
] | null | null | null | modules/dispatch.py | kex5n/Vehicles-Dispatch-Simulator | d0cca03fbf56e4b0ceeef8dafc59de105c1d4507 | [
"MIT"
] | null | null | null | modules/dispatch.py | kex5n/Vehicles-Dispatch-Simulator | d0cca03fbf56e4b0ceeef8dafc59de105c1d4507 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
import random
from typing import List
import numpy as np
import torch
from config import Config
from domain import DispatchMode
from models import DQN
from modules.state import FeatureManager
from objects import Area, Vehicle
from objects.area import AreaManager
from objects.vehicle import VehicleManager
random.seed(1234)
np.random.seed(1234)
torch.manual_seed(1234)
torch.cuda.manual_seed_all(1234)
torch.backends.cudnn.deterministic = True
@dataclass(frozen=True)
class DispatchOrder:
vehicle_id: int
start_node_id: int
end_node_id: int
action: int
from_area_id: int = None
to_area_id: int = None
class DispatchModuleInterface:
def dispatch(self, area_manager: AreaManager, vehicle: Vehicle) -> DispatchOrder:
raise NotImplementedError
def __call__(self, area_manager: AreaManager, vehicle_manager: VehicleManager) -> List[DispatchOrder]:
raise NotImplementedError
class RandomDispatch(DispatchModuleInterface):
def dispatch(self, area_manager: AreaManager, vehicle: Vehicle) -> DispatchOrder:
current_area: Area = area_manager.get_area_by_area_id(vehicle.location_area_id)
candidate_area_id = [current_area.id] + current_area.get_neighbor_ids()
next_area = area_manager.get_area_by_area_id(random.choice(candidate_area_id))
next_node_id = next_area.centroid
start_node_id = vehicle.location_node_id
return DispatchOrder(
vehicle_id=vehicle.id,
start_node_id=start_node_id,
end_node_id=next_node_id,
action=None,
)
def __call__(self, area_manager: AreaManager, vehicle_manager: VehicleManager) -> List[DispatchOrder]:
dispatch_order_list: List[DispatchOrder] = []
for area in area_manager.get_area_list():
for vehicle_id in area.get_idle_vehicle_ids():
vehicle = vehicle_manager.get_vehicle_by_vehicle_id(vehicle_id)
dispatch_order = self.dispatch(
area_manager=area_manager,
vehicle=vehicle,
)
dispatch_order_list.append(dispatch_order)
return dispatch_order_list
class DQNDispatch(DispatchModuleInterface):
def __init__(self, config: Config, is_train=False):
self.model = DQN(k=config.K, num_actions=9)
self.__feature_manager = FeatureManager(k=config.K)
self.is_train = is_train
def dispatch(self, area_manager: AreaManager, vehicle: Vehicle, prediction, episode: int = 0, is_train: bool = False) -> DispatchOrder:
current_area = area_manager.get_area_by_area_id(vehicle.location_area_id)
candidate_area_id = [current_area.id] + current_area.get_neighbor_ids()
supply_array = np.array([area.num_idle_vehicles for area in area_manager.get_area_list()])
state_list = self.__feature_manager.calc_state(
area=current_area,
demand_array=prediction,
supply_array=supply_array
)
state_array = torch.FloatTensor(state_list)
action = self.model.get_action(state_array, episode=episode, candidate_area_ids=candidate_area_id, is_train=is_train)
next_area_id = candidate_area_id[action]
next_node_id = area_manager.get_area_by_area_id(next_area_id).centroid
return DispatchOrder(
vehicle_id=vehicle.id,
start_node_id=vehicle.location_node_id,
end_node_id=next_node_id,
action=action,
from_area_id=current_area.id,
to_area_id=next_area_id
)
def memorize(self, state, action, next_state, reward, from_area_id, to_area_id) -> None:
self.model.memorize(state, action, next_state, reward, from_area_id, to_area_id)
def train(self, area_manager: AreaManager, date_info, episode=None):
return self.model.update_q_function(area_manager=area_manager, date_info=date_info, episode=episode)
def save(self, checkpoint_path: str) -> None:
self.model.save_checkpoint(checkpoint_path)
def load(self, checkpoint_path: str) -> None:
self.model.load_checkpoint(checkpoint_path)
def __call__(self, area_manager: AreaManager, vehicle_manager: VehicleManager, prediction: np.ndarray, episode: int = 0) -> List[DispatchOrder]:
dispatch_order_list: List[DispatchOrder] = []
for area in area_manager.get_area_list():
for vehicle_id in area.get_idle_vehicle_ids():
vehicle = vehicle_manager.get_vehicle_by_vehicle_id(vehicle_id)
dispatch_order = self.dispatch(
area_manager=area_manager,
vehicle=vehicle,
episode=episode,
prediction=prediction,
is_train=self.is_train,
)
dispatch_order_list.append(dispatch_order)
return dispatch_order_list
def load_dispatch_component(dispatch_mode: DispatchMode, config: Config, is_train=False) -> DispatchModuleInterface:
if dispatch_mode == DispatchMode.DQN:
dispatch_module = DQNDispatch(config=config, is_train=is_train)
return dispatch_module
elif dispatch_mode == DispatchMode.RANDOM:
dispatch_module = RandomDispatch()
return dispatch_module
elif dispatch_mode == DispatchMode.NOT_DISPATCH:
return None
else:
raise NotImplementedError
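# Minimal usage sketch (added; the Config instance and manager objects are
# assumptions, not part of this module):
#   dispatcher = load_dispatch_component(DispatchMode.DQN, config, is_train=True)
#   orders = dispatcher(area_manager, vehicle_manager, prediction, episode=0)
# Each returned DispatchOrder pairs an idle vehicle with start/end node ids.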
| 40.110294 | 148 | 0.700642 | 4,428 | 0.811732 | 0 | 0 | 180 | 0.032997 | 0 | 0 | 0 | 0 |
a04f7f2d5934d5148efee6d0cd9e612d55de51c8 | 4,716 | py | Python | dice_vtk/geometries/geometry_base.py | dicehub/dice_vtk | ab8d9f34ae359461db5687d05bf38548bbaca6ea | [
"MIT"
] | null | null | null | dice_vtk/geometries/geometry_base.py | dicehub/dice_vtk | ab8d9f34ae359461db5687d05bf38548bbaca6ea | [
"MIT"
] | null | null | null | dice_vtk/geometries/geometry_base.py | dicehub/dice_vtk | ab8d9f34ae359461db5687d05bf38548bbaca6ea | [
"MIT"
] | null | null | null | # Standard Python modules
# =======================
import weakref
from abc import ABCMeta, abstractmethod, abstractproperty
# External modules
# ================
from vtk import vtkActor
from vtk import vtkMapper
from vtk import vtkPolyDataAlgorithm
from vtk import vtkBoundingBox
# DICE modules
# ============
from dice_tools import wizard
class VisObject(metaclass=ABCMeta):
@abstractmethod
def attach(self, scene):
pass
@abstractmethod
def detach(self, scene):
pass
class GeometryProperty(property):
name = "unnamed_property"
def __init__(self, fget = None, fset = None):
property.__init__(self, fget = self.__fget, fset = self.__fset)
self.__getter = fget
self.__setter = fset
def __fget(self, obj):
return self.__getter(obj)
def __fset(self, obj, value):
self.__setter(obj, value)
wizard.w_property_changed(obj,
name = self.name, value = value)
def __call__(self, fget):
return self.getter(fget)
def getter(self, fget):
self.__getter = fget
return self
def setter(self, fset):
self.__setter = fset
return self
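# Added note: GeometryProperty mirrors the built-in property but also emits
# wizard.w_property_changed(obj, name=..., value=...) after every set;
# GeometryBaseMeta below fills in each property's `name` from the attribute
# it is bound to, so observers receive the real attribute name.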
class GeometryBaseMeta(ABCMeta):
def __new__(cls, classname, bases, classDict):
for name, attr in classDict.items():
if isinstance(attr, GeometryProperty):
attr.name = name
return super().__new__(cls, classname, bases, classDict)
class GeometryBase(VisObject, metaclass=GeometryBaseMeta):
selection = weakref.WeakSet()
def __init__(self, name='UnnamedGeometry', **kwargs):
super().__init__(**kwargs)
self.__name = name
self.__selected = False
self.__saved_color = None
@GeometryProperty
def name(self):
return self.__name
@name.setter
def name(self, value):
self.__name = value
@abstractmethod
def get_bounds(self, scene):
pass
@abstractmethod
def get_sources(self):
pass
def get_mappers(self):
return [v.GetMapper() for v in self.get_actors()]
@abstractmethod
def get_actors(self):
pass
@property
def selected(self):
return self.__selected
@property
def saved_color(self):
return self.__saved_color
@saved_color.setter
def saved_color(self, value):
self.__saved_color = value
@GeometryProperty
def color(self):
if self.__saved_color != None:
return self.__saved_color
return self.get_color()
@color.setter
def color(self, value):
if self.selected:
self.saved_color = value
else:
self.set_color(value)
@abstractmethod
def get_color(self):
pass
@abstractmethod
def set_color(self, value):
pass
@abstractproperty
def visible(self):
pass
@visible.setter
def visible(self, value):
pass
@abstractproperty
def opacity(self):
pass
@opacity.setter
def opacity(self, value):
pass
@abstractproperty
def representation(self):
pass
@representation.setter
def representation(self, value):
pass
@abstractproperty
def edge_visible(self):
pass
@edge_visible.setter
def edge_visible(self, value):
pass
@abstractproperty
def position(self):
pass
@position.setter
def position(self, value):
pass
@classmethod
def w_geometry_objects_select(cls, objects, enable, clear):
if clear and cls.selection:
for o in objects:
cls.selection.discard(o)
for o in cls.selection:
o.set_selected(False)
for o in objects:
if enable:
cls.selection.add(o)
o.set_selected(True)
else:
cls.selection.discard(o)
o.set_selected(False)
def set_selected(self, enable):
if enable and not self.__selected:
color = getattr(self, 'color', None)
if color != None:
self.__saved_color = color
self.set_color([0.9, 0, 0])
self.__selected = True
wizard.w_geometry_object_selection_state(self, True)
elif not enable and self.__selected:
self.__selected = False
color = getattr(self, 'color', None)
if color != None:
self.set_color(self.__saved_color)
self.__saved_color = None
wizard.w_geometry_object_selection_state(self, False)
wizard.subscribe(GeometryBase, 'w_geometry_objects_select')
| 23.117647 | 71 | 0.601569 | 4,299 | 0.911578 | 0 | 0 | 2,127 | 0.451018 | 0 | 0 | 190 | 0.040288 |
a05379809d542906a1e8b3ecab8d346bf1a2d752 | 2,272 | py | Python | tests/integration/sts/replayer_integration_test.py | jhall11/sts | b484f184824c9fe59864103f24fdfa24ff8bcdcd | [
"Apache-2.0"
] | 5 | 2016-03-18T15:12:04.000Z | 2019-01-28T20:18:24.000Z | tests/integration/sts/replayer_integration_test.py | jhall11/sts | b484f184824c9fe59864103f24fdfa24ff8bcdcd | [
"Apache-2.0"
] | null | null | null | tests/integration/sts/replayer_integration_test.py | jhall11/sts | b484f184824c9fe59864103f24fdfa24ff8bcdcd | [
"Apache-2.0"
] | 1 | 2019-11-02T22:04:48.000Z | 2019-11-02T22:04:48.000Z | #!/usr/bin/env python
#
# Copyright 2011-2013 Colin Scott
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import sys
import os
sys.path.append(os.path.dirname(__file__) + "/../../..")
simple_cfg = '''
from sts.control_flow.replayer import Replayer
from sts.simulation_state import SimulationConfig
simulation_config = SimulationConfig()
control_flow = Replayer(simulation_config, "%s")
'''
class ReplayerTest(unittest.TestCase):
tmpsuperlog = '/tmp/superlog.tmp'
tmpcfg = 'config/replayer_simple_test.py'
tmpcfgpyc = 'config/replayer_simple_test.pyc'
tmpcfgmodule = 'config.replayer_simple_test'
def write_simple_superlog(self):
    ''' Write a simple superlog to self.tmpsuperlog and close it. '''
superlog = open(self.tmpsuperlog, 'w')
e1 = str('''{"dependent_labels": ["e2"], "start_dpid": 8, "class": "LinkFailure",'''
''' "start_port_no": 3, "end_dpid": 15, "end_port_no": 2, "label": "e1", "time": [0,0], "round": 0}''')
superlog.write(e1 + '\n')
e2 = str('''{"dependent_labels": [], "start_dpid": 8, "class": "LinkRecovery",'''
''' "start_port_no": 3, "end_dpid": 15, "end_port_no": 2, "label": "e2", "time": [0,0], "round": 0}''')
superlog.write(e2 + '\n')
superlog.close()
def write_simple_cfg(self):
cfg = open(self.tmpcfg, 'w')
cfg.write(simple_cfg % self.tmpsuperlog)
cfg.close()
def basic_test(self):
try:
self.write_simple_superlog()
self.write_simple_cfg()
ret = os.system("./simulator.py -c %s" % self.tmpcfgmodule)
self.assertEqual(0, ret)
finally:
os.unlink(self.tmpsuperlog)
os.unlink(self.tmpcfg)
if os.path.exists(self.tmpcfgpyc):
os.unlink(self.tmpcfgpyc)
if __name__ == '__main__':
unittest.main()
| 33.910448 | 116 | 0.677377 | 1,315 | 0.578785 | 0 | 0 | 0 | 0 | 0 | 0 | 1,360 | 0.598592 |
a054dcd6697f8d246a99e2b87deb291ef103d4ce | 101 | py | Python | add.py | Harshillab/python | 877d5fa6769ce7bcc28ca75c247df42ed7375e55 | [
"MIT"
] | null | null | null | add.py | Harshillab/python | 877d5fa6769ce7bcc28ca75c247df42ed7375e55 | [
"MIT"
] | null | null | null | add.py | Harshillab/python | 877d5fa6769ce7bcc28ca75c247df42ed7375e55 | [
"MIT"
] | null | null | null | import os
a=input("enter username:")
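# Added note: isalpha() restricts the input to letters only, which keeps
# shell metacharacters out of the os.system() calls below.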
if a.isalpha():
    os.system("useradd "+a)
    os.system("passwd "+a)
| 16.833333 | 26 | 0.673267 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.366337 |
a055c0b6a8a397cfaf7bde8f028637510c8a76bc | 3,733 | py | Python | responsive_dashboard/views.py | rhooper/django-responsive-dashboard | 039d634cbefb87be610334c01bda1a790cf5cd71 | [
"BSD-3-Clause"
] | 28 | 2015-07-08T01:03:17.000Z | 2022-03-11T13:30:49.000Z | responsive_dashboard/views.py | burke-software/django-responsive-dashboard | e08d7a12155d87d78cb3928bcc58f2701d326b69 | [
"BSD-3-Clause"
] | 4 | 2018-09-03T14:15:42.000Z | 2021-06-10T17:24:09.000Z | responsive_dashboard/views.py | rhooper/django-responsive-dashboard | 039d634cbefb87be610334c01bda1a790cf5cd71 | [
"BSD-3-Clause"
] | 13 | 2015-01-15T14:33:30.000Z | 2021-08-23T02:39:38.000Z | """Views."""
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, Http404
from django.shortcuts import render, redirect
from responsive_dashboard.dashboard import dashboards
from responsive_dashboard.models import UserDashboard, UserDashlet
# pylint: disable=no-member
@login_required
def generate_dashboard(request, app_name="", title=""):
"""Generate a dashboard view.
Generates a dashboard view by looking up the dashboard from its name.
responsive_dashboards is a list of all possible dashboards.
"""
dashboard_name = app_name
if title:
dashboard_name = '{0}__{1}'.format(app_name, title)
dashboard = dashboards.get_dashboard(dashboard_name)
if dashboard is None:
raise Http404("Dashboard does not exist")
user_dashboard = UserDashboard.objects.get_or_create(
dashboard_name=dashboard_name,
user=request.user,
)[0]
user_dashlets = user_dashboard.userdashlet_set.all()
dashlet_names = []
addable_dashlet_names = []
for dashlet in dashboard.dashlets:
dashlet.set_request(request)
if (dashlet.is_default() and
not user_dashlets.filter(dashlet_name=dashlet.title)):
user_dashlets.create(dashlet_name=dashlet.title, user_dashboard=user_dashboard)
dashlet_names += [dashlet.title]
if dashlet.allow_multiple or user_dashlets.filter(deleted=False, dashlet_name=dashlet.title).count() == 0:
addable_dashlet_names += [dashlet.title]
user_dashlets = user_dashlets.filter(
dashlet_name__in=dashlet_names,
deleted=False, )
for user_dashlet in user_dashlets:
for dashlet in dashboard.dashlets:
if dashlet.title == user_dashlet.dashlet_name:
dashlet.user_dashlet = user_dashlet # Lets us access per user settings in templates
user_dashlet.dashlet = dashlet
break
include_jquery = False
if getattr(settings, 'RESPONSIVE_DASHBOARD_INCLUDE_JQUERY', None):
include_jquery = True
return render(request, dashboard.template_name, {
'dashboard': dashboard,
'dashlets': user_dashlets,
'new_dashlet_names': addable_dashlet_names,
'app_name': app_name,
'title': title,
'include_jquery': include_jquery
})
@login_required
def ajax_reposition(request, **kwargs):
""" Save the position field in the user dashlet
    django-positions should take care of everything """
dashlet = UserDashlet.objects.get(
user_dashboard__user=request.user, id=request.POST['dashlet_id'])
dashlet.position = int(request.POST['position'])
dashlet.save()
return HttpResponse('SUCCESS')
@login_required
def ajax_delete(request, **kwargs):
""" Delete user dashlet by marking as deleted. """
dashlet = UserDashlet.objects.get(
user_dashboard__user=request.user, id=request.POST['dashlet_id'])
dashlet.deleted = True
dashlet.save()
return HttpResponse('SUCCESS')
@login_required
def add_dashlet(request, app_name="", title=""):
""" Add a new user dashlet then reload the page """
dashboard_name = app_name
if title:
dashboard_name = '{0}__{1}'.format(app_name, title)
user_dashboard = UserDashboard.objects.get_or_create(
dashboard_name=dashboard_name,
user=request.user,
)[0]
dashlet_name = request.GET['dashlet_name']
if not dashlet_name:
raise Exception('Cannot add a null dashlet')
UserDashlet.objects.create(
user_dashboard=user_dashboard,
dashlet_name=dashlet_name,
)
return redirect(request.META['HTTP_REFERER'])
| 34.88785 | 114 | 0.698902 | 0 | 0 | 0 | 0 | 3,373 | 0.903563 | 0 | 0 | 740 | 0.198232 |
a05679a1770f12c767a08a09a8c1456749cc03d4 | 769 | py | Python | app/config.py | tomakado/markovscope-api | 3dd60439d980e3b77429850f1b43cb37ffd02f99 | [
"BSD-3-Clause"
] | 1 | 2021-03-06T06:36:25.000Z | 2021-03-06T06:36:25.000Z | app/config.py | tomakado/markovscope-api | 3dd60439d980e3b77429850f1b43cb37ffd02f99 | [
"BSD-3-Clause"
] | null | null | null | app/config.py | tomakado/markovscope-api | 3dd60439d980e3b77429850f1b43cb37ffd02f99 | [
"BSD-3-Clause"
] | null | null | null | import os
from dataclasses import dataclass
@dataclass(frozen=True)
class Config:
listen_host: str
listen_port: int
yc_oauth_token: str
yc_folder_id: str
data_path: str
debug_enabled: bool
@staticmethod
def create_from_env() -> 'Config':
return Config(
listen_host=os.getenv('LISTEN_HOST', '0.0.0.0'),
listen_port=int(os.getenv('LISTEN_PORT', 8000)),
yc_oauth_token=os.getenv('YC_OAUTH_TOKEN'),
yc_folder_id=os.getenv('YC_FOLDER_ID'),
data_path=os.getenv('DATA_PATH'),
debug_enabled=(
True if os.getenv('DEBUG_ENABLED') in ['1', 'true', 'yes', 'on']
else False
),
)
CONFIG = Config.create_from_env()
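# Illustrative run (added; values are examples only): with LISTEN_PORT=9000
# and DEBUG_ENABLED=yes exported, CONFIG.listen_port becomes 9000 and
# CONFIG.debug_enabled is True; unset optional variables fall back to the
# defaults handled above.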
| 25.633333 | 80 | 0.595579 | 662 | 0.860858 | 0 | 0 | 686 | 0.892068 | 0 | 0 | 117 | 0.152146 |
a0578bb9313ba5a000fe92b2495ddd4a94b1be7e | 15,486 | py | Python | atlas/atlas.py | pythseq/atlas | 6fd8d9e8ad05d234fc408aef8e0989da199f3b48 | [
"BSD-3-Clause"
] | 1 | 2020-12-31T14:54:49.000Z | 2020-12-31T14:54:49.000Z | atlas/atlas.py | pythseq/atlas | 6fd8d9e8ad05d234fc408aef8e0989da199f3b48 | [
"BSD-3-Clause"
] | null | null | null | atlas/atlas.py | pythseq/atlas | 6fd8d9e8ad05d234fc408aef8e0989da199f3b48 | [
"BSD-3-Clause"
] | null | null | null | import click
import logging
import multiprocessing
import os
import sys
from atlas import __version__
from atlas.conf import make_config
from atlas.parsers import refseq_parser
from atlas.tables import merge_tables
from atlas.workflows import download, run_workflow
logging.basicConfig(level=logging.INFO, datefmt="%Y-%m-%d %H:%M", format="[%(asctime)s %(levelname)s] %(message)s")
@click.group(context_settings=dict(help_option_names=["-h", "--help"]))
@click.version_option(__version__)
@click.pass_context
def cli(obj):
"""ATLAS - a framework for assembly, annotation, and genomic binning of metagenomic and
metatranscriptomic data.
For updates and reporting issues, see: https://github.com/pnnl/atlas
"""
@cli.command("refseq", short_help="enables tree based LCA and LCA star methods")
@click.argument("tsv", type=click.Path(exists=True))
@click.argument("namemap", type=click.Path(exists=True))
@click.argument("treefile", type=click.Path(exists=True))
@click.argument("output", type=click.File("w", atomic=True))
@click.option("-s", "--summary-method", type=click.Choice(["lca", "majority", "best"]), default="lca", show_default=True, help="summary method for annotating ORFs; when using LCA, it's recommended that one limits the number of hits using --top-fraction though function will be assigned per the best hit; 'best' is fastest")
@click.option("-a", "--aggregation-method", type=click.Choice(["lca", "lca-majority", "majority"]), default="lca-majority", show_default=True, help="summary method for aggregating ORF taxonomic assignments to contig level assignment; 'lca' will result in most stringent, least specific assignments")
@click.option("--majority-threshold", type=float, default=0.51, show_default=True, help="constitutes a majority fraction at tree node for 'lca-majority' ORF aggregation method")
@click.option("--min-identity", type=int, default=70, show_default=True, help="minimum allowable percent ID of BLAST hit")
@click.option("--min-bitscore", type=int, default=0, show_default=True, help="minimum allowable bitscore of BLAST hit; 0 disables")
@click.option("--min-length", type=int, default=60, show_default=True, help="minimum allowable BLAST alignment length")
@click.option("--max-evalue", type=float, default=0.000001, show_default=True, help="maximum allowable e-value of BLAST hit")
@click.option("--max-hits", type=int, default=10, show_default=True, help="maximum number of BLAST hits to consider when summarizing ORFs; can drastically alter ORF LCA assignments if too high without further limits")
@click.option("--table-name", default="refseq", help="table name within namemap database; expected columns are 'name', 'function', and 'taxonomy'")
@click.option("--top-fraction", type=float, default=1, show_default=True, help="filters ORF BLAST hits by only keep hits within this fraction of the highest bitscore; this is recommended over --max-hits")
def run_refseq_parser(tsv, namemap, treefile, output, summary_method, aggregation_method, majority_threshold, min_identity, min_bitscore, min_length, max_evalue, max_hits, table_name, top_fraction):
"""Parse TSV (tabular BLAST output [-outfmt 6]), grabbing taxonomy metadata from ANNOTATION to
compute LCAs.
The BLAST hits are assumed to be sorted by query with decreasing bitscores (best alignment first):
\b
sort -k1,1 -k12,12rn tsv > sorted_tsv
Annotation file should include your BLAST subject sequence ID, a function, a taxonomy name,
the taxonomy ID, and the parent taxonomy ID. This file is generated from `prepare-refseq`:
\b
gi|507051347|ref|WP_016122340.1| two-component sensor histidine kinase Bacillus cereus 1396 86661
gi|507052147|ref|WP_016123132.1| two-component sensor histidine kinase Bacillus cereus 1396 86661
gi|507053266|ref|WP_016124222.1| two-component sensor histidine kinase Bacillus cereus 1396 86661
The RefSeq function is always derived from the best BLAST hit.
The output will give contig, ORF ID, the lineage assigned to the contig based on
--aggregation-method, the probability of error (erfc), taxonomy assigned to the ORF, the
best hit's product, the best hit's evalue, and the best hit's bitscore:
\b
contig orf taxonomy erfc orf_taxonomy refseq refseq_evalue refseq_bitscore
k121_52126 k121_52126_1 root 1.0 root hypothetical protein 1.0e-41 176.8
"""
refseq_parser(tsv, namemap, treefile, output, summary_method, aggregation_method, majority_threshold, min_identity, min_bitscore, min_length, max_evalue, max_hits, table_name, top_fraction)
@cli.command("gff2tsv", short_help="writes version of Prokka TSV with contig as new first column")
@click.argument("gff", type=click.Path(exists=True))
@click.argument("output", type=click.File("w", atomic=True))
@click.option("--feature-type", default="CDS", show_default=True, help="feature type in GFF annotation to print")
def run_gff_to_tsv(gff, output, feature_type):
import re
locus_tag_re = re.compile(r"locus_tag=(.*?)(?:;|$)")
ec_re = re.compile(r"eC_number=(.*?)(?:;|$)")
gene_re = re.compile(r"gene=(.*?)(?:;|$)")
product_re = re.compile(r"product=(.*?)(?:;|$)")
# print the header into the output file
print("contig_id", "locus_tag", "ftype", "gene", "EC_number", "product", sep="\t", file=output)
with open(gff) as gff_fh:
for line in gff_fh:
if line.startswith("##FASTA"):
break
if line.startswith("#"):
continue
toks = line.strip().split("\t")
if not toks[2] == feature_type:
continue
try:
locus_tag = locus_tag_re.findall(toks[-1])[0]
except IndexError:
locus_tag = ""
if not locus_tag:
logging.critical("Unable to locate a locus tag in [%s]" % toks[-1])
sys.exit(1)
try:
gene = gene_re.findall(toks[-1])[0]
except IndexError:
gene = ""
try:
ec_number = ec_re.findall(toks[-1])[0]
except IndexError:
ec_number = ""
try:
product = product_re.findall(toks[-1])[0]
except IndexError:
product = ""
print(toks[0], locus_tag, toks[2], gene, ec_number, product, sep="\t", file=output)
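# Example invocation (added; assumes the `atlas` console entry point):
#   atlas gff2tsv annotation.gff orfs.tsv --feature-type CDS
# which writes one row per CDS with the contig id prepended to the Prokka fields.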
@cli.command("munge-blast", short_help="adds contig ID to prokka annotated ORFs")
@click.argument("tsv", type=click.Path(exists=True))
@click.argument("gff", type=click.Path(exists=True))
@click.argument("output", type=click.File("w", atomic=True))
@click.option("--gene-id", default="ID", show_default=True, help="tag in gff attributes corresponding to ORF ID")
def run_munge_blast(tsv, gff, output, gene_id):
"""Prokka ORFs are reconnected to their origin contigs using the GFF of the Prokka output.
Contig output is re-inserted as column 1, altering blast hits to be tabular + an extra initial
column that will be used to place the ORFs into context.
"""
import re
gff_map = dict()
logging.info("step 1 of 2; parsing %s" % gff)
# gff attrs: ID=Flavobacterium_00802;inference=ab initio prediction:Prodigal:2.60;...
orf_id_re = re.compile(r"%s=(.*?)\;" % gene_id)
with open(gff) as prokka_gff:
for line in prokka_gff:
if line.startswith("##FASTA"):
break
if line.startswith("#"):
continue
toks = line.strip().split("\t")
try:
orf_id = orf_id_re.findall(toks[-1])[0]
except IndexError:
# some, like repeat regions, will not have a locus_tag=, but they also will not
# be in the .faa file that is being locally aligned
logging.warning("Unable to locate ORF ID using '%s' for line '%s'" % (gene_id, " ".join(toks)))
continue
gff_map[orf_id] = toks[0]
logging.info("step 2 of 2; parsing %s" % tsv)
# example blast hit:
# Flavobacterium_00002 gi|500936490|ref|WP_012025625.1| 100.0 187 0 0 1 187 1 187 1.7e-99 369.8
with open(tsv) as blast_hits:
for line in blast_hits:
toks = line.strip().split("\t")
try:
toks.insert(0, gff_map[toks[0]])
except KeyError:
logging.critical("%s was not found in the GFF [%s]" % (toks[0], gff))
logging.critical("processing of %s was halted" % tsv)
sys.exit(1)
print(*toks, sep="\t", file=output)
@cli.command("merge-tables", short_help="merge Prokka TSV, Counts, and Taxonomy")
@click.argument("prokkatsv", type=click.Path(exists=True))
@click.argument("refseqtsv", type=click.Path(exists=True))
@click.argument("output")
@click.option("--counts", type=click.Path(exists=True), help="Feature Counts result TSV")
@click.option("--completeness", type=click.Path(exists=True), help="CheckM completeness TSV")
@click.option("--taxonomy", type=click.Path(exists=True), help="CheckM taxonomy TSV")
@click.option("--fasta", multiple=True, type=click.Path(exists=True), help="Bin fasta file path; can be specified multiple times")
def run_merge_tables(prokkatsv, refseqtsv, output, counts, completeness, taxonomy, fasta):
"""Combines Prokka TSV, RefSeq TSV, and Counts TSV into a single table, merging on locus tag.
"""
merge_tables(prokkatsv, refseqtsv, output, counts, completeness, taxonomy, fasta)
@cli.command("make-config", short_help="prepopulate a configuration file with samples and defaults")
@click.argument("config")
@click.argument("path")
@click.option("--assembler", default="megahit",
type=click.Choice(["megahit", "spades"]),
show_default=True, help="contig assembler")
@click.option("--data-type", default="metagenome",
type=click.Choice(["metagenome", "metatranscriptome"]),
show_default=True, help="sample data type")
@click.option("--database-dir", default="databases", show_default=True,
help="location of formatted databases (from `atlas download`)")
# @click.option("--process", default="assemble",
# type=click.Choice(["annotate", "assemble"]),
# help="additional fields in the configuration file have no effect on the protocol, to limit the options for annotation only set `--process annotate`")
@click.option("--threads", default=multiprocessing.cpu_count(), type=int,
help="number of threads to use per multi-threaded job")
def run_make_config(config, path, data_type, database_dir, threads, assembler):
"""Write the file CONFIG and complete the sample names and paths for all
FASTQ files in PATH.
PATH is traversed recursively and adds any file with '.fastq' or '.fq' in
the file name with the file name minus extension as the sample ID.
"""
make_config(config, path, data_type, database_dir, threads, assembler)
@cli.command("QC", context_settings=dict(ignore_unknown_options=True), short_help="quality control workflow (without assembly)")
@click.argument("config")
@click.option("-j", "--jobs", default=multiprocessing.cpu_count(), type=int, show_default=True, help="use at most this many cores in parallel; total running tasks at any given time will be jobs/threads")
@click.option("-o", "--out-dir", default=os.path.realpath("."), show_default=True, help="results output directory")
@click.option("--no-conda", is_flag=True, default=False, show_default=True, help="do not use conda environments")
@click.option("--dryrun", is_flag=True, default=False, show_default=True, help="do not execute anything")
@click.argument("snakemake_args", nargs=-1, type=click.UNPROCESSED)
def run_qc(config, jobs, out_dir, no_conda, dryrun, snakemake_args):
"""Runs the ATLAS Quality control protocol, the first step of the workflow.
A skeleton configuration file can be generated with defaults using:
\b
atlas make-config
For more details, see: http://pnnl-atlas.readthedocs.io/
"""
run_workflow(os.path.realpath(config), jobs, out_dir, no_conda, dryrun, snakemake_args,workflow="qc")
@cli.command("assemble", context_settings=dict(ignore_unknown_options=True), short_help="assembly workflow")
@click.argument("config")
@click.option("-j", "--jobs", default=multiprocessing.cpu_count(), type=int, show_default=True, help="use at most this many cores in parallel; total running tasks at any given time will be jobs/threads")
@click.option("-o", "--out-dir", default=os.path.realpath("."), show_default=True, help="results output directory")
@click.option("--no-conda", is_flag=True, default=False, show_default=True, help="do not use conda environments")
@click.option("--dryrun", is_flag=True, default=False, show_default=True, help="do not execute anything")
@click.argument("snakemake_args", nargs=-1, type=click.UNPROCESSED)
def run_assemble(config, jobs, out_dir, no_conda, dryrun, snakemake_args):
"""Runs the complete ATLAS protocol from raw reads through assembly, annotation, quantification,
and genomic binning.
A skeleton configuration file can be generated with defaults using:
\b
atlas make-config
For more details, see: http://pnnl-atlas.readthedocs.io/
"""
run_workflow(os.path.realpath(config), jobs, out_dir, no_conda, dryrun, snakemake_args,workflow="complete")
@cli.command("annotate", context_settings=dict(ignore_unknown_options=True), short_help="annotation workflow")
@click.argument("config")
@click.option("-j", "--jobs", default=multiprocessing.cpu_count(), type=int, show_default=True, help="use at most this many cores in parallel; total running tasks at any given time will be jobs/threads")
@click.option("-o", "--out-dir", default=os.path.realpath("."), show_default=True, help="results output directory")
@click.option("--no-conda", is_flag=True, default=False, show_default=True, help="do not use conda environments")
@click.option("--dryrun", is_flag=True, default=False, show_default=True, help="do not execute anything")
@click.argument("snakemake_args", nargs=-1, type=click.UNPROCESSED)
def run_annotate(config, jobs, out_dir, no_conda, dryrun, snakemake_args):
"""Runs the ATLAS annotation protocol on assembled contigs. If FASTQ files are provided
for a sample, quantification is also performed.
A skeleton configuration file can be generated using:
\b
atlas make-config
For more details, see: http://pnnl-atlas.readthedocs.io/
"""
run_workflow(os.path.realpath(config), jobs, out_dir, no_conda, dryrun, snakemake_args,workflow="annotate")
@cli.command("download", context_settings=dict(ignore_unknown_options=True), short_help="download reference files")
@click.option("-j", "--jobs", default=multiprocessing.cpu_count(), type=int, show_default=True, help="number of simultaneous downloads")
@click.option("-o", "--out-dir", default=os.path.join(os.path.realpath("."), "databases"), show_default=True, help="database download directory")
@click.argument("snakemake_args", nargs=-1, type=click.UNPROCESSED)
def run_download(jobs, out_dir, snakemake_args):
"""Executes a snakemake workflow to download reference database files and validate based on
their MD5 checksum.
"""
download(jobs, out_dir, snakemake_args)
if __name__ == "__main__":
cli()
| 54.336842 | 323 | 0.694692 | 0 | 0 | 0 | 0 | 15,028 | 0.970425 | 0 | 0 | 7,570 | 0.488829 |
a0590b43efec682503f6e281362973bb8f85de85 | 1,101 | py | Python | tests/unit/core/types/test_relationships.py | jeffsawatzky/python-jsonapi | 8f181d6764b525f58d06517c65b1f0d24f3c2282 | [
"MIT"
] | null | null | null | tests/unit/core/types/test_relationships.py | jeffsawatzky/python-jsonapi | 8f181d6764b525f58d06517c65b1f0d24f3c2282 | [
"MIT"
] | 237 | 2020-07-23T05:53:22.000Z | 2022-03-30T23:02:35.000Z | tests/unit/core/types/test_relationships.py | jeffsawatzky/python-jsonapi | 8f181d6764b525f58d06517c65b1f0d24f3c2282 | [
"MIT"
] | null | null | null | """Test cases for the python_jsonapi.core.types.relationships module."""
from python_jsonapi.core.types.relationships import Relationship
from python_jsonapi.core.types.relationships import RelationshipsMixin
def test_relationship_init() -> None:
"""Can init a new relationships."""
sut = Relationship()
assert sut is not None
def test_mixin_init() -> None:
"""Can init a new mixin."""
sut = RelationshipsMixin()
assert sut is not None
relationship = Relationship()
sut = RelationshipsMixin(relationships={"self": relationship})
assert sut is not None
assert sut.relationships is not None
assert sut.relationships["self"] == relationship
def test_mixin_add_relationship() -> None:
"""Can add a new entry."""
sut = RelationshipsMixin()
sut.add_relationship(key="relationship1", relationship=Relationship())
sut.add_relationship(key="relationship2", relationship=Relationship())
assert sut.relationships is not None
assert sut.relationships["relationship1"] is not None
assert sut.relationships["relationship2"] is not None
| 34.40625 | 74 | 0.735695 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 232 | 0.210718 |
a0595a142eaf248d183d94e735f0ba479dc117a7 | 48 | py | Python | needlestack/indices/__init__.py | needlehaystack/needlestack | e00529a2a7c2d85059936a85f54dfb55e515b6ef | [
"Apache-2.0"
] | 3 | 2019-10-03T22:15:21.000Z | 2022-02-08T09:05:41.000Z | needlestack/indices/__init__.py | cungtv/needlestack | e00529a2a7c2d85059936a85f54dfb55e515b6ef | [
"Apache-2.0"
] | 1 | 2021-04-30T21:08:47.000Z | 2021-04-30T21:08:47.000Z | needlestack/indices/__init__.py | cungtv/needlestack | e00529a2a7c2d85059936a85f54dfb55e515b6ef | [
"Apache-2.0"
] | 2 | 2019-08-02T19:13:09.000Z | 2019-10-25T01:47:17.000Z | from needlestack.indices.index import BaseIndex
| 24 | 47 | 0.875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
a05a419a9ddf5084b706e695f35bb68b2e11e8f7 | 698 | py | Python | app/accounts/utilities.py | porowns/Krypted-Auth | ed171bfbd1c98a4c171ddf6a20b18691330b1646 | [
"MIT"
] | 6 | 2017-12-13T21:53:05.000Z | 2018-10-04T02:47:05.000Z | app/accounts/utilities.py | porowns/Krypted-Auth | ed171bfbd1c98a4c171ddf6a20b18691330b1646 | [
"MIT"
] | 106 | 2019-08-11T23:00:39.000Z | 2021-06-10T19:45:54.000Z | app/accounts/utilities.py | KryptedGaming/kryptedauth | ed171bfbd1c98a4c171ddf6a20b18691330b1646 | [
"MIT"
] | 10 | 2020-01-18T11:28:44.000Z | 2022-02-21T06:08:39.000Z | from django.conf import settings
from django.contrib.auth.models import User
from django.core.mail import send_mail
def username_or_email_resolver(username):
if User.objects.filter(email=username).exists():
return User.objects.get(email=username).username
else:
return username
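# Added example (illustrative): if a user registered the email "[email protected]"
# under username "jo", both username_or_email_resolver("[email protected]") and
# username_or_email_resolver("jo") return "jo"; unknown values pass through
# unchanged for the authentication backend to reject.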
def send_activation_email(user):
send_mail(
'Verify your Krypted account',
'Welcome to %s. \n Please click the following link to verify your account. \n https://%s/accounts/activate/%s' % (
settings.SITE_TITLE, settings.SITE_DOMAIN, user.info.secret),
settings.DEFAULT_FROM_EMAIL,
[user.email],
fail_silently=False)
| 33.238095 | 123 | 0.684814 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 139 | 0.19914 |
a05b0d63377d071367b35f0034a3b68acdab2c2d | 245 | py | Python | run/lemmatize.py | osmanbaskaya/mapping-impact | 8024dd3b916ac2dfc336221dd32faba4c0a98442 | [
"MIT"
] | 1 | 2016-03-14T15:28:22.000Z | 2016-03-14T15:28:22.000Z | run/lemmatize.py | osmanbaskaya/mapping-impact | 8024dd3b916ac2dfc336221dd32faba4c0a98442 | [
"MIT"
] | null | null | null | run/lemmatize.py | osmanbaskaya/mapping-impact | 8024dd3b916ac2dfc336221dd32faba4c0a98442 | [
"MIT"
] | null | null | null | #! /usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Osman Baskaya"
from nltk.stem.wordnet import WordNetLemmatizer
import sys
lmtzr = WordNetLemmatizer()
for line in sys.stdin:
print ' '.join(map(lmtzr.lemmatize, line.split()))
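# Example run (added; assumes the NLTK WordNet corpus is downloaded):
#   $ echo "the cats sat on mats" | python lemmatize.py
#   the cat sat on mat
# lemmatize() defaults to noun POS, so verbs such as "sat" pass through.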
| 14.411765 | 54 | 0.689796 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 0.240816 |
a05cc4dfec88f66003f8b833c676b2a3c02c79c3 | 4,066 | py | Python | markov.py | garybake/markov_lyrics | 816043acd849b77097aa5bd504b123c6b306b801 | [
"MIT"
] | null | null | null | markov.py | garybake/markov_lyrics | 816043acd849b77097aa5bd504b123c6b306b801 | [
"MIT"
] | null | null | null | markov.py | garybake/markov_lyrics | 816043acd849b77097aa5bd504b123c6b306b801 | [
"MIT"
] | null | null | null | # https://realpython.com/blog/python/lyricize-a-flask-app-to-create-lyrics-using-markov-chains/
from random import choice
import sys
def generateModel(text, order):
model = {}
for i in range(0, len(text) - order):
fragment = text[i:i+order]
next_letter = text[i+order]
if fragment not in model:
model[fragment] = {}
if next_letter not in model[fragment]:
model[fragment][next_letter] = 1
else:
model[fragment][next_letter] += 1
return model
def getNextCharacter(model, fragment):
letters = []
for letter in model[fragment].keys():
for times in range(0, model[fragment][letter]):
letters.append(letter)
return choice(letters)
def generateText(text, order, length):
model = generateModel(text, order)
currentFragment = text[0:order]
output = ""
for i in range(0, length-order):
newCharacter = getNextCharacter(model, currentFragment)
output += newCharacter
currentFragment = currentFragment[1:] + newCharacter
    print(output)
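# Usage sketch (illustrative): generateText builds an order-`order` character
# model and prints `length` generated characters. Higher orders copy longer
# fragments of the source verbatim, e.g.:
#   generateText("the quick brown fox jumps over the lazy dog " * 10, 4, 200)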
text = "some sample text"
text = """
An old man turned ninety-eight
He won the lottery and died the next day
It's a black fly in your Chardonnay
It's a death row pardon two minutes too late
And isn't it ironic... don't you think
It's like rain on your wedding day
It's a free ride when you've already paid
It's the good advice that you just didn't take
Who would've thought... it figures
Mr. Play It Safe was afraid to fly
He packed his suitcase and kissed his kids goodbye
He waited his whole damn life to take that flight
And as the plane crashed down he thought
"Well isn't this nice..."
And isn't it ironic... don't you think
It's like rain on your wedding day
It's a free ride when you've already paid
It's the good advice that you just didn't take
Who would've thought... it figures
Well life has a funny way of sneaking up on you
When you think everything's okay and everything's going right
And life has a funny way of helping you out when
You think everything's gone wrong and everything blows up
In your face
A traffic jam when you're already late
A no-smoking sign on your cigarette break
It's like ten thousand spoons when all you need is a knife
It's meeting the man of my dreams
And then meeting his beautiful wife
And isn't it ironic...don't you think
A little too ironic...and, yeah, I really do think...
It's like rain on your wedding day
It's a free ride when you've already paid
It's the good advice that you just didn't take
Who would've thought... it figures
Life has a funny way of sneaking up on you
Life has a funny, funny way of helping you out
Helping you out
I recommend getting your heart trampled on to anyone
I recommend walking around naked in your living room
Swallow it down (what a jagged little pill)
It feels so good (swimming in your stomach)
Wait until the dust settles
You live you learn
You love you learn
You cry you learn
You lose you learn
You bleed you learn
You scream you learn
I recommend biting off more then you can chew to anyone
I certainly do
I recommend sticking your foot in your mouth at any time
Feel free
Throw it down (the caution blocks you from the wind)
Hold it up (to the rays)
You wait and see when the smoke clears
You live you learn
You love you learn
You cry you learn
You lose you learn
You bleed you learn
You scream you learn
Wear it out (the way a three-year-old would do)
Melt it down (you're gonna have to eventually anyway)
The fire trucks are coming up around the bend
You live you learn
You love you learn
You cry you learn
You lose you learn
You bleed you learn
You scream you learn
You grieve you learn
You choke you learn
You laugh you learn
You choose you learn
You pray you learn
You ask you learn
You live you learn
"""
# text = "For now, well generate sample text via the very scientific method of throwing a string directly into the code based on some copied & pasted Alanis Morisette lyrics."
if __name__ == "__main__":
generateText(text, int(sys.argv[1]), int(sys.argv[2])) | 30.571429 | 175 | 0.734629 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,981 | 0.733153 |
a05d8bd7f43816678e051cbb74ff64ee556b6710 | 1,545 | py | Python | curriculum_tools/curriculum_tools/NamedEnv.py | darpa-l2m/meta-arcade | 9c9539c1feef89e9d1d55507bf4f75c965a25038 | [
"MIT"
] | 2 | 2021-12-17T19:54:41.000Z | 2021-12-20T06:08:31.000Z | curriculum_tools/curriculum_tools/NamedEnv.py | darpa-l2m/meta-arcade | 9c9539c1feef89e9d1d55507bf4f75c965a25038 | [
"MIT"
] | 1 | 2021-12-17T20:45:07.000Z | 2021-12-21T16:30:24.000Z | curriculum_tools/curriculum_tools/NamedEnv.py | darpa-l2m/meta-arcade | 9c9539c1feef89e9d1d55507bf4f75c965a25038 | [
"MIT"
] | null | null | null | """
Copyright © 2021 The Johns Hopkins University Applied Physics Laboratory LLC
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the “Software”), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import gym
import copy
class NamedEnv(gym.Wrapper):
def __init__(self, env, name):
super().__init__(env)
self._name = name
@property
def name(self):
return copy.deepcopy(self._name)
@property
def unwrapped(self):
return self
def reset(self, **kwargs):
return self.env.reset(**kwargs)
def step(self, action):
return self.env.step(action)
| 33.586957 | 82 | 0.741748 | 394 | 0.253539 | 0 | 0 | 124 | 0.079794 | 0 | 0 | 1,131 | 0.727799 |
a05f0813319812f0e9d30a1ddfef3dd56345c333 | 9,265 | py | Python | scirex/metrics/paired_bootstrap.py | viswavi/SciREX | 8e4b402e95d438c92eeecee315d389903a963b8d | [
"Apache-2.0"
] | null | null | null | scirex/metrics/paired_bootstrap.py | viswavi/SciREX | 8e4b402e95d438c92eeecee315d389903a963b8d | [
"Apache-2.0"
] | null | null | null | scirex/metrics/paired_bootstrap.py | viswavi/SciREX | 8e4b402e95d438c92eeecee315d389903a963b8d | [
"Apache-2.0"
] | null | null | null | # Adapted from Graham Neubig's Paired Bootstrap script
# https://github.com/neubig/util-scripts/blob/master/paired-bootstrap.py
import numpy as np
from sklearn.metrics import f1_score, precision_score, recall_score
from tqdm import tqdm
EVAL_TYPE_ACC = "acc"
EVAL_TYPE_BLEU = "bleu"
EVAL_TYPE_BLEU_DETOK = "bleu_detok"
EVAL_TYPE_PEARSON = "pearson"
EVAL_TYPE_F1 = "f1"
EVAL_TYPE_MACRO_F1 = "macro-f1"
EVAL_TYPE_PREC = "precision"
EVAL_TYPE_REC = "recall"
EVAL_TYPE_AVG = "avg"
EVAL_TYPES = [EVAL_TYPE_ACC,
EVAL_TYPE_BLEU,
EVAL_TYPE_BLEU_DETOK,
EVAL_TYPE_PEARSON,
EVAL_TYPE_F1,
EVAL_TYPE_AVG,
EVAL_TYPE_PREC,
EVAL_TYPE_REC]
def eval_preproc(data, eval_type='acc'):
''' Preprocess into the appropriate format for a particular evaluation type '''
if type(data) == str:
data = data.strip()
if eval_type == EVAL_TYPE_BLEU:
data = data.split()
elif eval_type == EVAL_TYPE_PEARSON:
data = float(data)
elif eval_type in [EVAL_TYPE_F1, EVAL_TYPE_MACRO_F1, EVAL_TYPE_PREC, EVAL_TYPE_REC]:
data = float(data)
elif eval_type == EVAL_TYPE_AVG:
data = float(data)
return data
def eval_measure(gold, sys, eval_type='acc'):
''' Evaluation measure
This takes in gold labels and system outputs and evaluates their
accuracy. It currently supports:
* Accuracy (acc), percentage of labels that match
* Pearson's correlation coefficient (pearson)
* BLEU score (bleu)
* BLEU_detok, on detokenized references and translations, with internal tokenization
:param gold: the correct labels
:param sys: the system outputs
:param eval_type: The type of evaluation to do (acc, pearson, bleu, bleu_detok)
'''
if eval_type == EVAL_TYPE_ACC:
return sum([1 if g == s else 0 for g, s in zip(gold, sys)]) / float(len(gold))
elif eval_type == EVAL_TYPE_BLEU:
import nltk
gold_wrap = [[x] for x in gold]
return nltk.translate.bleu_score.corpus_bleu(gold_wrap, sys)
elif eval_type == EVAL_TYPE_PEARSON:
return np.corrcoef([gold, sys])[0,1]
elif eval_type == EVAL_TYPE_BLEU_DETOK:
import sacrebleu
# make sure score is 0-based instead of 100-based
return sacrebleu.corpus_bleu(sys, [gold]).score / 100.
elif eval_type == EVAL_TYPE_F1:
return f1_score(gold, sys)
elif eval_type == EVAL_TYPE_MACRO_F1:
return f1_score(gold, sys, average="macro")
elif eval_type == EVAL_TYPE_PREC:
return precision_score(gold, sys)
elif eval_type == EVAL_TYPE_REC:
return recall_score(gold, sys)
elif eval_type == EVAL_TYPE_AVG:
return np.mean(sys)
else:
raise NotImplementedError('Unknown eval type in eval_measure: %s' % eval_type)
def eval_with_paired_bootstrap(gold, sys1, sys2,
num_samples=10000, sample_ratio=0.5,
eval_type='acc',
return_results=False):
''' Evaluate with paired boostrap
This compares two systems, performing a significance tests with
paired bootstrap resampling to compare the accuracy of the two systems.
:param gold: The correct labels
:param sys1: The output of system 1
:param sys2: The output of system 2
:param num_samples: The number of bootstrap samples to take
:param sample_ratio: The ratio of samples to take every time
:param eval_type: The type of evaluation to do (acc, pearson, bleu, bleu_detok)
'''
assert(len(gold) == len(sys1))
assert(len(gold) == len(sys2))
# Preprocess the data appropriately for they type of eval
gold = [eval_preproc(x, eval_type) for x in gold]
sys1 = [eval_preproc(x, eval_type) for x in sys1]
sys2 = [eval_preproc(x, eval_type) for x in sys2]
sys1_scores = []
sys2_scores = []
wins = [0, 0, 0]
n = len(gold)
ids = list(range(n))
for _ in tqdm(range(num_samples)):
# Subsample the gold and system outputs
np.random.shuffle(ids)
reduced_ids = ids[:int(len(ids)*sample_ratio)]
reduced_gold = [gold[i] for i in reduced_ids]
reduced_sys1 = [sys1[i] for i in reduced_ids]
reduced_sys2 = [sys2[i] for i in reduced_ids]
# Calculate accuracy on the reduced sample and save stats
sys1_score = eval_measure(reduced_gold, reduced_sys1, eval_type=eval_type)
sys2_score = eval_measure(reduced_gold, reduced_sys2, eval_type=eval_type)
if sys1_score > sys2_score:
wins[0] += 1
elif sys1_score < sys2_score:
wins[1] += 1
else:
wins[2] += 1
sys1_scores.append(sys1_score)
sys2_scores.append(sys2_score)
# Print win stats
wins = [x/float(num_samples) for x in wins]
print('Win ratio: sys1=%.3f, sys2=%.3f, tie=%.3f' % (wins[0], wins[1], wins[2]))
if wins[0] > wins[1]:
print('(sys1 is superior with p value p=%.10f)\n' % (1-wins[0]))
elif wins[1] > wins[0]:
print('(sys2 is superior with p value p=%.10f)\n' % (1-wins[1]))
# Print system stats
sys1_scores.sort()
sys2_scores.sort()
print('sys1 mean=%.3f, median=%.3f, 95%% confidence interval=[%.3f, %.3f]' %
(np.mean(sys1_scores), np.median(sys1_scores), sys1_scores[int(num_samples * 0.025)], sys1_scores[int(num_samples * 0.975)]))
print('sys2 mean=%.3f, median=%.3f, 95%% confidence interval=[%.3f, %.3f]' %
(np.mean(sys2_scores), np.median(sys2_scores), sys2_scores[int(num_samples * 0.025)], sys2_scores[int(num_samples * 0.975)]))
if return_results:
sys1_summary = (np.mean(sys1_scores), (sys1_scores[int(num_samples * 0.025)], sys1_scores[int(num_samples * 0.975)]))
sys2_summary = (np.mean(sys2_scores), (sys2_scores[int(num_samples * 0.025)], sys2_scores[int(num_samples * 0.975)]))
p_value_lose = 1-wins[0]
p_value_win = 1-wins[1]
return sys1_summary, sys2_summary, p_value_lose, p_value_win
def eval_with_hierarchical_paired_bootstrap(gold, sys1_list, sys2_list,
num_samples=10000, sample_ratio=0.5,
eval_type='acc',
return_results=False):
''' Evaluate with a hierarchical paired boostrap
This compares two systems, performing a significance tests with
paired bootstrap resampling to compare the accuracy of the two systems, with
two-level sampling: first we sample a model, then we sample data to evaluate
it on.
:param gold: The correct labels
:param sys1: The output of system 1
:param sys2: The output of system 2
:param num_samples: The number of bootstrap samples to take
:param sample_ratio: The ratio of samples to take every time
:param eval_type: The type of evaluation to do (acc, pearson, bleu, bleu_detok)
'''
for sys1 in sys1_list:
assert(len(gold) == len(sys1))
for sys2 in sys2_list:
assert(len(gold) == len(sys2))
# Preprocess the data appropriately for they type of eval
gold = [eval_preproc(x, eval_type) for x in gold]
sys1_list = [[eval_preproc(x, eval_type) for x in sys1] for sys1 in sys1_list]
sys2_list = [[eval_preproc(x, eval_type) for x in sys2] for sys2 in sys2_list]
sys1_scores = []
sys2_scores = []
wins = [0, 0, 0]
n = len(gold)
ids = list(range(n))
for _ in tqdm(range(num_samples)):
# Subsample the gold and system outputs
np.random.shuffle(ids)
reduced_ids = ids[:int(len(ids)*sample_ratio)]
sys1_idx = np.random.choice(list(range(len(sys1_list))))
sys1 = sys1_list[sys1_idx]
sys2_idx = np.random.choice(list(range(len(sys2_list))))
sys2 = sys2_list[sys2_idx]
reduced_gold = [gold[i] for i in reduced_ids]
reduced_sys1 = [sys1[i] for i in reduced_ids]
reduced_sys2 = [sys2[i] for i in reduced_ids]
# Calculate accuracy on the reduced sample and save stats
sys1_score = eval_measure(reduced_gold, reduced_sys1, eval_type=eval_type)
sys2_score = eval_measure(reduced_gold, reduced_sys2, eval_type=eval_type)
if sys1_score > sys2_score:
wins[0] += 1
elif sys1_score < sys2_score:
wins[1] += 1
else:
wins[2] += 1
sys1_scores.append(sys1_score)
sys2_scores.append(sys2_score)
# Print win stats
wins = [x/float(num_samples) for x in wins]
print('Win ratio: sys1=%.3f, sys2=%.3f, tie=%.3f' % (wins[0], wins[1], wins[2]))
if wins[0] > wins[1]:
print('(sys1 is superior with p value p=%.10f)\n' % (1-wins[0]))
elif wins[1] > wins[0]:
print('(sys2 is superior with p value p=%.10f)\n' % (1-wins[1]))
# Print system stats
sys1_scores.sort()
sys2_scores.sort()
print('sys1 mean=%.3f, median=%.3f, 95%% confidence interval=[%.3f, %.3f]' %
(np.mean(sys1_scores), np.median(sys1_scores), sys1_scores[int(num_samples * 0.025)], sys1_scores[int(num_samples * 0.975)]))
print('sys2 mean=%.3f, median=%.3f, 95%% confidence interval=[%.3f, %.3f]' %
(np.mean(sys2_scores), np.median(sys2_scores), sys2_scores[int(num_samples * 0.025)], sys2_scores[int(num_samples * 0.975)]))
if return_results:
sys1_summary = (np.mean(sys1_scores), (sys1_scores[int(num_samples * 0.025)], sys1_scores[int(num_samples * 0.975)]))
sys2_summary = (np.mean(sys2_scores), (sys2_scores[int(num_samples * 0.025)], sys2_scores[int(num_samples * 0.975)]))
p_value_lose = 1-wins[0]
p_value_win = 1-wins[1]
return sys1_summary, sys2_summary, p_value_lose, p_value_win
| 40.814978 | 135 | 0.681705 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,894 | 0.312358 |
a05f5a0fc89824667b995e5851cdb833729517df | 970 | py | Python | mypage/paginator.py | kirill-ivanov-a/mypage-flask | b803dfdf3d38d32879d81b8682d51e387c8f709f | [
"MIT"
] | null | null | null | mypage/paginator.py | kirill-ivanov-a/mypage-flask | b803dfdf3d38d32879d81b8682d51e387c8f709f | [
"MIT"
] | null | null | null | mypage/paginator.py | kirill-ivanov-a/mypage-flask | b803dfdf3d38d32879d81b8682d51e387c8f709f | [
"MIT"
] | null | null | null | from paginate_sqlalchemy import SqlalchemyOrmPage
class Paginator(SqlalchemyOrmPage):
def __init__(self, *args, radius=3, **kwargs):
super().__init__(*args, **kwargs)
self.radius = radius
self.page_range = self._make_page_range()
def _make_page_range(self):
if self.page_count < self.radius:
return list(p for p in range(1, self.page_count + 1))
if self.page - self.radius > 2:
page_range = [self.first_page, None]
page_range += list(p for p in range(self.page - self.radius, self.page))
else:
page_range = list(p for p in range(1, self.page))
if self.page + self.radius < self.last_page - 1:
page_range += list(p for p in range(self.page, self.page + self.radius + 1))
page_range += [None, self.last_page]
else:
page_range += list(p for p in range(self.page, self.last_page + 1))
return page_range
| 34.642857 | 88 | 0.609278 | 917 | 0.945361 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
a060c7c4400126644e1d48eb927d2de5fe556729 | 4,915 | py | Python | models/deeplab_v2.py | iamsofancyyoualreadyknow/IHC-based-labels-generation-and-semantic-segmentation-for-lung-cancer | 57904544c6d6b43dcd5937afeb474c0a47456d98 | [
"MIT"
] | null | null | null | models/deeplab_v2.py | iamsofancyyoualreadyknow/IHC-based-labels-generation-and-semantic-segmentation-for-lung-cancer | 57904544c6d6b43dcd5937afeb474c0a47456d98 | [
"MIT"
] | null | null | null | models/deeplab_v2.py | iamsofancyyoualreadyknow/IHC-based-labels-generation-and-semantic-segmentation-for-lung-cancer | 57904544c6d6b43dcd5937afeb474c0a47456d98 | [
"MIT"
] | null | null | null | import tensorflow as tf
# import slim
# conv layers
layers = tf.contrib.layers
arg_scope = tf.contrib.framework.arg_scope
def vgg_conv_dilation(inputs, weight_decay=0.0005):
with arg_scope([layers.convolution2d, layers.max_pool2d], padding='SAME'):
with arg_scope([layers.convolution2d], rate=1,
weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
weights_regularizer=layers.l2_regularizer(weight_decay)):
net = layers.convolution2d(inputs, 64, [3, 3], scope='vgg_16/conv1/conv1_1' )
net = layers.convolution2d(net, 64, [3, 3], scope='vgg_16/conv1/conv1_2' )
net = layers.max_pool2d(net, [3, 3], stride=[2,2], scope='vgg_16/pool1')
net = layers.convolution2d(net, 128, [3, 3], scope='vgg_16/conv2/conv2_1' )
net = layers.convolution2d(net, 128, [3, 3], scope='vgg_16/conv2/conv2_2' )
net = layers.max_pool2d(net, [3, 3], stride=[2,2], scope='vgg_16/pool2')
net = layers.convolution2d(net, 256, [3, 3], scope='vgg_16/conv3/conv3_1' )
net = layers.convolution2d(net, 256, [3, 3], scope='vgg_16/conv3/conv3_2' )
net = layers.convolution2d(net, 256, [3, 3], scope='vgg_16/conv3/conv3_3' )
net = layers.max_pool2d(net, [3, 3], stride=[2,2], scope='vgg_16/pool3')
net = layers.convolution2d(net, 512, [3, 3], scope='vgg_16/conv4/conv4_1' )
net = layers.convolution2d(net, 512, [3, 3], scope='vgg_16/conv4/conv4_2' )
net = layers.convolution2d(net, 512, [3, 3], scope='vgg_16/conv4/conv4_3' )
net = layers.max_pool2d(net, [3, 3], stride=[1,1], scope='vgg_16/pool4')
net = layers.convolution2d(net, 512, [3, 3], rate=2, scope='vgg_16/conv5/conv5_1' )
net = layers.convolution2d(net, 512, [3, 3], rate=2, scope='vgg_16/conv5/conv5_2' )
net = layers.convolution2d(net, 512, [3, 3], rate=2, scope='vgg_16/conv5/conv5_3' )
return net
def deeplab_top(inputs, num_classes=34, dropout=False, weight_decay=0.0005):
with arg_scope([layers.convolution2d, layers.max_pool2d], padding='SAME'):
with arg_scope([layers.convolution2d], rate=1,
weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
weights_regularizer=layers.l2_regularizer(weight_decay),
biases_initializer=tf.constant_initializer(value=0, dtype=tf.float32),
biases_regularizer=layers.l2_regularizer(weight_decay)):
with arg_scope([layers.dropout], keep_prob = 0.5, is_training=dropout):
pool5 = layers.max_pool2d(inputs, [3, 3], scope='vgg_16/pool5')
#fc61: dilation = 6
net = layers.convolution2d(pool5, 1024, [3, 3], rate=6, scope='fc6_1')
net = layers.dropout(net, scope='drop6_1')
#fc71: dilation = 1
net = layers.convolution2d(net, 1024, [1, 1], scope='fc7_1')
net = layers.dropout(net, scope='drop7_1')
#fc81:
fc8_1 = layers.convolution2d(net, num_classes, [1, 1], scope='fc8_1')
#fc62: dilation = 12
net = layers.convolution2d(pool5, 1024, [3, 3], rate=12, scope='fc6_2')
net = layers.dropout(net, scope='drop6_2')
#fc72: dilation = 1
net = layers.convolution2d(net, 1024, [1, 1], scope='fc7_2')
net = layers.dropout(net, scope='drop7_2')
#fc82
fc8_2 = layers.convolution2d(net, num_classes, [1, 1], scope='fc8_2')
#fc63: dilation = 18
net = layers.convolution2d(pool5, 1024, [3, 3], rate=18, scope='fc6_3')
net = layers.dropout(net, scope='drop6_3')
#fc73: dilation = 1
net = layers.convolution2d(net, 1024, [1, 1], scope='fc7_3')
net = layers.dropout(net, scope='drop7_3')
#fc83:
fc8_3 = layers.convolution2d(net, num_classes, [1, 1], scope='fc8_3')
#fc64: dilation = 24
net = layers.convolution2d(pool5, 1024, [3, 3], rate=24, scope='fc6_4')
net = layers.dropout(net, scope='drop6_4')
#fc74: dilation = 1
net = layers.convolution2d(net, 1024, [1, 1], scope='fc7_4')
net = layers.dropout(net, scope='drop7_4')
#fc84:
fc8_4 = layers.convolution2d(net, num_classes, [1, 1], scope='fc8_4')
net = tf.add_n([fc8_1, fc8_2, fc8_3, fc8_4])
return net
def deeplab_v2(inputs, num_classes=34, dropout=False, weight_decay=0.0005):
feature = vgg_conv_dilation(inputs)
seg = deeplab_top(feature, num_classes=num_classes, dropout=dropout, weight_decay=weight_decay)
return seg | 56.494253 | 99 | 0.585554 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 728 | 0.148118 |
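# A minimal smoke-test sketch (TF1-style placeholder assumed, illustrative only):
#   x = tf.placeholder(tf.float32, [1, 321, 321, 3])
#   logits = deeplab_v2(x, num_classes=34)
# The four dilated fc6 branches (rates 6/12/18/24) are summed with tf.add_n into
# a single [1, H', W', 34] score map, where H' x W' is the downsampled spatial size.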
a06437850e2dae1448abd64b704f6b42218ef386 | 968 | py | Python | Python/logging.py | saurabhcommand/Hello-world | 647bad9da901a52d455f05ecc37c6823c22dc77e | [
"MIT"
] | 1,428 | 2018-10-03T15:15:17.000Z | 2019-03-31T18:38:36.000Z | Python/logging.py | saurabhcommand/Hello-world | 647bad9da901a52d455f05ecc37c6823c22dc77e | [
"MIT"
] | 1,162 | 2018-10-03T15:05:49.000Z | 2018-10-18T14:17:52.000Z | Python/logging.py | saurabhcommand/Hello-world | 647bad9da901a52d455f05ecc37c6823c22dc77e | [
"MIT"
] | 3,909 | 2018-10-03T15:07:19.000Z | 2019-03-31T18:39:08.000Z | import datetime
# Log parm(File_name)
class logging_class:
def __init__(self, log_file_name, verbose):
self.log_file_name = log_file_name
self.stream = open(log_file_name, "a")
self.verbose = verbose
# Write a line in the log file
def create_log(self, to_add):
if (to_add != "\n"):
self.stream.write(str(datetime.datetime.now().replace(microsecond=0)))
if (self.verbose is True):
print (str(datetime.datetime.now().replace(microsecond=0)), end = " ")
if (self.verbose is True):
print(" ", end = " ")
print(to_add)
self.stream.write(" ")
self.stream.write(to_add)
self.stream.write("\n")
#add log lines, change behevior if tab or str
def add_logging(self, to_add):
if (type(to_add) == str):
self.create_log(to_add)
else:
for ii in to_add:
self.create_log(ii)
| 31.225806 | 86 | 0.572314 | 927 | 0.957645 | 0 | 0 | 0 | 0 | 0 | 0 | 121 | 0.125 |
a064737d7eb5496d755ad0d39ca50e2c9279c4d9 | 10,541 | py | Python | tfep/utils/cli/tool.py | andrrizzi/tfep | a98ec870007a2ceb72cab147d9e0dfffb7dc8849 | [
"MIT"
] | 5 | 2021-07-30T16:01:46.000Z | 2021-12-14T15:24:29.000Z | tfep/utils/cli/tool.py | andrrizzi/tfep | a98ec870007a2ceb72cab147d9e0dfffb7dc8849 | [
"MIT"
] | 2 | 2021-08-13T12:19:13.000Z | 2021-10-06T08:04:18.000Z | tfep/utils/cli/tool.py | andrrizzi/tfep | a98ec870007a2ceb72cab147d9e0dfffb7dc8849 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# =============================================================================
# MODULE DOCSTRING
# =============================================================================
"""
Utility classes to wrap command line tools.
The module provides a class :class:`.CLITool` that provides boilerplate code to
wrap command line tools and make them compatible to :class:`~tfep.utils.cli.Launcher`.
"""
# =============================================================================
# GLOBAL IMPORTS
# =============================================================================
import abc
import inspect
import os
# =============================================================================
# CLITOOL
# =============================================================================
class CLITool:
"""Command line tool wrapper.
The class mainly fulfills two roles:
1. Encapsulates input and outputs of a command and provide a command
specification that can be understood by :class:`tfep.utils.cli.Launcher`.
2. Converts and sanitizes Python types to string command line parameters.
3. Provides CLI interfaces with readable parameter names avoiding abbreviations
that makes the code harder to read.
Wrapping a new command line tool requires creating a new class that inherits
from ``CLITool`` and defines its arguments using the options descriptors such
as :class:`.AbsolutePathOption` and :class:`.FlagOption` (see examples below).
The constructor takes as input ordered and keyword arguments. Keyword arguments
must match those defined with the option descriptors when the wrapper is declared.
Ordered arguments must be strings are appended to the command as strings.
The path to the executable (or simply the executable name if it is in the
system path) can be set globally through the class variable ``EXECUTABLE_PATH``,
or it can be specific to the command instance as specified in the constructor.
To associate a command to a particular subprogram, you can use the
``SUBPROGRAM`` class variable. E.g., for the gmx program in the GROMACS suite,
creating ``CLITool`` that prepare a ``gmx mdrun ...`` command requires
setting ``SUBPROGRAM = 'mdrun'``.
Once defined and instantiated, a command can be run either using a
:class:`~tfep.utils.cli.Launcher` class or the standard module ``subprocess``
after building the command with the :func:`.CLITool.to_subprocess` method.
Parameters
----------
executable_path : str, optional
The executable path associated to the instance of the command. If this
is not specified, the ``EXECUTABLE_PATH`` class variable is used instead.
See Also
--------
`tfep.utils.cli.Launcher` : Launch and run commands.
Examples
--------
Suppose we want to create a wrapper for a subset of the command ``grep``
that supports reading the pattern from a file. We can create a wrapper
with the following syntax
>>> class MyGrep(CLITool):
... EXECUTABLE_PATH = 'grep'
... patterns_file_path = KeyValueOption('-f')
... max_count = KeyValueOption('-m')
... print_version = FlagOption('-v')
You can then create an command instance specifying the options. For example,
:class:`.FlagOption`s takes either ``True`` or ``False``.
>>> my_grep_cmd = MyGrep(print_version=True)
You can then pass the command to a :class:`~tfep.utils.cli.Launcher` or use
the :func:`.CLITool.to_subprocess` method can be used to convert the command
to a sanitized ``list`` that can be executed by the Python standard module
``subprocess``.
>>> my_grep_cmd.to_subprocess()
['grep', '-v']
Another example more complex example
>>> my_grep_cmd = MyGrep('input.txt', patterns_file_path='my_patterns.txt', max_count=3)
>>> my_grep_cmd.to_subprocess()
['grep', '-m', '3', '-f', 'my_patterns.txt', 'input.txt']
"""
SUBPROGRAM = None
def __init__(self, *args, executable_path=None, **kwargs):
self.args = args
self._executable_path = executable_path
# Check that keyword arguments match.
options_descriptions = self._get_defined_options()
for k, v in kwargs.items():
if k not in options_descriptions:
raise AttributeError('Undefined CLI option ' + k)
# Set the value.
setattr(self, k, v)
@property
def executable_path(self):
"""The path to the command executable to run."""
if self._executable_path is None:
return self.EXECUTABLE_PATH
return self._executable_path
@executable_path.setter
def executable_path(self, value):
self._executable_path = value
def to_subprocess(self):
"""Convert the command to a list that can be run with the ``subprocess`` module.
Returns
-------
subprocess_cmd : List[str]
The command in subprocess format. For example ``['grep', '-v']``.
"""
subprocess_cmd = [self.executable_path]
# Add subprogram
if self.SUBPROGRAM is not None:
subprocess_cmd.append(self.SUBPROGRAM)
# Append all options.
for option_descriptor in self._get_defined_options().values():
subprocess_cmd.extend(option_descriptor.to_subprocess(self))
# Append all ordered args.
subprocess_cmd.extend([str(x) for x in self.args])
return subprocess_cmd
@classmethod
def _get_defined_options(cls):
"""Return a dict attribute_name -> description object for all CLIOptions defined."""
options_descriptors = {}
for attribute_name, descriptor_object in inspect.getmembers(cls, inspect.isdatadescriptor):
if isinstance(descriptor_object, CLIOption):
options_descriptors[attribute_name] = descriptor_object
return options_descriptors
# =============================================================================
# CLI OPTIONS
# =============================================================================
class CLIOption(abc.ABC):
"""Generic descriptor for command line option.
This must be inherited by all options for :class:``.CLITool`` to automatically
discover the option. To implement this, it is sufficient to provide an
implementation of the ``to_subprocess()`` method, which takes the object
instance as input and outputs a list with the strings to append to the
command in ``subprocess`` format.
Parameters
----------
option_name : str
The name of the option in the command line interface (e.g., ``'-o'``).
"""
def __init__(self, option_name):
self.option_name = option_name
def __set_name__(self, owner_type, name):
self.public_name = name
self.private_name = '_' + name
def __get__(self, owner_instance, owner_type):
if owner_instance is None:
# This was call from the owner class. Return the descriptor object.
return self
return getattr(owner_instance, self.private_name, None)
def __set__(self, owner_instance, value):
setattr(owner_instance, self.private_name, value)
@abc.abstractmethod
def to_subprocess(self, owner_instance):
"""Return the strings to append to the command in ``subprocess`` format.
For example, it might return something like ``['-o', 'path_to_my_file.txt']``.
"""
pass
class KeyValueOption(CLIOption):
"""A generic command line key-value option.
This descriptor simply converts the value to string.
Parameters
----------
option_name : str
The name of the option in the command line interface (e.g., ``'-o'``).
"""
def to_subprocess(self, owner_instance):
"""Implements ``CLIOption.to_subprocess()``."""
value = getattr(owner_instance, self.private_name, None)
if value is None:
return []
return [self.option_name, str(value)]
class AbsolutePathOption(KeyValueOption):
"""A file or directory path that is converted to an absolute path when instantiated.
Relative file paths change change with the current working directory. This
option type converts relative paths to absolute paths when the option is
assigned so that it refers to the same file even if the working directory
is changed.
Parameters
----------
option_name : str
The name of the option in the command line interface (e.g., ``'-o'``).
"""
def __set__(self, owner_instance, value):
abs_path = os.path.abspath(value)
setattr(owner_instance, self.private_name, abs_path)
class FlagOption(CLIOption):
"""A generic command line flag option.
This descriptor accepts only ``True``/``False`` or ``None`` and it specifies
CLI flag parameters (i.e., that do not take a value). If ``None``, it is not
passed to the command. If ``False``, its behavior depends on the
``prepend_no_to_false`` parameter (see below).
Parameters
----------
option_name : str
The name of the option in the command line interface (e.g., ``'-o'``).
prepend_to_false : str, optional
If given and the descriptor is ``False``, this string (typically ``'no'``)
is inserted into the flag passed to the command right after the dash
character(s).
"""
def __init__(self, option_name, prepend_to_false=None):
super().__init__(option_name)
self.prepend_to_false = prepend_to_false
def __set__(self, owner_instance, value):
if not isinstance(value, bool) and value is not None:
raise ValueError(self.public_name + ' must be either a boolean or None')
setattr(owner_instance, self.private_name, value)
def to_subprocess(self, owner_instance):
"""Implements ``CLIOption.to_subprocess()``."""
value = getattr(owner_instance, self.private_name, None)
if (value is None or (
(not value and self.prepend_to_false is None))):
return []
if value is True:
return [self.option_name]
# value is False and self.prepend_to_false is not None.
if self.option_name.startswith('--'):
n_dashes = 2
else:
n_dashes = 1
option_name = self.option_name[:n_dashes] + self.prepend_to_false + self.option_name[n_dashes:]
return [option_name]
| 36.223368 | 103 | 0.626411 | 9,540 | 0.905037 | 0 | 0 | 1,010 | 0.095816 | 0 | 0 | 6,969 | 0.661133 |
a06853a9eca27d640f292fe2b2ffaac04fbafad7 | 1,128 | py | Python | invite2app/lib/facebook_auth.py | andresgz/invite2app | 3531db131c4f0646ae01b511971d6642128361e0 | [
"BSD-3-Clause"
] | null | null | null | invite2app/lib/facebook_auth.py | andresgz/invite2app | 3531db131c4f0646ae01b511971d6642128361e0 | [
"BSD-3-Clause"
] | null | null | null | invite2app/lib/facebook_auth.py | andresgz/invite2app | 3531db131c4f0646ae01b511971d6642128361e0 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import facebook
from allauth.socialaccount.models import SocialToken
from django.core.exceptions import ObjectDoesNotExist
class FacebookAuth(object):
"""
Interface bettween Django AllAuth and Facebook SDK
"""
def __init__(self, user_id):
super(FacebookAuth, self).__init__()
# Only integers are allowed
if not isinstance(user_id, (int, long)):
raise TypeError("An Integer is expected")
self.user_id = user_id
def get_graph(self):
"""
Returns a Graph object to be used on the Facebook SDK.
"""
return facebook.GraphAPI(access_token=self.get_access_token())
def get_access_token(self):
"""
Get a valid token for the user from AllAuth
"""
try:
token = SocialToken.objects.get(
account__user_id=self.user_id).token
except ObjectDoesNotExist:
raise NotValidFacebookAccount("A token has not been found")
return token
class NotValidFacebookAccount(Exception):
"""
NotValidAccount Exception.
"""
pass
| 27.512195 | 71 | 0.638298 | 975 | 0.864362 | 0 | 0 | 0 | 0 | 0 | 0 | 355 | 0.314716 |
a068b901b478d011dc44a977f7e4cc0f17632eaf | 11,386 | py | Python | visualize_high_LOO/visualize_high_LOO_cifar_norb.py | mkuchnik/Efficient_Augmentation | a82190c02509682c34f2df782fb58f8ffd3b11da | [
"MIT"
] | 11 | 2019-05-09T22:43:29.000Z | 2021-01-13T22:26:48.000Z | visualize_high_LOO/visualize_high_LOO_cifar_norb.py | mkuchnik/Efficient_Augmentation | a82190c02509682c34f2df782fb58f8ffd3b11da | [
"MIT"
] | 1 | 2020-10-07T14:03:47.000Z | 2020-10-07T14:03:47.000Z | visualize_high_LOO/visualize_high_LOO_cifar_norb.py | mkuchnik/Efficient_Augmentation | a82190c02509682c34f2df782fb58f8ffd3b11da | [
"MIT"
] | 6 | 2019-03-05T02:26:01.000Z | 2021-05-11T14:35:41.000Z | import pprint
import time
import keras
import numpy as np
import joblib
import dataset_loaders
import selection_policy
import augmentations
import experiments
import experiments_util
import featurized_classifiers
import visualization_util
import matplotlib.pyplot as plt
mem = joblib.Memory(cachedir="./cache", verbose=1)
def run_test(classes,
rounds,
n_aug_sample_points,
n_train,
n_jobs,
cv,
use_GPU,
batch_size,
dataset,
aug_transformation,
aug_kw_args,
logistic_reg__C,
CNN_extractor_max_iter,
use_loss,
experiment_configs,
results_filename,
):
run_params = {
"classes": classes,
"rounds": rounds,
"n_aug_sample_points": n_aug_sample_points,
"n_train": n_train,
"n_jobs": n_jobs,
"cv": cv,
"use_GPU": use_GPU,
"batch_size": batch_size,
"dataset": dataset.name,
"aug_transformation": aug_transformation.name,
"aug_kw_args": aug_kw_args,
"logistic_reg__C": logistic_reg__C,
"CNN_extractor_max_iter": CNN_extractor_max_iter,
"use_loss": use_loss,
"experiment_configs": experiment_configs,
"results_filename": results_filename,
}
pprint.pprint(run_params)
assert n_aug_sample_points
(x_train, y_train), (x_test, y_test) = experiments_util.prepare_dataset(
dataset,
classes,
n_train,
)
print("Train class breakdown: {}".format(
np.unique(y_train, return_counts=True))
)
print("Test class breakdown: {}".format(
np.unique(y_test, return_counts=True))
)
aug_f = augmentations.get_transformation(aug_transformation)
(orig_and_auged_x_train,
orig_and_auged_y_train,
orig_and_auged_idxs_train) = \
experiments_util.poison_dataset(x_train,
y_train,
aug_f,
aug_kw_args)
(orig_and_auged_x_test,
orig_and_auged_y_test,
orig_and_auged_idxs_test) = \
experiments_util.poison_dataset(x_test,
y_test,
aug_f,
aug_kw_args)
print("x_train", x_train.shape)
print("orig_and_auged_x_train", orig_and_auged_x_train.shape)
feature_clf = featurized_classifiers.build_featurized_ResNet_feature_clf(
CNN_extractor_max_iter,
use_GPU,
batch_size)
@mem.cache
def transform_features(x, y):
return feature_clf.fit_transform(x, y=y)
featurized_x_train = transform_features(x=x_train, y=y_train)
featurized_y_train = y_train
featurized_x_test = transform_features(x=x_test, y=y_test)
featurized_y_test = y_test
orig_and_auged_featurized_x_train = transform_features(x=orig_and_auged_x_train,
y=orig_and_auged_y_train)
orig_and_auged_featurized_y_train = orig_and_auged_y_train
orig_and_auged_featurized_x_train_to_source_idxs = orig_and_auged_idxs_train
orig_and_auged_featurized_x_test = transform_features(x=orig_and_auged_x_test,
y=orig_and_auged_y_test)
orig_and_auged_featurized_y_test = orig_and_auged_y_test
orig_and_auged_featurized_x_test_to_source_idxs = orig_and_auged_idxs_test
clf = featurized_classifiers.build_logistic_reg_clf(
logistic_reg__C,
cv,
)
(no_aug_no_poison_acc,
poisoned_acc,
all_aug_train_poisoned_acc,
aug_scores,
after_aug_scores,
best_params,
training_total_time) = experiments_util.train_and_score_clf(
clf,
featurized_x_train,
y_train,
featurized_x_test,
y_test,
orig_and_auged_featurized_x_train,
orig_and_auged_featurized_y_train,
orig_and_auged_featurized_x_test,
orig_and_auged_featurized_y_test,
use_loss,
cv,
)
training_end_time = time.time()
img_ranks = np.argsort(np.abs(aug_scores))
top_n = 100
good_imgs = x_train[img_ranks][-top_n:]
bad_imgs = x_train[img_ranks][:top_n]
print("scores", aug_scores)
print("scores", aug_scores.shape)
print("ranks", img_ranks)
print("ranks", img_ranks.shape)
print("good", good_imgs.shape)
print("bad", bad_imgs.shape)
figures = {"{}".format(i): img for i, img in enumerate(good_imgs)}
assert len(figures) == top_n
visualization_util.plot_figures(figures, nrows=10, ncols=10)
plt.savefig("good_images.pdf", bbox_inches="tight", pad_inches=0)
plt.show()
figures = {"{}".format(i): img for i, img in enumerate(bad_imgs)}
assert len(figures) == top_n
visualization_util.plot_figures(figures, nrows=10, ncols=10)
plt.savefig("bad_images.pdf", bbox_inches="tight", pad_inches=0)
plt.show()
def main():
rounds = 5
#rounds = 3
n_aug_sample_points = [1, 10, 50, 100, 250, 500, 750, 1000]
n_train = 1000
n_jobs = 1
cv = 1
use_GPU = True
batch_size = 128
CNN_extractor_max_iter = 40
# use_loss = False
use_loss = True
    # Can use multiple values of C for cross-validation
logistic_reg__Cs = [[10], [100], [1000]]
classes_datasets = [
# ((0, 1), dataset_loaders.Dataset.NORB),
((0, 1), dataset_loaders.Dataset.CIFAR10),
]
selected_augmentations = [
#(augmentations.Image_Transformation.translate, {"mag_aug": 6}),
(augmentations.Image_Transformation.translate, {"mag_aug": 3}),
#(augmentations.Image_Transformation.rotate, {"mag_aug": 5,
# "n_rotations": 4}),
#(augmentations.Image_Transformation.crop, {"mag_augs": [2]}),
]
experiment_configs = [
("baseline", False, False),
("random_proportional", False, False),
("random_proportional", False, True),
("random_proportional", True, False),
("random_proportional", True, True),
("random_inverse_proportional", False, False),
#("random_inverse_proportional", True, False),
#("random_softmax_proportional", False, False),
#("random_softmax_proportional", False, True),
#("random_softmax_proportional", True, False),
#("random_softmax_proportional", True, True),
#("random_inverse_softmax_proportional", False, False),
#("random_inverse_softmax_proportional", True, False),
("deterministic_proportional", False, False),
("deterministic_proportional", False, True),
("deterministic_proportional", True, False),
("deterministic_proportional", True, True),
("deterministic_inverse_proportional", False, False),
("deterministic_inverse_proportional", True, False),
]
for logistic_reg__C in logistic_reg__Cs:
for classes, dataset in classes_datasets:
for aug_transformation, aug_kw_args in selected_augmentations:
dataset_class_str = experiments_util.classes_to_class_str(classes)
print("Class types: {}".format(dataset_class_str))
reg_str = "-".join(list(map(str, logistic_reg__C)))
results_filename = "aug_results_{}_{}_{}_{}{}".format(
dataset.name,
dataset_class_str,
aug_transformation.name,
reg_str,
"_loss" if use_loss else "",
)
run_test(classes,
rounds,
n_aug_sample_points,
n_train,
n_jobs,
cv,
use_GPU,
batch_size,
dataset,
aug_transformation,
aug_kw_args,
logistic_reg__C,
CNN_extractor_max_iter,
use_loss,
experiment_configs,
results_filename,
)
use_loss = False
    # Can use multiple values of C for cross-validation
logistic_reg__Cs = [[10], [100], [1000]]
classes_datasets = [
# ((0, 1), dataset_loaders.Dataset.NORB),
((0, 1), dataset_loaders.Dataset.CIFAR10),
]
selected_augmentations = [
#(augmentations.Image_Transformation.translate, {"mag_aug": 6}),
#(augmentations.Image_Transformation.rotate, {"mag_aug": 5,
# "n_rotations": 4}),
#(augmentations.Image_Transformation.crop, {"mag_augs": [2]}),
(augmentations.Image_Transformation.compose, {"n_aug": 10}),
]
experiment_configs = [
("baseline", False, False),
("random_proportional", False, False),
("random_proportional", False, True),
("random_proportional", True, False),
("random_proportional", True, True),
("random_inverse_proportional", False, False),
#("random_inverse_proportional", True, False),
#("random_softmax_proportional", False, False),
#("random_softmax_proportional", False, True),
#("random_softmax_proportional", True, False),
#("random_softmax_proportional", True, True),
#("random_inverse_softmax_proportional", False, False),
#("random_inverse_softmax_proportional", True, False),
("deterministic_proportional", False, False),
("deterministic_proportional", False, True),
("deterministic_proportional", True, False),
("deterministic_proportional", True, True),
("deterministic_inverse_proportional", False, False),
("deterministic_inverse_proportional", True, False),
]
for logistic_reg__C in logistic_reg__Cs:
for classes, dataset in classes_datasets:
for aug_transformation, aug_kw_args in selected_augmentations:
dataset_class_str = experiments_util.classes_to_class_str(classes)
print("Class types: {}".format(dataset_class_str))
reg_str = "-".join(list(map(str, logistic_reg__C)))
results_filename = "aug_results_{}_{}_{}_{}{}".format(
dataset.name,
dataset_class_str,
aug_transformation.name,
reg_str,
"_loss" if use_loss else "",
)
run_test(classes,
rounds,
n_aug_sample_points,
n_train,
n_jobs,
cv,
use_GPU,
batch_size,
dataset,
aug_transformation,
aug_kw_args,
logistic_reg__C,
CNN_extractor_max_iter,
use_loss,
experiment_configs,
results_filename,
)
if __name__ == "__main__":
main()
| 36.031646 | 84 | 0.584138 | 0 | 0 | 0 | 0 | 93 | 0.008168 | 0 | 0 | 2,559 | 0.22475 |
a06ffffb39b0434296021e5eee8841761190d6b0 | 370 | py | Python | backend/main_app/serializers.py | RTUITLab/Avia-Hack-2021-RealityX | ca700492d314a28e23fa837cd2dfa04dd67c167c | [
"Apache-2.0"
] | null | null | null | backend/main_app/serializers.py | RTUITLab/Avia-Hack-2021-RealityX | ca700492d314a28e23fa837cd2dfa04dd67c167c | [
"Apache-2.0"
] | null | null | null | backend/main_app/serializers.py | RTUITLab/Avia-Hack-2021-RealityX | ca700492d314a28e23fa837cd2dfa04dd67c167c | [
"Apache-2.0"
] | null | null | null | from rest_framework import serializers
from .models import *
class MessageSerializer(serializers.ModelSerializer):
created_at = serializers.DateTimeField(format="%d.%m.%Y %H:%M")
class Meta:
depth = 2
model = Message
fields = ('id', 'description', 'created_at', 'answer', 'correct', 'incorrect', 'kml', 'num_correct', 'num_incorrect')
| 30.833333 | 125 | 0.672973 | 306 | 0.827027 | 0 | 0 | 0 | 0 | 0 | 0 | 106 | 0.286486 |
a0732a0c9e8673154fd43815d09dd9d39d2b3b7d | 96 | py | Python | batchout/core/util.py | ilia-khaustov/batchout | e916a1b0bfac771e6c96d0ff2478dc3f44804a94 | [
"MIT"
] | 8 | 2019-11-05T06:54:30.000Z | 2021-12-14T14:52:24.000Z | batchout/core/util.py | ilia-khaustov/batchout | e916a1b0bfac771e6c96d0ff2478dc3f44804a94 | [
"MIT"
] | null | null | null | batchout/core/util.py | ilia-khaustov/batchout | e916a1b0bfac771e6c96d0ff2478dc3f44804a94 | [
"MIT"
] | 1 | 2020-05-05T09:31:14.000Z | 2020-05-05T09:31:14.000Z |
def to_iter(obj):
try:
return iter(obj)
except TypeError:
return obj,
| 12 | 24 | 0.552083 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
a0767541c421c26c6de084316db254c59c03c5d0 | 17,875 | py | Python | web_site/wx/lib.py | Fixdq/dj-deep | 6712a722c7f620b76f21b1ebf0b618f42eb4a58a | [
"MIT"
] | null | null | null | web_site/wx/lib.py | Fixdq/dj-deep | 6712a722c7f620b76f21b1ebf0b618f42eb4a58a | [
"MIT"
] | null | null | null | web_site/wx/lib.py | Fixdq/dj-deep | 6712a722c7f620b76f21b1ebf0b618f42eb4a58a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on 2014-5-13
@author: skycrab
"""
import json
import time
import random
import string
import urllib
import hashlib
import threading
import traceback
import xml.etree.ElementTree as ET
import logging
from urllib import request as urllib2
from functools import wraps
from .config import WxPayConf, WxPayConf_shop
try:
import pycurl
from cStringIO import StringIO
except ImportError:
pycurl = None
try:
import requests
except ImportError:
requests = None
logger = logging.getLogger('control')
def catch(func):
@wraps(func)
def wrap(*args,**kwargs):
try:
return func(*args,**kwargs)
except Exception as e:
print(traceback.format_exc())
return None
return wrap
class ObjectDict(dict):
"""Makes a dictionary behave like an object, with attribute-style access.
"""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
self[name] = value
class Singleton(object):
"""可配置单例模式"""
_instance_lock = threading.Lock()
def __new__(cls, *args, **kwargs):
if not hasattr(cls, "_instance"):
with cls._instance_lock:
if not hasattr(cls, "_instance"):
impl = cls.configure() if hasattr(cls, "configure") else cls
instance = super(Singleton, cls).__new__(impl, *args, **kwargs)
if not isinstance(instance, cls):
instance.__init__(*args, **kwargs)
cls._instance = instance
return cls._instance
class class_property(object):
""" A property can decorator class or instance
class Foo(object):
@class_property
def foo(cls):
return 42
print(Foo.foo)
print(Foo().foo)
"""
def __init__(self, func, name=None, doc=None):
self.__name__ = name or func.__name__
self.__module__ = func.__module__
self.__doc__ = doc or func.__doc__
self.func = func
def __get__(self, obj, type=None):
value = self.func(type)
return value
class BaseHttpClient(object):
include_ssl = False
def get(self, url, second=30):
if self.include_ssl:
return self.postXmlSSL(None, url, second, False, post=False)
else:
return self.postXml(None, url, second)
def postXml(self, xml, url, second=30):
if self.include_ssl:
return self.postXmlSSL(xml, url, second, cert=False)
else:
raise NotImplementedError("please implement postXML")
def postXmlSSL(self, xml, url, second=30, cert=True, cert_path=WxPayConf.SSLCERT_PATH,
key_path=WxPayConf.SSLKEY_PATH, post=True):
raise NotImplementedError("please implement postXMLSSL")
class UrllibClient(BaseHttpClient):
"""使用urlib2发送请求"""
def postXml(self, xml, url, second=30):
"""不使用证书"""
data = urllib2.urlopen(url, xml, timeout=second).read()
return data
class CurlClient(BaseHttpClient):
"""使用Curl发送请求"""
include_ssl = True
def __init__(self):
self.curl = pycurl.Curl()
self.curl.setopt(pycurl.SSL_VERIFYHOST, False)
self.curl.setopt(pycurl.SSL_VERIFYPEER, False)
# 设置不输出header
self.curl.setopt(pycurl.HEADER, False)
def postXmlSSL(self, xml, url, second=30, cert=True, cert_path=WxPayConf.SSLCERT_PATH,
key_path=WxPayConf.SSLKEY_PATH, post=True):
"""使用证书"""
self.curl.setopt(pycurl.URL, url)
self.curl.setopt(pycurl.TIMEOUT, second)
# 设置证书
# 使用证书:cert 与 key 分别属于两个.pem文件
# 默认格式为PEM,可以注释
if cert:
self.curl.setopt(pycurl.SSLKEYTYPE, "PEM")
self.curl.setopt(pycurl.SSLKEY, key_path)
self.curl.setopt(pycurl.SSLCERTTYPE, "PEM")
self.curl.setopt(pycurl.SSLCERT, cert_path)
# post提交方式
if post:
self.curl.setopt(pycurl.POST, True)
self.curl.setopt(pycurl.POSTFIELDS, xml)
buff = StringIO()
self.curl.setopt(pycurl.WRITEFUNCTION, buff.write)
self.curl.perform()
return buff.getvalue()
class RequestsClient(BaseHttpClient):
include_ssl = True
def postXmlSSL(self, xml, url, second=30, cert=True,
cert_path=WxPayConf.SSLCERT_PATH, key_path=WxPayConf.SSLKEY_PATH, post=True):
if cert:
cert_config = (cert_path, key_path)
else:
cert_config = None
if post:
# res = requests.post(url, data=xml, second=30, cert=cert_config)
res = requests.post(url, data=xml, cert=cert_config)
else:
res = requests.get(url, timeout=second, cert=cert_config)
return res.content
class HttpClient(Singleton, BaseHttpClient):
@classmethod
def configure(cls):
config_client = WxPayConf.HTTP_CLIENT
client_cls = {"urllib": UrllibClient,
"curl": CurlClient,
"requests": RequestsClient}.get(config_client.lower(), None)
if client_cls:
return client_cls
if pycurl is not None:
print("HTTP_CLIENT config error, Use 'CURL'")
return CurlClient
if requests is not None:
print("HTTP_CLIENT config error, Use 'REQUESTS'")
return RequestsClient
else:
print("HTTP_CLIENT config error, Use 'URLLIB'")
return UrllibClient
class WeixinHelper(object):
@classmethod
def checkSignature(cls, signature, timestamp, nonce):
"""微信对接签名校验"""
tmp = [WxPayConf.TOKEN, timestamp, nonce]
tmp.sort()
code = hashlib.sha1("".join(tmp)).hexdigest()
return code == signature
@classmethod
def nonceStr(cls, length):
"""随机数"""
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(length))
@classmethod
def xmlToArray(cls, xml):
"""将xml转为array"""
return dict((child.tag, child.text) for child in ET.fromstring(xml))
@classmethod
def oauth2(cls, redirect_uri, scope="snsapi_userinfo", state="STATE"):
"""网页授权获取用户信息
http://mp.weixin.qq.com/wiki/17/c0f37d5704f0b64713d5d2c37b468d75.html
"""
_OAUTH_URL = "https://open.weixin.qq.com/connect/oauth2/authorize?appid={0}&redirect_uri={1}&response_type=code&scope={2}&state={3}#wechat_redirect"
return _OAUTH_URL.format(WxPayConf.APPID, urllib.quote(redirect_uri, safe=''), scope, state)
@classmethod
def proxy(cls, redirect_uri, scope="snsapi_userinfo", state="STATE", device="mobile"):
"""网页授权获取用户信息
http://mp.weixin.qq.com/wiki/17/c0f37d5704f0b64713d5d2c37b468d75.html
"""
_PROXY_URL = "http://shop.xuemei99.com/open/proxy?redirect_uri={0}&scope={1}&state={2}&device={3}"
return _PROXY_URL.format(urllib.quote(redirect_uri, safe=''), scope, state, device)
@classmethod
def getAccessToken(cls, appid=WxPayConf.APPID, secret=WxPayConf.APPSECRET):
"""获取access_token
需要缓存access_token,由于缓存方式各种各样,不在此提供
http://mp.weixin.qq.com/wiki/11/0e4b294685f817b95cbed85ba5e82b8f.html
"""
_ACCESS_URL = "https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid={0}&secret={1}"
return HttpClient().get(_ACCESS_URL.format(appid, secret))
@classmethod
def getShopAccessToken(cls):
"""获取access_token
需要缓存access_token,由于缓存方式各种各样,不在此提供
http://mp.weixin.qq.com/wiki/11/0e4b294685f817b95cbed85ba5e82b8f.html
"""
_ACCESS_URL = "http://shop.xuemei99.com/open/access_token?format=json"
return HttpClient().get(_ACCESS_URL)
@classmethod
def getUserInfo(cls, access_token, openid, lang="zh_CN"):
"""获取用户基本信息
http://mp.weixin.qq.com/wiki/14/bb5031008f1494a59c6f71fa0f319c66.html
"""
_USER_URL = "https://api.weixin.qq.com/cgi-bin/user/info?access_token={0}&openid={1}&lang={2}"
return HttpClient().get(_USER_URL.format(access_token, openid, lang))
@classmethod
def getUserInfoBatch(cls, data, access_token):
"""批量获取用户基本信息
http://mp.weixin.qq.com/wiki/1/8a5ce6257f1d3b2afb20f83e72b72ce9.html
"""
_USER_URL = "https://api.weixin.qq.com/cgi-bin/user/info/batchget?access_token={0}"
return HttpClient().postXml(data, _USER_URL.format(access_token))
@classmethod
def getAccessTokenByCode(cls, code):
"""通过code换取网页授权access_token, 该access_token与getAccessToken()返回是不一样的
http://mp.weixin.qq.com/wiki/17/c0f37d5704f0b64713d5d2c37b468d75.html
"""
_CODEACCESS_URL = "https://api.weixin.qq.com/sns/oauth2/access_token?appid={0}&secret={1}&code={2}&grant_type=authorization_code"
url = _CODEACCESS_URL.format(WxPayConf.APPID, WxPayConf.APPSECRET, code)
return HttpClient().get(url)
@classmethod
def getShopAccessTokenByCode(cls, code):
"""通过code换取网页授权access_token, 该access_token与getAccessToken()返回是不一样的
http://mp.weixin.qq.com/wiki/17/c0f37d5704f0b64713d5d2c37b468d75.html
"""
_CODEACCESS_URL = "https://api.weixin.qq.com/sns/oauth2/access_token?appid={0}&secret={1}&code={2}&grant_type=authorization_code"
url = _CODEACCESS_URL.format(WxPayConf_shop.APPID, WxPayConf_shop.APPSECRET, code)
return HttpClient().get(url)
@classmethod
def refreshAccessToken(cls, refresh_token):
"""刷新access_token, 使用getAccessTokenByCode()返回的refresh_token刷新access_token,可获得较长时间有效期
http://mp.weixin.qq.com/wiki/17/c0f37d5704f0b64713d5d2c37b468d75.html
"""
_REFRESHTOKRN_URL = "https://api.weixin.qq.com/sns/oauth2/refresh_token?appid={0}&grant_type=refresh_token&refresh_token={1}"
return HttpClient().get(_REFRESHTOKRN_URL.format(WxPayConf.APPID, refresh_token))
@classmethod
def getSnsapiUserInfo(cls, access_token, openid, lang="zh_CN"):
"""拉取用户信息(通过网页授权)
"""
_SNSUSER_URL = "https://api.weixin.qq.com/sns/userinfo?access_token={0}&openid={1}&lang={2}"
return HttpClient().get(_SNSUSER_URL.format(access_token, openid, lang))
@classmethod
def getAccessTokenValid(cls, access_token, openid):
"""检测access_token是否过期"""
_ACCESSTOKEN_VALID_URL = 'https://api.weixin.qq.com/sns/auth?access_token={0}&openid={1}'
return HttpClient().get(_ACCESSTOKEN_VALID_URL.format(access_token, openid))
@classmethod
def getMaterialList(cls, data, access_token):
"""发送客服消息
http://mp.weixin.qq.com/wiki/1/70a29afed17f56d537c833f89be979c9.html
"""
_SEND_URL ="https://api.weixin.qq.com/cgi-bin/material/batchget_material?access_token={0}"
data = json.dumps(data, ensure_ascii=False)
return HttpClient().postXml(data, _SEND_URL.format(access_token))
@classmethod
def sendMsgAll(cls, data, access_token):
"""发送客服消息
http://mp.weixin.qq.com/wiki/1/70a29afed17f56d537c833f89be979c9.html
"""
_SEND_URL ="https://api.weixin.qq.com/cgi-bin/message/mass/sendall?access_token={0}"
data = json.dumps(data, ensure_ascii=False)
return HttpClient().postXml(data, _SEND_URL.format(access_token))
@classmethod
def send(cls, data, access_token):
"""发送客服消息
http://mp.weixin.qq.com/wiki/1/70a29afed17f56d537c833f89be979c9.html
"""
_SEND_URL ="https://api.weixin.qq.com/cgi-bin/message/custom/send?access_token={0}"
data = json.dumps(data, ensure_ascii=False)
return HttpClient().postXml(data, _SEND_URL.format(access_token))
@classmethod
def sendTemplateMsg(cls, data, access_token):
"""发送模版消息
http://mp.weixin.qq.com/wiki/1/70a29afed17f56d537c833f89be979c9.html
"""
_SEND_URL ="https://api.weixin.qq.com/cgi-bin/message/template/send?access_token={0}"
data = json.dumps(data, ensure_ascii=False)
return HttpClient().postXml(data, _SEND_URL.format(access_token))
@classmethod
def getTextMessage(cls, openid, message):
data = {
"touser": openid,
"msgtype":"text",
"text":
{
"content": message
}
}
return data
@classmethod
def sendTextMessage(cls, openid, message, access_token):
"""发送文本消息
"""
data = {
"touser": openid,
"msgtype":"text",
"text":
{
"content": message
}
}
return cls.send(data, access_token)
@classmethod
def getJsapiTicket(cls, access_token):
"""获取jsapi_tocket
"""
_JSAPI_URL = "https://api.weixin.qq.com/cgi-bin/ticket/getticket?access_token={0}&type=jsapi"
return HttpClient().get(_JSAPI_URL.format(access_token))
@classmethod
def jsapiSign(cls, jsapi_ticket, url):
"""jsapi_ticket 签名"""
sign = {
'nonceStr': cls.nonceStr(15),
'jsapi_ticket': jsapi_ticket,
'timestamp': int(time.time()),
'url': url
}
signature = '&'.join(['%s=%s' % (key.lower(), sign[key]) for key in sorted(sign)])
sign["signature"] = hashlib.sha1(signature).hexdigest()
return sign
@classmethod
def long2short(cls, long_url, access_token):
"""长链接转短链接
https://mp.weixin.qq.com/wiki/6/856aaeb492026466277ea39233dc23ee.html
"""
_SEND_URL = "https://api.weixin.qq.com/cgi-bin/shorturl?access_token={0}"
data = json.dumps({'long_url': long_url, 'action': 'long2short'}, ensure_ascii=False)
return HttpClient().postXml(data, _SEND_URL.format(access_token))
@classmethod
def getComponentAccessToken(cls, app_id, app_secret, ticket):
"""开放平台--获取component access token"""
_COMPONENT_URL = "https://api.weixin.qq.com/cgi-bin/component/api_component_token"
data = json.dumps({'component_appid': app_id, 'component_appsecret': app_secret, 'component_verify_ticket': ticket}, ensure_ascii=False)
return HttpClient().postXml(data, _COMPONENT_URL)
@classmethod
def getPreAuthCode(cls, app_id, component_access_token):
"""开放平台--获取预授权码"""
_PRE_AUTH_CODE_URL = "https://api.weixin.qq.com/cgi-bin/component/api_create_preauthcode?component_access_token={0}"
data = json.dumps({'component_appid': app_id}, ensure_ascii=False)
return HttpClient().postXml(data, _PRE_AUTH_CODE_URL.format(component_access_token))
@classmethod
def getQueryAuth(cls, app_id, code, component_access_token):
"""开放平台--使用授权码换取公众号的接口调用凭据和授权信息"""
_QUERY_AUTH_URL = "https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token={0}"
data = json.dumps({'component_appid': app_id, 'authorization_code': code}, ensure_ascii=False)
return HttpClient().postXml(data, _QUERY_AUTH_URL.format(component_access_token))
@classmethod
def getAuthorizerToken(cls, component_access_token, component_appid, authorizer_appid, authorizer_refresh_token):
"""开放平台--获取公众号接口调用token"""
_AUTH_TOKEN_URL = "https://api.weixin.qq.com/cgi-bin/component/api_authorizer_token?component_access_token={0}"
data = json.dumps({'component_appid': component_appid, 'authorizer_appid': authorizer_appid, 'authorizer_refresh_token': authorizer_refresh_token}, ensure_ascii=False)
return HttpClient().postXml(data, _AUTH_TOKEN_URL.format(component_access_token))
@classmethod
def getAuthInfo(cls, app_id, auth_app_id, component_access_token):
"""开放平台--获取授权方的公众号帐号基本信息"""
_AUTH_INFO_URL = "https://api.weixin.qq.com/cgi-bin/component/api_get_authorizer_info?component_access_token={0}"
data = json.dumps({'component_appid': app_id, 'authorizer_appid': auth_app_id}, ensure_ascii=False)
return HttpClient().postXml(data, _AUTH_INFO_URL.format(component_access_token))
@classmethod
def openOauth2(cls, app_id, c_app_id, redirect_uri, scope="snsapi_userinfo", state="STATE"):
"""开放平台--网页授权获取code
https://open.weixin.qq.com/cgi-bin/showdocument?action=dir_list&t=resource/res_list&verify=1&id=open1419318590&token=&lang=zh_CN
"""
_OPEN_OAUTH_URL = "https://open.weixin.qq.com/connect/oauth2/authorize?appid={0}&redirect_uri={1}&response_type=code&scope={2}&state={3}&component_appid={4}#wechat_redirect"
return _OPEN_OAUTH_URL.format(app_id, urllib.quote(redirect_uri, safe=''), scope, state, c_app_id)
@classmethod
def openGetAccessToken(cls, app_id, code, c_app_id, token):
"""开放平台--网页授权获取code
https://open.weixin.qq.com/cgi-bin/showdocument?action=dir_list&t=resource/res_list&verify=1&id=open1419318590&token=&lang=zh_CN
"""
_OPEN_TOKEN_URL = "https://api.weixin.qq.com/sns/oauth2/component/access_token?appid={0}&code={1}&grant_type=authorization_code&component_appid={2}&component_access_token={3}"
return HttpClient().get(_OPEN_TOKEN_URL.format(app_id, code, c_app_id, token))
@classmethod
def openRefreshAccessToken(cls, app_id, c_app_id, c_access_token, refresh_token):
"""开放平台--网页授权获取code
https://open.weixin.qq.com/cgi-bin/showdocument?action=dir_list&t=resource/res_list&verify=1&id=open1419318590&token=&lang=zh_CN
"""
_OPEN_REFRESH_URL = "https://api.weixin.qq.com/sns/oauth2/component/refresh_token?appid={0}&grant_type=refresh_token&component_appid={1}&component_access_token={2}&refresh_token={3}"
return HttpClient().get(_OPEN_REFRESH_URL.format(app_id, c_app_id, c_access_token, refresh_token))
| 38.690476 | 190 | 0.658238 | 17,820 | 0.95647 | 0 | 0 | 13,502 | 0.724706 | 0 | 0 | 6,868 | 0.368633 |
a076da17c1234915c44f55110c45dfe832f020a4 | 4,723 | py | Python | argo_dsl/tasks.py | zen-xu/argo-dsl | 76b18073c8dd850b212ccaee2a0c95f718c67db6 | [
"Apache-2.0"
] | null | null | null | argo_dsl/tasks.py | zen-xu/argo-dsl | 76b18073c8dd850b212ccaee2a0c95f718c67db6 | [
"Apache-2.0"
] | null | null | null | argo_dsl/tasks.py | zen-xu/argo-dsl | 76b18073c8dd850b212ccaee2a0c95f718c67db6 | [
"Apache-2.0"
] | null | null | null | from __future__ import annotations
from contextlib import contextmanager
from typing import TYPE_CHECKING
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Union
from argo_dsl.api.io.argoproj.workflow import v1alpha1
if TYPE_CHECKING:
from .template import Template
class _StepOutputs(str):
_name: str
_kind: str
def __new__(cls, name, kind):
obj = super().__new__(cls, "{{steps.%s.outputs.%s}}" % (name, kind))
obj._name = name
obj._kind = kind
return obj
def __getattribute__(self, item: str) -> Any:
if item.startswith("_"):
return super().__getattribute__(item)
return "{{steps.%s.outputs.%s.%s}}" % (self._name, self._kind, item)
class _Item(str):
def __new__(cls) -> _Item:
return super().__new__(cls, "{{item}}")
Item = _Item()
SERIALIZE_ARGUMENT_FUNCTION = Callable[[Any], str]
SERIALIZE_ARGUMENT_METHOD = Callable[["Template", Any], str]
class TaskStep:
def __init__(
self,
workflow_step: v1alpha1.WorkflowStep,
serialize_argument_func: Union[SERIALIZE_ARGUMENT_FUNCTION, SERIALIZE_ARGUMENT_METHOD] = str,
):
self.workflow_step = workflow_step
self.serialize_argument_func = serialize_argument_func
self._arguments: Optional[Dict[str, Any]] = None
self._batch_arguments: Optional[Union[str, List[Dict[str, Any]]]] = None
self._sequence: Optional[v1alpha1.Sequence] = None
self._when: Optional[str] = None
def call(self, **arguments) -> TaskStep:
self._arguments = arguments
return self
def batch_call(self, batch_arguments: Union[str, List[Dict[str, Any]]]) -> TaskStep:
self._batch_arguments = batch_arguments
return self
def sequence(
self,
count: Optional[int] = None,
start: Optional[int] = None,
end: Optional[int] = None,
format: Optional[str] = None,
):
self._sequence = v1alpha1.Sequence(count=count, start=start, end=end, format=format)
return self
def when(self, expression: str):
self._when = expression
@property
def id(self) -> str:
return "{{steps.%s.id}}" % self.workflow_step.name
@property
def ip(self) -> str:
return "{{steps.%s.ip}}" % self.workflow_step.name
@property
def status(self) -> str:
return "{{steps.%s.status}}" % self.workflow_step.name
@property
def exit_code(self) -> str:
return "{{steps.%s.exitCode}}" % self.workflow_step.name
@property
def started_at(self) -> str:
return "{{steps.%s.startedAt}}" % self.workflow_step.name
@property
def finished_at(self) -> str:
return "{{steps.%s.finishedAt}}" % self.workflow_step.name
@property
def outputs_result(self) -> str:
return "{{steps.%s.outputs.result}}" % self.workflow_step.name
@property
def outputs_parameters(self) -> _StepOutputs:
return _StepOutputs(self.workflow_step.name, "parameters")
@property
def outputs_artifacts(self) -> _StepOutputs:
return _StepOutputs(self.workflow_step.name, "artifacts")
class TaskStepMaker:
def __init__(self, template: "Template"):
self.template = template
def __call__(self, name: str) -> TaskStep:
workflow_step = v1alpha1.WorkflowStep(name=name, template=self.template.name)
s = TaskStep(workflow_step, self.template.serialize_argument)
return s
class TaskStepRefer:
def __init__(self, template: str, name: str, cluster_scope: Optional[bool] = None):
self.template_ref = v1alpha1.TemplateRef(template=template, name=name, clusterScope=cluster_scope)
def __call__(self, name: str) -> TaskStep:
workflow_step = v1alpha1.WorkflowStep(name=name, templateRef=self.template_ref)
s = TaskStep(workflow_step)
return s
class TaskSteps:
def __init__(self):
self.steps: List[List[TaskStep]] = []
self._parallel: bool = False
self._inited_parallel_steps: bool = False
@contextmanager
def parallel(self):
try:
self._parallel = True
self._inited_parallel_steps = False
yield None
finally:
self._parallel = False
self._inited_parallel_steps = False
def add(self, step: TaskStep):
if self._parallel:
if not self._inited_parallel_steps:
self.steps.append([])
self._inited_parallel_steps = True
self.steps[-1].append(step)
else:
self.steps.append([step])
| 28.79878 | 106 | 0.645988 | 4,204 | 0.890112 | 237 | 0.05018 | 1,235 | 0.261486 | 0 | 0 | 265 | 0.056108 |
a07710f0bb90f929f1fc7e78cba178a4fc0fa117 | 1,007 | py | Python | tests/article_test.py | Kabu1/flashnews | 30852077c465ce828452125ec5e2b21115609c38 | [
"Unlicense"
] | null | null | null | tests/article_test.py | Kabu1/flashnews | 30852077c465ce828452125ec5e2b21115609c38 | [
"Unlicense"
] | null | null | null | tests/article_test.py | Kabu1/flashnews | 30852077c465ce828452125ec5e2b21115609c38 | [
"Unlicense"
] | null | null | null | import unittest
from app.models import Article
class ArticleTest(unittest.TestCase):
'''
Test Class to test the behaviour of the Article class
'''
def setUp(self):
'''
Set up method that will run before every Test
'''
self.new_article = Article('Jack Healy, Jack Nicas and Mike Baker','Firefighters continued to battle blazes along the West Coast that have now charred nearly five million acres. At least 17 people are dead, with dozens still missing','2020-09-12T00:32:15.000Z','https://www.nytimes.com/2020/09/11/us/fires-oregon-california-washington.html?action=click&module=Spotlight&pgtype=Homepage','https://static01.nyt.com/images/2020/09/11/us/11wildfires-oregon02/11wildfires-oregon02-facebookJumbo.jpg','A Line of Fire South of Portland and a Yearslong Recovery Ahead')
def test_instance(self):
'''
Test to check creation of new article instance
'''
self.assertTrue(isinstance(self.new_article,Article))
| 45.772727 | 569 | 0.715988 | 952 | 0.945382 | 0 | 0 | 0 | 0 | 0 | 0 | 736 | 0.730884 |
a0779d0acb5d0ce28e46508caa76d16adb915bd8 | 474 | py | Python | intern/conv_java_import.py | zaqwes8811/smart-vocabulary-cards | abeab5c86b1c6f68d8796475cba80c4f2c6055ff | [
"Apache-2.0"
] | null | null | null | intern/conv_java_import.py | zaqwes8811/smart-vocabulary-cards | abeab5c86b1c6f68d8796475cba80c4f2c6055ff | [
"Apache-2.0"
] | 11 | 2015-01-25T14:22:52.000Z | 2015-09-08T09:59:38.000Z | intern/conv_java_import.py | zaqwes8811/vocabulary-cards | abeab5c86b1c6f68d8796475cba80c4f2c6055ff | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
'''
Created on 18.04.2013
@author: кей
'''
import dals.os_io.io_wrapper as dal
def convert_one_line(msg):
copy_line = msg.split(';')[0]
if copy_line:
name = copy_line.split('.')[-1]
print copy_line+' as '+name
if __name__=='__main__':
sets = dal.get_utf8_template()
sets['name'] = 'test_import_to_jy.txt'
readed = dal.file2list(sets)
map(convert_one_line, readed)
print 'Done'
| 19.75 | 43 | 0.594937 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 123 | 0.257862 |
a078916cfe94a2866d1c06904964969d62a237ec | 25 | py | Python | relatives/__init__.py | treyhunner/django-relatives | a578ab135f865df2835957cedfd00476c4b65e18 | [
"MIT"
] | 10 | 2015-08-14T00:22:52.000Z | 2021-09-16T08:15:14.000Z | relatives/__init__.py | treyhunner/django-relatives | a578ab135f865df2835957cedfd00476c4b65e18 | [
"MIT"
] | 12 | 2015-03-09T20:17:16.000Z | 2021-09-30T18:46:11.000Z | relatives/__init__.py | treyhunner/django-relatives | a578ab135f865df2835957cedfd00476c4b65e18 | [
"MIT"
] | 3 | 2016-01-05T15:20:10.000Z | 2018-08-03T10:51:23.000Z | __version__ = '1.3.0.a3'
| 12.5 | 24 | 0.64 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.4 |
a07a7419dbe104e7dbe0af27f725918587fdc9f2 | 4,596 | py | Python | handlers/class_handler.py | Hargre/faltometro-bot | 271772dc52c9d0454d96ef3c43e3a0da32075743 | [
"MIT"
] | null | null | null | handlers/class_handler.py | Hargre/faltometro-bot | 271772dc52c9d0454d96ef3c43e3a0da32075743 | [
"MIT"
] | 2 | 2019-04-02T13:18:23.000Z | 2019-04-11T14:00:06.000Z | handlers/class_handler.py | Hargre/faltometro-bot | 271772dc52c9d0454d96ef3c43e3a0da32075743 | [
"MIT"
] | null | null | null | import logging
import math
from emoji import emojize
from peewee import DoesNotExist
from telegram import ParseMode
from telegram.ext import CommandHandler
from telegram.ext import ConversationHandler
from telegram.ext import Filters
from telegram.ext import MessageHandler
from telegram.ext import RegexHandler
from constants import limit_status
from handlers.shared import cancel_handler
from handlers.shared import select_class_keyboard
from models.class_model import ClassModel
ASK_NAME, ASK_LIMIT = range(2)
DELETING_CLASS = range(1)
def add_class_entry(bot, update):
update.message.reply_text(
'Qual o nome da matéria?'
)
return ASK_NAME
def add_class_name(bot, update, user_data):
class_name = update.message.text
user_data['class_name'] = class_name
update.message.reply_text(
'Ok! E qual o limite de faltas?'
)
return ASK_LIMIT
def add_skip_limit(bot, update, user_data):
skipped_classes_limit = update.message.text
user_data['skipped_classes_limit'] = skipped_classes_limit
user_data['chat_id'] = update.message.chat_id
__save(user_data)
update.message.reply_text(
'Pronto!'
)
return ConversationHandler.END
def add_class_handler():
handler = ConversationHandler(
entry_points=[CommandHandler('add_materia', add_class_entry)],
states={
ASK_NAME: [
MessageHandler(
Filters.text,
add_class_name,
pass_user_data=True
)
],
ASK_LIMIT: [
RegexHandler(
'^\d+$',
add_skip_limit,
pass_user_data=True
)
],
},
fallbacks=[cancel_handler()]
)
return handler
def list_classes(bot, update):
classes = ClassModel.select().where(ClassModel.chat_id == update.message.chat_id)
response = ''
for class_model in classes:
line = (
'*%s:*\n``` %s / %s faltas\t\t\t\t%s```\n\n'
% (
class_model.class_name,
class_model.skipped_classes,
class_model.skipped_classes_limit,
__get_status_emoji(class_model.skipped_classes, class_model.skipped_classes_limit)
)
)
response += line
update.message.reply_text(response, parse_mode=ParseMode.MARKDOWN)
def list_classes_handler():
handler = CommandHandler('resumo', list_classes)
return handler
def delete_class_entry(bot, update):
select_class_keyboard(update)
return DELETING_CLASS
def delete_class(bot, update):
class_name = update.message.text
chat_id = update.message.chat_id
try:
missed_class = ClassModel.get((ClassModel.chat_id == chat_id) & (ClassModel.class_name == class_name))
missed_class.delete_instance()
update.message.reply_text(
'Matéria removida!',
parse_mode=ParseMode.MARKDOWN
)
return ConversationHandler.END
except DoesNotExist:
update.message.reply_text(
'Não conheço essa matéria! Tente novamente.'
)
def delete_class_handler():
handler = ConversationHandler(
entry_points=[CommandHandler('tirar_materia', delete_class_entry)],
states={
DELETING_CLASS: [
MessageHandler(
Filters.text,
delete_class,
)
],
},
fallbacks=[cancel_handler()]
)
return handler
def __get_status_emoji(skipped_classes, skipped_classes_limit):
status_ok = emojize(":white_check_mark:", use_aliases=True)
status_warning = emojize(":warning:", use_aliases=True)
status_danger = emojize(":sos:", use_aliases=True)
status_failed = emojize(":x:", use_aliases=True)
skipped_percent = (skipped_classes * 100) / skipped_classes_limit
skipped_percent = math.floor(skipped_percent)
if skipped_percent < limit_status.WARNING:
return status_ok
elif skipped_percent >= limit_status.WARNING and skipped_percent < limit_status.DANGER:
return status_warning
elif skipped_percent >= limit_status.DANGER and skipped_percent <= limit_status.LIMIT:
return status_danger
else:
return status_failed
def __save(user_data):
ClassModel.create(
chat_id = user_data['chat_id'],
class_name = user_data['class_name'],
skipped_classes_limit = int(user_data['skipped_classes_limit'])
)
| 27.035294 | 110 | 0.647302 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 354 | 0.07694 |
a07ca3f53342a4c5c568050fc58fa24424a4bf96 | 1,137 | py | Python | jupyter/aBasic/a_datatype_class/Ex01_valuable.py | WoolinChoi/test | a0f9c8ecc63443acaae61d744eecec6c943d3a26 | [
"MIT"
] | null | null | null | jupyter/aBasic/a_datatype_class/Ex01_valuable.py | WoolinChoi/test | a0f9c8ecc63443acaae61d744eecec6c943d3a26 | [
"MIT"
] | 1 | 2021-03-30T09:01:47.000Z | 2021-03-30T09:01:47.000Z | jupyter/aBasic/a_datatype_class/Ex01_valuable.py | WoolinChoi/test | a0f9c8ecc63443acaae61d744eecec6c943d3a26 | [
"MIT"
] | 1 | 2019-12-06T18:21:10.000Z | 2019-12-06T18:21:10.000Z | """
파이션 - 무료이지만 강력하다
` 만들고자 하는 대부분의 프로그램 가능
` 물론, 하드웨어 제어같은 복잡하고 반복 연산이 많은 프로그램은 부적절
` 그러나, 다른언어 프로그램을 파이썬에 포함 가능
[주의] 줄을 맞추지 않으면 실행 안됨
[실행] Run 메뉴를 클릭하거나 단축키로 shift + alt + F10
[도움말] ctrl + q
"""
""" 여러줄 주석 """
# 한줄 주석
# print("헬로우")
# print('hello')
# print("""안녕""")
# print('''올라''')
# 실행 : ctrl + shift + F10
# 작은 따옴표 '' 와 큰 따옴표 "" 를 구분하지 않는다.
'''
변수란
파이션의 모든 자료형은 객체로 취급한다
a = 7
7을 가리키는 변수 a이다. ( 저장한다는 표현 안함 )
변수 a는 7이라는 정수형 객체를 가리키는 레퍼런스이다.
여기서 7은 기존 프로그램언어에 말하는 상수가 아닌 하나의 객체이다.
[변수명 규칙]
- 영문자 + 숫자 + _ 조합
- 첫글자에 숫자는 안됨
- 대소문자 구별
- 길이 제한 없음
- 예약어 사용 안됨
'''
'''
예약어 확인
import keyword
print(keyword.kwlist)
'''
'''
a = 7 # 7 객체를 가르키는 변수 a
b = 7 # 7 객체를 가르키는 변수 b
print(type(a)) # int
print(a is 7) # true
print(b is 7) # true
print(a is b) # true
print(id(a))
print(id(b))
print(id(7))
# id 값이 동일하다 즉, a와 b와 7의 객체 id를 가르키는 변수명만 다르다
'''
# 여러 변수 선언
a, b = 5, 10
print('a+b=', a+b)
# 파이썬에서 두 변수의 값 바꾸기(swapping)
a, b = b, a
print('a=', a, 'b=', b)
# 변수의 삭제
del b
print(b)
| 15.364865 | 50 | 0.504837 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,729 | 0.948437 |
a07d57857f23110458e28cf9b4145b1716e6f940 | 2,502 | py | Python | convert.py | povle/SP-SR2-converter | 7a675204e15b340deac2b98634805cdf75e6fd4a | [
"MIT"
] | 3 | 2021-01-09T20:11:31.000Z | 2022-03-31T02:05:52.000Z | convert.py | povle/SP-SR2-converter | 7a675204e15b340deac2b98634805cdf75e6fd4a | [
"MIT"
] | null | null | null | convert.py | povle/SP-SR2-converter | 7a675204e15b340deac2b98634805cdf75e6fd4a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import sys
import io
import os.path
import shutil
import requests
from convert_file import convert_file
from gooey import Gooey, GooeyParser
if len(sys.argv) >= 2:
if '--ignore-gooey' not in sys.argv:
sys.argv.append('--ignore-gooey')
@Gooey(program_name='SP to SR2 converter', tabbed_groups=True, optional_cols=1)
def main():
parser = GooeyParser()
basic_options = parser.add_argument_group('Basic Options')
group = basic_options.add_mutually_exclusive_group(required=True)
group.add_argument('--input_file', '-i', type=argparse.FileType('rb'), help='path to the source craft xml', widget='FileChooser')
group.add_argument('--id', help='ID of the craft (https://www.simpleplanes.com/a/??????/)')
basic_options.add_argument('--output_file', '-o', type=argparse.FileType('wb'), help='path to the output file')
advanced_options = parser.add_argument_group('Advanced Options')
advanced_options.add_argument('--scale', '-s', type=float, default=1, help='scale of the converted craft', widget='DecimalField')
group2 = advanced_options.add_mutually_exclusive_group()
group2.add_argument('--only_ids', nargs='*', metavar='part_id', help='convert only parts with given ids')
group2.add_argument('--exclude_ids', nargs='*', metavar='part_id', default=[], help='ignore parts with given ids')
group3 = advanced_options.add_mutually_exclusive_group()
group3.add_argument('--only_types', nargs='*', metavar='SP_type', help='convert only parts with given types')
group3.add_argument('--exclude_types', nargs='*', metavar='SP_type', default=[], help='ignore parts with given types')
args = parser.parse_args()
output_file = args.output_file or None
if args.id:
r = requests.get(f'http://www.simpleplanes.com/Client/DownloadAircraft?a={args.id}')
if r.content == b'0':
raise ValueError('Incorrect craft ID')
input_file = io.BytesIO(r.content)
if output_file is None:
output_file = open(args.id+'_SR.xml', 'wb')
else:
input_file = args.input_file
if output_file is None:
output_name = os.path.split(input_file.name)[1]
output_name = os.path.splitext(output_name)[0]+'_SR.xml'
output_file = open(output_name, 'wb')
with input_file as i, output_file as o:
converted = convert_file(i, args)
shutil.copyfileobj(converted, o)
if __name__ == '__main__':
main()
| 43.137931 | 133 | 0.688649 | 0 | 0 | 0 | 0 | 2,172 | 0.868106 | 0 | 0 | 706 | 0.282174 |
a07db8162c85985e5fa4859871927e9c03a5f877 | 5,289 | py | Python | bsd2/vagrant-ansible/ansible/lib/ansible/runner/action_plugins/fetch.py | dlab-berkeley/collaboratool-archive | fa474e05737f78e628d6b9398c58cf7c966a7bba | [
"Apache-2.0"
] | 1 | 2016-01-20T14:36:02.000Z | 2016-01-20T14:36:02.000Z | bsd2/vagrant-ansible/ansible/lib/ansible/runner/action_plugins/fetch.py | dlab-berkeley/collaboratool-archive | fa474e05737f78e628d6b9398c58cf7c966a7bba | [
"Apache-2.0"
] | null | null | null | bsd2/vagrant-ansible/ansible/lib/ansible/runner/action_plugins/fetch.py | dlab-berkeley/collaboratool-archive | fa474e05737f78e628d6b9398c58cf7c966a7bba | [
"Apache-2.0"
] | null | null | null | # (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import pwd
import random
import traceback
import tempfile
import base64
import ansible.constants as C
from ansible import utils
from ansible import errors
from ansible import module_common
from ansible.runner.return_data import ReturnData
class ActionModule(object):
def __init__(self, runner):
self.runner = runner
def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
''' handler for fetch operations '''
if self.runner.noop_on_check(inject):
return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not (yet) supported for this module'))
# load up options
options = {}
if complex_args:
options.update(complex_args)
options.update(utils.parse_kv(module_args))
source = options.get('src', None)
dest = options.get('dest', None)
flat = options.get('flat', False)
flat = utils.boolean(flat)
fail_on_missing = options.get('fail_on_missing', False)
fail_on_missing = utils.boolean(fail_on_missing)
if source is None or dest is None:
results = dict(failed=True, msg="src and dest are required")
return ReturnData(conn=conn, result=results)
if flat:
if dest.endswith("/"):
# if the path ends with "/", we'll use the source filename as the
# destination filename
base = os.path.basename(source)
dest = os.path.join(dest, base)
if not dest.startswith("/"):
# if dest does not start with "/", we'll assume a relative path
dest = utils.path_dwim(self.runner.basedir, dest)
else:
# files are saved in dest dir, with a subdir for each host, then the filename
dest = "%s/%s/%s" % (utils.path_dwim(self.runner.basedir, dest), conn.host, source)
dest = dest.replace("//","/")
# calculate md5 sum for the remote file
remote_md5 = self.runner._remote_md5(conn, tmp, source)
# use slurp if sudo and permissions are lacking
remote_data = None
if remote_md5 in ('1', '2') or self.runner.sudo:
slurpres = self.runner._execute_module(conn, tmp, 'slurp', 'src=%s' % source, inject=inject)
if slurpres.is_successful():
if slurpres.result['encoding'] == 'base64':
remote_data = base64.b64decode(slurpres.result['content'])
if remote_data is not None:
remote_md5 = utils.md5s(remote_data)
# these don't fail because you may want to transfer a log file that possibly MAY exist
# but keep going to fetch other log files
if remote_md5 == '0':
result = dict(msg="unable to calculate the md5 sum of the remote file", file=source, changed=False)
return ReturnData(conn=conn, result=result)
if remote_md5 == '1':
if fail_on_missing:
result = dict(failed=True, msg="the remote file does not exist", file=source)
else:
result = dict(msg="the remote file does not exist, not transferring, ignored", file=source, changed=False)
return ReturnData(conn=conn, result=result)
if remote_md5 == '2':
result = dict(msg="no read permission on remote file, not transferring, ignored", file=source, changed=False)
return ReturnData(conn=conn, result=result)
# calculate md5 sum for the local file
local_md5 = utils.md5(dest)
if remote_md5 != local_md5:
# create the containing directories, if needed
if not os.path.isdir(os.path.dirname(dest)):
os.makedirs(os.path.dirname(dest))
# fetch the file and check for changes
if remote_data is None:
conn.fetch_file(source, dest)
else:
f = open(dest, 'w')
f.write(remote_data)
f.close()
new_md5 = utils.md5(dest)
if new_md5 != remote_md5:
result = dict(failed=True, md5sum=new_md5, msg="md5 mismatch", file=source, dest=dest)
return ReturnData(conn=conn, result=result)
result = dict(changed=True, md5sum=new_md5, dest=dest)
return ReturnData(conn=conn, result=result)
else:
result = dict(changed=False, md5sum=local_md5, file=source, dest=dest)
return ReturnData(conn=conn, result=result)
| 42.653226 | 135 | 0.626962 | 4,327 | 0.818113 | 0 | 0 | 0 | 0 | 0 | 0 | 1,718 | 0.324825 |
a07ebd61f61d120e3815b7fb4a6cf2eeafd36431 | 4,563 | py | Python | src/plot_by_genome.py | MaaT-Pharma/AMBER | 76aa10e2295265b16337b7bfab769d67d3bea66a | [
"Apache-2.0"
] | null | null | null | src/plot_by_genome.py | MaaT-Pharma/AMBER | 76aa10e2295265b16337b7bfab769d67d3bea66a | [
"Apache-2.0"
] | null | null | null | src/plot_by_genome.py | MaaT-Pharma/AMBER | 76aa10e2295265b16337b7bfab769d67d3bea66a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import argparse
import os
import sys
import matplotlib
import numpy as np
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os, sys, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from src import plots
from src.utils import load_data
from src.utils import argparse_parents
def plot_by_genome(data, out_file=None, sort_by='completeness'):
not_sort_by = list(set(['purity','completeness']) - set([sort_by]))[0] # get the metric not sorted by
data = sorted(data, key=lambda x: x[sort_by])
genomes = []
precision = []
recall = []
for genome in data:
genomes.append(genome['mapped_genome'])
precision.append(genome['purity'])
recall.append(genome['completeness'])
sort = {'purity': precision, 'completeness': recall}
fig, ax1 = plt.subplots(figsize=(len(genomes) * 0.15, 5))
ax1.plot(np.arange(len(genomes)), sort[sort_by], color='black')
plt.xticks(np.arange(len(genomes)), genomes, rotation='vertical', fontsize="smaller")
ax1.plot(np.arange(len(genomes)), sort[not_sort_by], '.', color='red')
# transform y labels to percentages
vals = ax1.get_yticks()
ax1.set_yticklabels(['{:3.0f}%'.format(x * 100) for x in vals])
plt.legend((sort_by.title(), not_sort_by.title()))
plt.grid(True)
plt.tight_layout()
if out_file is None:
plt.show()
else:
plt.savefig(os.path.normpath(out_file + '.png'), dpi=100, format='png', bbox_inches='tight')
plt.savefig(os.path.normpath(out_file + '.pdf'), dpi=100, format='pdf', bbox_inches='tight')
plt.close(fig)
def plot_by_genome2(bin_metrics_per_query, binning_labels, output_dir):
colors_list = plots.create_colors_list()
if len(bin_metrics_per_query) > len(colors_list):
raise RuntimeError("Plot only supports 29 colors")
fig, axs = plt.subplots(figsize=(6, 5))
# force axis to be from 0 to 100%
axs.set_xlim([0.0, 1.0])
axs.set_ylim([0.0, 1.0])
i = 0
for query_metrics in bin_metrics_per_query:
precision = []
recall = []
for metrics in query_metrics:
precision.append(metrics['purity'])
recall.append(metrics['completeness'])
axs.scatter(precision, recall, marker='o', color=colors_list[i], s=[8] * len(precision))
i += 1
# turn on grid
axs.minorticks_on()
axs.grid(which='major', linestyle='-', linewidth='0.5')
axs.grid(which='minor', linestyle=':', linewidth='0.5')
# transform plot_labels to percentages
vals = axs.get_xticks()
axs.set_xticklabels(['{:3.0f}%'.format(x * 100) for x in vals])
vals = axs.get_yticks()
axs.set_yticklabels(['{:3.0f}%'.format(x * 100) for x in vals])
plt.xlabel('Purity per bin')
plt.ylabel('Completeness per genome')
plt.tight_layout()
fig.savefig(os.path.normpath(output_dir + '/purity_completeness_per_bin.eps'), dpi=100, format='eps', bbox_inches='tight')
lgd = plt.legend(binning_labels, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., handlelength=0, frameon=False)
for handle in lgd.legendHandles:
handle.set_sizes([100.0])
fig.savefig(os.path.normpath(output_dir + '/purity_completeness_per_bin.png'), dpi=100, format='png', bbox_extra_artists=(lgd,), bbox_inches='tight')
fig.savefig(os.path.normpath(output_dir + '/purity_completeness_per_bin.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close(fig)
# def main():
# parser = argparse.ArgumentParser(description="Plot purity and completeness per genome. Genomes can be sorted by completeness (default) or purity")
# parser.add_argument('file', nargs='?', type=argparse.FileType('r'), help=argparse_parents.HELP_FILE)
# parser.add_argument('-s','--sort_by', help='Sort by either purity or completeness (default: completeness)', choices=set(['purity','completeness']))
# parser.add_argument('-o','--out_file', help='Path to store image (default: only show image)')
# args = parser.parse_args()
# if not args.file and sys.stdin.isatty():
# parser.print_help()
# parser.exit(1)
# metrics = load_data.load_tsv_table(sys.stdin if not sys.stdin.isatty() else args.file)
# if args.sort_by is not None:
# plot_by_genome(metrics, args.out_file, args.sort_by)
# else:
# plot_by_genome(metrics, args.out_file)
if __name__ == "__main__":
main()
| 38.669492 | 153 | 0.67368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,546 | 0.338812 |
a080d1b200263a36cd31a3e857bf790cbd1e3259 | 16,548 | py | Python | tests/test_references.py | isprojects/djangorestframework-inclusions | c6669f404a8a80f2c524a8adfb6548b2eef235c7 | [
"MIT"
] | null | null | null | tests/test_references.py | isprojects/djangorestframework-inclusions | c6669f404a8a80f2c524a8adfb6548b2eef235c7 | [
"MIT"
] | 4 | 2019-11-15T10:21:20.000Z | 2021-04-22T13:37:32.000Z | tests/test_references.py | isprojects/djangorestframework-inclusions | c6669f404a8a80f2c524a8adfb6548b2eef235c7 | [
"MIT"
] | null | null | null | from django.urls import reverse
from rest_framework.test import APITestCase
from testapp.models import (
A,
B,
C,
Child,
ChildProps,
Container,
Entry,
MainObject,
Parent,
Tag,
)
from .mixins import InclusionsMixin
class ReferenceTests(InclusionsMixin, APITestCase):
maxDiff = None
@classmethod
def setUpTestData(cls):
cls.tag1 = Tag.objects.create(name="you")
cls.tag2 = Tag.objects.create(name="are")
cls.tag3 = Tag.objects.create(name="it")
cls.parent1 = Parent.objects.create(name="Papa Johns")
cls.parent1.tags.set([cls.tag1, cls.tag2])
cls.parent2 = Parent.objects.create(name="Papa Roach")
cls.parent2.tags.set([cls.tag2])
cls.child1 = Child.objects.create(parent=cls.parent1, name="Children of Bodom")
cls.child2 = Child.objects.create(parent=cls.parent1, name="Children of Men")
cls.child1.tags.set([cls.tag3])
cls.parent1.favourite_child = cls.child2
cls.parent1.save()
cls.childprops = ChildProps.objects.create(child=cls.child2)
cls.container1 = Container.objects.create(name="container 1")
cls.container1.save()
cls.entryA = Entry.objects.create(name="A", container=cls.container1)
cls.entryA.tags.set([cls.tag1])
cls.entryA.save()
cls.entryB = Entry.objects.create(name="B", container=cls.container1)
cls.entryB.tags.set([cls.tag3])
cls.entryB.save()
def test_tag_list(self): # without pagination
expected = {
"data": [
{"id": self.tag1.id, "name": "you"},
{"id": self.tag2.id, "name": "are"},
{"id": self.tag3.id, "name": "it"},
],
"inclusions": {},
}
self.assertResponseData("tag-list", expected)
def test_tag_detail(self):
expected = {"data": {"id": self.tag1.id, "name": "you"}, "inclusions": {}}
self.assertResponseData("tag-detail", expected, pk=self.tag1.pk)
def test_custom_action_no_inclusion_serializer(self):
"""
Assert that custom actions with inclusion renderer don't trigger
inclusion machinery.
"""
expected = [
{"id": self.tag1.id, "name": "you"},
{"id": self.tag2.id, "name": "are"},
{"id": self.tag3.id, "name": "it"},
]
self.assertResponseData("tag-custom-action", expected)
def test_custom_action_inclusion_serializer(self):
"""
Assert that the inclusion machinery does kick in if inclusion
serializers are involved.
"""
entry_c = C.objects.create()
expected = {"data": {"id": entry_c.id, "b": None}, "inclusions": {}}
self.assertResponseData("c-custom-action", expected, pk=entry_c.pk)
def test_parent_list(self): # with pagination
expected = {
"count": 2,
"previous": None,
"next": None,
"data": [
{
"id": self.parent1.id,
"name": "Papa Johns",
"tags": [self.tag1.id, self.tag2.id],
"favourite_child": self.child2.pk,
},
{
"id": self.parent2.id,
"name": "Papa Roach",
"tags": [self.tag2.id],
"favourite_child": None,
},
],
"inclusions": {},
}
self.assertResponseData("parent-list", expected)
def test_parent_list_include_tags(self):
expected = {
"count": 2,
"previous": None,
"next": None,
"data": [
{
"id": self.parent1.id,
"name": "Papa Johns",
"tags": [self.tag1.id, self.tag2.id],
"favourite_child": self.child2.pk,
},
{
"id": self.parent2.id,
"name": "Papa Roach",
"tags": [self.tag2.id],
"favourite_child": None,
},
],
"inclusions": {
"testapp.Tag": [
{"id": self.tag1.id, "name": "you"},
{"id": self.tag2.id, "name": "are"},
]
},
}
self.assertResponseData("parent-list", expected, params={"include": "tags"})
def test_parent_detail(self):
expected = {
"data": {
"id": self.parent2.id,
"name": "Papa Roach",
"tags": [self.tag2.id],
"favourite_child": None,
},
"inclusions": {},
}
self.assertResponseData("parent-detail", expected, pk=self.parent2.pk)
def test_parent_detail_with_include(self):
expected = {
"data": {
"id": self.parent2.id,
"name": "Papa Roach",
"tags": [self.tag2.id],
"favourite_child": None,
},
"inclusions": {"testapp.Tag": [{"id": self.tag2.id, "name": "are"}]},
}
self.assertResponseData(
"parent-detail", expected, pk=self.parent2.pk, params={"include": "*"}
)
def test_nested_include(self):
expected = {
"data": {
"id": self.child2.id,
"name": "Children of Men",
"childprops": self.childprops.id,
"parent": {
"id": self.parent1.id,
"name": "Papa Johns",
"tags": [self.tag1.id, self.tag2.id],
"favourite_child": self.child2.id,
},
"tags": [],
},
"inclusions": {
"testapp.Tag": [
{"id": self.tag1.id, "name": "you"},
{"id": self.tag2.id, "name": "are"},
]
},
}
self.assertResponseData(
"child-detail",
expected,
params={"include": "parent.tags"},
pk=self.child2.pk,
)
def test_include_all_detail(self):
expected = {
"data": {
"id": self.child2.id,
"name": "Children of Men",
"childprops": self.childprops.id,
"parent": {
"id": self.parent1.id,
"name": "Papa Johns",
"tags": [self.tag1.id, self.tag2.id],
"favourite_child": self.child2.id,
},
"tags": [],
},
"inclusions": {
"testapp.Tag": [
{"id": self.tag1.id, "name": "you"},
{"id": self.tag2.id, "name": "are"},
],
"testapp.ChildProps": [
{"id": self.childprops.id, "child": self.child2.pk}
],
},
}
self.assertResponseData(
"child-detail", expected, params={"include": "*"}, pk=self.child2.pk
)
def test_include_all_list(self):
expected = {
"count": 2,
"next": None,
"previous": None,
"data": [
{
"id": self.child1.id,
"name": "Children of Bodom",
"childprops": None,
"parent": {
"id": self.parent1.id,
"name": "Papa Johns",
"tags": [self.tag1.id, self.tag2.id],
"favourite_child": self.child2.id,
},
"tags": [self.tag3.id],
},
{
"id": self.child2.id,
"name": "Children of Men",
"childprops": self.childprops.id,
"parent": {
"id": self.parent1.id,
"name": "Papa Johns",
"tags": [self.tag1.id, self.tag2.id],
"favourite_child": self.child2.id,
},
"tags": [],
},
],
"inclusions": {
"testapp.Tag": [
{"id": self.tag1.id, "name": "you"},
{"id": self.tag2.id, "name": "are"},
{"id": self.tag3.id, "name": "it"},
],
"testapp.ChildProps": [
{"id": self.childprops.id, "child": self.child2.pk}
],
},
}
self.assertResponseData("child-list", expected, params={"include": "*"})
def test_include_fk_field(self):
expected = {
"data": {
"id": self.child2.id,
"name": "Children of Men",
"childprops": self.childprops.id,
"parent": {
"id": self.parent1.id,
"name": "Papa Johns",
"tags": [self.tag1.id, self.tag2.id],
"favourite_child": self.child2.id,
},
"tags": [],
},
"inclusions": {
"testapp.ChildProps": [
{"id": self.childprops.id, "child": self.child2.id}
]
},
}
self.assertResponseData(
"child-detail",
expected,
params={"include": "childprops"},
pk=self.child2.pk,
)
def test_flattened_inclusions(self):
expected = {
"data": {
"id": self.child1.id,
"name": "Children of Bodom",
"childprops": None,
"parent": {
"id": self.parent1.id,
"name": "Papa Johns",
"tags": [self.tag1.id, self.tag2.id],
"favourite_child": self.child2.id,
},
"tags": [self.tag3.id],
},
"inclusions": {
"testapp.Tag": [
{"id": self.tag1.id, "name": "you"},
{"id": self.tag2.id, "name": "are"},
{"id": self.tag3.id, "name": "it"},
]
},
}
self.assertResponseData(
"child-detail",
expected,
params={"include": "tags,parent.tags"},
pk=self.child1.pk,
)
def test_nested_include_multiple_from_same_child(self):
self.child2.tags.set([self.tag1])
self.addCleanup(self.child2.tags.clear)
expected = {
"data": {
"id": self.childprops.id,
"child": {
"id": self.child2.id,
"parent": self.parent1.id,
"tags": [self.tag1.id],
},
},
"inclusions": {
"testapp.Tag": [
{"id": self.tag1.id, "name": "you"},
{
"id": self.tag2.id,
"name": "are",
}, # included from Parent inclusion
],
"testapp.Parent": [
{
"id": self.parent1.id,
"name": "Papa Johns",
"tags": [self.tag1.id, self.tag2.id],
"favourite_child": self.child2.id,
}
],
},
}
self.assertResponseData(
"childprops-detail",
expected,
params={"include": "child.tags,child.parent"},
pk=self.childprops.pk,
)
def test_many(self):
expected = {
"data": {
"entries": [
{"id": self.entryA.id, "name": "A", "tags": [self.tag1.id]},
{"id": self.entryB.id, "name": "B", "tags": [self.tag3.id]},
],
"id": self.container1.id,
"name": "container 1",
},
"inclusions": {
"testapp.Tag": [
{"id": self.tag1.id, "name": "you"},
{"id": self.tag3.id, "name": "it"},
]
},
}
self.assertResponseData(
"container-detail",
expected,
params={"include": "entries.tags"},
pk=self.container1.pk,
)
def test_post(self):
url = reverse("parent-list")
response = self.client.post(
url, {"name": "Papa Post", "tags": [self.tag2.id], "favourite_child": None}
)
json = response.json()
json["data"].pop("id")
self.assertEqual(
json,
{
"data": {
"favourite_child": None,
"name": "Papa Post",
"tags": [self.tag2.id],
},
"inclusions": {},
},
)
def test_post_with_error(self):
url = reverse("parent-list")
response = self.client.post(url, {"wrong": "WRONG"})
json = response.json()
# things should not be wrapped in data
self.assertEqual(
json,
{"name": ["This field is required."], "tags": ["This field is required."]},
)
def test_post_with_non_field_error(self):
url = reverse("parent-list")
response = self.client.post(url, {"name": "Trigger", "tags": [self.tag2.id]})
json = response.json()
# things should not be wrapped in data
self.assertEqual(json, {"invalid": "WRONG"})
def test_list_action(self):
url = reverse("parent-check")
response = self.client.post(url, {"random": "data"})
json = response.json()
self.assertEqual(json, {"arbitrary": "content"})
def test_detail_action(self):
url = reverse("parent-check2", kwargs={"pk": self.parent1.pk})
response = self.client.post(url, {"random": "data"})
json = response.json()
self.assertEqual(json, {"arbitrary": "content"})
def test_read_only_inclusions(self):
"""
NEXT-827 -- Inclusions should work with read-only fields.
"""
expected = {
"count": 2,
"previous": None,
"next": None,
"data": [
{"id": self.entryA.id, "name": "A", "tags": [self.tag1.id]},
{"id": self.entryB.id, "name": "B", "tags": [self.tag3.id]},
],
"inclusions": {
"testapp.Tag": [
{"id": self.tag1.id, "name": "you"},
{"id": self.tag3.id, "name": "it"},
]
},
}
self.assertResponseData("entry-list", expected, params={"include": "tags"})
def test_nullable_relation(self):
"""
NEXT-856 -- requesting inclusions of nullable relations shouldn't crash.
"""
a1 = A.objects.create()
b1 = B.objects.create()
c1 = C.objects.create()
b2 = B.objects.create(a=a1)
c2 = C.objects.create(b=b2)
c3 = C.objects.create(b=b1)
expected = {
"count": 3,
"previous": None,
"next": None,
"data": [
{"id": c1.id, "b": None},
{"id": c2.pk, "b": {"id": b2.id, "a": a1.id}},
{"id": c3.pk, "b": {"id": b1.id, "a": None}},
],
"inclusions": {"testapp.A": [{"id": a1.id}]},
}
self.assertResponseData("c-list", expected, params={"include": "b.a"})
def test_reverse_relation(self):
"""
NEXT-1052 revealed a problem in inclusions.
Response data in the form of:
[{
'external_notifications': [],
}]
where an inclusion field is in the ExernalNotification serializer
bugged out.
"""
main_object = MainObject.objects.create()
# no actual related objects exist in database
expected = {
"data": [{"id": main_object.id, "relatedobject_set": []}],
"inclusions": {},
}
self.assertResponseData("mainobject-list", expected, params={"include": "*"})
| 32.383562 | 87 | 0.435098 | 16,288 | 0.984288 | 0 | 0 | 1,163 | 0.07028 | 0 | 0 | 3,954 | 0.238941 |
a08113f70d1b07cff7761da1bc92b7750832a572 | 1,508 | py | Python | q2_fondue/get_all.py | misialq/q2-fondue | a7a541ee017381b34d38ef766de39d5d62588465 | [
"BSD-3-Clause"
] | 10 | 2022-03-21T16:07:22.000Z | 2022-03-31T09:33:48.000Z | q2_fondue/get_all.py | misialq/q2-fondue | a7a541ee017381b34d38ef766de39d5d62588465 | [
"BSD-3-Clause"
] | null | null | null | q2_fondue/get_all.py | misialq/q2-fondue | a7a541ee017381b34d38ef766de39d5d62588465 | [
"BSD-3-Clause"
] | 4 | 2022-03-21T06:51:44.000Z | 2022-03-29T15:56:14.000Z | # ----------------------------------------------------------------------------
# Copyright (c) 2022, Bokulich Laboratories.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import qiime2 as q2
import pandas as pd
import threading
from q2_fondue.utils import handle_threaded_exception
from qiime2 import Artifact
threading.excepthook = handle_threaded_exception
def get_all(ctx, accession_ids, email, n_jobs=1, retries=2, log_level='INFO'):
# get required methods
get_metadata = ctx.get_action('fondue', 'get_metadata')
get_sequences = ctx.get_action('fondue', 'get_sequences')
# fetch metadata
metadata, failed_ids = get_metadata(
accession_ids, email, n_jobs, log_level
)
failed_ids_df = failed_ids.view(pd.DataFrame)
# fetch sequences - use metadata to get run ids, regardless if
# runs or projects were requested
run_ids = q2.Artifact.import_data(
'NCBIAccessionIDs', pd.Series(metadata.view(pd.DataFrame).index)
)
seq_single, seq_paired, failed_ids, = get_sequences(
run_ids, email, retries, n_jobs, log_level
)
failed_ids_df = failed_ids_df.append(failed_ids.view(pd.DataFrame))
if failed_ids_df.shape[0] > 0:
failed_ids = Artifact.import_data('SRAFailedIDs', failed_ids_df)
return metadata, seq_single, seq_paired, failed_ids
| 32.085106 | 78 | 0.657825 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 550 | 0.364721 |
a08175a3e80e168fe04fe33684d0de9087ed3e33 | 2,652 | py | Python | markups/restructuredtext.py | LukeC8/pymarkups | eec6edbc870fc6fe50c56d30f3caa8b8ee4e239a | [
"BSD-3-Clause"
] | null | null | null | markups/restructuredtext.py | LukeC8/pymarkups | eec6edbc870fc6fe50c56d30f3caa8b8ee4e239a | [
"BSD-3-Clause"
] | null | null | null | markups/restructuredtext.py | LukeC8/pymarkups | eec6edbc870fc6fe50c56d30f3caa8b8ee4e239a | [
"BSD-3-Clause"
] | null | null | null | # vim: ts=8:sts=8:sw=8:noexpandtab
# This file is part of python-markups module
# License: 3-clause BSD, see LICENSE file
# Copyright: (C) Dmitry Shachnev, 2012-2018
import markups.common as common
from markups.abstract import AbstractMarkup, ConvertedMarkup
class ReStructuredTextMarkup(AbstractMarkup):
"""Markup class for reStructuredText language.
Inherits :class:`~markups.abstract.AbstractMarkup`.
:param settings_overrides: optional dictionary of overrides for the
`Docutils settings`_
:type settings_overrides: dict
.. _`Docutils settings`: http://docutils.sourceforge.net/docs/user/config.html
"""
name = 'reStructuredText'
attributes = {
common.LANGUAGE_HOME_PAGE: 'http://docutils.sourceforge.net/rst.html',
common.MODULE_HOME_PAGE: 'http://docutils.sourceforge.net/',
common.SYNTAX_DOCUMENTATION: 'http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html'
}
file_extensions = ('.rst', '.rest')
default_extension = '.rst'
@staticmethod
def available():
try:
import docutils.core
except ImportError:
return False
return True
def __init__(self, filename=None, settings_overrides=None):
self.overrides = settings_overrides or {}
self.overrides.update({
'math_output': 'MathJax %s?config=TeX-AMS_CHTML' % common.MATHJAX_WEB_URL,
'syntax_highlight': 'short',
'halt_level': 5, # Never convert system messages to exceptions
})
AbstractMarkup.__init__(self, filename)
from docutils.core import publish_parts
self._publish_parts = publish_parts
def convert(self, text):
parts = self._publish_parts(text, source_path=self.filename,
writer_name='html5', settings_overrides=self.overrides)
# Determine head
head = parts['head']
# Determine body
body = parts['html_body']
# Determine title
title = parts['title']
# Determine stylesheet
origstyle = parts['stylesheet']
# Cut off <style> and </style> tags
stylestart = '<style type="text/css">'
stylesheet = ''
if stylestart in origstyle:
stylesheet = origstyle[origstyle.find(stylestart)+25:origstyle.rfind('</style>')]
stylesheet += common.get_pygments_stylesheet('.code')
return ConvertedReStructuredText(head, body, title, stylesheet)
class ConvertedReStructuredText(ConvertedMarkup):
def __init__(self, head, body, title, stylesheet):
ConvertedMarkup.__init__(self, body, title, stylesheet)
self.head = head
def get_javascript(self, webenv=False):
if 'MathJax.js?config=TeX-AMS_CHTML' not in self.head:
return ''
return ('<script type="text/javascript" src="%s?config=TeX-AMS_CHTML"></script>\n' %
common.get_mathjax_url(webenv))
| 30.837209 | 99 | 0.735294 | 2,385 | 0.899321 | 0 | 0 | 114 | 0.042986 | 0 | 0 | 1,109 | 0.418175 |
a0827c33ad3c6db021a834ac073ebf6c9ba882a7 | 8,025 | py | Python | Intelligent Systems and Decision Support Systems/pm-test1.py | johnpras/Uni_work | 1edd8fd56e4d54cdcc0058f0a21799ef6015e3f6 | [
"MIT"
] | null | null | null | Intelligent Systems and Decision Support Systems/pm-test1.py | johnpras/Uni_work | 1edd8fd56e4d54cdcc0058f0a21799ef6015e3f6 | [
"MIT"
] | null | null | null | Intelligent Systems and Decision Support Systems/pm-test1.py | johnpras/Uni_work | 1edd8fd56e4d54cdcc0058f0a21799ef6015e3f6 | [
"MIT"
] | null | null | null | import pandas as pd
from pm4py.objects.log.importer.xes import importer as xes_import
from pm4py.objects.log.util import log as utils
from pm4py.statistics.start_activities.log.get import get_start_activities
from pm4py.statistics.end_activities.log.get import get_end_activities
from pm4py.algo.filtering.log.end_activities import end_activities_filter
from pm4py.visualization.petrinet import factory as vis_factory
from pm4py.algo.discovery.alpha import factory as alpha_miner
from pm4py.algo.discovery.heuristics import factory as heuristics_miner
from pm4py.algo.discovery.inductive import factory as inductive_miner
from pm4py.evaluation import factory as evaluation_factory
from pm4py.algo.conformance.tokenreplay import factory as token_replay
# Συνάρτηση που καλείται για κάθε αλγόριθμό και για τα δυο logs για να δούμε πόσα traces δεν συνάδουν
# με το process model
def print_fit_traces(log, net, initial_marking, final_marking):
replayed_traces = token_replay.apply(log, net, initial_marking, final_marking)
fit_traces = 0
for trace in replayed_traces:
if not trace["trace_is_fit"]:
fit_traces += 1
print("Number of non fit traces : ", fit_traces)
# 1. Διαβάζει το event log
log = xes_import.apply('edited_hh110_labour.xes')
trace_key_list = []
event_key_list = []
event_count = 0 # Counter για να μετρήσουμε το πλήθος των event
for trace in log:
# Βρίσκουμε τα keys κάθε trace και αν δεν υπάρχουν ήδη στη λίστα με τα key
# δηλαδή την trace_key_list τα προσθέτουμε στη λίστα.
for trace_key in trace.attributes.keys():
if trace_key not in trace_key_list:
trace_key_list.append(trace_key)
for event in trace:
# Κάνουμε το ίδιο και για τα keys των events
for event_key in event.keys():
if event_key not in event_key_list:
event_key_list.append(event_key)
event_count += 1 # Κάθε φορά που μπαίνουμε στην for των events αυξάνουμε τον counter κατά 1
# 2. Εμφανίζει τη δομή του trace και του event
print("Trace keys : " + str(trace_key_list))
print("Event keys : " + str(event_key_list))
# 3. Εμφανίζει το πλήθος των traces
print("Number of traces : " + str(len(log)))
# 4. Εμφανίζει το πλήθος των events
print("Number of events : " + str(event_count))
# 5. Εμφανίζει τα διαφορετικά events από τα οποία αποτελείται το event log
unique_events = utils.get_event_labels(log,'concept:name')
print("Events of log : " + str(unique_events))
# 6. Εμφανίζει τις δραστηριότητες με τις οποίες αρχίζουν και τελειώνουν τα
# traces και τη συχνότητα εμφάνισής τους
# Πρώτα βρίσκουμε τις δραστηριότητες με τις οποίες αρχίζουν
start_activities = get_start_activities(log)
print("Starting activities: " + str(start_activities))
# Και τώρα αντίστοιχα το ίδιο για τις δραστηριότητες με τις οποίες τελειώνουν
# τα traces
end_activities = get_end_activities(log)
print("End activities" + str(end_activities))
# 7. Εμφανίζει σε πίνακα το case id, activity name, transition (start ή
# complete), timestamp
# Φτιάχνουμε ένα άδειο DataFrame
log_df = pd.DataFrame(columns = ["Case ID" , "Activity Name" , "Transition" , "Timestamp"])
for trace_id, trace in enumerate(log):
for event_index, event in enumerate(trace):
#Φτιάχνουμε ένα DataFrame στο οποίο ουσιαστικά φορτώνουμε τα στοιχεία
#που θέλουμε από το τρέχον event, δηλαδή μια γραμμή του πίνακα
#που σκοπεύουμε να δημιουργήσουμε
row = pd.DataFrame({
"Case ID" : [trace.attributes["concept:name"]],
"Activity Name" : [event["concept:name"]],
"Transition" : [event["lifecycle:transition"]],
"Timestamp" : [event["time:timestamp"]]
})
#Κάνουμε append την γραμμή που φτιάξαμε στο DataFrame που ορίσαμε έξω από την
#επανάληψη
log_df = log_df.append(row, ignore_index = True)
print("Printing log table : \n")
print(log_df)
#Αν θέλουμε να εμφανίσουμε όλο το dataframe στην κονσόλα
# βγάζουμε από το σχόλιο την παρακάτω εντολή
#print(log_df.to_string(index=False))
#Για καλύτερη ανάγνωση εξάγουμε το log_df ως csv
log_df.to_csv('log_table.csv', index = False)
# 8. Φιλτράρει το event log και θα κρατήσει μόνο τα traces που τελειώνουν
# με την δραστηριότητα "end"
filtered_log = end_activities_filter.apply(log,["End"])
print("New log : \n " + str(filtered_log))
# Για επαλήθευση τυπώνουμε το size του filtered_log θα πρέπει να είναι
# ίσο με τη συχνότητα εμφάνισης του "End"
print("Size of filtered log : " + str(len(filtered_log)))
# Για καλύτερη ανάγνωση-επαλήθευση αν θέλουμε
# εξάγουμε το filtered_log ως csv
# βγάζοντας τις 2 επόμενες εντολές από τα comments
#filt_log_df = pd.DataFrame(filtered_log)
#filt_log_df.to_csv('filtered_log.csv')
# 9. Μοντέλα διεργασιών
# Alpha Miner
# Για το αρχικό log
net, initial_marking, final_marking = alpha_miner.apply(log)
gviz = vis_factory.apply(net, initial_marking, final_marking)
vis_factory.view(gviz)
evaluation_result = evaluation_factory.apply(log, net, initial_marking,final_marking)
print(evaluation_result)
print_fit_traces(log, net, initial_marking, final_marking)
#evaluation_df = pd.DataFrame(evaluation_result)
#print(evaluation_df)
#evaluation_df.to_csv('alpha_miner_log_evaluation.csv')
# Για το filtered log
net, initial_marking, final_marking = alpha_miner.apply(filtered_log)
gviz = vis_factory.apply(net, initial_marking, final_marking)
vis_factory.view(gviz)
evaluation_result = evaluation_factory.apply(filtered_log, net, initial_marking,final_marking)
print(evaluation_result)
print_fit_traces(log, net, initial_marking, final_marking)
#evaluation_df = pd.DataFrame(evaluation_result)
#print(evaluation_df)
#evaluation_df.to_csv('alpha_miner_filtered_log_evaluation.csv')
# Heuristics Miner
# Για το αρχικό log
net, initial_marking, final_marking = heuristics_miner.apply(log)
gviz = vis_factory.apply(net, initial_marking, final_marking)
vis_factory.view(gviz)
evaluation_result = evaluation_factory.apply(log, net, initial_marking,final_marking)
print(evaluation_result)
print_fit_traces(log, net, initial_marking, final_marking)
#evaluation_df = pd.DataFrame(evaluation_result)
#print(evaluation_df)
#evaluation_df.to_csv('heuristic_miner_log_evaluation.csv')
#alignments = alignment.apply_log(log, net, initial_marking, final_marking)
#pretty_print_alignments(alignments)
# Για το filtered log
net, initial_marking, final_marking = heuristics_miner.apply(filtered_log)
gviz = vis_factory.apply(net, initial_marking, final_marking)
vis_factory.view(gviz)
evaluation_result = evaluation_factory.apply(filtered_log, net, initial_marking,final_marking)
print(evaluation_result)
print_fit_traces(log, net, initial_marking, final_marking)
#evaluation_df = pd.DataFrame(evaluation_result)
#print(evaluation_df)
#evaluation_df.to_csv('heuristic_miner_filtered_log_evaluation.csv')
# Inductive Miner
# Για το αρχικό log
net, initial_marking, final_marking = inductive_miner.apply(log)
gviz = vis_factory.apply(net, initial_marking, final_marking)
vis_factory.view(gviz)
evaluation_result = evaluation_factory.apply(log, net, initial_marking,final_marking)
print(evaluation_result)
print_fit_traces(log, net, initial_marking, final_marking)
#evaluation_df = pd.DataFrame(evaluation_result)
#print(evaluation_df)
#evaluation_df.to_csv('inductive_miner_log_evaluation.csv')
# Για το filtered log
net, initial_marking, final_marking = inductive_miner.apply(filtered_log)
gviz = vis_factory.apply(net, initial_marking, final_marking)
vis_factory.view(gviz)
evaluation_result = evaluation_factory.apply(filtered_log, net, initial_marking,final_marking)
print(evaluation_result)
print_fit_traces(log, net, initial_marking, final_marking)
#evaluation_df = pd.DataFrame(evaluation_result)
#print(evaluation_df)
#evaluation_df.to_csv('inductive_miner_filtered_log_evaluation.csv')
| 42.68617 | 102 | 0.761994 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,661 | 0.506851 |
a083ee0517e616b51836bbd85b01482cd453d3cf | 9,480 | py | Python | utils/job_placement/nonuniformRandom/routers.py | scalability-llnl/damselfly | 394e39b3165388e262a90da415dc3338d0f44734 | [
"BSD-Source-Code",
"BSD-3-Clause-LBNL",
"FSFAP"
] | 5 | 2016-02-25T04:50:00.000Z | 2020-06-11T03:00:45.000Z | utils/job_placement/nonuniformRandom/routers.py | scalability-llnl/damselfly | 394e39b3165388e262a90da415dc3338d0f44734 | [
"BSD-Source-Code",
"BSD-3-Clause-LBNL",
"FSFAP"
] | null | null | null | utils/job_placement/nonuniformRandom/routers.py | scalability-llnl/damselfly | 394e39b3165388e262a90da415dc3338d0f44734 | [
"BSD-Source-Code",
"BSD-3-Clause-LBNL",
"FSFAP"
] | 1 | 2017-02-07T05:43:53.000Z | 2017-02-07T05:43:53.000Z | #!/usr/bin/env python
##############################################################################
# Copyright (c) 2014, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# Written by:
# Nikhil Jain <[email protected]>
# Abhinav Bhatele <[email protected]>
# Peer-Timo Bremer <[email protected]>
#
# LLNL-CODE-678961. All rights reserved.
#
# This file is part of Damselfly. For details, see:
# https://github.com/LLNL/damselfly
# Please also read the LICENSE file for our notice and the LGPL.
##############################################################################
#
# Define k random distributions centered around random positions
# Keep track of empty cells
# For each set
#   Until you have placed everything
#     Randomly pull an empty cell
#     Compute the current PDF value of this cell for this distribution
#     Sum up the probability of all already occupied cells and then scale your
#     current p with 1 / (1-sum)
#     Pull uniform random number [0,1]
#     Accept or reject sample
#
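# As a small worked example of the renormalization step: with 4 cells whose
# PMF is {0.4, 0.3, 0.2, 0.1} and the first cell already occupied, the second
# cell is accepted with probability 0.3 / (1 - 0.4) = 0.5, i.e. the original
# PMF conditioned on the remaining free cells.
#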
from sys import argv,exit
import numpy as np
import struct
from math import *
import random
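# Marker and color cycles (matplotlib-style format strings), one entry per job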
symbol = ["ro","g^","bs","yo","cs"]
colors = ["r","g","b","y","c"]
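# Exact binomial coefficient C(n, k) via factorials; fine at these sizes since
# Python integers have arbitrary precision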
def n_choose_k(n,k):
return factorial(n) / (factorial(k)*factorial(n-k))
# Base class for a probability distribution over the router slots
class Distribution:
def __init__(self,total,center):
self.size = total
self.center = center
        self.fill_sum = 0 # Sum of the PMF over already-filled slots
    # Re-center index i (with wrap-around on the ring) so that self.center
    # maps to size/2, the reference point of the centered PMF
def shift(self,i):
if abs(self.center - i) > self.size/2:
if i < self.center:
i += self.size
else:
i -= self.size
return self.size/2 - (self.center - i)
def pmf(self,i):
i = self.shift(i)
return self.centered_pmf(i)
    # PMF of slot i renormalized over the slots that are still empty:
    # pmf(i) / (1 - sum of pmf over filled slots). Returns 1 when essentially
    # all of the mass is already placed, to avoid dividing by ~0
def adjustedPMF(self,i):
pmf = self.pmf(i)
if abs(1 - self.fill_sum) < 1e-8:
return 1
else:
return pmf / (1-self.fill_sum)
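    # Mark slot i as occupied: its mass now counts toward the renormalization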
def fillSlot(self,i):
self.fill_sum += self.pmf(i)
#print "Filling slot ", i, " " , self.pmf(i), " ", self.fill_sum
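# Binomial placement probability: the binomial PMF evaluated on indices that
# shift() re-centers, so the mode lands on `center` when p is 0.5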
class Binomial(Distribution):
def __init__(self,total,center,p):
Distribution.__init__(self,total,center)
self.p = p
def centered_pmf(self,i):
return n_choose_k(self.size, i)*pow(self.p,i)*pow(1-self.p,self.size-i)
class Geometric(Distribution):
def __init__(self,total,center,p):
Distribution.__init__(self, total, center)
self.p = p
def shift(self, i):
return abs(i-self.center)
def centered_pmf(self,i):
        # Total mass of two geometrics attached at the center
total_mass = 2 - self.p
return (1-self.p)**(i)*self.p / total_mass
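
# Illustrative check (not executed during a placement run): the two-sided
# geometric is symmetric about its center, e.g. with g = Geometric(8, 3, 0.5),
# g.pmf(2) == g.pmf(4), and the mass decays as routers get farther from 3.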
def rank_to_coords(rank,groups,rows,columns,nodes_per_router,cores_per_node):
dims = [0,0,0,0,rank]
dims[4] = rank % cores_per_node;
rank /= cores_per_node;
dims[3] = rank % nodes_per_router;
rank /= nodes_per_router;
dims[2] = rank % columns;
rank /= columns;
dims[1] = rank % rows;
rank /= rows;
dims[0] = rank % groups;
return dims
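
# Illustrative conversion: with groups=2, rows=2, columns=2,
# nodes_per_router=2 and cores_per_node=2, rank 21 decomposes to
# [group, row, column, node, core] = [1, 0, 1, 0, 1].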
if len(argv) < 10:
print "Usage: %s <numGroups> <numRows> <numColumns> <numNodesPerRouter> <numCoresPerNode> [Binomial|Geometric] <p> <output filename> <#cores task 1> .... <#cores task N>"
exit(0)
# Parse the command line
groups = int(argv[1])
rows = int(argv[2])
columns = int(argv[3])
nodes_per_router = int(argv[4])
cores_per_node = int(argv[5])
dist = argv[6]
p = float(argv[7])
fileprefix = argv[8]
# Compute the system size
router_count = groups * rows *columns
node_count = router_count * nodes_per_router
cores_per_router = nodes_per_router * cores_per_node
core_count = router_count * nodes_per_router * cores_per_node
task_sizes = [int(arg) for arg in argv[9:]]
# Create a list of tasks
tasks = range(0,len(task_sizes))
# Shuffle the tasks to give everyone the opportunity to have an "empty" machine
np.random.shuffle(tasks)
# Adjust the order of sizes
task_sizes = [task_sizes[i] for i in tasks]
# Create random array of centers
task_centers = np.random.random_integers(0,router_count-1,len(tasks))
# Create the corresponding distributions
if dist == "Binomial":
task_distributions = [Binomial(router_count,c,p) for c in task_centers]
elif dist == "Geometric":
task_distributions = [Geometric(router_count,c,p) for c in task_centers]
# Slots
cores = np.zeros(core_count)
# List of empty router slots
empty = list(xrange(0, router_count))
# List of empty nodes
empty_nodes = list(xrange(0,node_count))
# Scale down the task_sizes to leave some stragglers
task_sizes_tight = list(task_sizes)
for i,t in enumerate(task_sizes_tight):
# How many routers would this job fill
    nr_routers = t / cores_per_router
    if nr_routers * cores_per_router < t:
        nr_routers += 1
    # Pick no more than about 3% of the routers to be left out
    task_sizes_tight[i] = (97*nr_routers) / 100 * cores_per_router
# For all tasks
for t,size,dist in zip(tasks,task_sizes_tight,task_distributions):
count = 0
while count < size:
# Choose a random node
elem = random.choice(empty)
# Get a uniform random number
test = np.random.uniform()
# Get the current pmf value for the distribution
current = dist.adjustedPMF(elem)
if current < 0:
print "Current ", current, " of ", elem, " tested against ", test
print dist.pmf(elem), dist.fill_sum
exit(0)
# If we pass the test
if test < current:
#print "Picked node", elem, " ", (size-count)/cores_per_node, " left to pick"
#print "Current ", current, dist.pmf(elem)," of ", elem, " tested against ", test
# Now fill up all the cores as long as
# we have tasks
i = 0
while i<cores_per_node*nodes_per_router and count<size:
cores[elem*cores_per_node*nodes_per_router + i] = t+1
i += 1
count += 1
# Remove the router from the empty list
empty.remove(elem)
            # Remove the corresponding nodes (this assumes the sizes for this
            # loop are multiples of cores_per_router)
for i in xrange(0,nodes_per_router):
empty_nodes.remove(elem*nodes_per_router + i)
# Adjust all distributions to include another filled element
for d in task_distributions:
d.fillSlot(elem)
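            # After marking this slot filled, each distribution's adjustedPMF
            # remains a valid conditional pmf over the still-empty routers,
            # because the mass of filled routers is divided out.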
# Now place the remaining cores of the tasks by uniformly picking
# empty nodes
for t,full,tight in zip(tasks,task_sizes,task_sizes_tight):
size = full - tight
count = 0
while count < size:
# Choose a random node
elem = random.choice(empty_nodes)
i = 0
while i<cores_per_node and count<size:
cores[elem*cores_per_node + i] = t+1
i += 1
count += 1
# Remove the router from the empty list
empty_nodes.remove(elem)
if False:
pmfs = []
scale = 0
for d in task_distributions:
pmfs.append([d.pmf(i) for i in xrange(0,router_count)])
scale = max(scale,max(pmfs[-1]))
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
for pmf,t in zip(pmfs,tasks):
#print "Colors ", colors[t]
        ax.plot(xrange(0,cores_per_router*router_count,cores_per_router),pmf,colors[t])
#print ""
for t in tasks:
#print "Colors ", symbol[t]
        x = np.where(cores == t+1)[0]
        ax.plot(x,[(t+1)*scale/len(tasks)]*len(x),symbol[t])
#print x
plt.show()
# set up text and binary files
csvfileall = open(fileprefix + ".csv", "w")
binfileall = open(fileprefix + ".bin", "wb")
csvfileall.write("g,r,c,n,core,jobid\n")
for taskid in xrange(0,len(tasks)):
x = np.where(cores == taskid+1)
# Now find the size of the t's job
loc = 0
while tasks[loc] != taskid:
loc += 1
if x[0].shape[0] != task_sizes[loc]:
print "Task assignment inconsistent for task ", taskid, ": found ", x[0].shape[0], " assigned cores but needed ", task_sizes[loc]
exit(0)
csvfile = open("%s-%d.csv" % (fileprefix, taskid), "w")
binfile = open("%s-%d.bin" % (fileprefix, taskid), "wb")
csvfile.write("g,r,c,n,core,jobid\n")
# print x
for rank in x[0]:
dims = rank_to_coords(rank, groups, rows, columns, nodes_per_router, cores_per_node)
csvfile.write("%d,%d,%d,%d,%d,0\n" % (dims[0],dims[1],dims[2],dims[3],dims[4]))
csvfileall.write("%d,%d,%d,%d,%d,%d\n" % (dims[0],dims[1],dims[2],dims[3],dims[4],taskid))
binfile.write(struct.pack('6i', dims[0], dims[1], dims[2], dims[3], dims[4], 0))
binfileall.write(struct.pack('6i', dims[0], dims[1], dims[2], dims[3], dims[4], taskid))
csvfile.close()
binfile.close()
csvfileall.close()
binfileall.close()
| 28.902439 | 175 | 0.59673 | 1,607 | 0.169515 | 0 | 0 | 0 | 0 | 0 | 0 | 3,092 | 0.32616 |
a0853c6f068e5b0ba0007116f943ea7455d91729 | 46,894 | py | Python | src/scml_vis/presenter.py | yasserfarouk/scml-vis | a8daff36bb29867a67c9a36bcdca9ceef9350e53 | [
"Apache-2.0"
] | null | null | null | src/scml_vis/presenter.py | yasserfarouk/scml-vis | a8daff36bb29867a67c9a36bcdca9ceef9350e53 | [
"Apache-2.0"
] | 2 | 2021-05-07T22:45:42.000Z | 2021-09-22T04:35:15.000Z | src/scml_vis/presenter.py | yasserfarouk/scml-vis | a8daff36bb29867a67c9a36bcdca9ceef9350e53 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import shutil
import itertools
import random
import sys
import traceback
from pathlib import Path
import altair as alt
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import streamlit as st
from pandas.api.types import is_numeric_dtype
from plotly.validators.scatter.marker import SymbolValidator
from streamlit import cli as stcli
import scml_vis.compiler as compiler
from scml_vis.compiler import VISDATA_FOLDER
from scml_vis.utils import (
add_selector,
add_stats_display,
add_stats_selector,
load_data,
plot_network,
score_distribution,
score_factors,
)
__all__ = ["main"]
MARKERS = SymbolValidator().values[2::3]
MARKERS = [_ for _ in MARKERS if not any(_.startswith(x) for x in ("star", "circle", "square"))]
random.shuffle(MARKERS)
MARKERS = ["circle", "square"] + MARKERS
DB_FOLDER = Path.home() / "negmas" / "runsdb"
DB_NAME = "rundb.csv"
BASE_FOLDERS = [
Path.home() / "negmas" / "logs" / "scml" / "scml2020",
Path.home() / "negmas" / "logs" / "scml" / "scml2020oneshot",
Path.home() / "negmas" / "logs" / "scml" / "scml2021oneshot",
Path.home() / "negmas" / "logs" / "scml" / "scml2021",
Path.home() / "negmas" / "logs" / "tournaments",
Path.home() / "negmas" / "tournaments",
]
def main(folder: Path):
st.set_page_config(layout="wide")
if folder is None:
add_base = st.sidebar.checkbox("Add Default paths", True)
add_cli = st.sidebar.checkbox("Add CLI runs", (DB_FOLDER / DB_NAME).exists())
options = dict(none="none")
if add_cli:
if (DB_FOLDER / DB_NAME).exists():
data = pd.read_csv(DB_FOLDER / DB_NAME, index_col=None, header=None)
data: pd.DataFrame
data = data.iloc[::-1]
data.columns = ["name", "type", "path"]
for _, x in data.iterrows():
options[x["path"]] = f"{x['type'][0]}:{x['name']}"
if add_base:
for base in BASE_FOLDERS:
type_ = base.name == "tournaments"
for child in base.glob("*"):
if not child.is_dir() or not compiler.has_logs(child):
continue
options[child] = f"{'t' if type_ else 'w'}:{child.name}"
folder = st.sidebar.selectbox("Select a run", list(options.keys()), format_func=lambda x: options[x])
if not folder or (isinstance(folder, str) and folder == "none"):
st.text(
"Cannot find any folders with logs.\nTry looking in default paths by checking 'Add Default paths' \nin the side bar or start the app with a folder containing log data using -f"
)
return
folder = Path(folder)
if folder.name != VISDATA_FOLDER:
folder = folder / VISDATA_FOLDER
if folder.exists():
re_compile = st.sidebar.button("Recompile visualization data?")
if re_compile:
            st.error("Do you really, really want to remove all visualization data and recompile?")
if st.button("Yes I'm OK with that"):
shutil.rmtree(folder)
if not folder.exists():
try:
do_compile = st.sidebar.button("Compile visualization data?")
if do_compile:
try:
compiler.main(folder.parent, max_worlds=None)
except Exception as e:
st.write(f"*Failed to compile visualization data for {folder}*\n### Exception:\n{str(e)}")
st.write(f"\n### Traceback:\n```\n{traceback.format_exc()}```")
else:
st.text("Either press 'Compile visualization data' to view logs of this folder or choose another one.")
return
        except Exception:
st.write(f"Folder {folder} contains no logs to use")
# folder = folder / VISDATA_FOLDER
# if not folder.exists():
# st.write(
# f"## SCML Visualizer\nError: No {VISDATA_FOLDER} folder found with visualization data at {str(folder)}"
# )
# return
if folder.name != VISDATA_FOLDER:
folder = folder / VISDATA_FOLDER
if not folder.exists():
st.write("Cannot find visualiation data")
return
st.write(f"## SCML Visualizer\n{str(folder.parent)}")
st.sidebar.markdown("## Data Selection")
tournaments = load_data(folder, "tournaments")
tournament_expander = st.sidebar.beta_expander("Tournament Selection")
with tournament_expander:
selected_tournaments, _, _ = add_selector(
st,
"",
tournaments["name"].unique(),
key="tournaments",
none=False,
default="one",
)
worlds = None
configs = load_data(folder, "configs")
if configs is None:
worlds = load_data(folder, "worlds")
config_names = worlds.loc[:, "name"].str.split("_").str[0].unique()
configs = pd.DataFrame(data=config_names, columns=["id"])
config_expander = st.sidebar.beta_expander("Config Selection")
with config_expander:
selected_configs, _, _ = add_selector(
st,
"",
configs["id"].unique(),
key="configs",
none=False,
default="all",
)
if worlds is None:
worlds = load_data(folder, "worlds")
if "config" not in worlds.columns:
worlds["config"] = worlds.loc[:, "name"].str.split("_").str[0]
worlds = worlds.loc[worlds.tournament.isin(selected_tournaments) & worlds.config.isin(selected_configs), :]
world_expander = st.sidebar.beta_expander("World Selection")
with world_expander:
selected_worlds, _, _ = add_selector(st, "", worlds.name, key="worlds", none=False, default="all")
worlds = worlds.loc[(worlds.name.isin(selected_worlds)), :]
agents = load_data(folder, "agents")
type_expander = st.sidebar.beta_expander("Type Selection")
with type_expander:
selected_types, _, _ = add_selector(st, "", agents.type.unique(), key="types", none=False, default="all")
agents = agents.loc[(agents.type.isin(selected_types)), :]
agent_expander = st.sidebar.beta_expander("Agent Selection")
with agent_expander:
selected_agents, _, _ = add_selector(st, "", agents.name.unique(), key="agents", none=False, default="all")
products = load_data(folder, "product_stats")
product_expander = st.sidebar.beta_expander("Product Selection")
with product_expander:
selected_products, _, _ = add_selector(
st, "", products["product"].unique(), key="products", none=False, default="all"
)
agents = agents.loc[(agents.type.isin(selected_types)), :]
nsteps = worlds.loc[worlds.name.isin(selected_worlds), "n_steps"].max()
nsteps = int(nsteps)
selected_steps = st.sidebar.slider("Steps", 0, nsteps, (0, nsteps))
selected_times = st.sidebar.slider("Relative Times", 0.0, 1.0, (0.0, 1.0))
st.sidebar.markdown("## Figure Selection")
# ts_figs = st.sidebar.beta_expander("Time Series")
# net_figs = st.sidebar.beta_expander("Networks")
# tbl_figs = st.sidebar.beta_expander("Tables")
# other_figs = st.sidebar.beta_expander("Others")
# if len(selected_worlds) == 1:
# fig_type = st.sidebar.selectbox(label="", options=["Time-series", "Networks", "Tables", "Others"], index=1)
# else:
# fig_type = st.sidebar.selectbox(label="", options=["Time-series", "Tables", "Others"], index=1)
#
# if fig_type == "Time-series":
# runner = display_time_series
# elif fig_type == "Networks":
# runner = display_networks
# elif fig_type == "Tables":
# runner = display_tables
# elif fig_type == "Others":
# runner = display_others
# else:
# st.text("Please choose what type of figures are you interested in")
# return
products_summary = (
products.loc[:, [_ for _ in products.columns if _ not in ("step", "relative_time")]]
.groupby(["tournament", "world", "product"])
.agg(["min", "max", "mean", "std"])
)
products_summary.columns = [f"{a}_{b}" for a, b in products_summary.columns]
products_summary = products_summary.reset_index()
data = dict(t=tournaments, w=worlds, a=agents, p=products_summary)
def filter(x, agent_field_sets):
if x is None:
return x
x = x.loc[(x.world.isin(selected_worlds)), :]
indx = None
for fields in agent_field_sets:
if not fields:
continue
indx = x[fields[0]].isin(selected_agents)
for f in fields[1:]:
indx = (indx) | (x[f].isin(selected_agents))
if indx is None:
return x
return x.loc[indx, :]
data["con"] = load_data(folder, "configs")
data["a"] = load_data(folder, "agents")
data["t"] = load_data(folder, "types")
data["c"] = filter(load_data(folder, "contracts"), [["buyer", "seller"]])
data["n"] = filter(load_data(folder, "negotiations"), [["buyer", "seller"]])
data["o"] = filter(load_data(folder, "offers"), [["sender", "receiver"]])
for runner, section_name in [
(display_networks, "Networks"),
(display_others, "Overview"),
(display_tables, "Tables"),
(display_time_series, "Time Series"),
]:
if section_name != "Time Series":
expander = st.sidebar.beta_expander(section_name, section_name == "Networks")
do_expand = expander.checkbox(f"Show {section_name}", section_name == "Networks")
else:
expander = st.sidebar
do_expand = st.sidebar.checkbox(section_name, True)
if do_expand:
runner(
folder,
selected_worlds,
selected_products,
selected_agents,
selected_types,
selected_steps,
selected_times,
data,
parent=expander,
)
# st.sidebar.markdown("""---""")
def filter_by_time(x, cols, selected_steps, selected_times):
indx = None
for k in cols:
step_col, time_col = f"{k}step", f"{k}relative_time"
i = (x[step_col] >= selected_steps[0]) & (x[step_col] <= selected_steps[1])
i &= (x[time_col] >= selected_times[0]) & (x[time_col] <= selected_times[1])
if indx is None:
indx = i
else:
indx |= i
if indx is not None:
return x.loc[indx, :]
return x
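
# Example: filter_by_time(df, ["signed_"], (3, 7), (0.0, 0.5)) keeps rows whose
# signed_step lies in [3, 7] and whose signed_relative_time lies in [0.0, 0.5];
# passing several prefixes in `cols` keeps rows that match any one of them.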
def show_a_world(
world,
selected_steps,
selected_times,
data,
parent,
weight_field,
edge_weights,
edge_colors,
node_weight,
condition_field,
x,
src,
gallery,
):
nodes = data["a"].loc[data["a"].world == world, :]
nodes["score*cost"] = nodes["final_score"] * nodes["cost"]
fields = [_ for _ in nodes.columns]
nodes = nodes.to_dict("records")
added = -data["a"].input_product.min()
nlevels = data["a"].input_product.max() + 1 + added
level_max = [0] * (nlevels)
dx, dy = 10, 10
for node in nodes:
        lvl = node["input_product"] + added
        node["pos"] = ((lvl + 1) * dx, level_max[lvl] * dy)
        level_max[lvl] += 1
nodes = {n["name"]: n for n in nodes}
seller_dict = dict(zip(fields, itertools.repeat(float("nan"))))
buyer_dict = dict(zip(fields, itertools.repeat(float("nan"))))
nodes["SELLER"] = {**seller_dict, **dict(pos=(0, dy * (level_max[0] // 2)), name="Seller", type="System")}
nodes["BUYER"] = {
**buyer_dict,
**dict(pos=((nlevels + 1) * dx, dy * (level_max[-1] // 2)), name="Buyer", type="System"),
}
edges, weights = [], []
weight_field_name = "quantity" if weight_field == "count" else weight_field
time_cols = (
[condition_field + "_step", condition_field + "_relative_time"]
if condition_field != "step"
else ["step", "relative_time"]
)
x = x.loc[x.world == world, [weight_field_name, "seller", "buyer"] + time_cols]
x = filter_by_time(x, [condition_field + "_" if condition_field != "step" else ""], selected_steps, selected_times)
x.drop(time_cols, axis=1, inplace=True)
if weight_field == "unit_price":
x = x.groupby(["seller", "buyer"]).mean().reset_index()
x["unit_price"].fillna(0.0, inplace=True)
elif weight_field == "count":
x = x.groupby(["seller", "buyer"]).count().reset_index()
x.rename(columns=dict(quantity="count"), inplace=True)
else:
x = x.groupby(["seller", "buyer"]).sum().reset_index()
for _, d in x.iterrows():
edges.append((d["seller"], d["buyer"], d[weight_field]))
parent.plotly_chart(
plot_network(
fields, nodes, edges=edges, node_weights=node_weight, edge_colors=edge_colors, edge_weights=edge_weights
)
)
if gallery:
return
col1, col2 = parent.beta_columns(2)
mydata = data[src]
myselected = mydata.loc[(mydata.world == world), :]
myselected = filter_by_time(
myselected, [condition_field + "_" if condition_field != "step" else ""], selected_steps, selected_times
)
seller = col1.selectbox("Seller", [""] + sorted(x["seller"].unique()), key=f"seller-{world}")
    buyer = col2.selectbox("Buyer", [""] + sorted(x["buyer"].unique()), key=f"buyer-{world}")
if seller:
myselected = myselected.loc[(myselected.seller == seller), :]
if buyer:
myselected = myselected.loc[(myselected.buyer == buyer), :]
myselected = myselected.reset_index()
options = myselected
if src == "n":
col1, col2 = parent.beta_columns(2)
broken = col1.checkbox("Broken", False, key=f"broken-{world}")
timedout = col2.checkbox("Timedout", False, key=f"timedout-{world}")
if not broken:
options = options.loc[~options.broken, :]
if not timedout:
options = options.loc[~options.timedout, :]
# options = options.loc[(options["seller"]==seller) & (options["buyer"]==buyer) & (options.world == world) & (options[f"{condition_field}_step"]<= selected_steps[1]) & (options[f"{condition_field}_step"]>= selected_steps[0]) , :]
if src == "c":
displayed_cols = (
[
"id",
"delivery_step",
"quantity",
"unit_price",
"total_price",
"n_neg_steps",
"concluded_step",
"signed_step",
"executed_step",
"negotiation",
]
+ (["buyer"] if not buyer else [])
+ (["seller"] if not seller else [])
)
elif src == "n":
displayed_cols = ["id", "delivery_step", "quantity", "unit_price", "timedout", "broken", "step", "rounds"]
else:
return
parent.dataframe(
myselected.loc[:, displayed_cols].sort_values(
["signed_step", "delivery_step"] if src == "c" else ["step", "delivery_step"]
)
)
contract = None
options = filter_by_time(
options, [condition_field + "_" if condition_field != "step" else ""], selected_steps, selected_times
)
if parent.checkbox("Ignore Exogenous", key=f"ignore-exogenous-{world}", value=True):
options = options.loc[(options["buyer"] != "BUYER") & (options["seller"] != "SELLER"), :]
if src == "n":
options = options.loc[:, "id"].values
if len(options) < 1:
return
neg = parent.selectbox(label="Negotiation", options=options, key=f"negotiationselect-{world}")
elif src == "c":
options = options.loc[:, "id"].values
if len(options) < 1:
return
elif len(options) == 1:
contract = options[0]
else:
contract = parent.selectbox(label="Contract", options=options, key=f"contractselect-{world}")
neg = myselected.loc[myselected["id"] == contract, "negotiation"]
if len(neg) > 0:
neg = neg.values[0]
else:
neg = None
else:
return
if contract is not None:
parent.write(data["c"].loc[data["c"]["id"] == contract, :])
if not neg or data["n"] is None or len(data["n"]) == 0:
return
neg_info = data["n"].loc[data["n"]["id"] == neg]
offers = data["o"]
offers = offers.loc[offers.negotiation == neg, :].sort_values(["round", "sender"])
# if len(offers) >= 2:
# offers = offers.loc[offers["sender"].shift(1) != offers["sender"],:]
offers.index = range(len(offers))
parent.write(neg_info)
if len(neg_info) < 1:
return
neg_info = neg_info.to_dict("records")[0]
if not neg_info["broken"] and not neg_info["timedout"]:
agreement = dict(
quantity=neg_info["quantity"],
delivery_step=neg_info["delivery_step"],
unit_price=neg_info["unit_price"],
total_price=neg_info["unit_price"] * neg_info["quantity"],
)
else:
agreement = None
parent.markdown(f"**Agreement**: {agreement}")
trange = (neg_info["min_delivery_step"], neg_info["max_delivery_step"])
c1, c2 = parent.beta_columns(2)
if trange[1] > trange[0]:
is_3d = c2.checkbox("3D Graph", key=f"threed-{world}")
else:
is_3d = False
use_ranges = c1.checkbox("Use issue ranges to set axes", True, key=f"useissueranges-{world}")
qrange = (neg_info["min_quantity"] - 1, neg_info["max_quantity"] + 1)
urange = (neg_info["min_unit_price"] - 1, neg_info["max_unit_price"] + 1)
if is_3d:
fig = go.Figure()
for i, sender in enumerate(offers["sender"].unique()):
myoffers = offers.loc[offers["sender"] == sender, :]
fig.add_trace(
go.Scatter3d(
x=myoffers["quantity"],
y=myoffers["unit_price"],
z=myoffers["delivery_step"],
name=sender,
mode="lines+markers",
marker=dict(size=10),
marker_symbol=MARKERS[i],
)
)
if agreement:
fig.add_trace(
go.Scatter3d(
x=[agreement["quantity"]],
y=[agreement["unit_price"]],
z=[agreement["delivery_step"]],
mode="markers",
marker=dict(size=20),
name="Agreement",
marker_symbol="diamond",
)
)
fig.update_layout(xaxis_title="quantity", yaxis_title="unit_price")
else:
fig = go.Figure()
for i, sender in enumerate(offers["sender"].unique()):
myoffers = offers.loc[offers["sender"] == sender, :]
fig.add_trace(
go.Scatter(
x=myoffers["quantity"],
y=myoffers["unit_price"],
name=sender,
mode="lines+markers",
marker=dict(size=10),
marker_symbol=MARKERS[i],
)
)
if agreement:
fig.add_trace(
go.Scatter(
x=[agreement["quantity"]],
y=[agreement["unit_price"]],
mode="markers",
marker=dict(size=20),
name="Agreement",
marker_symbol="star",
)
)
fig.update_layout(xaxis_title="quantity", yaxis_title="unit_price")
if use_ranges:
fig.update_layout(xaxis_range=qrange, yaxis_range=urange)
col1, col2 = parent.beta_columns(2)
def fig_1d(y):
fig = go.Figure()
for i, sender in enumerate(offers["sender"].unique()):
myoffers = offers.loc[offers["sender"] == sender, :]
fig.add_trace(
go.Scatter(
x=myoffers["round"],
y=myoffers[y],
name=sender,
mode="lines+markers",
marker=dict(size=15),
marker_symbol=MARKERS[i],
)
)
if agreement:
fig.add_trace(
go.Scatter(
x=[offers["round"].max()],
y=[agreement[y]],
mode="markers",
marker=dict(size=20),
name="Agreement",
marker_symbol="star",
)
)
fig.update_layout(xaxis_title="Round", yaxis_title=y)
fig.update_layout(yaxis_range=urange if y == "unit_price" else qrange if y == "quantity" else trange)
return fig
col1.plotly_chart(fig_1d("quantity"))
col1.plotly_chart(fig)
col2.plotly_chart(fig_1d("unit_price"))
if trange[1] > trange[0]:
col2.plotly_chart(fig_1d("delivery_step"))
parent.dataframe(offers)
WORLD_INDEX = 0
def display_networks(
folder,
selected_worlds,
selected_products,
selected_agents,
selected_types,
selected_steps,
selected_times,
data,
parent=st.sidebar,
):
global WORLD_INDEX
max_worlds = parent.number_input("Max. Worlds", 1, None, 4)
if len(selected_worlds) < 1:
st.write("No worlds selected. Cannot show any networks")
return
if len(selected_worlds) > max_worlds:
st.write(f"More than {max_worlds} world selected ({len(selected_worlds)}). Will show the first {max_worlds}")
cols = st.beta_columns([1, 5, 1, 3])
# prev = cols[0].button("<")
# next = cols[2].button(">")
# if prev:
# WORLD_INDEX = (WORLD_INDEX - max_worlds) % len(selected_worlds)
# if next:
# WORLD_INDEX = (WORLD_INDEX + max_worlds) % len(selected_worlds)
WORLD_INDEX = cols[1].slider("", 0, len(selected_worlds) - 1, WORLD_INDEX)
randomize = cols[3].button("Randomize worlds")
if randomize:
random.shuffle(selected_worlds)
selected_worlds = selected_worlds[WORLD_INDEX : WORLD_INDEX + max_worlds]
what = parent.selectbox("Category", ["Contracts", "Negotiations"])
if what == "Contracts":
src = "c"
elif what == "Negotiations":
src = "n"
else:
src = "o"
x = data[src]
if x is None:
st.markdown(f"**{what}** data is **not** available in the logs.")
return
gallery = parent.checkbox("Gallery Mode", len(selected_worlds) > 1)
node_weight_options = sorted(
[_ for _ in data["a"].columns if is_numeric_dtype(data["a"][_]) and _ not in ("id", "is_default")]
)
    if "final_score" in node_weight_options:
        default_node_weight = node_weight_options.index("final_score")
    else:
        default_node_weight = 0
with st.beta_expander("Networks Settings"):
cols = st.beta_columns(5 + int(gallery))
weight_field = cols[2].selectbox("Edge Weight", ["total_price", "unit_price", "quantity", "count"])
node_weight = cols[3].selectbox("Node Weight", ["none"] + node_weight_options, default_node_weight + 1)
per_step = cols[0].checkbox("Show one step only")
edge_weights = cols[0].checkbox("Variable Edge Width", True)
edge_colors = cols[0].checkbox("Variable Edge Colors", True)
if per_step:
selected_step = cols[1].number_input("Step", selected_steps[0], selected_steps[1], selected_steps[0])
selected_steps = [selected_step] * 2
x["total_price"] = x.quantity * x.unit_price
options = [_[: -len("_step")] for _ in x.columns if _.endswith("_step")]
if src != "c":
options.append("step")
condition_field = cols[4].selectbox("Condition", options, 0 if src != "n" else options.index("step"))
if gallery:
n_cols = cols[5].number_input("Columns", 1, 5, 2)
cols = st.beta_columns(n_cols)
else:
n_cols, cols = 1, [st]
for i, world in enumerate(selected_worlds):
show_a_world(
world,
selected_steps=selected_steps,
selected_times=selected_times,
data=data,
parent=cols[i % n_cols],
weight_field=weight_field,
edge_weights=edge_weights,
edge_colors=edge_colors,
node_weight=node_weight,
condition_field=condition_field,
x=x,
src=src,
gallery=gallery,
)
def display_tables(
folder,
selected_worlds,
selected_products,
selected_agents,
selected_types,
selected_steps,
selected_times,
data,
parent=st.sidebar,
):
remove_single = parent.checkbox("Remove fields with a single value", True)
def order_columns(x):
cols = sorted(x.columns)
for c in [
"buyer_type",
"seller_type",
"delivery_step",
"quantity",
"unit_price",
"total_price",
"buyer",
"seller",
"name",
"id",
]:
if c in cols:
cols = [c] + [_ for _ in cols if _ != c]
for c in ["world", "config", "group", "tournament"]:
if c in cols:
cols = [_ for _ in cols if _ != c] + [c]
return x.loc[:, cols]
def remove_singletons(x):
selected = []
for c in x.columns:
if len(x[c].unique()) < 2:
continue
selected.append(c)
return x.loc[:, selected]
def show_table(x, must_choose=False):
x = order_columns(x)
if remove_single:
x = remove_singletons(x)
selected_cols = st.multiselect(label="Columns", options=x.columns)
if selected_cols or must_choose:
st.dataframe(x.loc[:, selected_cols])
else:
st.dataframe(x)
def create_chart(df, type):
if type == "Scatter":
return alt.Chart(df).mark_point()
if type == "Bar":
return alt.Chart(df).mark_bar()
if type == "Box":
return alt.Chart(df).mark_boxplot()
if type == "Line":
return alt.Chart(df).mark_line()
raise ValueError(f"Unknown marker type {type}")
for lbl, k, has_step in (
("Tournaments", "t", False),
("Configs", "con", False),
("Worlds", "w", False),
("Products", "p", False),
("Agents", "a", False),
("Contracts", "c", True),
("Negotiations", "n", True),
("Offers", "o", True),
):
if data[k] is None or not len(data[k]):
continue
if not parent.checkbox(label=lbl, key=f"tbl-{lbl}-c1"):
continue
if has_step:
df = filter_by_time(
data[k], ["signed_", "concluded_"] if k == "c" else [""], selected_steps, selected_times
)
else:
df = data[k]
if lbl == "Agents":
if st.checkbox("Ignore Default Agents", True, key=f"tbl-{lbl}-ignore-default"):
df = df.loc[~df["is_default"], :]
elif lbl == "Contracts":
if st.checkbox("Ignore Exogenous Contracts", True, key=f"tbl-{lbl}-ignore-exogenous"):
                df = df.loc[df["n_neg_steps"] >= 1, :]
show_table(df)
st.text(f"{len(df)} records found")
cols = st.beta_columns(6)
type_ = cols[0].selectbox("Chart", ["Scatter", "Line", "Bar", "Box"], 0, key=f"select-{lbl}-chart")
x = cols[1].selectbox("x", ["none"] + list(df.columns), key=f"select-{lbl}-x")
y = m = c = s = "none"
        if x != "none":
            y = cols[2].selectbox("y", ["none"] + list(df.columns), key=f"select-{lbl}-y")
            if y != "none":
                m = cols[3].selectbox("Mark", ["none"] + list(df.columns), key=f"select-{lbl}-mark")
                c = cols[4].selectbox("Color", ["none"] + list(df.columns), key=f"select-{lbl}-color")
                s = cols[5].selectbox("Size", ["none"] + list(df.columns), key=f"select-{lbl}-size")
                kwargs = dict(x=x, y=y)
                if m != "none":
                    kwargs["shape"] = m
                if s != "none":
                    kwargs["size"] = s
                if c != "none":
                    kwargs["color"] = c
            else:
                # With no y selected, fall back to a binned histogram of x
                kwargs = dict(x=alt.X(x, bin=True), y="count()")
            chart = create_chart(df, type_ if y != "none" else "Bar").encode(**kwargs)
            st.altair_chart(chart, use_container_width=True)
def display_time_series(
folder,
selected_worlds,
selected_products,
selected_agents,
selected_types,
selected_steps,
selected_times,
data,
parent=st.sidebar,
):
settings = st.beta_expander("Time Series Settings")
ncols = settings.number_input("N. Columns", 1, 6, 2)
xvar = settings.selectbox("x-variable", ["step", "relative_time"], 1 - int(len(selected_worlds) == 1))
dynamic = settings.checkbox("Dynamic Figures", value=True)
sectioned = settings.checkbox("Figure Sections", True)
ci_level = settings.selectbox(options=[80, 90, 95], label="CI Level", index=2)
world_stats, selected_world_stats, combine_world_stats, overlay_world_stats = add_stats_selector(
folder,
"world_stats",
[[("world", selected_worlds), ("step", selected_steps), ("relative_time", selected_times)]],
xvar=xvar,
label="World Statistics",
choices=lambda x: [
_ for _ in x.columns if _ not in ("name", "world", "name", "tournament", "type", "step", "relative_time")
],
default_selector="one",
)
product_stats, selected_product_stats, combine_product_stats, overlay_product_stats = add_stats_selector(
folder,
"product_stats",
[[("product", selected_products), ("step", selected_steps), ("relative_time", selected_times)]],
xvar=xvar,
label="Product Statistics",
choices=lambda x: [
_
for _ in x.columns
if _ not in ("name", "world", "name", "tournament", "type", "step", "product", "relative_time")
],
default_selector="some",
default_choice=["trading_price"],
combine=False,
overlay=False,
)
default_agent_stats = [
"score",
"productivity",
"inventory_input",
"inventory_output",
"balance",
"assets",
"spot_market_loss",
"spot_market_quantity",
]
type_stats, selected_type_stats, combine_type_stats, overlay_type_stats = add_stats_selector(
folder,
"agent_stats",
[
[
("world", selected_worlds),
("type", selected_types),
("step", selected_steps),
("relative_time", selected_times),
]
],
xvar=xvar,
label="Type Statistics",
choices=lambda x: [
_ for _ in x.columns if _ not in ("name", "world", "name", "tournament", "type", "step", "relative_time")
],
key="type",
default_selector="some" if len(selected_worlds) != 1 else "none",
default_choice=default_agent_stats if len(selected_worlds) != 1 else None,
combine=False,
overlay=False,
)
agent_stats, selected_agent_stats, combine_agent_stats, overlay_agent_stats = add_stats_selector(
folder,
"agent_stats",
[
[
("world", selected_worlds),
("name", selected_agents),
("step", selected_steps),
("relative_time", selected_times),
]
],
xvar=xvar,
label="Agent Statistics",
choices=lambda x: [
_ for _ in x.columns if _ not in ("name", "world", "name", "tournament", "type", "step", "relative_time")
],
default_selector="some" if len(selected_worlds) == 1 else "none",
default_choice=default_agent_stats if len(selected_worlds) == 1 else None,
combine=False,
overlay=False,
)
(
contract_stats_world,
selected_contract_stats_world,
combine_contract_stats_world,
overlay_contract_stats_world,
) = add_stats_selector(
folder,
"contract_stats",
[
[
("world", selected_worlds),
("buyer", selected_agents),
("step", selected_steps),
("relative_time", selected_times),
],
[
("world", selected_worlds),
("seller", selected_agents),
("step", selected_steps),
("relative_time", selected_times),
],
],
xvar=xvar,
label="Contract Statistics (World)",
default_selector="none",
choices=lambda x: [
_ for _ in x.columns if _.endswith("quantity") or _.endswith("count") or _.endswith("price")
],
key="world",
)
(
contract_stats_type,
selected_contract_stats_type,
combine_contract_stats_type,
overlay_contract_stats_type,
) = add_stats_selector(
folder,
"contract_stats",
[
[
("world", selected_worlds),
("buyer", selected_agents),
("step", selected_steps),
("relative_time", selected_times),
],
[
("world", selected_worlds),
("seller", selected_agents),
("step", selected_steps),
("relative_time", selected_times),
],
],
xvar=xvar,
label="Contract Statistics (Types)",
default_selector="none",
choices=lambda x: [
_ for _ in x.columns if _.endswith("quantity") or _.endswith("count") or _.endswith("price")
],
key="type",
)
(
contract_stats_agent,
selected_contract_stats_agent,
combine_contract_stats_agent,
overlay_contract_stats_agent,
) = add_stats_selector(
folder,
"contract_stats",
[
[
("world", selected_worlds),
("buyer", selected_agents),
("step", selected_steps),
("relative_time", selected_times),
],
[
("world", selected_worlds),
("seller", selected_agents),
("step", selected_steps),
("relative_time", selected_times),
],
],
xvar=xvar,
label="Contract Statistics (Agents)",
default_selector="none",
choices=lambda x: [
_ for _ in x.columns if _.endswith("quantity") or _.endswith("count") or _.endswith("price")
],
key="name",
)
def aggregate_contract_stats(stats, ignored_cols):
cols = [_ for _ in stats.columns if not any(_.endswith(x) for x in ["price", "quantity", "count"])]
ignored_cols = [_ for _ in cols if _.startswith(ignored_cols)]
        cols = [_ for _ in cols if _ not in ignored_cols]
        allcols = [_ for _ in stats.columns if _ not in ignored_cols]
# st.text(stats.columns)
# st.text(allcols)
# st.text(cols)
# st.text(len(stats))
stats = stats.loc[:, allcols].groupby(cols).sum()
# st.text(len(stats))
for c in stats.columns:
if c.endswith("unit_price"):
base = "_".join(c.split("_")[:-2])
stats[c] = stats[f"{base}_total_price"] / stats[f"{base}_quantity"]
stats[c] = stats[c].fillna(0)
# st.text(len(stats))
return stats.reset_index()
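
    # Note: after summing quantities and total prices per group, unit prices
    # are recomputed as total_price / quantity, i.e. as quantity-weighted
    # means rather than plain means of per-contract unit prices.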
(
contract_stats_buyer_type,
selected_contract_stats_buyer_type,
combine_contract_stats_buyer_type,
overlay_contract_stats_buyer_type,
) = add_stats_selector(
folder,
"contract_stats",
[
[
("world", selected_worlds),
("buyer", selected_agents),
("step", selected_steps),
("relative_time", selected_times),
],
# [("world", selected_worlds), ("seller", selected_agents)],
],
xvar=xvar,
label="Contract Statistics (Buyer Types)",
default_selector="none",
choices=lambda x: [
_ for _ in x.columns if _.endswith("quantity") or _.endswith("count") or _.endswith("price")
],
key="buyer_type",
)
(
contract_stats_seller_type,
selected_contract_stats_seller_type,
combine_contract_stats_seller_type,
overlay_contract_stats_seller_type,
) = add_stats_selector(
folder,
"contract_stats",
[
# [("world", selected_worlds), ("buyer", selected_agents)],
[
("world", selected_worlds),
("seller", selected_agents),
("step", selected_steps),
("relative_time", selected_times),
],
],
xvar=xvar,
label="Contract Statistics (Seller Types)",
default_selector="none",
choices=lambda x: [
_ for _ in x.columns if _.endswith("quantity") or _.endswith("count") or _.endswith("price")
],
key="seller_type",
)
(
contract_stats_buyer,
selected_contract_stats_buyer,
combine_contract_stats_buyer,
overlay_contract_stats_buyer,
) = add_stats_selector(
folder,
"contract_stats",
[
[
("world", selected_worlds),
("buyer", selected_agents),
("step", selected_steps),
("relative_time", selected_times),
],
# [("world", selected_worlds), ("seller", selected_agents)],
],
xvar=xvar,
label="Contract Statistics (Buyer)",
default_selector="none",
choices=lambda x: [
_ for _ in x.columns if _.endswith("quantity") or _.endswith("count") or _.endswith("price")
],
key="buyer",
)
(
contract_stats_seller,
selected_contract_stats_seller,
combine_contract_stats_seller,
overlay_contract_stats_seller,
) = add_stats_selector(
folder,
"contract_stats",
[
# [("world", selected_worlds), ("buyer", selected_agents)],
[
("world", selected_worlds),
("seller", selected_agents),
("step", selected_steps),
("relative_time", selected_times),
],
],
xvar=xvar,
label="Contract Statistics (Seller)",
default_selector="none",
choices=lambda x: [
_ for _ in x.columns if _.endswith("quantity") or _.endswith("count") or _.endswith("price")
],
key="seller",
)
contract_stats_buyer = aggregate_contract_stats(contract_stats_buyer, "seller")
contract_stats_seller = aggregate_contract_stats(contract_stats_seller, "buyer")
    contract_stats_buyer_type = aggregate_contract_stats(contract_stats_buyer_type, "seller_type")
    contract_stats_seller_type = aggregate_contract_stats(contract_stats_seller_type, "buyer_type")
contract_stats_agent["agent"] = contract_stats_agent["seller"] + "->" + contract_stats_agent["buyer"]
contract_stats_agent["agent_type"] = contract_stats_agent["seller_type"] + "->" + contract_stats_agent["buyer_type"]
contract_stats_type["agent"] = contract_stats_type["seller"] + "->" + contract_stats_type["buyer"]
contract_stats_type["agent_type"] = contract_stats_type["seller_type"] + "->" + contract_stats_type["buyer_type"]
(
contract_stats_product,
selected_contract_stats_product,
combine_contract_stats_product,
overlay_contract_stats_product,
) = add_stats_selector(
folder,
"contract_stats",
[
[
("world", selected_worlds),
("buyer", selected_agents),
("step", selected_steps),
("relative_time", selected_times),
],
[
("world", selected_worlds),
("seller", selected_agents),
("step", selected_steps),
("relative_time", selected_times),
],
],
xvar=xvar,
label="Contract Statistics (Product)",
default_selector="none",
choices=lambda x: [
_ for _ in x.columns if _.endswith("quantity") or _.endswith("count") or _.endswith("price")
],
key="product",
)
cols, start_col = add_stats_display(
world_stats,
selected_world_stats,
combine_world_stats,
overlay_world_stats,
ncols=ncols,
xvar=xvar,
hue="world",
title="World Figures",
sectioned=sectioned,
cols=None,
start_col=0,
dynamic=dynamic,
ci_level=ci_level,
)
cols, start_col = add_stats_display(
product_stats,
selected_product_stats,
combine_product_stats,
overlay_product_stats,
ncols=ncols,
xvar=xvar,
hue="product",
        title="Product Figures",
sectioned=sectioned,
cols=None,
start_col=0,
dynamic=dynamic,
ci_level=ci_level,
)
cols, start_col = add_stats_display(
type_stats,
selected_type_stats,
combine_type_stats,
overlay_type_stats,
ncols=ncols,
xvar=xvar,
hue="type",
title="Agent Type Figures",
sectioned=sectioned,
cols=cols,
start_col=start_col,
dynamic=dynamic,
ci_level=ci_level,
)
cols, start_col = add_stats_display(
agent_stats,
selected_agent_stats,
combine_agent_stats,
overlay_agent_stats,
ncols=ncols,
xvar=xvar,
hue="name",
title="Agent Instance Figures",
sectioned=sectioned,
cols=cols,
start_col=start_col,
dynamic=dynamic,
ci_level=ci_level,
)
cols, start_col = add_stats_display(
contract_stats_world,
selected_contract_stats_world,
combine_contract_stats_world,
overlay_contract_stats_world,
ncols=ncols,
xvar=xvar,
hue="world",
title="Trade Figures (World)",
sectioned=sectioned,
cols=cols,
start_col=start_col,
dynamic=dynamic,
ci_level=ci_level,
)
cols, start_col = add_stats_display(
contract_stats_type,
selected_contract_stats_type,
combine_contract_stats_type,
overlay_contract_stats_type,
ncols=ncols,
xvar=xvar,
hue="agent_type",
title="Trade Figures (Agent Type)",
sectioned=sectioned,
cols=cols,
start_col=start_col,
dynamic=dynamic,
ci_level=ci_level,
)
cols, start_col = add_stats_display(
contract_stats_buyer_type,
selected_contract_stats_buyer_type,
combine_contract_stats_buyer_type,
overlay_contract_stats_buyer_type,
ncols=ncols,
xvar=xvar,
hue="buyer_type",
title="Trade Figures (Buyer Type)",
sectioned=sectioned,
cols=cols,
start_col=start_col,
dynamic=dynamic,
ci_level=ci_level,
)
cols, start_col = add_stats_display(
contract_stats_seller_type,
selected_contract_stats_seller_type,
combine_contract_stats_seller_type,
overlay_contract_stats_seller_type,
ncols=ncols,
xvar=xvar,
hue="seller_type",
cols=cols,
start_col=start_col,
title="Trade Figures (Seller Type)",
sectioned=sectioned,
dynamic=dynamic,
ci_level=ci_level,
)
cols, start_col = add_stats_display(
contract_stats_agent,
selected_contract_stats_agent,
combine_contract_stats_agent,
overlay_contract_stats_agent,
ncols=ncols,
xvar=xvar,
hue="agent",
cols=cols,
start_col=start_col,
title="Trade Figures (Agent Instance)",
sectioned=sectioned,
dynamic=dynamic,
ci_level=ci_level,
)
cols, start_col = add_stats_display(
contract_stats_buyer,
selected_contract_stats_buyer,
combine_contract_stats_buyer,
overlay_contract_stats_buyer,
ncols=ncols,
xvar=xvar,
hue="buyer",
cols=cols,
start_col=start_col,
title="Trade Figures (Buyer Instance)",
sectioned=sectioned,
dynamic=dynamic,
ci_level=ci_level,
)
cols, start_col = add_stats_display(
contract_stats_seller,
selected_contract_stats_seller,
combine_contract_stats_seller,
overlay_contract_stats_seller,
ncols=ncols,
xvar=xvar,
hue="seller",
title="Trade Figures (Seller Instance)",
sectioned=sectioned,
cols=cols,
start_col=start_col,
dynamic=dynamic,
ci_level=ci_level,
)
cols, start_col = add_stats_display(
contract_stats_product,
selected_contract_stats_product,
combine_contract_stats_product,
overlay_contract_stats_product,
ncols=ncols,
xvar=xvar,
hue="product",
title="Trade Figures (Product)",
sectioned=sectioned,
cols=cols,
start_col=start_col,
dynamic=dynamic,
ci_level=ci_level,
)
def display_others(
folder,
selected_worlds,
selected_products,
selected_agents,
selected_types,
selected_steps,
selected_times,
data,
parent=st.sidebar,
):
# settings = parent.beta_expander("Settings")
# ncols = settings.number_input("N. Columns", min_value=1, max_value=6)
if parent.checkbox("Score Distribution", False):
score_distribution(selected_worlds, selected_agents, selected_types, data, parent=parent)
if parent.checkbox("Final Score Factors", False):
score_factors(selected_worlds, selected_agents, selected_types, data, parent=parent)
if __name__ == "__main__":
import sys
from streamlit import cli as stcli
folder = None
if len(sys.argv) > 1:
folder = Path(sys.argv[1])
if st._is_running_with_streamlit:
main(folder)
else:
sys.argv = ["streamlit", "run"] + sys.argv
sys.exit(stcli.main())
| 35.338357 | 233 | 0.565403 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9,641 | 0.205591 |
a08b15176907fc58ba9177ee950b34ee8cb64ffe | 691 | py | Python | cd4ml/problems/houses/features/feature_functions/feature_functions.py | camila-contreras/CD4ML-Scenarios | 806f812990c7cf33b5f78456f0065012b5b4cd35 | [
"MIT"
] | 113 | 2020-03-31T20:36:39.000Z | 2022-01-11T15:06:58.000Z | cd4ml/problems/houses/features/feature_functions/feature_functions.py | camila-contreras/CD4ML-Scenarios | 806f812990c7cf33b5f78456f0065012b5b4cd35 | [
"MIT"
] | 7 | 2020-06-10T05:11:35.000Z | 2022-01-06T02:55:21.000Z | cd4ml/problems/houses/features/feature_functions/feature_functions.py | camila-contreras/CD4ML-Scenarios | 806f812990c7cf33b5f78456f0065012b5b4cd35 | [
"MIT"
] | 260 | 2020-03-21T19:42:26.000Z | 2022-01-25T22:08:36.000Z | def zipcode_to_feature(zipcode, lookup, feature):
results = lookup.get(zipcode)
if results is None:
return None
else:
return results[feature]
def zipcode_to_state(zipcode, lookup):
return zipcode_to_feature(zipcode, lookup, 'state')
def avg_price_by_zipcode(zipcode, lookup):
return zipcode_to_feature(zipcode, lookup, 'avg_price_in_zip')
def num_in_zipcode(zipcode, lookup):
return zipcode_to_feature(zipcode, lookup, 'num_in_zip')
def avg_price_by_state(zipcode, lookup):
return zipcode_to_feature(zipcode, lookup, 'avg_price_in_state')
def num_in_state(zipcode, lookup):
return zipcode_to_feature(zipcode, lookup, 'num_in_state')
| 25.592593 | 68 | 0.751085 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 71 | 0.10275 |
a08cb54701ee8d7129f53895ca2daa2a379bad89 | 4,431 | py | Python | QFA/MO_1QFA.py | gustawlippa/QFA | 7f1f8bd0d2c9cb9aeeeb924b2f002c9e849523be | [
"MIT"
] | 2 | 2021-01-30T23:14:36.000Z | 2021-02-17T01:41:56.000Z | QFA/MO_1QFA.py | gustawlippa/QFA | 7f1f8bd0d2c9cb9aeeeb924b2f002c9e849523be | [
"MIT"
] | null | null | null | QFA/MO_1QFA.py | gustawlippa/QFA | 7f1f8bd0d2c9cb9aeeeb924b2f002c9e849523be | [
"MIT"
] | null | null | null | import numpy as np
from typing import List
from math import sqrt
from QFA.Automaton import Automaton
from math import cos, sin, pi
class MO_1QFA(Automaton):
def __init__(self, alphabet: str,
initial_state: np.ndarray,
transition_matrices: List[np.ndarray],
projective_measurement: np.ndarray):
# list of chars
self.alphabet = alphabet
# np column vector, initial dist over states
self.initial_state = initial_state
# list of np matrices - position in list corresponds to position of letter in alphabet,
# perhaps a map could be better
self.transition_matrices = transition_matrices
# np matrix containing ones and zeroes
self.projective_measurement = projective_measurement
    def process(self, word: str) -> Tuple[float, float]:

        state = self.initial_state
        for letter in word:
            transition_matrix = self.transition_matrices[self.alphabet.index(letter)]
            state = transition_matrix @ state
        projected = self.projective_measurement @ state
        # vdot(a, a) = squared norm of a; .real drops the zero imaginary part
        acceptance_probability = np.vdot(projected, projected).real
        return acceptance_probability, 0
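
    # The value returned above is ||P U(w) |psi0>||^2: the squared norm of the
    # initial state after applying the word's unitaries in sequence and then
    # the accepting projector P.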
def example():
print('MO_1QFA examples:')
mo_1qfa_example_1()
mo_1qfa_example_2()
mo_1qfa_example_3()
qfa = mo_1qfa_example_4()
return qfa
def mo_1qfa_example_1():
alphabet = 'a'
a_matrix = np.array([[sqrt(1/2), sqrt(1/2)], [sqrt(1/2), -sqrt(1/2)]])
initial_state = np.array([[1], [0]])
measurement = np.array([[0, 0], [0, 1]])
qfa = MO_1QFA(alphabet, initial_state, [a_matrix], measurement)
print('mo_qfa1')
# it should return 1/2
res = qfa.process('a')
print('a\t', res)
# example from QFA paper - returns 0 as it should
# the paper: https://www.researchgate.net/publication/264906610_Quantum_Finite_Automata
# Qiu, Daowen & Li, Lvzhou & Mateus, Paulo & Gruska, Jozef.
# (2012).
# Quantum Finite Automata. Handbook of Finite State Based Models and Applications.
# 10.1201/b13055-7.
res = qfa.process('aa')
print('aa\t', res)
return qfa
def mo_1qfa_example_2():
# example from wikipedia: (https://en.wikipedia.org/wiki/Quantum_finite_automata#Measure-once_automata)
alphabet = '01'
zero_matrix = np.array([[0, 1], [1, 0]])
one_matrix = np.array([[1, 0], [0, 1]])
projection_matrix = np.array([[1, 0], [0, 0]])
initial_state = np.array([[1], [0]])
qfa2 = MO_1QFA(alphabet, initial_state, [zero_matrix, one_matrix], projection_matrix)
# should behave like a DFA expecting words with an even number of '0's
print('mo_qfa2')
print('111\t', qfa2.process('111'))
print('101\t', qfa2.process('101'))
print('001\t', qfa2.process('001'))
print('\t', qfa2.process(''))
return qfa2
def mo_1qfa_example_3():
alphabet = '01'
zero_matrix = np.array([[0, 1], [1, 0]])
one_matrix = np.array([[1, 0], [0, 1]])
projection_matrix = np.array([[1, 0], [0, 0]])
# same example as the mo_1qfa_example_2, but the initial state is complex
    initial_state = np.array([[(1 + 1j) / 2], [(1 - 1j) / 2]])  # normalised: |a|^2 + |b|^2 = 1
qfa3 = MO_1QFA(alphabet, initial_state, [zero_matrix, one_matrix], projection_matrix)
# one must remember that the initial state must be a quantum state, so it must comply with the normalisation
# condition
print('mo_qfa3')
print('111\t', qfa3.process('111'))
print('101\t', qfa3.process('101'))
print('001\t', qfa3.process('001'))
print('\t', qfa3.process(''))
return qfa3
def mo_1qfa_example_4():
# This automaton should accept the language L = {a^(3n)}
# words in L should have the acceptance probability 1
alphabet = 'a'
a_matrix = np.array([[cos(2*pi/3), -sin(2*pi/3)],
[sin(2*pi/3), cos(2*pi/3)]])
end_matrix = np.eye(2)
projection_matrix = np.array([[1, 0], [0, 0]])
initial_state = np.array([[1], [0]])
qfa = MO_1QFA(alphabet, initial_state, [a_matrix, end_matrix], projection_matrix)
print("mo_1qfa4")
print("a\t", qfa.process('a'))
print("aa\t", qfa.process('aa'))
print("aaa\t", qfa.process('aaa'))
return qfa
if __name__ == "__main__":
example()
| 31.425532 | 120 | 0.643421 | 1,220 | 0.275333 | 0 | 0 | 0 | 0 | 0 | 0 | 1,266 | 0.285714 |
a08cc063efc183d6784f567bb7e999cbddbf1bbf | 2,292 | py | Python | whats_in_the_cupboard/search/views.py | brandonholderman/whats_in_the_cupboard | 8f8b0abe8b94547fa488db689261a4f475a24779 | [
"MIT"
] | null | null | null | whats_in_the_cupboard/search/views.py | brandonholderman/whats_in_the_cupboard | 8f8b0abe8b94547fa488db689261a4f475a24779 | [
"MIT"
] | 10 | 2020-02-11T23:36:20.000Z | 2022-03-11T23:57:52.000Z | whats_in_the_cupboard/search/views.py | brandonholderman/whats_in_the_cupboard | 8f8b0abe8b94547fa488db689261a4f475a24779 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.views.generic import TemplateView, ListView
from django.shortcuts import render
from rest_framework.authentication import TokenAuthentication
from rest_framework import viewsets, mixins
from rest_framework.response import Response
from rest_framework import generics, status
from rest_framework.views import APIView
from .serializers import SearchSerializer
from .sample_data import MOCK_DATA
from .models import Search
import requests
import os
class SearchView(generics.ListAPIView):
serializer_class = SearchSerializer
queryset = Search.objects.all()
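
    # A minimal sketch of wiring this view into urls.py (route name is an
    # assumption, not taken from this project):
    #   from django.urls import path
    #   urlpatterns = [path("search/", SearchView.as_view(), name="search-list")]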
# def get_context_data(self, **kwargs):
# context = super().get_context_data(**kwargs)
class HomeView(TemplateView):
"""
Home View Class.
"""
template_name = 'index.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
return context
# class SearchView(mixins.ListAPIMixin):
# serializer_class = SearchSerializer
# def get(self, request):
# response = requests.get(MOCK_DATA)
# if response.ok:
# return response
# else:
# return None
# class PostCollection(ListModelMixin,
# CreateModelMixin,
# GenericAPIView):
# queryset = Post.objects.all()
# serializer_class = PostSerializer
# def get(self, request, *args, **kwargs):
# return self.list(request, *args, **kwargs)
# def post(self, request, *args, **kwargs):
# return self.create(request, *args, **kwargs)
# def delete(self, request, *args, **kwargs):
# return self.destroy(request, *args, **kwargs)
# return context
# def home(request):
# ip_address = request.META.get('HTTP_X_FORWARDED_FOR', '')
# response = requests.get(
# 'https://nasaapidimasv1.p.rapidapi.com/getAsteroidStats')
# nasadata = response.json()
# return render(request, 'home.html', {
# 'ip': nasadata['ip'],
# 'country': nasadata['country_name'],
# 'latitude': nasadata['latitude'],
# 'longitude': nasadata['longitude'],
# 'api_key': os.environ.get('API_KEY', '')
# })
# Create your views here.
| 27.614458 | 67 | 0.654887 | 434 | 0.189354 | 0 | 0 | 0 | 0 | 0 | 0 | 1,377 | 0.600785 |
a08ec5f751e5c0ed745a1196c05685644187a34f | 591 | py | Python | scripts/freq_shecker.py | Fumiya-K/ros_myo | dac160aae5d0cd75211c60261bd1232ef089e530 | [
"MIT"
] | null | null | null | scripts/freq_shecker.py | Fumiya-K/ros_myo | dac160aae5d0cd75211c60261bd1232ef089e530 | [
"MIT"
] | null | null | null | scripts/freq_shecker.py | Fumiya-K/ros_myo | dac160aae5d0cd75211c60261bd1232ef089e530 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import rospy
from geometry_msgs.msg import Vector3
from sensor_msgs.msg import Imu
c_imu, c_angle = 0, 0
def cb_imu(msg):
global c_imu
c_imu += 1
def cb_angle(msg):
global c_angle
c_angle += 1
if c_angle == 400:
print("count of received data of angle = {}".format(c_angle))
print("count of received data of imu = {}".format(c_imu))
if __name__ == "__main__":
rospy.init_node("freq_checker")
    ori_sub = rospy.Subscriber("/myo_raw/myo_ori", Vector3, cb_angle)
    imu_sub = rospy.Subscriber("/myo_raw/myo_imu", Imu, cb_imu)
rospy.spin()
| 21.107143 | 66 | 0.717428 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 155 | 0.262267 |
a0921c92865225de5297219ccf69a9133b387063 | 2,431 | py | Python | dist/snippets/woosmap_http_zones_collection_request/woosmap_http_zones_collection_request.py | woosmap/openapi-specification | 7f934628a75695884db2fa29dd1d04efd1fb20de | [
"MIT"
] | null | null | null | dist/snippets/woosmap_http_zones_collection_request/woosmap_http_zones_collection_request.py | woosmap/openapi-specification | 7f934628a75695884db2fa29dd1d04efd1fb20de | [
"MIT"
] | 3 | 2021-12-20T16:15:13.000Z | 2022-02-15T00:44:19.000Z | dist/snippets/woosmap_http_zones_collection_request/woosmap_http_zones_collection_request.py | woosmap/openapi-specification | 7f934628a75695884db2fa29dd1d04efd1fb20de | [
"MIT"
] | null | null | null | # [START woosmap_http_zones_collection_request]
import requests
import json
url = "https://api.woosmap.com/zones?private_key=YOUR_PRIVATE_API_KEY"
payload = json.dumps({
"zones": [
{
"zone_id": "ZoneA",
"description": "Delivery Zone for Store A",
"store_id": "STORE_ID_123456",
"polygon": "POLYGON ((-122.496116 37.7648181,-122.4954079 37.751518,-122.4635648 37.7530788,-122.4618481 37.7514501,-122.4601315 37.7521288,-122.4565266 37.7513144,-122.4540375 37.7566755,-122.4528359 37.7583041,-122.4515485 37.7595934,-122.4546384 37.774656,-122.4718903 37.7731635,-122.472577 37.772485,-122.4755811 37.7725529,-122.4791001 37.7723493,-122.4793576 37.7713995,-122.4784993 37.769839,-122.4783276 37.7680071,-122.4774693 37.766718,-122.4772118 37.7652931,-122.496116 37.7648181))",
"types": [
"delivery"
]
},
{
"zone_id": "ZoneB",
"description": "Delivery Zone for Store B",
"store_id": "STORE_ID_123456",
"polygon": "POLYGON ((-122.4546384 37.774656,-122.4515485 37.7595934,-122.4354306 37.7602172,-122.4333707 37.7512596,-122.423071 37.7511239,-122.4242726 37.7687665,-122.4259893 37.7691736,-122.4289075 37.7732444,-122.4306241 37.7850483,-122.4472753 37.7830133,-122.445902 37.7759581,-122.4546384 37.774656))",
"types": [
"delivery"
]
},
{
"zone_id": "ZoneC",
"description": "Delivery Zone for Store C",
"store_id": "STORE_ID_45678",
"polygon": "POLYGON ((-122.4758889 37.7524995,-122.4751594 37.7321718,-122.4688079 37.7299995,-122.4648597 37.7261979,-122.4519851 37.7228035,-122.4483802 37.7215815,-122.4458053 37.726741,-122.4365356 37.7310857,-122.4315574 37.7324433,-122.4246909 37.7312214,-122.4219444 37.731493,-122.423071 37.7511239,-122.4333707 37.7512596,-122.4354306 37.7602172,-122.4515485 37.7595934,-122.4528628 37.7582744,-122.4540375 37.7566755,-122.4565266 37.7513144,-122.4601315 37.7521288,-122.4618481 37.7514501,-122.4635648 37.7530788,-122.4758889 37.7524995))",
"types": [
"delivery"
]
}
]
})
headers = {
'content-type': 'application/json'
}
response = requests.request("POST", url, headers=headers, data=payload)
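
# Optionally fail fast on HTTP errors before reading the body:
# response.raise_for_status()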
print(response.text)
# [END woosmap_http_zones_collection_request] | 52.847826 | 562 | 0.65035 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,847 | 0.75977 |
a0963df40f1df1fa608416915de9bf22beecf414 | 1,692 | py | Python | src/CyPhyMasterInterpreter/run_master_interpreter_sample.py | lefevre-fraser/openmeta-mms | 08f3115e76498df1f8d70641d71f5c52cab4ce5f | [
"MIT"
] | null | null | null | src/CyPhyMasterInterpreter/run_master_interpreter_sample.py | lefevre-fraser/openmeta-mms | 08f3115e76498df1f8d70641d71f5c52cab4ce5f | [
"MIT"
] | null | null | null | src/CyPhyMasterInterpreter/run_master_interpreter_sample.py | lefevre-fraser/openmeta-mms | 08f3115e76498df1f8d70641d71f5c52cab4ce5f | [
"MIT"
] | null | null | null | import win32com.client
# Disable early binding: full of race conditions writing the cache files,
# and changes the semantics since inheritance isn't handled correctly
import win32com.client.gencache
_savedGetClassForCLSID = win32com.client.gencache.GetClassForCLSID
win32com.client.gencache.GetClassForCLSID = lambda x: None
project = win32com.client.DispatchEx("Mga.MgaProject")
project.Open("MGA=" + r'D:\Projects\META\development\models\DynamicsTeam\MasterInterpreter\MasterInterpreter.mga')
# config_light = win32com.client.DispatchEx("CyPhyMasterInterpreter.ConfigurationSelectionLight")
# # GME id, or guid, or abs path or path to Test bench or SoT or PET
# config_light.ContextId = '{6d24a596-ec4f-4910-895b-d03a507878c3}'
# print config_light.SelectedConfigurationIds
# config_light.SetSelectedConfigurationIds(['id-0065-000000f1'])
# #config_light.KeepTemporaryModels = True
# #config_light.PostToJobManager = True
# master = win32com.client.DispatchEx("CyPhyMasterInterpreter.CyPhyMasterInterpreterAPI")
# master.Initialize(project)
# results = master.RunInTransactionWithConfigLight(config_light)
# It works only this way, and it is not worth the time to figure out the other way.
# will run ALL configurations.
focusobj = None
try:
project.BeginTransactionInNewTerr()
focusobj = project.GetObjectByID('id-0065-00000635')
finally:
project.AbortTransaction()
selectedobj=win32com.client.DispatchEx("Mga.MgaFCOs")
interpreter = "MGA.Interpreter.CyPhyMasterInterpreter"
launcher = win32com.client.DispatchEx("Mga.MgaLauncher")
launcher.RunComponent(interpreter, project, focusobj, selectedobj, 128)
project.Close()
| 38.454545 | 115 | 0.785461 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,071 | 0.632979
a097f2e9cca87b9c4ab3fbfbe7eb9b74f83ce331 | 4051 | py | Python | image_utils.py | datascisteven/Flictionary-Flask | 9437f0b6377b11cecfa37c8a94eb68cc4e7018f8 | ["MIT"] | null | null | null | image_utils.py | datascisteven/Flictionary-Flask | 9437f0b6377b11cecfa37c8a94eb68cc4e7018f8 | ["MIT"] | null | null | null | image_utils.py | datascisteven/Flictionary-Flask | 9437f0b6377b11cecfa37c8a94eb68cc4e7018f8 | ["MIT"] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from PIL import ImageOps
def view_image(img, filename = 'image'):
fig, ax = plt.subplots(figsize=(6, 9))
ax.imshow(img.reshape(96, 96).squeeze())
ax.axis('off')
plt.savefig(filename + '.png')
def convert_to_PIL(img):
img_r = img.reshape(96, 96)
pil_img = Image.new('RGB', (96, 96), 'white')
pixels = pil_img.load()
for i in range(0, 96):
for j in range(0, 96):
if img_r[i, j] > 0:
pixels[j, i] = (255 - int(img_r[i, j] * 255), 255 - int(img_r[i, j] * 255), 255 - int(img_r[i, j] * 255))
return pil_img
def convert_to_np(pil_img):
pil_img = pil_img.convert('RGB')
img = np.zeros((96, 96))
pixels = pil_img.load()
for i in range(0, 96):
for j in range(0, 96):
img[i, j] = 1 - pixels[j, i][0] / 255
return img
def crop_image(image):
cropped_image = image
# get image size
width, height = cropped_image.size
# get image pixels
pixels = cropped_image.load()
image_strokes_rows = []
image_strokes_cols = []
# run through the image
for i in range(0, width):
for j in range(0, height):
# save coordinates of the image
if (pixels[i,j][3] > 0):
image_strokes_cols.append(i)
image_strokes_rows.append(j)
# if image is not empty then crop to contents of the image
if (len(image_strokes_rows)) > 0:
# find the box for image
row_min = np.array(image_strokes_rows).min()
row_max = np.array(image_strokes_rows).max()
col_min = np.array(image_strokes_cols).min()
col_max = np.array(image_strokes_cols).max()
# find the box for cropping
margin = min(row_min, height - row_max, col_min, width - col_max)
# crop image
border = (col_min, row_min, width - col_max, height - row_max)
cropped_image = ImageOps.crop(cropped_image, border)
# get cropped image size
width_cropped, height_cropped = cropped_image.size
# create square resulting image to paste cropped image into the center
dst_im = Image.new("RGBA", (max(width_cropped, height_cropped), max(width_cropped, height_cropped)), "white")
offset = ((max(width_cropped, height_cropped) - width_cropped) // 2, (max(width_cropped, height_cropped) - height_cropped) // 2)
# paste to the center of a resulting image
dst_im.paste(cropped_image, offset, cropped_image)
#resize
dst_im.thumbnail((96, 96), Image.ANTIALIAS)
return dst_im
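# Minimal usage sketch (editor's addition; "drawing.png" is an illustrative
# filename, not part of this module):
#   sketch = Image.open("drawing.png").convert("RGBA")
#   square = crop_image(sketch)                   # content-cropped, centred, at most 96x96
#   arr = convert_to_np(square.resize((96, 96)))  # 96x96 float array in [0, 1]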
def normalize(arr):
arr = arr.astype('float')
# Do not touch the alpha channel
for i in range(3):
minval = arr[...,i].min()
maxval = arr[...,i].max()
if minval != maxval:
arr[...,i] -= minval
arr[...,i] *= (255.0/(maxval-minval))
return arr
def normalize_image(image):
arr = np.array(image)
new_img = Image.fromarray(normalize(arr).astype('uint8'),'RGBA')
return new_img
def alpha_composite(front, back):
front = np.asarray(front)
back = np.asarray(back)
result = np.empty(front.shape, dtype='float')
alpha = np.index_exp[:, :, 3:]
rgb = np.index_exp[:, :, :3]
falpha = front[alpha] / 255.0
balpha = back[alpha] / 255.0
result[alpha] = falpha + balpha * (1 - falpha)
old_setting = np.seterr(invalid='ignore')
result[rgb] = (front[rgb] * falpha + back[rgb] * balpha * (1 - falpha)) / result[alpha]
np.seterr(**old_setting)
result[alpha] *= 255
np.clip(result, 0, 255)
# astype('uint8') maps np.nan and np.inf to 0
result = result.astype('uint8')
result = Image.fromarray(result, 'RGBA')
return result
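# The loop-free math above is the standard "over" operator, applied per pixel:
#   a_out = a_front + a_back * (1 - a_front)
#   rgb_out = (rgb_front * a_front + rgb_back * a_back * (1 - a_front)) / a_out
# with alphas scaled to [0, 1]; np.seterr hides the 0/0 case, which the later
# astype('uint8') maps to 0.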
def alpha_composite_with_color(image, color=(255, 255, 255)):
back = Image.new('RGBA', size=image.size, color=color + (255,))
return alpha_composite(image, back)
def convert_to_rgb(image):
image_rgb = alpha_composite_with_color(image)
    image_rgb = image_rgb.convert('RGB')  # convert() returns a new image, so keep the result
return image_rgb
| 28.935714 | 132 | 0.621328 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 536 | 0.132313 |
a098e971e8b1b7172d8860ca8ed8514362a25eea | 360 | py | Python | src/lqc/generate/web_page/ui_tools/create.py | tysmith/layout-quickcheck | c5ba9431a40f650a594140541e32af7c8ff21695 | ["MIT"] | null | null | null | src/lqc/generate/web_page/ui_tools/create.py | tysmith/layout-quickcheck | c5ba9431a40f650a594140541e32af7c8ff21695 | ["MIT"] | null | null | null | src/lqc/generate/web_page/ui_tools/create.py | tysmith/layout-quickcheck | c5ba9431a40f650a594140541e32af7c8ff21695 | ["MIT"] | null | null | null |
import os
def ui_tools_js():
js_string = ""
with open(os.path.join(os.path.dirname(__file__), 'template.js'), 'r') as f:
js_string = f.read()
return js_string
def ui_tools_html():
html_string = ""
with open(os.path.join(os.path.dirname(__file__), 'template.html'), 'r') as f:
html_string = f.read()
    return html_string
| 25.714286 | 82 | 0.633333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 38 | 0.105556
a09ac1675173bac590c8d099736901eb4fe0b015 | 886 | py | Python | students/K33422/laboratory_works/Moruga_Elina/lr_2/simple_django_web_projects(1)/django_project_Moruga/project_first_app/views.py | Elyavor/ITMO_ICT_WebDevelopment_2021-2022 | 63fad07bcdc0a9a6b85d46eacf97182162262181 | ["MIT"] | null | null | null | students/K33422/laboratory_works/Moruga_Elina/lr_2/simple_django_web_projects(1)/django_project_Moruga/project_first_app/views.py | Elyavor/ITMO_ICT_WebDevelopment_2021-2022 | 63fad07bcdc0a9a6b85d46eacf97182162262181 | ["MIT"] | null | null | null | students/K33422/laboratory_works/Moruga_Elina/lr_2/simple_django_web_projects(1)/django_project_Moruga/project_first_app/views.py | Elyavor/ITMO_ICT_WebDevelopment_2021-2022 | 63fad07bcdc0a9a6b85d46eacf97182162262181 | ["MIT"] | null | null | null |
from django.shortcuts import render
from django.http import Http404
from .models import CarOwner
def detail(request, owner_id):
    try:  # try/except - exception handling
        p = CarOwner.objects.get(pk=owner_id)  # pk is created automatically by Django for any model table (every object from the DB has one); owner_id is passed to the function when it is called.
        # the variable p is assigned the object returned by a query equivalent to "select * from CarOwner where pk=owner_id"
    except CarOwner.DoesNotExist:
        raise Http404("Car owner does not exist")  # exception raised when the try block finds no matching record in the CarOwner table
    return render(request, 'detail.html', {'owner': p})  # renders the detail.html template and passes it the object "p", available in the template as "owner"
| 59.066667 | 192 | 0.767494 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 973 | 0.77161 |
a09b9af2b847bf39b063ead0a72aab28cd93427e | 126 | py | Python | wtpy/apps/__init__.py | Huijun-Cui/wtpy | 9a8243a20b944fbb37aa33d81215b7b36ac7b1e2 | ["MIT"] | null | null | null | wtpy/apps/__init__.py | Huijun-Cui/wtpy | 9a8243a20b944fbb37aa33d81215b7b36ac7b1e2 | ["MIT"] | null | null | null | wtpy/apps/__init__.py | Huijun-Cui/wtpy | 9a8243a20b944fbb37aa33d81215b7b36ac7b1e2 | ["MIT"] | null | null | null |
from .WtBtAnalyst import WtBtAnalyst
from .WtCtaOptimizer import WtCtaOptimizer
__all__ = ["WtBtAnalyst","WtCtaOptimizer"]
| 31.5 | 43 | 0.809524 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.230159
a09c1cbcccf7a63039a5587fbbf109f0b5dc595c | 608 | py | Python | grove_potentiometer.py | cpmpercussion/empi_controller | 178d3952994d7e13067674cbcd261d945e6b4799 | ["MIT"] | null | null | null | grove_potentiometer.py | cpmpercussion/empi_controller | 178d3952994d7e13067674cbcd261d945e6b4799 | ["MIT"] | null | null | null | grove_potentiometer.py | cpmpercussion/empi_controller | 178d3952994d7e13067674cbcd261d945e6b4799 | ["MIT"] | null | null | null |
import math
import sys
import time
from grove.adc import ADC
class GroveRotaryAngleSensor(ADC):
def __init__(self, channel):
self.channel = channel
self.adc = ADC()
@property
def value(self):
return self.adc.read(self.channel)
Grove = GroveRotaryAngleSensor
def main():
if len(sys.argv) < 2:
print('Usage: {} adc_channel'.format(sys.argv[0]))
sys.exit(1)
sensor = GroveRotaryAngleSensor(int(sys.argv[1]))
while True:
print('Rotary Value: {}'.format(sensor.value))
time.sleep(.2)
if __name__ == '__main__':
main()
| 17.882353 | 58 | 0.626645 | 202 | 0.332237 | 0 | 0 | 73 | 0.120066 | 0 | 0 | 51 | 0.083882 |
a09c77edcb165aec8e2b0d92f741bba565b1c3ad | 627 | py | Python | ad2web/api/forms.py | billfor/alarmdecoder-webapp | 43c3ebb2b44c7291cd89a2a7a31bbdfdb3ec06dc | ["BSD-3-Clause", "MIT"] | 46 | 2015-06-14T02:19:16.000Z | 2022-03-24T03:11:19.000Z | ad2web/api/forms.py | billfor/alarmdecoder-webapp | 43c3ebb2b44c7291cd89a2a7a31bbdfdb3ec06dc | ["BSD-3-Clause", "MIT"] | 66 | 2015-03-14T16:30:43.000Z | 2021-08-28T22:20:01.000Z | ad2web/api/forms.py | billfor/alarmdecoder-webapp | 43c3ebb2b44c7291cd89a2a7a31bbdfdb3ec06dc | ["BSD-3-Clause", "MIT"] | 44 | 2015-02-13T19:23:37.000Z | 2021-12-30T04:17:21.000Z |
# -*- coding: utf-8 -*-
import string
from flask_wtf import FlaskForm as Form
from wtforms.fields.html5 import URLField, EmailField, TelField
from wtforms import (ValidationError, HiddenField, TextField,
PasswordField, SubmitField, TextAreaField, IntegerField, RadioField,
FileField, DecimalField, BooleanField, SelectField, FormField, FieldList)
from wtforms.validators import (Required, Length, EqualTo, Email, NumberRange,
URL, AnyOf, Optional, IPAddress)
from flask_login import current_user
from ..user import User
from ..widgets import ButtonField
class APIKeyForm(Form):
    pass
| 34.833333 | 81 | 0.773525 | 32 | 0.051037 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.036683
a09e18ee423836b7c3ce0e61bbbd9d1885bd9f19 | 73 | py | Python | modelator_py/util/tla/__init__.py | informalsystems/modelator-py | d66464096c022799e680e6201590a2ead69be32d | ["Apache-2.0"] | null | null | null | modelator_py/util/tla/__init__.py | informalsystems/modelator-py | d66464096c022799e680e6201590a2ead69be32d | ["Apache-2.0"] | 3 | 2022-03-30T16:01:49.000Z | 2022-03-31T13:40:03.000Z | modelator_py/util/tla/__init__.py | informalsystems/modelator-py | d66464096c022799e680e6201590a2ead69be32d | ["Apache-2.0"] | null | null | null |
"""TLA+ parser and syntax tree."""
from .parser import parse, parse_expr
| 24.333333 | 37 | 0.726027 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.465753 |
a0a2f155643acffd9a5b5d44e3b912311ab75ced | 2084 | py | Python | runai/mp/keras/layers/core.py | bamps53/runai | 0c868160f64e1e063c6eb6f660d42917322d40c5 | ["MIT"] | 86 | 2020-01-23T18:56:41.000Z | 2022-02-14T22:32:08.000Z | runai/mp/keras/layers/core.py | bamps53/runai | 0c868160f64e1e063c6eb6f660d42917322d40c5 | ["MIT"] | 18 | 2020-01-24T17:55:18.000Z | 2021-12-01T01:01:32.000Z | runai/mp/keras/layers/core.py | bamps53/runai | 0c868160f64e1e063c6eb6f660d42917322d40c5 | ["MIT"] | 12 | 2020-02-03T14:30:44.000Z | 2022-01-08T16:06:59.000Z |
import keras.backend as K
import keras.layers
import runai.mp
from .keep import Keep
from .parallelised import Parallelised
Activation = Keep.create('Activation')
class Dense(Parallelised, keras.layers.Dense):
def build(self, input_shape):
assert len(input_shape) == 2 # TODO(levosos): support more than two dimensions
total_cin = input_shape[-1]
cin, cout, c = self.calculate_cs(
cin=total_cin,
cout=self.units)
self.kernels = self.add_weights(
name='kernel',
shape=(cin, cout),
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint) # TODO(levosos): is this ok?
if self.use_bias:
self.biases = self.add_weights(
name='bias',
shape=(c,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint) # TODO(levosos): is this ok?
self.input_spec = keras.layers.InputSpec(ndim=2, axes={-1: total_cin}) # TODO(levosos): use 'min_ndim' once supporting more than two dimensions
self.built = True
def call(self, inputs):
inputs = self.inputs(inputs, channel_axis=-1)
outputs = self.parallelise(
lambda input, kernel: K.dot(input, kernel),
inputs,
self.kernels)
if runai.mp.method == runai.mp.Method.Cin:
outputs = self.reduce_split(outputs, channel_axis=-1)
if self.use_bias:
outputs = self.parallelise(
lambda output, bias: K.bias_add(output, bias, data_format='channels_last'),
outputs,
self.biases)
if self.activation is not None:
outputs = self.parallelise(
lambda output: self.activation(output),
outputs)
return self.merge(outputs, channel_axis=-1)
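# Editor's note (rough sketch of the behaviour above, inferred from this file
# alone): with Method.Cin the input axis is sharded, so each parallel K.dot
# yields a partial sum that reduce_split combines; otherwise the output axis is
# sharded and merge concatenates the per-split slices along channel_axis=-1.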
Dropout = Keep.create('Dropout')
Flatten = Keep.create('Flatten')
| 31.575758 | 151 | 0.600768 | 1,848 | 0.886756 | 0 | 0 | 0 | 0 | 0 | 0 | 236 | 0.113244 |
a0a7abf53aec9f31e9e5488c61a3e3d5fb017c5d | 461 | py | Python | graphics.py | Nemo20k/lactose_multistability_model | e50d68bb508e243d0a775d1d562bd8e8b88b3b30 | ["MIT"] | null | null | null | graphics.py | Nemo20k/lactose_multistability_model | e50d68bb508e243d0a775d1d562bd8e8b88b3b30 | ["MIT"] | null | null | null | graphics.py | Nemo20k/lactose_multistability_model | e50d68bb508e243d0a775d1d562bd8e8b88b3b30 | ["MIT"] | null | null | null |
from matplotlib import pyplot as plt
import numpy as np
def plot_bacteria(bacteria_ndarray: np.ndarray, dimensions: tuple, save_path: str = None, cmap: str='prism'):
im = bacteria_ndarray.reshape(dimensions)
fig = plt.imshow(im, cmap=cmap, vmin=0, vmax=1)
plt.title('Red/Green graphic distribution')
if save_path:
        plt.savefig(save_path)  # save the current figure; AxesImage.write_png was removed from modern matplotlib
def plot_green_TMG(green_history):
    pass
def create_gif(history_ndarray):
pass
| 27.117647 | 109 | 0.724512 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 39 | 0.084599 |
a0a9d841059677b45b6f09a062af0ebdbc1dceea | 5394 | py | Python | webapp/element43/apps/common/util.py | Ososope/eve_online | b368f77aaff403e5f1523a1a0e01d105fed0ada9 | ["BSD-3-Clause"] | null | null | null | webapp/element43/apps/common/util.py | Ososope/eve_online | b368f77aaff403e5f1523a1a0e01d105fed0ada9 | ["BSD-3-Clause"] | null | null | null | webapp/element43/apps/common/util.py | Ososope/eve_online | b368f77aaff403e5f1523a1a0e01d105fed0ada9 | ["BSD-3-Clause"] | null | null | null |
# utility functions
import ast
import urllib
import datetime
import pytz
import pylibmc
# Import settings
from django.conf import settings
# API Models
from apps.api.models import APIKey, Character, APITimer
# Eve_DB Models
from eve_db.models import MapSolarSystem
# API Access Masks
CHARACTER_API_ACCESS_MASKS = {'AccountBalance': 1,
'AssetList': 2,
'CalendarEventAttendees': 4,
'CharacterSheet': 8,
'ContactList': 16,
'ContactNotifications': 32,
'FacWarStats': 64,
'IndustryJobs': 128,
'KillLog': 256,
'MailBodies': 512,
'MailingLists': 1024,
'MailMessages': 2048,
'MarketOrders': 4096,
'Medals': 8192,
'Notifications': 16384,
'NotificationTexts': 32768,
'Research': 65536,
'SkillInTraining': 131072,
'SkillQueue': 262144,
'Standings': 524288,
'UpcomingCalendarEvents': 1048576,
'WalletJournal': 2097152,
'WalletTransactions': 4194304,
'CharacterInfo': 25165824,
'AccountStatus': 33554432,
'Contracts': 67108864,
'Locations': 134217728}
def get_memcache_client():
"""
Returns a ready-to-use memcache client
"""
return pylibmc.Client(settings.MEMCACHE_SERVER,
binary=settings.MEMCACHE_BINARY,
behaviors=settings.MEMCACHE_BEHAVIOUR)
def dictfetchall(cursor):
"""
Returns all rows from a cursor as a dict
"""
desc = cursor.description
return [
dict(zip([col[0] for col in desc], row))
for row in cursor.fetchall()
]
def cast_empty_string_to_int(string):
"""
Casts empty string to 0
"""
# Strip stuff only if it's a string
if isinstance(string, str):
string = string.strip()
return int(string) if string else 0
def cast_empty_string_to_float(string):
"""
Casts empty string to 0
"""
# Strip stuff only if it's a string
if isinstance(string, str):
string = string.strip()
return float(string) if string else 0
def calculate_character_access_mask(sheets):
"""
Returns combined access mask for a list of API sheets.
"""
mask = 0
for sheet in sheets:
mask += CHARACTER_API_ACCESS_MASKS[sheet]
return mask
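# Worked example (editor's addition), using the mask table above:
#   calculate_character_access_mask(['AccountBalance', 'AssetList', 'CharacterSheet'])
#   == 1 + 2 + 8 == 11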
def manage_character_api_timers(character):
"""
Adds and removes character APITimers for a given character depending on the character's key permissions.
When we add more functions, we need to add them to the masks dictionary.
"""
key_mask = character.apikey.accessmask
for sheet in CHARACTER_API_ACCESS_MASKS:
mask = CHARACTER_API_ACCESS_MASKS[sheet]
if ((mask & key_mask) == mask):
# If we have permission, create timer if not already present
try:
APITimer.objects.get(character=character, apisheet=sheet)
except APITimer.DoesNotExist:
new_timer = APITimer(character=character,
corporation=None,
apisheet=sheet,
nextupdate=pytz.utc.localize(datetime.datetime.utcnow()))
new_timer.save()
else:
            # If we are not permitted to do this, remove existing timers
            try:
                APITimer.objects.get(character=character, apisheet=sheet).delete()  # parentheses added: without the call, the timer was never actually deleted
except APITimer.DoesNotExist:
pass
def validate_characters(user, access_mask):
"""
Returns characters of a user that match a given minimum access mask.
"""
# Get keys
keys = APIKey.objects.filter(user=user)
characters = []
for key in keys:
# Do a simple bitwise operation to determine if we have sufficient rights with this key.
if ((access_mask & key.accessmask) == access_mask):
# Get all chars from that key which have sufficient permissions.
characters += list(Character.objects.filter(apikey=key))
return characters
def find_path(start, finish, security=5, invert=0):
"""
Returns a list of system objects which represent the path.
start: system_id of first system
finish: system_id of last system
security: sec level of system * 10
invert: if true (1), use security as highest seclevel you want to enter, default (0) seclevel is the lowest you want to try to use
"""
# Set params
params = urllib.urlencode({'start': start, 'finish': finish, 'seclevel': security, 'invert': invert})
response = urllib.urlopen('http://localhost:3455/path', params)
path_list = ast.literal_eval(response.read())
path = []
for waypoint in path_list:
path.append(MapSolarSystem.objects.get(id=waypoint))
return path
| 31 | 134 | 0.561735 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,762 | 0.326659 |
a0ade23395d3069649385af2893a0f1454cfd97f | 349 | py | Python | forms/views/field.py | alphagov/submit-forms | 6339b40debbab668263246162ab33c68391ef744 | ["MIT"] | 3 | 2017-11-20T18:17:47.000Z | 2019-08-09T14:59:36.000Z | forms/views/field.py | alphagov/submit-forms | 6339b40debbab668263246162ab33c68391ef744 | ["MIT"] | null | null | null | forms/views/field.py | alphagov/submit-forms | 6339b40debbab668263246162ab33c68391ef744 | ["MIT"] | 3 | 2019-08-29T11:55:16.000Z | 2021-04-10T19:52:14.000Z |
from django.shortcuts import render
from ..models import Field
def fields(request):
fields = Field.objects.all()
return render(request, 'fields.html', {'fields': fields})
def field(request, key=None, template='field.html'):
field = Field.objects.get(field=key)
return render(request, template, {
'field': field,
})
| 20.529412 | 61 | 0.670487 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 0.114613 |
a0af5afc99a71406be5ffead3cb66d5a5fbdf490 | 2608 | py | Python | crafting/CraftingHandler.py | uuk0/mcpython-4 | 1ece49257b3067027cc43b452a2fc44908d3514c | ["MIT"] | 2 | 2019-08-21T08:23:45.000Z | 2019-09-25T13:20:28.000Z | crafting/CraftingHandler.py | uuk0/mcpython-4 | 1ece49257b3067027cc43b452a2fc44908d3514c | ["MIT"] | 11 | 2019-08-21T08:46:01.000Z | 2021-09-08T01:18:04.000Z | crafting/CraftingHandler.py | uuk0/mcpython-4 | 1ece49257b3067027cc43b452a2fc44908d3514c | ["MIT"] | 5 | 2019-08-30T08:19:57.000Z | 2019-10-26T03:31:16.000Z |
"""mcpython - a minecraft clone written in python licenced under MIT-licence
authors: uuk, xkcdjerry
original game by forgleman licenced under MIT-licence
minecraft by Mojang
blocks based on 1.14.4.jar of minecraft, downloaded on 20th of July, 2019"""
import globals as G
import crafting.IRecipeType
import json
import ResourceLocator
import item.ItemHandler
import traceback
import mod.ModMcpython
class CraftingHandler:
def __init__(self):
self.recipeinfotable = {}
        # all shapeless recipes, sorted by item count
        self.crafting_recipes_shapeless = {}
        # all shaped recipes, sorted by item count and then by size
self.crafting_recipes_shaped = {}
self.loaded_mod_dirs = []
def __call__(self, obj):
if issubclass(obj, crafting.IRecipeType.IRecipe):
self.recipeinfotable[obj.get_recipe_name()] = obj
else:
raise ValueError()
return obj
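    # Typical registration (editor's sketch, names illustrative): the handler
    # instance is used as a class decorator, e.g.
    #
    #   @G.craftinghandler
    #   class GridRecipe(crafting.IRecipeType.IRecipe): ...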
def add_recipe(self, recipe: crafting.IRecipeType.IRecipe):
recipe.register()
def add_recipe_from_data(self, data: dict):
name = data["type"]
if name in self.recipeinfotable:
recipe = self.recipeinfotable[name].from_data(data)
self.add_recipe(recipe)
return recipe
else:
raise ValueError("can't load recipe. recipe class {} not arrival".format(name))
def add_recipe_from_file(self, file: str):
try:
self.add_recipe_from_data(ResourceLocator.read(file, "json"))
except ValueError:
pass
def load(self, modname):
if modname in self.loaded_mod_dirs:
print("ERROR: mod '{}' has tried to load crafting recipes twice or more".format(modname))
            return  # make sure to load only once!
self.loaded_mod_dirs.append(modname)
for itemname in ResourceLocator.get_all_entries("data/{}/recipes".format(modname)):
mod.ModMcpython.mcpython.eventbus.subscribe("stage:recipe:bake", self.add_recipe_from_file, itemname,
info="loading crafting recipe from {}".format(itemname))
G.craftinghandler = CraftingHandler()
def load_recipe_providers():
from . import (GridRecipes)
mod.ModMcpython.mcpython.eventbus.subscribe("stage:recipe:groups", load_recipe_providers,
info="loading crafting recipe groups")
mod.ModMcpython.mcpython.eventbus.subscribe("stage:recipes", G.craftinghandler.load, "minecraft",
info="loading crafting recipes")
| 35.243243 | 113 | 0.657209 | 1,750 | 0.671012 | 0 | 0 | 0 | 0 | 0 | 0 | 688 | 0.263804 |
a0b0788c0fdd53bb74359f134c5cbbe7dd53cb63 | 1625 | py | Python | xcache.py | ATLAS-Analytics/AlarmAndAlertService | a167439b0c3f3c9594af52bd21fe8713b5f47bf1 | ["MIT"] | null | null | null | xcache.py | ATLAS-Analytics/AlarmAndAlertService | a167439b0c3f3c9594af52bd21fe8713b5f47bf1 | ["MIT"] | 1 | 2021-05-26T02:21:42.000Z | 2021-05-26T02:21:42.000Z | xcache.py | ATLAS-Analytics/AlarmAndAlertService | a167439b0c3f3c9594af52bd21fe8713b5f47bf1 | ["MIT"] | null | null | null |
# Checks number of concurrent connections from XCaches to MWT2 dCache.
# Creates alarm if more than 200 from any server.
# ====
# It is run every 30 min from a cron job.
import json
from datetime import datetime
import requests
from alerts import alarms
config_path = '/config/config.json'
with open(config_path) as json_data:
config = json.load(json_data,)
print('current time', datetime.now())
res = requests.get(
'http://graphite.mwt2.org/render?target=dcache.xrootd.*&format=json&from=now-2min')
if (res.status_code == 200):
data = res.json()
print(data)
    print('received data on {} servers'.format(len(data)))
else:
    print('problem in receiving connections!')
    data = []  # guard so the loop below does not hit a NameError when the request fails
ALARM = alarms('Virtual Placement', 'XCache', 'large number of connections')
for server in data:
serverIP = server['target'].replace('dcache.xrootd.', '').replace('_', '.')
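    # e.g. (illustrative) a target of "dcache.xrootd.192_170_226_10" becomes "192.170.226.10"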
connections = server['datapoints'][-1][0]
timestamp = server['datapoints'][-1][1]
timestamp = datetime.fromtimestamp(timestamp)
timestamp = timestamp.strftime("%Y-%m-%d %H:%M:%S")
if not connections:
print('n connections not retrieved... skipping.')
continue
if connections < 200:
print('server {} has {} connections.'.format(serverIP, connections))
else:
source = {
"xcache": serverIP,
"n_connections": connections,
"timestamp": timestamp
}
print(source)
ALARM.addAlarm(
body='too many connections.',
tags=[serverIP],
source=source
)
| 30.092593 | 88 | 0.619692 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 612 | 0.376615 |
a0b0d03bf62e28fff9360da39608230424f15bea | 769 | py | Python | Question3_Competetive_Programming/solution.py | Robotrek-TechTatva/big-pp | 5790075638aa7f39d787dfc390f43da1cdb4ed56 | ["MIT"] | null | null | null | Question3_Competetive_Programming/solution.py | Robotrek-TechTatva/big-pp | 5790075638aa7f39d787dfc390f43da1cdb4ed56 | ["MIT"] | null | null | null | Question3_Competetive_Programming/solution.py | Robotrek-TechTatva/big-pp | 5790075638aa7f39d787dfc390f43da1cdb4ed56 | ["MIT"] | null | null | null |
import csv
def area(x1, y1, x2, y2, x3, y3):
return abs((x1 * (y2 - y3) + x2 * (y3 - y1)
+ x3 * (y1 - y2)) / 2.0)
def isInside(lis):
x, y = 0, 0
x1, y1, x2, y2, x3, y3 = lis[1:]
x1 = int(x1)
x2 = int(x2)
x3 = int(x3)
y1 = int(y1)
y2 = int(y2)
y3 = int(y3)
A = area (x1, y1, x2, y2, x3, y3)
A1 = area (x, y, x2, y2, x3, y3)
A2 = area (x1, y1, x, y, x3, y3)
A3 = area (x1, y1, x2, y2, x, y)
if(A == A1 + A2 + A3):
return True
else:
return False
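# Worked example (editor's addition): for a row ["t1", 0, 10, 10, -10, -10, -10]
# the triangle area is A = 200 and the origin splits it into
# A1 + A2 + A3 = 100 + 50 + 50 = 200, so isInside returns True.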
filename = "traingles.csv"
with open(filename, 'r') as csv_file:
csv_reader = csv.reader(csv_file)
next(csv_reader)
for line in csv_reader:
if line:
            print(isInside(line))
| 21.361111 | 49 | 0.470741 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 0.023407
a0b1f6e65ee6e7176da940ac100c95bce2eaea30 | 238 | py | Python | tutorials/W0D4_Calculus/solutions/W0D4_Tutorial2_Solution_359be293.py | vasudev-sharma/course-content | 46fb9be49da52acb5df252dda43f11b6d1fe827f | ["CC-BY-4.0", "BSD-3-Clause"] | 1 | 2021-06-09T09:56:21.000Z | 2021-06-09T09:56:21.000Z | tutorials/W0D4_Calculus/solutions/W0D4_Tutorial2_Solution_359be293.py | macasal/course-content | 0fc5e1a0d736c6b0391eeab587012ed0ab01e462 | ["CC-BY-4.0", "BSD-3-Clause"] | 1 | 2021-06-16T05:41:08.000Z | 2021-06-16T05:41:08.000Z | tutorials/W0D4_Calculus/solutions/W0D4_Tutorial2_Solution_359be293.py | macasal/course-content | 0fc5e1a0d736c6b0391eeab587012ed0ab01e462 | ["CC-BY-4.0", "BSD-3-Clause"] | null | null | null |
t = np.arange(0, 10, 0.1) # Time from 0 to 10 years in 0.1 steps
with plt.xkcd():
p = np.exp(0.3 * t)
fig = plt.figure(figsize=(6, 4))
plt.plot(t, p)
plt.ylabel('Population (millions)')
plt.xlabel('time (years)')
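  # Editor's note: with p = exp(0.3 * t), the population doubles every
  # ln(2) / 0.3 ≈ 2.31 years.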
  plt.show()
| 21.636364 | 64 | 0.60084 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 75 | 0.315126
a0b4e94b3b4a3e4439a5b84940a160611b866816 | 1063 | py | Python | test/python/squarepants_test/plugins/test_link_resources_jars.py | ericzundel/mvn2pants | 59776864939515bc0cae28e1b89944ce55b98b21 | ["Apache-2.0"] | 8 | 2015-04-14T22:37:56.000Z | 2021-01-20T19:46:40.000Z | test/python/squarepants_test/plugins/test_link_resources_jars.py | ericzundel/mvn2pants | 59776864939515bc0cae28e1b89944ce55b98b21 | ["Apache-2.0"] | 1 | 2016-01-13T23:19:14.000Z | 2016-01-22T22:47:48.000Z | test/python/squarepants_test/plugins/test_link_resources_jars.py | ericzundel/mvn2pants | 59776864939515bc0cae28e1b89944ce55b98b21 | ["Apache-2.0"] | 3 | 2015-12-13T08:35:34.000Z | 2018-08-01T17:44:59.000Z |
# Tests for code in squarepants/src/main/python/squarepants/plugins/copy_resources/tasks/copy_resource_jars
#
# Run with:
# ./pants test squarepants/src/test/python/squarepants_test/plugins:copy_resources
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants_test.tasks.task_test_base import TaskTestBase
from squarepants.plugins.link_resources_jars.targets.resources_jar import ResourcesJar
from squarepants.plugins.link_resources_jars.tasks.link_resources_jars import LinkResourcesJars
class CopyResourcesTest(TaskTestBase):
@classmethod
def task_type(cls):
return LinkResourcesJars
def test_resources_jar_target(self):
jar = JarDependency(org='foo', name='bar', rev='1.2.3')
lib = self.make_target(spec='test/foo-library', target_type=JarLibrary, jars=[jar])
resource_jar = self.make_target(spec='test/copy-resources', target_type=ResourcesJar,
dependencies=[lib], dest='foo.jar')
self.assertEquals('foo.jar', resource_jar.payload.dest)
| 40.884615 | 107 | 0.804327 | 485 | 0.456256 | 0 | 0 | 63 | 0.059266 | 0 | 0 | 275 | 0.258702 |
a0b646cbb8b05a36f6c66a8ee0acf369718630ee | 2339 | py | Python | src/binwalk/__main__.py | puppywang/binwalk | fa0c0bd59b8588814756942fe4cb5452e76c1dcd | ["MIT"] | 5,504 | 2017-11-30T21:25:07.000Z | 2022-03-31T17:00:58.000Z | src/binwalk/__main__.py | puppywang/binwalk | fa0c0bd59b8588814756942fe4cb5452e76c1dcd | ["MIT"] | 247 | 2017-12-07T06:09:56.000Z | 2022-03-23T05:34:47.000Z | src/binwalk/__main__.py | puppywang/binwalk | fa0c0bd59b8588814756942fe4cb5452e76c1dcd | ["MIT"] | 953 | 2017-12-01T17:05:17.000Z | 2022-03-26T13:15:33.000Z |
import os
import sys
# If installed to a custom prefix directory, binwalk may not be in
# the default module search path(s). Try to resolve the prefix module
# path and make it the first entry in sys.path.
# Ensure that 'src/binwalk' becomes '.' instead of an empty string
_parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
for _module_path in [
# from repo: src/scripts/ -> src/
_parent_dir,
# from build dir: build/scripts-3.4/ -> build/lib/
os.path.join(_parent_dir, "lib"),
# installed in non-default path: bin/ -> lib/python3.4/site-packages/
os.path.join(_parent_dir,
"lib",
"python%d.%d" % (sys.version_info[0], sys.version_info[1]),
"site-packages")
]:
if os.path.exists(_module_path) and _module_path not in sys.path:
sys.path = [_module_path] + sys.path
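# Illustrative example (editor's addition): for a script at /opt/binwalk/bin/binwalk,
# _parent_dir is /opt/binwalk, so the candidates tried are /opt/binwalk,
# /opt/binwalk/lib and /opt/binwalk/lib/python3.4/site-packages (on Python 3.4).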
import binwalk
import binwalk.modules
def runme():
with binwalk.Modules() as modules:
try:
if len(sys.argv) == 1:
sys.stderr.write(modules.help())
sys.exit(1)
# If no explicit module was enabled in the command line arguments,
# run again with the default signature scan explicitly enabled.
elif not modules.execute():
# Make sure the Signature module is loaded before attempting
# an implicit signature scan; else, the error message received
# by the end user is not very helpful.
if hasattr(binwalk.modules, "Signature"):
modules.execute(*sys.argv[1:], signature=True)
else:
sys.stderr.write("Error: Signature scans not supported; ")
sys.stderr.write("make sure you have python-lzma installed and try again.\n")
sys.exit(2)
except binwalk.ModuleException as e:
sys.exit(3)
def main():
try:
# Special options for profiling the code. For debug use only.
if '--profile' in sys.argv:
import cProfile
sys.argv.pop(sys.argv.index('--profile'))
cProfile.run('runme()')
else:
runme()
except IOError:
pass
except KeyboardInterrupt:
sys.stdout.write("\n")
if __name__ == "__main__":
main()
| 35.984615 | 97 | 0.595554 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 944 | 0.403591 |
a0b73f136f5ae88a402fa6be43272da9242cdedc | 642 | py | Python | MINI_WEB/mini_web/framework/mini_frame_4.py | GalphaXie/LaoX | b7e8f9744292dc052c870e4d873052e9bfec19ee | ["MIT"] | null | null | null | MINI_WEB/mini_web/framework/mini_frame_4.py | GalphaXie/LaoX | b7e8f9744292dc052c870e4d873052e9bfec19ee | ["MIT"] | 12 | 2020-03-24T17:39:25.000Z | 2022-03-12T00:01:24.000Z | MINI_WEB/mini_web/framework/mini_frame_4.py | GalphaXie/LaoX | b7e8f9744292dc052c870e4d873052e9bfec19ee | ["MIT"] | null | null | null |
#!/usr/bin/python3
# file: mini_frame.py
# Created by Guang at 19-7-19
# description:
# -*- coding: utf-8 -*-
import time
def login():
return "Welcome xxx login website! %s" % time.ctime()
def register():
return "Welcome xxx register on our website! %s" % time.ctime()
def profile():
return "你来到了一片荒原之上..."
def application(env, start_response):
    start_response('200 OK', [("Content-Type", "text/html;charset=utf-8")])  # a single Content-Type header; the original sent the header twice
if env["FILE_PATH"] == "/login.py":
return login()
elif env["FILE_PATH"] == "/register.py":
return register()
else:
return profile()
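# Minimal driver sketch (editor's addition; the real mini_web server builds env
# and start_response itself, so the names here are illustrative):
#
#   def fake_start_response(status, headers):
#       print(status, headers)
#
#   body = application({"FILE_PATH": "/login.py"}, fake_start_response)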
| 21.4 | 106 | 0.619938 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 331 | 0.5 |
a0b7ca82a2ce39606a44ac65893f26c1b02da5d3 | 3174 | py | Python | server.py | AndrewB330/BinanceTerminal | 3699a295d2b2af810d30ff692bab4e106ec44392 | ["MIT"] | 14 | 2020-03-09T04:08:03.000Z | 2021-12-29T14:53:32.000Z | server.py | AndrewB330/BinanceTerminal | 3699a295d2b2af810d30ff692bab4e106ec44392 | ["MIT"] | null | null | null | server.py | AndrewB330/BinanceTerminal | 3699a295d2b2af810d30ff692bab4e106ec44392 | ["MIT"] | null | null | null |
import time
import pymongo
import schedule
from order import *
from utils import *
# MONGODB
db = pymongo.MongoClient("mongodb://localhost:27017/")["ShaurmaBinanceTerminal"]
order_db = db["orders"]
JOB_INTERVAL = 10.0 # interval of updating
jobs_pool = {}
def worker(symbol):
try:
time_start = time.time()
# get all active orders
active_orders = order_db.find({
'$and': [
{'symbol': symbol},
{'$or': [
{'status': OrderStatus.WAITING},
{'status': OrderStatus.PLACED}
]}
]
})
# update all active orders
for json_order in active_orders:
order = Order(json_order)
order.update()
order_db.update_one({"_id": order._id}, {"$set": order.to_json()})
# adjust updating period
time_elapsed = min(JOB_INTERVAL, time.time() - time_start)
jobs_pool[symbol].interval = JOB_INTERVAL - time_elapsed
except Exception as e:
log.error('Worker %s error: %s', symbol, repr(e))
def jobs_maintainer():
# get all active symbols
cursor = order_db.find({
'$or': [
{'status': OrderStatus.WAITING},
{'status': OrderStatus.PLACED}
]
}).distinct('symbol')
working = set()
# run jobs for not working, but active symbols
for symbol in cursor:
if symbol not in jobs_pool:
log.info('Worker started, symbol: %s', symbol)
jobs_pool[symbol] = schedule.every(JOB_INTERVAL).seconds.do(worker, symbol=symbol)
jobs_pool[symbol].run()
working.add(symbol)
# remove jobs for working, but not active symbols
for k in list(jobs_pool.keys()):
if k not in working:
log.info('Worker stopped, symbol: %s', k)
schedule.cancel_job(jobs_pool[k])
jobs_pool.pop(k)
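# Editor's note: worker() shrinks its own job's interval by the time the update
# took (see above), so each symbol is refreshed roughly every JOB_INTERVAL
# seconds regardless of how long the database pass takes.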
def initialize_test_db():
order_db.drop()
o = [
create_limit('BTCUSDT', Side.BUY, Decimal('7400.00'), Decimal('0.0015')),
create_limit('BTCUSDT', Side.BUY, Decimal('7103.65'), Decimal('0.0020')),
create_limit('BTCUSDT', Side.SELL, Decimal('9500.00'), Decimal('0.0030')),
create_limit('BTCUSDT', Side.SELL, Decimal('9600.00'), Decimal('0.0010')),
create_market_stop('BTCUSDT', Side.SELL, Decimal('6675.50'), Decimal('0.0035')),
create_trailing_market_stop('BTCUSDT', Side.SELL, Decimal('100.00'), Decimal('7600.00'), Decimal('0.0035')),
create_market_stop('XLMUSDT', Side.SELL, Decimal('0.105'), Decimal('50.0')),
create_trailing_market_stop('XLMUSDT', Side.SELL, Decimal('0.01'), Decimal('0.14'), Decimal('0.0015')),
create_trailing_market_stop('XLMUSDT', Side.SELL, Decimal('0.01'), Decimal('0.14'), Decimal('0.0015'))
]
for o in o:
order_db.insert_one(o.to_json())
print('Test DB initialized')
def run_server():
maintainer = schedule.every(5).seconds.do(jobs_maintainer)
maintainer.run()
while True:
schedule.run_pending()
time.sleep(0.1)
if __name__ == '__main__':
# initialize_test_db()
run_server()
| 31.74 | 116 | 0.598614 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 732 | 0.230624 |
a0b8186276c361f655fc43a3b80aba5c60bd0210 | 4979 | py | Python | sarenka/backend/api_searcher/searcher_full.py | adolabsnet/sarenka | 2032aa6ddebfc69b0db551b7793080d17282ced2 | ["MIT"] | 380 | 2019-12-05T09:37:47.000Z | 2022-03-31T09:37:27.000Z | sarenka/backend/api_searcher/searcher_full.py | watchmen-coder/sarenka | d7fc0928e4992de3dbb1546137ca6a158e930ba8 | ["MIT"] | 14 | 2020-09-26T17:49:42.000Z | 2022-02-04T18:16:16.000Z | sarenka/backend/api_searcher/searcher_full.py | watchmen-coder/sarenka | d7fc0928e4992de3dbb1546137ca6a158e930ba8 | ["MIT"] | 60 | 2021-01-01T16:25:30.000Z | 2022-03-26T18:48:03.000Z |
"""
Module that ties all the searches together into one class - all data for an IP address/domain.
The class is used directly in the Django view.
"""
from rest_framework.reverse import reverse
from typing import List, Dict
import whois
import socket
from connectors.credential import CredentialsNotFoundError
from api_searcher.search_engines.censys_engine.censys_host_search import CensysHostSearch
from api_searcher.search_engines.shodan_engine.shodan_host_search import ShodanHostSearch
from .dns.dns_searcher import DNSSearcher, DNSSearcherError
class SearcherFull:
"""Klasa zwracajaca wszystkie znalezione dane - zwraca infromacje ze wszystkich serwisów trzeich, informacje o DNS etc."""
def __init__(self, ip_address:str, local_host_address="", user_credentials=None):
self.host = ip_address
self.host_address = local_host_address
self.user_credentials = user_credentials
def get_whois_data(self):
"""Metoda zwraca dane z bazy whois."""
return whois.whois(self.host)
def get_banner(self, port_list)->List[Dict]:
"""Metoda zwraca banery, które pórbuje uzyskac dla otwartych portów zwróconych przez seriwsy trzecie"""
result = []
for port in port_list:
s = socket.socket()
s.connect((self.host, int(port)))
s.settimeout(5)
try:
                # if there is no banner, recv raises a timeout
response = s.recv(1024)
if response:
result.append({port: response})
except socket.timeout:
result.append({port: "Unable to grab banner."})
return result
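    # Illustrative result shape (editor's addition):
    #   get_banner(["21", "80"]) -> [{"21": b"220 FTP server ready"},
    #                                {"80": "Unable to grab banner."}]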
def get_censys_data(self):
"""Metoda zwraca dane wyszukane w serwisie http://censys.io/"""
try:
if not self.user_credentials:
raise CredentialsNotFoundError("UserCredentials object does not exist.")
except CredentialsNotFoundError as ex:
settings_url = self.host_address + reverse("user_credentials")
return {
"censys": {
"error": "Unable to get credentials for service http://censys.io/. "
"Please create account on https://censys.io/ and add valid settings "
f"for SARENKA app on {settings_url}",
"details": str(ex)
}
}
try:
response = CensysHostSearch(self.user_credentials).get_data(self.host) #
response.update({"banners": self.get_banner(response["ports"])})
return response
except Exception as ex:
            # censys does not expose the CensysNotFoundException class for import o.Ó
return {
"censys": {
"error": f"Unable to get infromation from https://censys.io/ service.",
"details": str(ex)
}
}
def get_shodan_data(self):
"""Metoda zwraca dane wyszukane w serwisie https://www.shodan.io/"""
try:
if not self.user_credentials:
raise CredentialsNotFoundError("UserCredentials object does not exist.")
except CredentialsNotFoundError as ex:
settings_url = self.host_address + reverse("user_credentials")
return {
"shodan": {
"error": "Unable to get credentials for service https://www.shodan.io/. "
"Please create account on https://www.shodan.io/ and add valid settings "
f"for SARENKA app on {settings_url}",
"details": str(ex)
}
}
try:
response = ShodanHostSearch(self.user_credentials).get_data(self.host) #
return response
except Exception as ex:
            # censys does not expose the CensysNotFoundException class for import o.Ó
return {
"shodan": {
"error": f"Unable to get infromation from https://www.shodan.io/ service.",
"details": str(ex)
}
}
def get_dns_data(self):
"""Metoda zwraca informacje o rekordach DNS hosta."""
try:
data = DNSSearcher(self.host).get_data()
return data
except DNSSearcherError as ex:
return {"error": str(ex)}
except Exception as ex:
return {"error": f"Unable to get DNS record data for host={self.host}.", "details": str(ex)}
@property
def values(self):
"""Zwraca jsona ze wszystkimi danymi - metoda pomocna dla widoków Django."""
response = {
"whois": self.get_whois_data(),
"dns_data": self.get_dns_data(),
}
response.update({"censys": self.get_censys_data()})
response.update({"shodan": self.get_shodan_data()})
        return response
| 40.811475 | 126 | 0.589677 | 4,444 | 0.88969 | 0 | 0 | 382 | 0.076476 | 0 | 0 | 1,705 | 0.341341
a0ba9ed937616e6ee4572b155cab4164464097a6 | 75 | py | Python | Codewars/8kyu/get-the-mean-of-an-array/Python/solution1.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | ["MIT"] | 7 | 2017-09-20T16:40:39.000Z | 2021-08-31T18:15:08.000Z | Codewars/8kyu/get-the-mean-of-an-array/Python/solution1.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | ["MIT"] | null | null | null | Codewars/8kyu/get-the-mean-of-an-array/Python/solution1.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | ["MIT"] | null | null | null |
# Python - 3.6.0
get_average = lambda marks: int(sum(marks) / len(marks))
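# e.g. get_average([1, 2, 3, 4]) == 2  (int() truncates the exact mean 2.5)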
| 18.75 | 56 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0.213333 |