Dataset schema. Each record below lists the following fields in order, separated by `|`, with the file `content` inlined between the fork-event columns and the derived statistics. Columns marked "nullable" may be empty (⌀).

| Column | Type | Values / lengths |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64, nullable | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string, nullable | length 24 |
| max_stars_repo_stars_event_max_datetime | string, nullable | length 24 |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64, nullable | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string, nullable | length 24 |
| max_issues_repo_issues_event_max_datetime | string, nullable | length 24 |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64, nullable | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string, nullable | length 24 |
| max_forks_repo_forks_event_max_datetime | string, nullable | length 24 |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
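As a quick illustration of how this schema might be used, here is a minimal sketch that loads records with these columns and filters on the derived scores. The parquet file name and the 0.3 documentation-score threshold are assumptions made for the example, not values taken from the dataset.

```python
import pandas as pd

# Hypothetical file name; any file materialising the schema above would work.
df = pd.read_parquet("python_files.parquet")

# Keep reasonably documented files that define at least one class.
mask = (df["score_documentation"] >= 0.3) & (df["count_classes"] > 0)
selected = df.loc[mask, ["max_stars_repo_name", "max_stars_repo_path", "size", "content"]]

print(f"kept {len(selected)} of {len(df)} records")
```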
2665879f29151dd4bf50f5357e2d4d3726e90112 | 391 | py | Python | Google Drive/Learning/Python/DataScience/Classes/Dog.py | mobiusworkspace/mobiuswebsite | 73eef1bd4fc07ea318aad431de09eac10fc4da3a | [
"CC-BY-3.0"
] | null | null | null | Google Drive/Learning/Python/DataScience/Classes/Dog.py | mobiusworkspace/mobiuswebsite | 73eef1bd4fc07ea318aad431de09eac10fc4da3a | [
"CC-BY-3.0"
] | null | null | null | Google Drive/Learning/Python/DataScience/Classes/Dog.py | mobiusworkspace/mobiuswebsite | 73eef1bd4fc07ea318aad431de09eac10fc4da3a | [
"CC-BY-3.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 17 01:08:13 2020
@author: OAdeoye
"""
class Dog:
def __init__(self, name):
self.name = name
def respond_to_command(self, command):
if (command == self.name):
print(self.name + " is Barking!!!")
bingo = Dog("Bingo")
bingo.respond_to_command("Bingo")
bingo = Dog("Winco")
bingo.respond_to_command("Bingo") | 19.55 | 47 | 0.618926 | 191 | 0.488491 | 0 | 0 | 0 | 0 | 0 | 0 | 128 | 0.327366 |
26665c848e7389ad3807abd7e96f3f0161f6a4ff | 704 | py | Python | tests/test_query.py | tikazyq/google-analytics-api-wrapper | 91ffc1d6b4cb810aea20541153d64730873903b9 | [
"MIT"
] | 6 | 2015-03-11T01:38:54.000Z | 2021-07-09T21:40:12.000Z | tests/test_query.py | QPC-database/google-analytics-api-wrapper | 91ffc1d6b4cb810aea20541153d64730873903b9 | [
"MIT"
] | null | null | null | tests/test_query.py | QPC-database/google-analytics-api-wrapper | 91ffc1d6b4cb810aea20541153d64730873903b9 | [
"MIT"
] | 1 | 2021-07-09T21:40:13.000Z | 2021-07-09T21:40:13.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
from datetime import datetime, timedelta
from analytics_query import analytics_query as aq
START_DATE = (datetime.now() - timedelta(7)).strftime('%Y-%m-%d')
END_DATE = (datetime.now() - timedelta(1)).strftime('%Y-%m-%d')
start_date = '2015-03-03'
end_date = '2015-03-05'
def main():
df = aq.get_api_query(
start_date=start_date,
end_date=end_date,
dimensions='ga:transactionId,ga:date,ga:dateHour,ga:campaign,ga:source,ga:hostname',
metrics='ga:transactionRevenue,ga:itemRevenue,ga:itemQuantity,ga:transactions',
oldest_prf=True,
)
    print(df.head())
if __name__ == '__main__':
main()
| 24.275862 | 92 | 0.674716 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 236 | 0.335227 |
2669475d57fe48eb8f470f059b2de2b3e28b5b3e | 2,864 | py | Python | GameManager.py | redxdev/Matching | 6d65933a64bf0f22a18a27c675cb8e95f4161e08 | [
"MIT"
] | 1 | 2016-05-06T10:23:24.000Z | 2016-05-06T10:23:24.000Z | GameManager.py | redxdev/Matching | 6d65933a64bf0f22a18a27c675cb8e95f4161e08 | [
"MIT"
] | null | null | null | GameManager.py | redxdev/Matching | 6d65933a64bf0f22a18a27c675cb8e95f4161e08 | [
"MIT"
] | null | null | null | from WordList import WordList, WordCard
import pygame
class GameManager:
def __init__(self):
self.wordList = WordList()
self.cards = []
self.badCards = (None, None)
self.goodCards = (None, None)
self.timer = 0
def startGame(self, pairCount):
        self.cards = self.wordList.getRandomCards(pairCount)
def generateCardPosition(self, screenSize, i):
cardW, cardH = screenSize
cardW /= 9
cardH /= 5
return ((i % 4) * (cardW * 2) + cardW, int(i / 4) * (cardH * 2) + cardH, cardW, cardH)
def draw(self, screen):
if self.timer <= 0:
bad1, bad2 = self.badCards
if bad1 is not None and bad2 is not None:
bad1.selected = False
bad2.selected = False
self.badCards = (None, None)
good1, good2 = self.goodCards
if good1 is not None and good2 is not None:
good1.selected = False
good2.selected = False
good1.active = False
good2.active = False
self.goodCards = (None, None)
self.checkForGameEnd()
if self.timer > 0:
self.timer -= 0.08
for i in range(0, len(self.cards)):
card = self.cards[i]
if card.active:
screenW, screenH = screen.get_size()
x, y, cardW, cardH = self.generateCardPosition((screenW, screenH), i)
card.draw(screen, (x, y), (cardW, cardH), screenW / 64)
def onClick(self, screen, x, y):
bad1, bad2 = self.badCards
if bad1 is not None and bad2 is not None:
return
good1, good2 = self.goodCards
if good1 is not None and good2 is not None:
return
found = None
for i in range(0, len(self.cards)):
card = self.cards[i]
cx, cy, cw, ch = self.generateCardPosition(screen.get_size(), i)
if x >= cx and x <= cx + cw and y >= cy and y <= cy + ch:
found = card
break
if found is not None:
self.select(found)
def select(self, card):
if card.selected:
card.selected = False
return
other = None
for c in self.cards:
if c.selected:
other = c
break
if other == None:
card.selected = True
return
if other.matches(card):
card.selected = True
self.goodCards = (card, other)
self.timer = 3.0
else:
card.selected = True
self.badCards = (card, other)
self.timer = 3.0
def checkForGameEnd(self):
for c in self.cards:
if c.active:
return
self.startGame(4) | 28.929293 | 94 | 0.50419 | 2,808 | 0.980447 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
266b073ce320af6c6412a8f34133f369b56ae914 | 1,687 | py | Python | src/main.py | ekim1919/TDAGo | 014db546dae3dedb4f7206288333756fc358ed8a | [
"MIT"
] | null | null | null | src/main.py | ekim1919/TDAGo | 014db546dae3dedb4f7206288333756fc358ed8a | [
"MIT"
] | null | null | null | src/main.py | ekim1919/TDAGo | 014db546dae3dedb4f7206288333756fc358ed8a | [
"MIT"
] | null | null | null | from plot import *
from experiments import *
import warnings
warnings.filterwarnings("ignore") #Ignore warnings for now
import sys
import os
import argparse
def main():
parser = argparse.ArgumentParser(description='Analysis of Go Games')
parser.add_argument('dir',nargs='*')
parser.add_argument('--conn',dest="conn",action='store_true')
parser.add_argument('--avg',dest="avg",action='store_true')
parser.add_argument('--score',dest="score",action='store_true')
parser.add_argument('--anim',dest="anim",action='store_true')
args = parser.parse_args()
if args.conn:
run_conn_routine(args.dir[0])
if args.avg:
predict_avg_experi(args.dir[0],args.dir[1])
if args.score:
test_score_routine(args.dir[0])
if args.anim:
test_anim_routine(args.dir[0])
#test_save_routine(str(argv[0]))
if __name__ == '__main__':
main()
#Test routines
#Animation routines
#Persistence Diagrams?\
#Go analysis features.
#Ideas
#How to interpret H_1 points on DGMS? For example, if a point has a earlier,later birthtime vs earlier,later deathtime? How do we interpret this as properties of possible enclosed territory.
#We can now start to add points to the white/black board to model obstructions to building territory. A good idea would be to find ways to create "meaningful" boards for analysis of specific advantage properties.
#Research more about Go fighting strategies and early,late game caveats
#Create a modular framework such that you have TDA-DATA -> plot modules -> customizable plot figure -> analysis interface
#Create a caching scheme to cache all sequential computations and diagrams made. See cache-tools
| 35.145833 | 212 | 0.740368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 982 | 0.582098 |
266b7396a2ed1939667431e7fe0b116000780021 | 1,358 | py | Python | script/python3/util/env.py | setminami/IrControl | bcdd44b7f6aeca75226cdcfc611dc63032c38949 | [
"MIT"
] | null | null | null | script/python3/util/env.py | setminami/IrControl | bcdd44b7f6aeca75226cdcfc611dc63032c38949 | [
"MIT"
] | 2 | 2018-09-21T11:53:28.000Z | 2018-12-30T03:37:23.000Z | script/python3/util/env.py | setminami/IrControl | bcdd44b7f6aeca75226cdcfc611dc63032c38949 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# this made for python3
from os import environ
def expand_env(params, verbose=False):
""" dotenv like function, but not dotenv """
for key, val in params.items():
_print('try %s, %s'%(key, val), verbose)
if isinstance(val, dict):
_print('ORDEREDDICT', verbose)
params[key] = expand_env(val, verbose)
elif isinstance(val, list):
_print('LIST', verbose)
params[key] = [expand_env(x, verbose) for x in val]
elif isinstance(val, str) and (val.startswith('${') \
and val.endswith('}')):
_print('LEAF', verbose)
env_key = val[2:-1]
if env_key in list(environ.keys()):
params[key] = environ[env_key]
_print('Overwrite env value {} = {}'.format(val, '***'), verbose)
                _print('If IFTTT triggers do not fire, please re-check your own IFTTT key settings.')
else:
_print('## {} not exported for {}. Please check your yaml file and env. ##'.format(env_key, key), verbose)
_print('Env {} vs keys = {}'.format(env_key, list(environ.keys())), verbose)
exit(1)
else:
_print('?? %s TYPE is %s'%(val, type(val)), verbose)
return params
def _print(msg, v=False):
if v: print(msg)
| 39.941176 | 122 | 0.54271 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 348 | 0.256259 |
266c5f9566178c353cbde59b14658db79e486f2e | 236 | py | Python | script/pipeline/setup/setup.py | cpuabuse/py-deployment-automation | aea0c48ac4c5a81f2e027c984ab65f911ad29d0d | [
"0BSD"
] | 1 | 2020-02-23T22:35:28.000Z | 2020-02-23T22:35:28.000Z | script/pipeline/setup/setup.py | cpuabuse/py-deployment-automation | aea0c48ac4c5a81f2e027c984ab65f911ad29d0d | [
"0BSD"
] | null | null | null | script/pipeline/setup/setup.py | cpuabuse/py-deployment-automation | aea0c48ac4c5a81f2e027c984ab65f911ad29d0d | [
"0BSD"
] | null | null | null | """
A file for setup.
"""
# Metadata
__author__ = "cpuabuse.com"
__copyright__ = "cpuabuse.com 2019"
__license__ = "ISC"
__version__ = "0.0.1"
__email__ = "[email protected]"
__status__ = "Development"
# Minimum python version is 3.6 | 18.153846 | 35 | 0.711864 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 144 | 0.610169 |
266efdf5f618ad871cc4108d4a51b575ba968601 | 6,392 | py | Python | Kinkajou/python/admin/opencode.py | app858216291-github/Kinkajou-shop | ee1e841e26407b1dcbd14601e5fe34b6422eba29 | [
"MIT"
] | null | null | null | Kinkajou/python/admin/opencode.py | app858216291-github/Kinkajou-shop | ee1e841e26407b1dcbd14601e5fe34b6422eba29 | [
"MIT"
] | null | null | null | Kinkajou/python/admin/opencode.py | app858216291-github/Kinkajou-shop | ee1e841e26407b1dcbd14601e5fe34b6422eba29 | [
"MIT"
] | null | null | null | from admin.upload import FileUploadField, ImageUploadField
from flask_babelex import Babel
from flask_admin._compat import urljoin
from flask import redirect
from flask_admin._compat import quote
from flask_admin.contrib.fileadmin import FileAdmin
from flask_admin import Admin, BaseView, expose
from flask_admin.babel import gettext, lazy_gettext
from flask import flash, redirect, abort, request, send_file
from flask_admin import form, helpers
import os.path as op
import flask_login as login
from wtforms.widgets import html_params
from common import aliyun, tools
from setting import Aliyun
allowed_file = lambda filename: '.' in filename and filename.rsplit('.', 1)[1] in set(
['png', 'jpg', 'jpeg', 'gif', 'bmp'])
def uploadFile(f):
if f and allowed_file(f.filename):
filname=aliyun.upload(f,'product')
r=tools.shopUtil.docManger(f,"https://"+Aliyun.bucketName+".oss-cn-beijing.aliyuncs.com/product/"+filname,"https://"+Aliyun.bucketName+".oss-cn-beijing.aliyuncs.com/product/")
return r
else:
return "filename is null"
class MXFileAdmin(FileAdmin):
def is_accessible(self):
if login.current_user.is_authenticated:
if login.current_user.username=='admin':
return True
return False
return False
def _save_form_files(self, directory, path, form):
super()
filename = self._separator.join([directory, form.upload.data.filename])
if self.storage.path_exists(filename):
secure_name = self._separator.join([path, form.upload.data.filename])
raise Exception(gettext('File "%(name)s" already exists.',
name=secure_name))
else:
self.save_file(filename, form.upload.data)
self.on_file_upload(directory, path, filename)
@expose('/download/<path:path>')
def download(self, path=None):
"""
Download view method.
:param path:
File path.
"""
if not self.can_download:
abort(404)
base_path, directory, path = self._normalize_path(path)
# backward compatibility with base_url
base_url = self.get_base_url()
if base_url:
base_url = urljoin(self.get_url('.index_view'), base_url)
path=path.replace('\\', '/')
print("------1------")
print(base_url)
print(path)
return redirect(urljoin(quote(base_url), quote(path)))
directory=directory.replace('\\', '/')
print("-------2-----")
print(directory)
return self.storage.send_file(directory)
@expose('/rename/', methods=('GET', 'POST'))
def rename(self):
"""
Rename view method
"""
form = self.name_form()
path = form.path.data
if path:
base_path, full_path, path = self._normalize_path(path)
return_url = self._get_dir_url('.index_view', op.dirname(path))
else:
return redirect(self.get_url('.index_view'))
if not self.can_rename:
flash(gettext('Renaming is disabled.'), 'error')
return redirect(return_url)
if not self.is_accessible_path(path):
flash(gettext('Permission denied.'), 'error')
return redirect(self._get_dir_url('.index_view'))
if not self.storage.path_exists(full_path):
flash(gettext('Path does not exist.'), 'error')
return redirect(return_url)
if self.validate_form(form):
try:
dir_base = op.dirname(full_path)
filename = form.name.data
# print(fi)
self.storage.rename_path(full_path, self._separator.join([dir_base, filename]))
self.on_rename(full_path, dir_base, filename)
flash(gettext('Successfully renamed "%(src)s" to "%(dst)s"',
src=op.basename(path),
dst=filename), 'success')
except Exception as ex:
flash(gettext('Failed to rename: %(error)s', error=ex), 'error')
return redirect(return_url)
else:
helpers.flash_errors(form, message='Failed to rename: %(error)s')
if self.rename_modal and request.args.get('modal'):
template = self.rename_modal_template
else:
template = self.rename_template
return self.render(template, form=form, path=op.dirname(path),
name=op.basename(path), dir_url=return_url,
header_text=gettext('Rename %(name)s',
name=op.basename(path)))
from flask_admin.helpers import get_url
from flask_admin._compat import string_types, urljoin
class MxImageUploadField(ImageUploadField):
def _save_file(self, data, filename):
path = self._get_path(filename)
data.seek(0)
filename=uploadFile(data)
return filename
# def __call__(self, field, **kwargs):
# kwargs.setdefault('id', field.id)
# kwargs.setdefault('name', field.name)
#
# args = {
# 'text': html_params(type='hidden',
# value=field.data,
# name=field.name),
# 'file': html_params(type='file',
# **kwargs),
# 'marker': '_%s-delete' % field.name
# }
#
# if field.data and isinstance(field.data, string_types):
# url = self.get_url(field)
# args['image'] = html_params(src=url)
#
# template = self.data_template
# else:
# template = self.empty_template
# print(template % args)
# return Markup(template % args)
def get_url(self, field):
if field.thumbnail_size:
filename = field.thumbnail_fn(field.data)
else:
filename = field.data
if field.url_relative_path:
filename = urljoin(field.url_relative_path, filename)
return get_url(field.endpoint, filename=filename)
| 37.380117 | 184 | 0.571339 | 5,181 | 0.810544 | 0 | 0 | 2,991 | 0.467929 | 0 | 0 | 1,497 | 0.234199 |
267010ecd5efb0c3498de085c2712903abc79773 | 4,137 | py | Python | liminal/runners/airflow/operators/kubernetes_pod_operator_with_input_output.py | aviemzur/incubator-liminal | 88174a6fe519f9a6052f6e5d366a37a88a915ee4 | [
"Apache-2.0"
] | 1 | 2021-03-24T08:23:03.000Z | 2021-03-24T08:23:03.000Z | liminal/runners/airflow/operators/kubernetes_pod_operator_with_input_output.py | liorsav/incubator-liminal | 88174a6fe519f9a6052f6e5d366a37a88a915ee4 | [
"Apache-2.0"
] | null | null | null | liminal/runners/airflow/operators/kubernetes_pod_operator_with_input_output.py | liorsav/incubator-liminal | 88174a6fe519f9a6052f6e5d366a37a88a915ee4 | [
"Apache-2.0"
] | null | null | null | import json
from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator
def _split_list(seq, num):
k, m = divmod(len(seq), num)
return list(
(seq[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(num))
)
_IS_SPLIT_KEY = 'is_split'
class PrepareInputOperator(KubernetesPodOperator):
def __init__(self,
input_type=None,
input_path=None,
split_input=False,
executors=1,
*args,
**kwargs):
namespace = kwargs.pop('namespace')
image = kwargs.pop('image')
name = kwargs.pop('name')
super().__init__(
namespace=namespace,
image=image,
name=name,
*args,
**kwargs)
self.input_type = input_type
self.input_path = input_path
self.executors = executors
self.split_input = split_input
def execute(self, context):
input_dict = {}
self.log.info(f'config type: {self.input_type}')
ti = context['task_instance']
if self.input_type:
if self.input_type == 'file':
input_dict = {} # future feature: return config from file
elif self.input_type == 'sql':
input_dict = {} # future feature: return from sql config
elif self.input_type == 'task':
self.log.info(self.input_path)
input_dict = ti.xcom_pull(task_ids=self.input_path)
elif self.input_type == 'static':
input_dict = json.loads(self.input_path)
else:
raise ValueError(f'Unknown config type: {self.input_type}')
run_id = context['dag_run'].run_id
print(f'run_id = {run_id}')
if input_dict:
self.log.info(f'Generated input: {input_dict}')
if self.split_input:
input_splits = _split_list(input_dict, self.executors)
numbered_splits = list(
zip(range(len(input_splits)), input_splits)
)
self.log.info(numbered_splits)
ti.xcom_push(key=_IS_SPLIT_KEY, value=True)
return input_splits
else:
return input_dict
else:
return {}
def run_pod(self, context):
return super().execute(context)
class KubernetesPodOperatorWithInputAndOutput(KubernetesPodOperator):
"""
TODO: pydoc
"""
_LIMINAL_INPUT_ENV_VAR = 'LIMINAL_INPUT'
def __init__(self,
task_split,
input_task_id=None,
*args,
**kwargs):
namespace = kwargs.pop('namespace')
image = kwargs.pop('image')
name = kwargs.pop('name')
super().__init__(
namespace=namespace,
image=image,
name=name,
*args,
**kwargs)
self.input_task_id = input_task_id
self.task_split = task_split
def execute(self, context):
task_input = {}
if self.input_task_id:
ti = context['task_instance']
self.log.info(f'Fetching input for task {self.task_split}.')
task_input = ti.xcom_pull(task_ids=self.input_task_id)
is_split = ti.xcom_pull(task_ids=self.input_task_id, key=_IS_SPLIT_KEY)
self.log.info(f'is_split = {is_split}')
if is_split:
self.log.info(f'Fetching split {self.task_split} of input.')
task_input = task_input[self.task_split]
if task_input:
self.log.info(f'task input = {task_input}')
self.env_vars.update({self._LIMINAL_INPUT_ENV_VAR: json.dumps(task_input)})
else:
self.env_vars.update({self._LIMINAL_INPUT_ENV_VAR: '{}'})
self.log.info(f'Empty input for task {self.task_split}.')
run_id = context['dag_run'].run_id
print(f'run_id = {run_id}')
self.env_vars.update({'run_id': run_id})
return super().execute(context)
| 28.93007 | 87 | 0.555958 | 3,839 | 0.927967 | 0 | 0 | 0 | 0 | 0 | 0 | 596 | 0.144066 |
2670f782ce4049f02c248c80f13a94aafff1be8d | 1,440 | py | Python | game/content/ghplots/__init__.py | AmkG/gearhead-caramel | 0238378295a09b4b33adb2ec0854fa06b0ad7b1b | [
"Apache-2.0"
] | null | null | null | game/content/ghplots/__init__.py | AmkG/gearhead-caramel | 0238378295a09b4b33adb2ec0854fa06b0ad7b1b | [
"Apache-2.0"
] | null | null | null | game/content/ghplots/__init__.py | AmkG/gearhead-caramel | 0238378295a09b4b33adb2ec0854fa06b0ad7b1b | [
"Apache-2.0"
] | null | null | null | import inspect
from . import actionscenes
from . import dd_combatmission
from . import dd_customobjectives
from . import dd_distanttown
from . import dd_homebase
from . import dd_intro
from . import dd_lancedev
from . import dd_main
from . import dd_roadedge
from . import dd_roadedge_propp
from . import dd_roadstops
from . import dd_tarot
from . import dd_tarotsupport
from . import encounters
from . import lancemates
from . import missionbuilder
from . import mocha
from . import recovery
from . import utility
from game.content import mechtarot, PLOT_LIST, UNSORTED_PLOT_LIST, CARDS_BY_NAME
from pbge.plots import Plot
def harvest( mod ):
for name in dir( mod ):
o = getattr( mod, name )
if inspect.isclass( o ) and issubclass( o , Plot ) and o is not Plot and o is not mechtarot.TarotCard:
PLOT_LIST[ o.LABEL ].append( o )
UNSORTED_PLOT_LIST.append( o )
# print o.__name__
if issubclass(o,mechtarot.TarotCard):
CARDS_BY_NAME[o.__name__] = o
harvest(actionscenes)
harvest(dd_combatmission)
harvest(dd_customobjectives)
harvest(dd_distanttown)
harvest(dd_homebase)
harvest(dd_intro)
harvest(dd_lancedev)
harvest(dd_main)
harvest(dd_roadedge)
harvest(dd_roadedge_propp)
harvest(dd_roadstops)
harvest(dd_tarot)
harvest(dd_tarotsupport)
harvest(encounters)
harvest(lancemates)
harvest(missionbuilder)
harvest(mocha)
harvest(recovery)
harvest(utility)
| 26.181818 | 110 | 0.758333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 0.0125 |
2671a284c0ed4b2cd6f0faa0d1f0db0edd38447c | 27,696 | py | Python | reV/handlers/collection.py | pjstanle/reV | c22c620749747022a65d2a98a99beef804849ee6 | [
"BSD-3-Clause"
] | 37 | 2020-03-04T05:24:23.000Z | 2022-02-24T14:39:49.000Z | reV/handlers/collection.py | pjstanle/reV | c22c620749747022a65d2a98a99beef804849ee6 | [
"BSD-3-Clause"
] | 174 | 2020-03-03T18:18:53.000Z | 2022-03-08T22:00:40.000Z | reV/handlers/collection.py | pjstanle/reV | c22c620749747022a65d2a98a99beef804849ee6 | [
"BSD-3-Clause"
] | 16 | 2020-08-10T13:43:36.000Z | 2021-11-19T22:43:36.000Z | # -*- coding: utf-8 -*-
"""
Base class to handle collection of profiles and means across multiple .h5 files
"""
import logging
import numpy as np
import os
import sys
import psutil
import pandas as pd
import time
import shutil
from warnings import warn
from reV.handlers.outputs import Outputs
from reV.utilities.exceptions import (CollectionRuntimeError,
CollectionValueError,
CollectionWarning)
from reV.utilities import log_versions
from rex.utilities.loggers import log_mem
logger = logging.getLogger(__name__)
class DatasetCollector:
"""
Class to collect single datasets from several source files into a final
output file.
"""
def __init__(self, h5_file, source_files, gids, dset_in, dset_out=None,
mem_util_lim=0.7):
"""
Parameters
----------
h5_file : str
Path to h5_file into which dataset is to be collected
source_files : list
List of source filepaths.
gids : list
list of gids to be collected
dset_in : str
Dataset to collect
dset_out : str
Dataset into which collected data is to be written
mem_util_lim : float
Memory utilization limit (fractional). This sets how many sites
will be collected at a time.
"""
self._h5_file = h5_file
self._source_files = source_files
self._gids = gids
self._dset_in = dset_in
if dset_out is None:
dset_out = dset_in
self._dset_out = dset_out
tot_mem = psutil.virtual_memory().total
self._mem_avail = mem_util_lim * tot_mem
self._attrs, self._axis, self._site_mem_req = self._pre_collect()
logger.debug('Available memory for collection is {} bytes'
.format(self._mem_avail))
logger.debug('Site memory requirement is: {} bytes'
.format(self._site_mem_req))
@staticmethod
def parse_meta(h5_file):
"""
Extract and convert meta data from a rec.array to pandas.DataFrame
Parameters
----------
h5_file : str
Path to .h5 file from which meta is to be parsed
Returns
-------
meta : pandas.DataFrame
Portion of meta data corresponding to sites in h5_file
"""
with Outputs(h5_file, mode='r') as f:
meta = f.meta
return meta
@staticmethod
def _get_site_mem_req(shape, dtype, n=100):
"""Get the memory requirement to collect one site from a dataset of
shape and dtype
Parameters
----------
shape : tuple
Shape of dataset to be collected (n_time, n_sites)
dtype : np.dtype
Numpy dtype of dataset (disk dtype)
n : int
Number of sites to prototype the memory req with.
Returns
-------
site_mem : float
Memory requirement in bytes for one site from a dataset with
shape and dtype.
"""
m = 1
if len(shape) > 1:
m = shape[0]
site_mem = sys.getsizeof(np.ones((m, n), dtype=dtype)) / n
return site_mem
def _pre_collect(self):
"""Run a pre-collection check and get relevant dset attrs.
Returns
-------
attrs : dict
Dictionary of dataset attributes for the dataset being collected.
axis : int
Axis size (1 is 1D array, 2 is 2D array)
site_mem_req : float
Memory requirement in bytes to collect a single site from one
source file.
"""
with Outputs(self._source_files[0], mode='r') as f:
shape, dtype, chunks = f.get_dset_properties(self._dset_in)
attrs = f.get_attrs(self._dset_in)
axis = len(f[self._dset_in].shape)
with Outputs(self._h5_file, mode='a') as f:
if axis == 1:
dset_shape = (len(f),)
elif axis == 2:
if 'time_index' in f.datasets:
dset_shape = f.shape
else:
m = ("'time_index' must be combined "
"before profiles can be "
"combined.")
logger.error(m)
raise CollectionRuntimeError(m)
else:
m = ('Cannot collect dset "{}" with '
'axis {}'.format(self._dset_in, axis))
logger.error(m)
raise CollectionRuntimeError(m)
if self._dset_out not in f.datasets:
f._create_dset(self._dset_out, dset_shape, dtype,
chunks=chunks, attrs=attrs)
site_mem_req = self._get_site_mem_req(shape, dtype)
return attrs, axis, site_mem_req
@staticmethod
def _get_gid_slice(gids_out, source_gids, fn_source):
"""Find the site slice that the chunked set of source gids belongs to.
Parameters
----------
gids_out : list
List of resource GIDS in the final output meta data f_out
source_gids : list
List of resource GIDS in one chunk of source data.
fn_source : str
Source filename for warning printout.
Returns
-------
site_slice : slice | np.ndarray
Slice in the final output file to write data to from source gids.
If gids in destination file are non-sequential, a boolean array of
indexes is returned and a warning is printed.
"""
locs = np.where(np.isin(gids_out, source_gids))[0]
if not any(locs):
e = ('DatasetCollector could not locate source gids in '
'output gids. \n\t Source gids: {} \n\t Output gids: {}'
.format(source_gids, gids_out))
logger.error(e)
raise CollectionRuntimeError(e)
sequential_locs = np.arange(locs.min(), locs.max() + 1)
if not len(locs) == len(sequential_locs):
w = ('GID indices for source file "{}" are not '
'sequential in destination file!'.format(fn_source))
logger.warning(w)
warn(w, CollectionWarning)
site_slice = np.isin(gids_out, source_gids)
else:
site_slice = slice(locs.min(), locs.max() + 1)
return site_slice
def _get_source_gid_chunks(self, f_source):
"""Split the gids from the f_source into chunks based on memory req.
Parameters
----------
f_source : reV.handlers.outputs.Output
Source file handler
Returns
-------
all_source_gids : list
List of all source gids to be collected
source_gid_chunks : list
List of source gid chunks to collect.
"""
all_source_gids = f_source.get_meta_arr('gid')
mem_req = (len(all_source_gids) * self._site_mem_req)
if mem_req > self._mem_avail:
n = 2
while True:
source_gid_chunks = np.array_split(all_source_gids, n)
new_mem_req = (len(source_gid_chunks[0]) * self._site_mem_req)
if new_mem_req > self._mem_avail:
n += 1
else:
logger.debug('Collecting dataset "{}" in {} chunks with '
'an estimated {} bytes in each chunk '
'(mem avail limit is {} bytes).'
.format(self._dset_in, n, new_mem_req,
self._mem_avail))
break
else:
source_gid_chunks = [all_source_gids]
return all_source_gids, source_gid_chunks
def _collect_chunk(self, all_source_gids, source_gids, f_out,
f_source, fp_source):
"""Collect one set of source gids from f_source to f_out.
Parameters
----------
all_source_gids : list
List of all source gids to be collected
source_gids : np.ndarray | list
Source gids to be collected
f_out : reV.handlers.outputs.Output
Output file handler
f_source : reV.handlers.outputs.Output
Source file handler
fp_source : str
Source filepath
"""
out_slice = self._get_gid_slice(self._gids, source_gids,
os.path.basename(fp_source))
source_i0 = np.where(all_source_gids == np.min(source_gids))[0][0]
source_i1 = np.where(all_source_gids == np.max(source_gids))[0][0]
source_slice = slice(source_i0, source_i1 + 1)
source_indexer = np.isin(source_gids, self._gids)
logger.debug('\t- Running low mem collection of "{}" for '
'output site {} from source site {} and file : {}'
.format(self._dset_in, out_slice, source_slice,
os.path.basename(fp_source)))
try:
if self._axis == 1:
data = f_source[self._dset_in, source_slice]
if not all(source_indexer):
data = data[source_indexer]
f_out[self._dset_out, out_slice] = data
elif self._axis == 2:
data = f_source[self._dset_in, :, source_slice]
if not all(source_indexer):
data = data[:, source_indexer]
f_out[self._dset_out, :, out_slice] = data
except Exception as e:
logger.exception('Failed to collect source file {}. '
'Raised the following exception:\n{}'
.format(os.path.basename(fp_source), e))
raise e
def _collect(self):
"""Simple & robust serial collection optimized for low memory usage."""
with Outputs(self._h5_file, mode='a') as f_out:
for fp in self._source_files:
with Outputs(fp, mode='r') as f_source:
x = self._get_source_gid_chunks(f_source)
all_source_gids, source_gid_chunks = x
for source_gids in source_gid_chunks:
self._collect_chunk(all_source_gids, source_gids,
f_out, f_source, fp)
log_mem(logger, log_level='DEBUG')
@classmethod
def collect_dset(cls, h5_file, source_files, gids, dset_in, dset_out=None,
mem_util_lim=0.7):
"""Collect a single dataset from a list of source files into a final
output file.
Parameters
----------
h5_file : str
Path to h5_file into which dataset is to be collected
source_files : list
List of source filepaths.
gids : list
list of gids to be collected
dset_in : str
Dataset to collect
dset_out : str
Dataset into which collected data is to be written
mem_util_lim : float
Memory utilization limit (fractional). This sets how many sites
will be collected at a time.
"""
dc = cls(h5_file, source_files, gids, dset_in, dset_out=dset_out,
mem_util_lim=mem_util_lim)
dc._collect()
class Collector:
"""
Class to handle the collection and combination of .h5 files
"""
def __init__(self, h5_file, h5_dir, project_points, file_prefix=None,
clobber=False):
"""
Parameters
----------
h5_file : str
Path to .h5 file into which data will be collected
h5_dir : str
Root directory containing .h5 files to combine
project_points : str | slice | list | pandas.DataFrame | None
Project points that correspond to the full collection of points
contained in the .h5 files to be collected. None if points list is
to be ignored (collect all data in h5_files)
file_prefix : str
.h5 file prefix, if None collect all files in h5_dir
clobber : bool
Flag to purge .h5 file if it already exists
"""
log_versions(logger)
if clobber:
if os.path.isfile(h5_file):
warn('{} already exists and is being replaced'.format(h5_file),
CollectionWarning)
os.remove(h5_file)
self._h5_out = h5_file
ignore = os.path.basename(self._h5_out)
self._h5_files = self.find_h5_files(h5_dir, file_prefix=file_prefix,
ignore=ignore)
if project_points is not None:
self._gids = self.parse_project_points(project_points)
else:
self._gids = self.parse_gids_from_files(self._h5_files)
self.combine_meta()
@staticmethod
def find_h5_files(h5_dir, file_prefix=None, ignore=None):
"""
Search h5_dir for .h5 file, return sorted
If file_prefix is not None, only return .h5 files with given prefix
Parameters
----------
h5_dir : str
Root directory to search
file_prefix : str
Prefix for .h5 file in h5_dir, if None return all .h5 files
ignore : str | list | NoneType
File name(s) to ignore.
"""
if not isinstance(ignore, list):
ignore = [ignore]
h5_files = []
logger.debug('Looking for source files in {}'.format(h5_dir))
for file in os.listdir(h5_dir):
if file.endswith('.h5'):
if file_prefix is not None:
if file.startswith(file_prefix) and file not in ignore:
logger.debug('\t- Found source file to collect: {}'
.format(file))
h5_files.append(os.path.join(h5_dir, file))
elif file not in ignore:
logger.debug('\t- Found source file to collect: {}'
.format(file))
h5_files.append(os.path.join(h5_dir, file))
h5_files = sorted(h5_files)
logger.debug('Final list of {} source files: {}'
.format(len(h5_files), h5_files))
return h5_files
@staticmethod
def parse_project_points(project_points):
"""
Extract resource gids from project points
Parameters
----------
project_points : str | slice | list | pandas.DataFrame
Reference to resource points that were processed and need
collecting
Returns
-------
gids : list
List of resource gids that are to be collected
"""
if isinstance(project_points, str):
gids = pd.read_csv(project_points)['gid'].values
elif isinstance(project_points, pd.DataFrame):
gids = project_points['gid'].values
elif isinstance(project_points, list):
gids = project_points
elif isinstance(project_points, slice):
s = project_points.start
if s is None:
s = 0
e = project_points.stop
if e is None:
m = "slice must be bounded!"
logger.error(m)
raise CollectionValueError(m)
step = project_points.step
if step is None:
step = 1
gids = list(range(s, e, step))
else:
m = 'Cannot parse project_points'
logger.error(m)
raise CollectionValueError(m)
gids = sorted([int(g) for g in gids])
return gids
@staticmethod
def parse_gids_from_files(h5_files):
"""
Extract a sorted gid list from a list of h5_files.
Parameters
----------
h5_files : list
List of h5 files to be collected.
Returns
-------
gids : list
List of sorted resource gids to be collected.
"""
meta = [DatasetCollector.parse_meta(file) for file in h5_files]
meta = pd.concat(meta, axis=0)
gids = list(set(meta['gid'].values.tolist()))
gids = sorted([int(g) for g in gids])
return gids
def get_dset_shape(self, dset_name):
"""
Extract the dataset shape from the first file in the collection list.
Parameters
----------
dset_name : str
Dataset to be collected whose shape is in question.
Returns
-------
shape : tuple
Dataset shape tuple.
"""
with Outputs(self.h5_files[0], mode='r') as f:
shape = f.shapes[dset_name]
return shape
@property
def h5_files(self):
"""
List of .h5 files to be combined
Returns
-------
list
"""
return self._h5_files
@property
def gids(self):
"""
List of gids corresponding to all sites to be combined
Returns
-------
list
"""
return self._gids
def combine_time_index(self):
"""
Extract time_index, None if not present in .h5 files
"""
with Outputs(self.h5_files[0], mode='r') as f:
if 'time_index' in f.datasets:
time_index = f.time_index
attrs = f.get_attrs('time_index')
else:
time_index = None
warn("'time_index' was not processed as it is not "
"present in .h5 files to be combined.",
CollectionWarning)
if time_index is not None:
with Outputs(self._h5_out, mode='a') as f:
f._set_time_index('time_index', time_index, attrs=attrs)
def _check_meta(self, meta):
"""
Check combined meta against self._gids to make sure all sites
are present in self._h5_files
        Parameters
        ----------
        meta : pandas.DataFrame
            DataFrame of combined meta from all files in self._h5_files
        Returns
        -------
        meta : pandas.DataFrame
            Combined meta sorted by gid, with duplicate GIDs dropped
            (a warning is raised if duplicates are found)
        """
meta_gids = meta['gid'].values
gids = np.array(self.gids)
missing = gids[~np.in1d(gids, meta_gids)]
if any(missing):
# TODO: Write missing gids to disk to allow for automated re-run
m = "gids: {} are missing".format(missing)
logger.error(m)
raise CollectionRuntimeError(m)
if len(set(meta_gids)) != len(meta):
m = ('Meta of length {} has {} unique gids! '
'There are duplicate gids in the source file list: {}'
.format(len(meta), len(set(meta_gids)), self.h5_files))
logger.warning(m)
warn(m, CollectionWarning)
meta = meta.drop_duplicates(subset='gid', keep='last')
meta = meta.sort_values('gid')
meta = meta.reset_index(drop=True)
return meta
def _purge_chunks(self):
"""Remove the chunked files (after collection). Will not delete files
if any datasets were not collected."""
with Outputs(self._h5_out, mode='r') as out:
dsets_collected = out.datasets
with Outputs(self.h5_files[0], mode='r') as out:
dsets_source = out.datasets
missing = [d for d in dsets_source if d not in dsets_collected]
if any(missing):
w = ('Not purging chunked output files. These dsets '
'have not been collected: {}'.format(missing))
warn(w, CollectionWarning)
logger.warning(w)
else:
for fpath in self.h5_files:
os.remove(fpath)
def _move_chunks(self, sub_dir):
"""Move the chunked files to a sub dir (after collection).
Parameters
----------
sub_dir : str | None
Sub directory name to move chunks to. None to not move files.
"""
if sub_dir is not None:
for fpath in self.h5_files:
base_dir, fn = os.path.split(fpath)
new_dir = os.path.join(base_dir, sub_dir)
if not os.path.exists(new_dir):
os.makedirs(new_dir)
new_fpath = os.path.join(new_dir, fn)
shutil.move(fpath, new_fpath)
def combine_meta(self):
"""
Load and combine meta data from .h5
"""
with Outputs(self._h5_out, mode='a') as f:
if 'meta' in f.datasets:
self._check_meta(f.meta)
else:
with Outputs(self.h5_files[0], mode='r') as f_in:
global_attrs = f_in.get_attrs()
meta_attrs = f_in.get_attrs('meta')
for key, value in global_attrs.items():
f._h5.attrs[key] = value
meta = [DatasetCollector.parse_meta(file)
for file in self.h5_files]
meta = pd.concat(meta, axis=0)
meta = self._check_meta(meta)
logger.info('Writing meta data with shape {}'
.format(meta.shape))
f._set_meta('meta', meta, attrs=meta_attrs)
@classmethod
def collect(cls, h5_file, h5_dir, project_points, dset_name, dset_out=None,
file_prefix=None, mem_util_lim=0.7):
"""
Collect dataset from h5_dir to h5_file
Parameters
----------
h5_file : str
Path to .h5 file into which data will be collected
h5_dir : str
Root directory containing .h5 files to combine
project_points : str | slice | list | pandas.DataFrame | None
Project points that correspond to the full collection of points
contained in the .h5 files to be collected. None if points list is
to be ignored (collect all data in h5_files)
dset_name : str
Dataset to be collected. If source shape is 2D, time index will be
collected.
dset_out : str
Dataset to collect means into
file_prefix : str
.h5 file prefix, if None collect all files on h5_dir
mem_util_lim : float
Memory utilization limit (fractional). This sets how many sites
will be collected at a time.
"""
if file_prefix is None:
h5_files = "*.h5"
else:
h5_files = "{}*.h5".format(file_prefix)
logger.info('Collecting dataset "{}" from {} files in {} to {}'
.format(dset_name, h5_files, h5_dir, h5_file))
ts = time.time()
clt = cls(h5_file, h5_dir, project_points, file_prefix=file_prefix,
clobber=True)
logger.debug("\t- 'meta' collected")
dset_shape = clt.get_dset_shape(dset_name)
if len(dset_shape) > 1:
clt.combine_time_index()
logger.debug("\t- 'time_index' collected")
DatasetCollector.collect_dset(clt._h5_out, clt.h5_files, clt.gids,
dset_name, dset_out=dset_out,
mem_util_lim=mem_util_lim)
logger.debug("\t- Collection of '{}' complete".format(dset_name))
tt = (time.time() - ts) / 60
logger.info('Collection complete')
logger.debug('\t- Collection took {:.4f} minutes'
.format(tt))
@classmethod
def add_dataset(cls, h5_file, h5_dir, dset_name, dset_out=None,
file_prefix=None, mem_util_lim=0.7):
"""
Collect and add dataset to h5_file from h5_dir
Parameters
----------
h5_file : str
Path to .h5 file into which data will be collected
h5_dir : str
Root directory containing .h5 files to combine
dset_name : str
Dataset to be collected. If source shape is 2D, time index will be
collected.
dset_out : str
Dataset to collect means into
file_prefix : str
.h5 file prefix, if None collect all files on h5_dir
mem_util_lim : float
Memory utilization limit (fractional). This sets how many sites
will be collected at a time.
"""
if file_prefix is None:
h5_files = "*.h5"
else:
h5_files = "{}*.h5".format(file_prefix)
logger.info('Collecting "{}" from {} files in {} and adding to {}'
.format(dset_name, h5_files, h5_dir, h5_file))
ts = time.time()
with Outputs(h5_file, mode='r') as f:
points = f.meta
clt = cls(h5_file, h5_dir, points, file_prefix=file_prefix)
dset_shape = clt.get_dset_shape(dset_name)
if len(dset_shape) > 1:
clt.combine_time_index()
logger.debug("\t- 'time_index' collected")
DatasetCollector.collect_dset(clt._h5_out, clt.h5_files, clt.gids,
dset_name, dset_out=dset_out,
mem_util_lim=mem_util_lim)
logger.debug("\t- Collection of '{}' complete".format(dset_name))
tt = (time.time() - ts) / 60
logger.info('{} collected'.format(dset_name))
logger.debug('\t- Collection took {:.4f} minutes'
.format(tt))
@classmethod
def purge_chunks(cls, h5_file, h5_dir, project_points, file_prefix=None):
"""
Purge (remove) chunked files from h5_dir (after collection).
Parameters
----------
h5_file : str
Path to .h5 file into which data will be collected
h5_dir : str
Root directory containing .h5 files to combine
project_points : str | slice | list | pandas.DataFrame
Project points that correspond to the full collection of points
contained in the .h5 files to be collected
file_prefix : str
.h5 file prefix, if None collect all files on h5_dir
"""
clt = cls(h5_file, h5_dir, project_points, file_prefix=file_prefix)
clt._purge_chunks()
logger.info('Purged chunk files from {}'.format(h5_dir))
@classmethod
def move_chunks(cls, h5_file, h5_dir, project_points, file_prefix=None,
sub_dir='chunk_files'):
"""
Move chunked files from h5_dir (after collection) to subdir.
Parameters
----------
h5_file : str
Path to .h5 file into which data will be collected
h5_dir : str
Root directory containing .h5 files to combine
project_points : str | slice | list | pandas.DataFrame
Project points that correspond to the full collection of points
contained in the .h5 files to be collected
file_prefix : str
.h5 file prefix, if None collect all files on h5_dir
sub_dir : str | None
Sub directory name to move chunks to. None to not move files.
"""
clt = cls(h5_file, h5_dir, project_points, file_prefix=file_prefix)
clt._move_chunks(sub_dir)
logger.info('Moved chunk files from {} to sub_dir: {}'
.format(h5_dir, sub_dir))
| 35.326531 | 79 | 0.55239 | 27,096 | 0.978336 | 0 | 0 | 13,541 | 0.488915 | 0 | 0 | 12,417 | 0.448332 |
267391fe6f529c4f578f96fdbf6f647ec6e040d3 | 964 | py | Python | utility/templatetags/to_price.py | hosseinmoghimi/waiter | 9f5f332b6f252a29aa14f67655b423fd9c40fba3 | [
"MIT"
] | 1 | 2021-12-02T11:16:53.000Z | 2021-12-02T11:16:53.000Z | utility/templatetags/to_price.py | hosseinmoghimi/waiter | 9f5f332b6f252a29aa14f67655b423fd9c40fba3 | [
"MIT"
] | null | null | null | utility/templatetags/to_price.py | hosseinmoghimi/waiter | 9f5f332b6f252a29aa14f67655b423fd9c40fba3 | [
"MIT"
] | null | null | null | from core.errors import LEO_ERRORS
from django import template
register = template.Library()
from utility.currency import to_price as to_price_origin
from utility.num import to_horuf as to_horuf_num,to_tartib as to_tartib_
@register.filter
def to_price(value):
return to_price_origin(value=value)
@register.filter
def to_horuf(value):
return to_horuf_num(value)
@register.filter
def to_tartib(value):
return to_tartib_(value)
@register.filter
def to_price_pure(value):
"""converts int to string"""
try:
sign=''
if value<0:
value=0-value
sign='- '
a=separate(value)
return sign+a
except:
# return LEO_ERRORS.error_to_price_template_tag
return ""
def separate(price):
try:
price=int(price)
except:
return None
if price<1000:
return str(price)
else:
return separate(price/1000)+','+str(price)[-3:]
| 18.538462 | 72 | 0.65249 | 0 | 0 | 0 | 0 | 516 | 0.53527 | 0 | 0 | 86 | 0.089212 |
2674b3c10e1e9d8ebf4b7b0491fb0687920f7025 | 3,119 | py | Python | Python/maximal-rectangle.py | RideGreg/LeetCode | b70818b1e6947bf29519a24f78816e022ebab59e | [
"MIT"
] | 1 | 2022-01-30T06:55:28.000Z | 2022-01-30T06:55:28.000Z | Python/maximal-rectangle.py | RideGreg/LeetCode | b70818b1e6947bf29519a24f78816e022ebab59e | [
"MIT"
] | null | null | null | Python/maximal-rectangle.py | RideGreg/LeetCode | b70818b1e6947bf29519a24f78816e022ebab59e | [
"MIT"
] | 1 | 2021-12-31T03:56:39.000Z | 2021-12-31T03:56:39.000Z | # Time: O(m*n)
# Space: O(n)
# 85
# Given a 2D binary matrix filled with 0's and 1's,
# find the largest rectangle containing all ones and return its area.
# Ascending stack solution.
class Solution(object):
def maximalRectangle(self, matrix): # USE THIS
"""
:type matrix: List[List[str]]
:rtype: int
"""
def largestRectangleArea(heights):
            heights.append(0) # pitfall: must append a sentinel one past the end of the array, e.g. [2,4,5]
stk, ans = [-1], 0
for i, h in enumerate(heights):
                while len(stk) > 1 and h <= heights[stk[-1]]: # right boundary found; pop equal heights too, keeping only the last one
last = stk.pop()
width = i - 1 - stk[-1]
ans = max(ans, heights[last] * width)
stk.append(i)
return ans
if not matrix:
return 0
result = 0
heights = [0] * len(matrix[0])
for i in range(len(matrix)):
for j in range(len(matrix[0])):
heights[j] = heights[j] + 1 if matrix[i][j] == '1' else 0
result = max(result, largestRectangleArea(heights))
return result
# DP solution.
class Solution2(object):
# Time: O(m*n^2) Space: O(n)
def maximalRectangle(self, A): # DONT USE: time complexity
"""
:type matrix: List[List[str]]
:rtype: int
"""
if not A: return 0
m, n, ans = len(A), len(A[0]), 0
dp = [(0,0)] * (n+1) # number of consecutive 1s on left and top direction
for i in range(1, m+1):
for j in range(1, n+1):
if A[i-1][j-1] == '1':
dp[j] = (1+dp[j-1][0], 1+dp[j][1])
minHght = float('inf')
for k in range(dp[j][0]):
minHght = min(minHght, dp[j-k][1])
ans = max(ans, (k+1)*minHght)
else:
dp[j] = (0, 0) # need to reset because we reuse the storage
return ans
# Time: O(n^2) Space: O(n)
def maximalRectangle2(self, matrix): # hard to understand: 3 dp array L, H, R
if not matrix: return 0
result = 0
m, n = len(matrix), len(matrix[0])
L, H, R = [0] * n, [0] * n, [0] * n
for i in range(m):
left = 0
for j in range(n):
if matrix[i][j] == '1':
L[j] = max(L[j], left)
H[j] += 1
else:
L[j] = 0
H[j] = 0
R[j] = n
left = j + 1
right = n
for j in reversed(range(n)):
if matrix[i][j] == '1':
R[j] = min(R[j], right)
result = max(result, H[j] * (R[j] - L[j]))
else:
right = j
return result
if __name__ == "__main__":
matrix = ["01101",
"11010",
"01110",
"11110",
"11111",
"00000"]
print(Solution2().maximalRectangle(matrix)) # 9
| 31.505051 | 91 | 0.430907 | 2,765 | 0.867043 | 0 | 0 | 0 | 0 | 0 | 0 | 766 | 0.240201 |
2676b926254be1cabd4bc81bb9a3da1c42ddb39a | 299 | py | Python | Maximum_Increase.py | Milon34/Python_Learning | b24efbba4751ed51758bf6b907e71f384415a9d5 | [
"MIT"
] | null | null | null | Maximum_Increase.py | Milon34/Python_Learning | b24efbba4751ed51758bf6b907e71f384415a9d5 | [
"MIT"
] | null | null | null | Maximum_Increase.py | Milon34/Python_Learning | b24efbba4751ed51758bf6b907e71f384415a9d5 | [
"MIT"
] | null | null | null | n=int(input())
a = [int(s) for s in input().split(' ')]
increment=1
max_increment=1
for i in range(1,n):
if a[i]>a[i-1]:
increment=increment+1
else:
max_increment=max(max_increment,increment)
increment=1
max_increment=max(max_increment,increment)
print(max_increment) | 24.916667 | 50 | 0.672241 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.010033 |
2676fe4e4181d8ea15429d8939404231084cca25 | 8,869 | py | Python | makechart.py | preeve9534/signalk-sensor-log | 7f6afd188b1ed95dad0b4d798f66d145a1f10978 | [
"Apache-2.0"
] | null | null | null | makechart.py | preeve9534/signalk-sensor-log | 7f6afd188b1ed95dad0b4d798f66d145a1f10978 | [
"Apache-2.0"
] | null | null | null | makechart.py | preeve9534/signalk-sensor-log | 7f6afd188b1ed95dad0b4d798f66d145a1f10978 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
from SocketServer import TCPServer, StreamRequestHandler
import socket
from subprocess import call
import datetime
import json
import re
import sys
import os
CONF = {}
RRDTOOL = '/usr/bin/rrdtool'
PERIODS = []
CHART_BACKGROUNDCOLOR = '#000000'
CHART_CANVASCOLOR = '#000000'
CHART_DIRECTORY = '/tmp/'
CHART_FONT = 'LEGEND:8:Courier New'
CHART_FONTCOLOR = '#804040'
CHART_IMAGETYPE = 'SVG'
DISPLAYGROUP_LIST = []
RRDDATABASE_DATABASES = []
RRDDATABASE_DIRECTORY = '/tmp'
SENSOR_LIST= []
def init(config):
global CONF, PERIODS, CHART_BACKGROUNDCOLOR, CHART_CANVASCOLOR, CHART_DIRECTORY, CHART_FONTCOLOR, DISPLAYGROUP_LIST, RRDDATABASE_DATABASES, RRDDATABASE_DIRECTORY, SENSOR_LIST
with open(config) as data_file:
CONF = json.load(data_file)["configuration"]
PERIODS = CONF['rrddatabase']['periods']
CHART_BACKGROUNDCOLOR = CONF["chart"]["backgroundcolor"]
CHART_CANVASCOLOR = CONF["chart"]["canvascolor"]
CHART_DIRECTORY = CONF['chart']['directory']
CHART_FONTCOLOR = CONF["chart"]["fontcolor"]
DISPLAYGROUP_LIST = CONF['displaygroups']
SENSOR_LIST = CONF['paths']
RRDDATABASE_DATABASES = CONF['rrddatabase']['databases']
RRDDATABASE_DIRECTORY = CONF['rrddatabase']['directory']
return True
def makeGraph(group, chart, directory):
command = ""
if group in map(lambda x: x['id'], DISPLAYGROUP_LIST):
displayGroup = reduce(lambda a, v: (v if (v['id'] == group) else a), DISPLAYGROUP_LIST, None)
if (chart in map(lambda s: s['name'], PERIODS)):
dsIds = map(lambda datasource: datasource['datasource'][datasource['datasource'].find(':') + 1:], displayGroup['datasources'])
dsDatabases = map(lambda datasource: datasource['datasource'][0: datasource['datasource'].find(':')], displayGroup['datasources'])
dsColors = map(lambda datasource: datasource['color'], displayGroup['datasources'])
dsNames = map(lambda datasource: datasource['displayname'], displayGroup['datasources'])
dsLineTypes = map(lambda datasource: 'AREA' if ('area' in datasource['options']) else 'LINE', displayGroup['datasources'])
dsStack = map(lambda datasource: ('stack' in datasource['options']), displayGroup['datasources'])
command = RRDTOOL
command += " graph '" + directory + "/" + group + "." + chart + "." + CHART_IMAGETYPE.lower() + "'"
command += " -T 80"
command += " --imgformat " + CHART_IMAGETYPE
command += " --font '" + CHART_FONT + "'"
command += " --title '" + displayGroup["title"] + "'"
command += " --vertical-label '" + displayGroup["ylabel"] + "'"
command += " --watermark 'Generated on " + datetime.datetime.now().replace(microsecond=0).isoformat(' ') + "'"
command += " --start '" + reduce(lambda a, v: (v['tag'] if (v['name'] == chart) else a), PERIODS,"end-1h") + "'"
command += (" --lower-limit=" + displayGroup["ymin"]) if (displayGroup["ymin"] != "") else ""
command += (" --upper-limit=" + displayGroup["ymax"]) if (displayGroup["ymax"] != "") else ""
command += " --slope-mode"
command += " --rigid"
command += " --color CANVAS" + CHART_CANVASCOLOR
command += " --color BACK" + CHART_BACKGROUNDCOLOR
command += " --color FONT" + CHART_FONTCOLOR
command += " --full-size-mode"
command += " --width=800"
command += " --height=300"
for index, dsid in enumerate(dsIds):
command += " DEF:" + dsid + "=" + RRDDATABASE_DIRECTORY + "/" + dsDatabases[index] + ":" + dsid + ":" + reduce(lambda a, v: (v['consolidate'] if (v['name'] == chart) else a), PERIODS,"AVERAGE")
command += (" VDEF:" + dsid + "min=" + dsid + ",MINIMUM") if ("min" in displayGroup["options"]) else ""
command += (" VDEF:" + dsid + "max=" + dsid + ",MAXIMUM") if ("max" in displayGroup["options"]) else ""
command += (" VDEF:" + dsid + "avg=" + dsid + ",AVERAGE") if ("avg" in displayGroup["options"]) else ""
command += (" VDEF:" + dsid + "lst=" + dsid + ",LAST") if ("lst" in displayGroup["options"]) else ""
command += (" CDEF:" + dsid + "eeg=" + dsid + "," + str(index * 1.1) + ",+") if ("eeg" in displayGroup["options"]) else ""
#//command += " CDEF:" + dsname + "filled=" + dsname + ",UN," + dsname + "avg," + dsname + ",IF";
#command += " CDEF:" + dsid + "filled=" + dsid + ",UN,PREV," + dsid + ",IF"
#command += " CDEF:" + dsid + "fixed=" + dsid + "filled," + str(reduce(lambda a, v: (v['seconds'] if (v['name'] == chart) else a), PERIODS,"1")) + ",/"
#command += " VDEF:" + dsid + "total=" + dsid + "fixed,TOTAL"
comments = reduce(lambda a, v: (a | (v in displayGroup["options"])), ["min","max","avg","lst"], False)
command += (" COMMENT:'" + "Data source".ljust(23) + "'") if (comments) else ""
command += (" COMMENT:'" + "Min ".rjust(10) + "'") if ("min" in displayGroup["options"]) else ""
command += (" COMMENT:'" + "Max ".rjust(10) + "'") if ("max" in displayGroup["options"]) else ""
command += (" COMMENT:'" + "Average ".rjust(10) + "'") if ("avg" in displayGroup["options"]) else ""
command += (" COMMENT:'" + "Last ".rjust(10) + "'") if ("lst" in displayGroup["options"]) else ""
command += (" COMMENT:'\\n'") if (comments) else ""
#command += " COMMENT:'" + "Data stream".ljust(19) + "Min ".rjust(13) + "Max ".rjust(14) + "Average ".rjust(14) + "Derived".rjust(13) + "\\n'";
for i, dsid in enumerate(dsIds):
plot = (dsid + "eeg") if ("eeg" in displayGroup["options"]) else dsid
command += " " + dsLineTypes[i] + ":" + plot + dsColors[i] + ":'" + dsNames[i].ljust(19) + "'" + (":STACK" if (dsStack[i]) else "")
command += (" GPRINT:" + dsid + "min:'%10.2lf'") if ("min" in displayGroup["options"]) else ""
command += (" GPRINT:" + dsid + "max:'%10.2lf'") if ("max" in displayGroup["options"]) else ""
command += (" GPRINT:" + dsid + "avg:'%10.2lf'") if ("avg" in displayGroup["options"]) else ""
command += (" GPRINT:" + dsid + "lst:'%10.2lf'") if ("lst" in displayGroup["options"]) else ""
#command += (" GPRINT:" + dsid + "total:'%10.2lf\\n'"
command += (" COMMENT:'\\n'") if (comments) else ""
call(command, shell=True)
return command
def dropPrivileges(user, group):
import pwd, grp
# Get the uid/gid from the name
runningUid = pwd.getpwnam(user).pw_uid
runningGid = grp.getgrnam(group).gr_gid
# Remove group privileges
os.setgroups([])
# Try setting the new uid/gid
os.setgid(runningGid)
os.setuid(runningUid)
# Reset logging
# self.resetLogging()
class Handler(StreamRequestHandler):
def handle(self):
line = self.rfile.readline()
while (line):
#self.wfile.write(line)
line = line.decode('ascii').strip()
if (line == "quit"):
break
parts = re.split('\s+', line)
if (len(parts) == 2):
makeGraph(parts[0], parts[1], CHART_DIRECTORY)
line = self.rfile.readline()
class Server(TCPServer):
# The constant would be better initialized by a systemd module
SYSTEMD_FIRST_SOCKET_FD = 3
def __init__(self, server_address, handler_cls):
# Invoke base but omit bind/listen steps (performed by systemd activation!)
TCPServer.__init__(self, server_address, handler_cls, bind_and_activate=False)
# Override socket
self.socket = socket.fromfd(self.SYSTEMD_FIRST_SOCKET_FD, self.address_family, self.socket_type)
if __name__ == '__main__':
DAEMONISE = False
CONFIG = "/root/.signalk/plugin-config-data/sensor-log.json"
USER = None
GROUP = None
args = sys.argv[1:]
if (len(args) > 0) and (args[0] == "-"):
DAEMONISE = True;
args = args[1:]
if (len(args) > 1) and (args[0] == "-c"):
CONFIG = args[1]
args = args[2:]
if (len(args) > 1) and (args[0] == "-U"):
USER = args[1]
args = args[2:]
if (len(args) > 1) and (args[0] == "-G"):
GROUP = args[1]
args = args[2:]
if (init(CONFIG)):
if (DAEMONISE):
if ((USER != None) and (GROUP != None)):
dropPrivileges(USER, GROUP)
server = Server(('127.0.0.1', 9999), Handler)
server.serve_forever()
else:
if (len(args) > 1):
print(makeGraph(args[0], args[1], "."))
| 49.272222 | 209 | 0.560379 | 915 | 0.103168 | 0 | 0 | 0 | 0 | 0 | 0 | 2,558 | 0.28842 |
267701db0df3dc5669a6ef8609e548969a09888e | 410 | py | Python | way/python/exercises/various/turtle_draws/turtle_spiral_name.py | only-romano/junkyard | b60a25b2643f429cdafee438d20f9966178d6f36 | [
"MIT"
] | null | null | null | way/python/exercises/various/turtle_draws/turtle_spiral_name.py | only-romano/junkyard | b60a25b2643f429cdafee438d20f9966178d6f36 | [
"MIT"
] | null | null | null | way/python/exercises/various/turtle_draws/turtle_spiral_name.py | only-romano/junkyard | b60a25b2643f429cdafee438d20f9966178d6f36 | [
"MIT"
] | null | null | null | # цветная спираль из имени пользователя
import turtle
t = turtle.Pen()
turtle.bgcolor("black")
colors = ["red", "yellow", "blue", "green"]
# gui text input
name = turtle.textinput("Введи своё имя", "Как тебя зовут?")
for x in range(100):
t.pencolor(colors[x%4])
t.penup()
t.forward(x*4)
t.pendown()
t.write(name, font=("Arial", int((x + 4) / 4), "bold"))
t.left(92)
| 22.777778 | 61 | 0.592683 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 193 | 0.413276 |
config/base_config.py | xuyouze/DropNet | Apache-2.0 | 1,537 bytes | Python | 1 star
# coding:utf-8
# @Time : 2019/5/15
# @Author : xuyouze
# @File Name : base_config.py
import importlib
import os
import sys
import torch
import logging
from .dataset_config import build_dataset_config
from .logger_config import config
__all__ = ["BaseConfig"]
class BaseConfig(object):
def __init__(self):
# model component parameters
self.checkpoints_dir = "ckp"
# dataset name [celebA | lfwa | duke | market]
self.dataset_name = "celebA"
# self.dataset_name = "lfwa"
# self.dataset_name = "duke"
# self.dataset_name = "market"
# model name [common]
self.model_name = "common"
        # network name [resnet]
self.network_name = "resnet"
# loss name [focal | ghm-c | drop | bce]
# self.loss_name = "drop"
# self.loss_name = "focal"
# self.loss_name = "ghmc"
self.loss_name = "bce"
# network initialization type [normal]
self.init_type = "normal"
self.init_gain = 0.2 # scaling factor for normal
# global saving and loading parameters
self.batch_size = 100
self.num_threads = 4
self.last_epoch = "last"
self.load_iter = 0
self.isTrain = None
# dataset parameters
self.dataset_config = build_dataset_config(self.dataset_name)
self.balance_attr_pos_prop = torch.FloatTensor([0.5] * self.dataset_config.attribute_num)
# logging config
logging.config.dictConfig(config)
self.logger = logging.getLogger("TrainLogger")
self.test_logger = logging.getLogger("TestLogger")
if not os.path.exists(self.checkpoints_dir):
os.makedirs(self.checkpoints_dir)
| 23.646154 | 91 | 0.709824 | 1,253 | 0.815224 | 0 | 0 | 0 | 0 | 0 | 0 | 641 | 0.417046 |
test/test_utils.py | fact-project/ratescan | MIT | 814 bytes | Python
from fact.io import read_data
def test_sumupCountsOfRun():
from ratescan.utils import sumupCountsOfRun
df = read_data("test/test.hdf5", key="ratescan")
df_summed = sumupCountsOfRun(df)
assert df_summed.run_id.unique() == 182
assert len(df_summed.ratescan_trigger_thresholds) == 1000
def test_compileRatescanForRun():
from ratescan.utils import compileRatescanForRun
df = read_data("test/test.hdf5", key="ratescan")
df = compileRatescanForRun(df, ontime=160)
assert df[df["ratescan_trigger_counts"] == 75840]["ratescan_trigger_rate"].unique() == 474.0
#
# def test_joinOnTimesFromRunDB():
# from ratescan.utils import joinOnTimesFromRunDB
#
# df = read_data("test/test.hdf5", key="ratescan")
#
# df_res = joinOnTimesFromRunDB(df)
| 29.071429 | 96 | 0.701474 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 288 | 0.353808 |
rbac/cli/cli_test_auth.py | shawnmckinney/py-fortress | Apache-2.0 | 4,080 bytes | Python | 16 stars | 1 issue | 2 forks
'''
@copyright: 2022 - Symas Corporation
'''
import sys
import pickle
import argparse
from rbac.util import global_ids
from rbac.model import Perm, User
from rbac import access
from rbac.util import RbacError
from ..cli.utils import print_user, print_entity
from rbac.cli.utils import (
load_entity, add_args, ADD, DELETE, AUTH, CHCK, ROLES, PERMS, SHOW, DROP
)
OUT_SESS_FILE = "sess.pickle"
def process(args):
sess = None
result = False
user = load_entity (User(), args)
perm = load_entity (Perm(), args)
print(args.operation)
try:
if args.operation == AUTH:
sess = access.create_session(user, False)
result = True
elif args.operation == CHCK:
sess = un_pickle()
result = access.check_access(sess, perm)
elif args.operation == ROLES:
sess = un_pickle()
roles = access.session_roles(sess)
for idx, role in enumerate(roles):
print_entity(role, role.name + ':' + str(idx))
result = True
elif args.operation == PERMS:
sess = un_pickle()
perms = access.session_perms(sess)
for idx, perm in enumerate(perms):
print_entity(perm, perm.obj_name + '.' + perm.op_name + ':' + str(idx))
result = True
elif args.operation == SHOW:
sess = un_pickle()
print_entity(sess, 'session')
print_user(sess.user, 'user')
result = True
elif args.operation == ADD:
sess = un_pickle()
if not args.role:
print("error --role required for this op")
return False
print('role=' + args.role)
access.add_active_role(sess, args.role)
result = True
elif args.operation == DROP:
sess = un_pickle()
if not args.role:
print("error --role required for this op")
return False
print('role=' + args.role)
access.drop_active_role(sess, args.role)
result = True
else:
print('process failed, invalid operation=' + args.operation)
if result:
print('success')
else:
print('failed')
pickle_it(sess)
except RbacError as e:
if e.id == global_ids.ACTV_FAILED_DAY:
print('failed day of week, id=' + str(e.id) + ', msg=' + e.msg)
elif e.id == global_ids.ACTV_FAILED_DATE:
print('failed for date, id=' + str(e.id) + ', msg=' + e.msg)
elif e.id == global_ids.ACTV_FAILED_TIME:
print('failed for time of day, id=' + str(e.id) + ', msg=' + e.msg)
elif e.id == global_ids.ACTV_FAILED_TIMEOUT:
print('failed inactivity timeout, id=' + str(e.id) + ', msg=' + e.msg)
elif e.id == global_ids.ACTV_FAILED_LOCK:
print('failed locked date')
else:
print('RbacError id=' + str(e.id) +', ' + e.msg)
def pickle_it(sess):
if sess is not None:
pickling_on = open(OUT_SESS_FILE,"wb")
pickle.dump(sess, pickling_on)
pickling_on.close()
def un_pickle():
pickle_off = open(OUT_SESS_FILE,"rb")
sess = pickle.load(pickle_off)
return sess
def main(argv=None):
'''Command line options.'''
if argv is None:
argv = sys.argv
else:
sys.argv.extend(argv)
program_name = 'Process py-fortress access commands.'
parser = argparse.ArgumentParser(description=program_name)
parser.add_argument('operation', metavar='operand', choices=[AUTH,CHCK,ROLES,PERMS,ADD,DELETE,SHOW,DROP], help='operation name')
parser.add_argument('-r', '--role', dest='role', help='role name')
add_args(parser, User())
add_args(parser, Perm())
args = parser.parse_args()
process(args)
if __name__ == "__main__":
sys.exit(main()) | 34.576271 | 132 | 0.554412 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 547 | 0.134069 |
httprider/presenters/utility_functions_presenter.py | iSWORD/http-rider | MIT | 1,678 bytes | Python | 27 stars | 6 issues | 7 forks
from httprider.core.generators import utility_func_map
class UtilityFunctionsPresenter:
def __init__(self, view, parent):
self.view = view
self.parent = parent
# update list of functions
for f in utility_func_map.keys():
self.view.function_selector.addItem(f)
# Event handlers to refresh generated values
self.view.function_selector.currentIndexChanged[str].connect(
self.transform_selected_text
)
self.view.btn_copy_transformed.clicked.connect(self.on_copy_clipboard)
def init(self):
whole_text = self.parent.text()
selected_text = self.parent.selected_text
self.view.lbl_selected_text.setText(
selected_text or whole_text or "Select some text"
)
self.transform_selected_text()
def apply_transformation(self, selected_text, func_name):
try:
return utility_func_map.get(func_name)(selected_text)
except Exception as e:
return "Error: {}".format(e)
def on_copy_clipboard(self):
self.view.txt_transformed_text.selectAll()
self.view.txt_transformed_text.copy()
def transform_selected_text(self):
selected_text = self.view.lbl_selected_text.text()
func_name = self.view.function_selector.currentText()
self.view.txt_transformed_text.setPlainText(
self.apply_transformation(selected_text, func_name)
)
def get_function(self):
selected_text = self.view.lbl_selected_text.text()
func_name = self.view.function_selector.currentText()
return f'${{utils("{func_name}", "{selected_text}")}}'
| 34.958333 | 78 | 0.6764 | 1,620 | 0.965435 | 0 | 0 | 0 | 0 | 0 | 0 | 146 | 0.087008 |
ensembling_sgd.py | suswei/RLCT | MIT | 12,745 bytes | Python
import argparse
import numpy as np
import os
from numpy.linalg import inv
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset
import torch.optim as optim
from torch.distributions.multivariate_normal import MultivariateNormal
from torch.distributions.uniform import Uniform
from torch.distributions.normal import Normal
from matplotlib import pyplot as plt
#TODO: currently only supports realizable reduced rank regression, need to add realizable tanh
def main():
# Training settings
parser = argparse.ArgumentParser(description='RLCT Variational Inference')
parser.add_argument('--n', type=int, default=500)
parser.add_argument('--batchsize', type=int, default=10)
parser.add_argument('--epochs', type=int, default=500)
parser.add_argument('--H', type=int, default=5)
parser.add_argument('--dataset',type=str, choices=['tanh','rr'])
parser.add_argument('--prior-std', type=float, default=1.0)
parser.add_argument('--y-std', type=float, default=1.0)
parser.add_argument('--betasbegin', type=float, default=1.0,
help='where beta range should begin')
parser.add_argument('--betasend', type=float, default=5.0,
help='where beta range should end')
parser.add_argument('--numbetas', type=int, default=10,
help='how many betas should be swept between betasbegin and betasend')
parser.add_argument('--R', type=int, default=5)
parser.add_argument('--MC', type=int, default=1)
parser.add_argument('--taskid',type=int, default=1)
args = parser.parse_args()
# %%
if args.dataset == 'rr':
args.output_dim = 6
args.input_dim = 6
args.H = 6
args.H0 = 3
# args.a_params = torch.transpose(
# torch.cat((torch.eye(args.H), torch.ones([args.H, args.input_dim - args.H], dtype=torch.float32)), 1), 0,
# 1) # input_dim * H
# args.b_params = torch.eye(args.output_dim)
a = Normal(0.0, 1.0)
args.a_params = 0.2 * a.sample((args.H0, args.input_dim))
b = Normal(0.0, 1.0)
args.b_params = 0.2 * b.sample((args.output_dim,args.H0))
m = MultivariateNormal(torch.zeros(args.input_dim), torch.eye(args.input_dim)) # the input_dim=output_dim + 3, output_dim = H (the number of hidden units)
X = 3.0*m.sample(torch.Size([2 * args.n]))
mean = torch.matmul(torch.matmul(X, args.b_params), args.a_params)
y_rv = MultivariateNormal(torch.zeros(args.output_dim), torch.eye(args.output_dim))
y = mean + args.y_std * y_rv.sample(torch.Size([2 * args.n]))
# The splitting ratio of training set, validation set, testing set is 0.7:0.15:0.15
train_size = args.n
valid_size = int(args.n * 0.5)
test_size = 2 * args.n - train_size - valid_size
dataset_train, dataset_valid, dataset_test = torch.utils.data.random_split(TensorDataset(X, y),
[train_size, valid_size, test_size])
train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=args.batchsize, shuffle=True)
valid_loader = torch.utils.data.DataLoader(dataset_valid, batch_size=args.batchsize, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=args.batchsize, shuffle=True)
args.loss_criterion = nn.MSELoss(reduction='sum')
args.trueRLCT = (args.output_dim * args.H - args.H ** 2 + args.input_dim * args.H) / 2 # rank r = H for the 'reducedrank_synthetic' dataset
elif args.dataset == 'tanh':
# generate features X from unif(-1,1)
m = Uniform(torch.tensor([-1.0]), torch.tensor([1.0]))
X = m.sample(torch.Size([2 * args.n]))
# generate target from N(0,1) i.e. tanh network with zero layers
# w = {(a_m,b_m)}_{m=1}^p, p(y|x,w) = N(f(x,w),1) where f(x,w) = \sum_{m=1}^p a_m tanh(b_m x)
y_rv = Normal(0.0, args.y_std) # torch.distributions.normal.Normal(loc, scale) where scale is standard deviation
y = y_rv.sample(torch.Size([2 * args.n, 1]))
# The splitting ratio of training set, validation set, testing set is 0.7:0.15:0.15
train_size = args.n
valid_size = int(args.n * 0.5)
test_size = 2 * args.n - train_size - valid_size
dataset_train, dataset_valid, dataset_test = torch.utils.data.random_split(TensorDataset(X, y),
[train_size, valid_size, test_size])
train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=args.batchsize, shuffle=True)
valid_loader = torch.utils.data.DataLoader(dataset_valid, batch_size=args.batchsize, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=args.batchsize, shuffle=True)
args.input_dim = X.shape[1]
args.output_dim = y.shape[1]
args.loss_criterion = nn.MSELoss(reduction='sum')
max_integer = int(np.sqrt(args.H))
args.trueRLCT = (args.H + max_integer * max_integer + max_integer) / (4 * max_integer + 2)
# %%
# define network
class reducedrank(nn.Module):
def __init__(self, input_dim, output_dim, H):
super(reducedrank, self).__init__()
self.fc1 = nn.Linear(input_dim, H, bias=False)
self.fc2 = nn.Linear(H, output_dim, bias=False)
def forward(self, x):
x = self.fc1(x)
x = self.fc2(x)
return x
class tanh(nn.Module):
def __init__(self, input_dim, output_dim, H):
super(tanh, self).__init__()
self.fc1 = nn.Linear(input_dim, H, bias=False)
self.fc2 = nn.Linear(H, output_dim, bias=False)
def forward(self, x):
x = torch.tanh(self.fc1(x))
x = self.fc2(x)
return x
args.w_dim = (args.input_dim + args.output_dim) * args.H
# TODO: is the log n scale really necessary?
# get B inverse temperatures
# args.betas = 1 / np.linspace(np.log(args.n) / args.betasbegin, np.log(args.n) / args.betasend, args.numbetas)
args.betas = 1 / np.linspace(1 / args.betasbegin, 1 / args.betasend, args.numbetas)
# args.betas = np.linspace(args.betasbegin, args.betasend, args.numbetas)/np.log(args.n)
# args.recip = np.linspace(0.1,args.numbetas,args.numbetas) #1/beta
# args.betas = 1/args.recip
# args.betas = np.linspace(args.betasbegin, args.betasend, args.numbetas)
# TODO: set automatically?
# args.prior_std = np.sqrt(args.w_dim * (args.y_std ** 2) * np.log(args.n) / (args.betasbegin * args.n))
# args.prior_std = np.sqrt(args.w_dim / (args.betasbegin * args.n))
# args.prior_std = 10.0
# print('prior std auto set to {}'.format(args.prior_std))
# %%
# %%
# define loss function that is specific to anchor point and inverse temperature beta
def custom_loss(model, target, output, beta):
# TODO: what's the justification for using anchors?
# returns ||y-\hat y||^2_2 + \sigma_eps^2/beta*\sigma_{prior}^2 ||theta-\hat theta||^2_2
# anchor_dist = Normal(0.0, args.prior_std)
wd = torch.tensor(0.)
for p in model.parameters():
# anchor = anchor_dist.sample(p.shape)
# wd += ((p - anchor) ** 2).sum()
wd += (p ** 2).sum()
# wd_factor = torch.tensor(((args.y_std/args.prior_std)**2))
# print('model fit portion {}'.format(beta * args.loss_criterion(target, output) / (args.batchsize)))
# print('weight decay portion {}'.format(wd / ((args.prior_std ** 2) * args.n)))
# return beta * args.loss_criterion(target, output) / (2 * args.batchsize) + wd / (
# 2 * (args.prior_std ** 2) * args.n)
return beta * args.loss_criterion(target, output) /((args.y_std ** 2) * args.batchsize) + wd / ((args.prior_std ** 2) * args.n)
# return args.loss_criterion(target, output) / ((args.y_std ** 2) * args.batchsize) + wd / ((args.prior_std ** 2) * args.n)
# %%
# train ensemble
def train(beta):
# return ensemble-average of nL_n(w) = -\sum_{i=1}^n \log p(y_i|x_i,w) = \sum_i (y_i-f(x_i,w))^2/ 2\sigma_eps^2
# wd_factor = ((args.y_std/args.prior_std)**2)/beta
if args.dataset == 'rr':
model = reducedrank(args.input_dim, args.output_dim, args.H)
elif args.dataset == 'tanh':
model = tanh(args.input_dim, args.output_dim, args.H)
# optimizer = optim.SGD(model.parameters(), lr=args.lr, weight_decay=wd_factor)
# TODO: how to scale lr automatically so it doesn't explode, does it include beta or not?
# lr = 0.01*args.batchsize / (beta * args.n)
lr = args.batchsize / args.n
optimizer = optim.SGD(model.parameters(), lr=lr)
wholex = train_loader.dataset[:][0]
wholey = train_loader.dataset[:][1]
for epoch in range(1, args.epochs + 1):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
output = model(data)
loss = custom_loss(model, target, output, beta)
# loss = args.loss_criterion(target, output)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch % 100 == 0:
model.eval()
with torch.no_grad():
output = model(wholex)
eval_loss = custom_loss(model, wholey, output, beta)
# eval_loss = args.loss_criterion(wholey, output)
print('Epoch {}: total loss on training {}, negloglik {}'.format(epoch, eval_loss, args.loss_criterion(wholey, output).detach() / (2 * (args.y_std ** 2))))
final_output = model(wholex)
return ((wholey - final_output) ** 2).sum() / (2 * (args.y_std ** 2))
nll = np.empty((args.numbetas, args.R))
for beta_index in range(0, args.numbetas):
beta = args.betas[beta_index]
for r in range(0, args.R):
print('Training {}/{} ensemble at {}/{} inverse temp, getting colder (negloglik smaller)'.format(r + 1, args.R, beta_index + 1, args.numbetas))
nll[beta_index, r] = train(beta)
if beta_index > 0:
design_x = np.vstack((np.ones(beta_index+1), 1 / args.betas[0:beta_index+1])).T
design_y = np.mean(nll[0:beta_index+1,:], 1)
design_y = design_y[:, np.newaxis]
fit = inv(design_x.T.dot(design_x)).dot(design_x.T).dot(design_y)
print('true RLCT {}, current RLCT estimate {}'.format(args.trueRLCT,fit[1][0]))
plt.hist(nll[beta_index,:])
        plt.title('nLn(w) at inverse temp {}'.format(beta))  # beta is already the scalar args.betas[beta_index]
plt.show()
# %%
# average nll array over r
# ols_model = OLS(np.mean(nll, 1), add_constant(1 / args.betas)).fit()
# ols_intercept_estimate = ols_model.params[0]
# RLCT_estimate = ols_model.params[1]
design_x = np.vstack((np.ones(args.numbetas), 1/args.betas)).T
design_y = np.mean(nll,1)
design_y = design_y[:, np.newaxis]
fit = inv(design_x.T.dot(design_x)).dot(design_x.T).dot(design_y)
ols_intercept_estimate = fit[0][0]
    RLCT_estimate = fit[1][0]
print('RLCT estimate: {}'.format(RLCT_estimate))
print('true RLCT: {}'.format(args.trueRLCT))
# robust ls fit
# regr = ElasticNet(random_state=0, fit_intercept=True, alpha=0.5)
# regr.fit((1 / args.betas).reshape(args.numbetas, 1), np.mean(nll, 1))
# robust_intercept_estimate = regr.intercept_
# # slope_estimate = min(regr.coef_[0],args.w_dim/2)
# robust_slope_estimate = regr.coef_[0]
path = './taskid{}'.format(args.taskid)
if not os.path.exists(path):
os.makedirs(path)
args_dict = vars(args)
print(args_dict)
torch.save(args_dict, '{}/mc{}_config.pt'.format(path, args.MC))
plt.scatter(1 / args.betas, np.mean(nll, 1), label='nll beta')
# plt.plot(1 / args.betas, robust_intercept_estimate + robust_slope_estimate * 1 / args.betas, 'g-',
# label='robust ols')
plt.plot(1 / args.betas, ols_intercept_estimate + RLCT_estimate * 1 / args.betas, 'b-', label='ols')
plt.title("d_on_2 = {}, true lambda = {:.1f} "
"\n hat lambda ols = {:.1f}"
.format(args.w_dim / 2, args.trueRLCT, RLCT_estimate), fontsize=8)
plt.xlabel("1/beta", fontsize=8)
plt.ylabel("ensemble estimate of E^beta_w [nL_n(w)]", fontsize=8)
plt.savefig('{}/mc{}.png'.format(path, args.MC))
plt.legend()
plt.show()
if __name__ == "__main__":
main() | 42.342193 | 175 | 0.612475 | 714 | 0.056022 | 0 | 0 | 0 | 0 | 0 | 0 | 4,307 | 0.337936 |
edge_server/script_server.py | Dspatharakis/alphabot-ppl | MIT | 1,764 bytes | Python
#!/usr/bin/env python
import os
import requests
import json
import time
import sys
import subprocess
import csv
import json
import numpy as np
def main():
start_time = time.time()
post_url = "http://0.0.0.0:8000/cpu"
mean_cpu = 0
counter = 0
s = 0.75
container_name = subprocess.check_output(["docker", "ps", "-aqf", "name=edge"])
while True:
measured_cpu = subprocess.check_output(["docker", "stats", "--no-stream", "--format", "{{ .CPUPerc }}"] )# , ">", "./log.txt"])
measured_cpu = (float(measured_cpu[:-2]))
if measured_cpu > 0.2 :
mean_cpu += measured_cpu
counter += 1
now_time = time.time ()
#print now_time - start_time
if now_time-start_time > 10 :
start_time = time.time()
if counter > 0 :
mean_cpu = mean_cpu /float(counter)
mean_cpu = s
print "Cpu utilization of this time interval: "+ str(mean_cpu)
payload = [{"cpu": mean_cpu,"s": s}]
r = requests.post(post_url, json=payload)
counter = 0
mean_cpu = 0
##### KATANOMI GIA UPDATE CORES STO CONTAINER
mu, sigma = 0.75, 0.16 # mean and standard deviation
s = np.random.normal(mu, sigma)
if s < 0.3: s = 0.3
print "Number of cores: "+str(s)
number_of_cores = s #/ float(100 )
with open(os.devnull, 'wb') as devnull: # suppress output
subprocess.check_call(["docker update --cpus=\"" + str(round(number_of_cores,2)) + "\" " +str(container_name)], shell=True, stdout=devnull, stderr=subprocess.STDOUT)
if __name__ == "__main__":
main()
| 34.588235 | 196 | 0.549887 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 405 | 0.229592 |
madic/tests/test_io.py | dcroote/madic | BSD-3-Clause | 5,264 bytes | Python | 2 stars
import os
import pandas as pd
import numpy as np
from pandas.testing import assert_series_equal
from madic import io
class TestChromatogramExpansion(object):
def setup_method(self):
# two rows of comma separated intensity chromatograms
self.df = pd.DataFrame([['1,2,3,4,5,6,5,4,3,2,1'],
['1,2,3,4,5,4,3,2,1']],
columns=['intensities'])
def test_expand_comma_sep_series_no_smoothing(self):
expected_series = pd.Series([np.array([1., 2., 3., 4., 5., 6., 5.,
4., 3., 2., 1.]),
np.array([1., 2., 3., 4., 5., 4., 3.,
2., 1.])],
name='intensities')
result = io._expand_comma_sep_series(self.df.intensities)
assert_series_equal(result, expected_series)
def test_expand_comma_sep_series_with_smoothing(self):
expected_series = pd.Series([np.array([1., 2., 3., 4., 4.6, 4.8,
4.6, 4., 3., 2., 1.]),
np.array([1., 2., 3., 3.6, 3.8, 3.6,
3., 2., 1.])],
name='intensities')
result = io._expand_comma_sep_series(self.df.intensities,
smooth=True)
assert_series_equal(result, expected_series)
class TestReplicateColumnSplit(object):
def setup_method(self):
self.series = pd.Series(['Site4_ConditionA_Part2_094',
'Site4_ConditionA_Part3_095',
'Site4_ConditionB_Part2_096',
'Site4_ConditionB_Part3_097'
])
def test_split_delimiter_position(self):
expected_series = pd.Series(['ConditionA', 'ConditionA', 'ConditionB',
'ConditionB'], name='sample_name')
result = io.replicate_to_sample_name(self.series, '_', 1)
assert_series_equal(result, expected_series)
def test_load_skyline_transition_report():
report_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
'../../examples/'
'madic_skyline_daily_data.csv'))
df = io.read_transition_report(report_path,
delimiter='_',
delimiter_pos=1)
assert sorted(df.label.unique()) == ['heavy', 'light']
assert df.shape[0] == 40
assert df.pep.unique().size == 2
assert df.sample_name.unique().size == 2
assert df.rep.unique().size == 4
def test_write_out_summary(tmpdir):
summary = pd.DataFrame([
['sample1','PEP1',True,True,True,True,False],
['sample1','PEP2',True,False,False,True,False]],
columns=['sample_name', 'pep',
'pass_signal_to_noise',
'pass_transition_ratio',
'pass_retention_time',
'pass_all_replicate',
'interference_corrected'])
# write data
path = tmpdir.join('summary.csv')
summary.to_csv(str(path), index=False)
# load expected file contents
testsdir = os.path.abspath(os.path.dirname(__file__))
expected_file = os.path.join(testsdir, 'data/for_testing_summary.csv')
with open(expected_file) as f:
expected = f.read()
# compare contents
assert path.read() == expected
def test_write_out_data(tmpdir):
df = pd.DataFrame([
['rep1','PEP1','y5',True,True,True,True,False],
['rep1','PEP1','y6',True,True,True,True,False],
['rep1','PEP1','y7',True,True,True,True,False],
['rep2','PEP1','y5',True,True,True,True,False],
['rep2','PEP1','y6',True,True,True,True,False],
['rep2','PEP1','y7',True,True,True,True,False],
['rep1','PEP2','y5',True,True,False,True,False],
['rep1','PEP2','y6',True,True,True,True,False],
['rep1','PEP2','y7',True,True,True,True,False],
['rep2','PEP2','y5',True,False,True,True,False],
['rep2','PEP2','y6',True,False,True,True,False],
['rep2','PEP2','y7',True,False,True,True,False]
],
columns=['rep', 'pep', 'prod_ion',
'pass_signal_to_noise',
'pass_transition_ratio',
'pass_retention_time',
'pass_all_replicate',
'interference'])
df['sample_name'] = 'sample1'
df['label'] = 'light'
df['times_arr'] = [np.arange(3)]*12
df['intensities_arr'] = [[500.1, 800.9, 500.1]]*12
path = tmpdir.join('data.csv')
io.write_out_data(df, str(path))
testsdir = os.path.abspath(os.path.dirname(__file__))
expected_file = os.path.join(testsdir, 'data/for_testing_data.csv')
with open(expected_file) as f:
expected = f.read()
assert path.read() == expected
| 37.6 | 79 | 0.510448 | 2,066 | 0.392477 | 0 | 0 | 0 | 0 | 0 | 0 | 1,047 | 0.198898 |
plot.py | arthtyagi/makefile | MIT | 226 bytes | Python
import matplotlib.pyplot as plt
x = []
y = []
with open('points') as f:
for point in map(lambda x: x.split(), f.readlines()):
x.append(int(point[0]))
y.append(int(point[1]))
plt.scatter(x, y)
plt.show()
| 17.384615 | 57 | 0.588496 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.035398 |
pos_debranding/__manifest__.py | jromeroarg/itpp-labs_pos-addons | MIT | 509 bytes | Python | 1 fork
# Copyright 2015-2018,2020 Ivan Yelizariev
# License MIT (https://opensource.org/licenses/MIT).
{
"name": "POS debranding",
"version": "13.0.1.0.0",
"author": "IT-Projects LLC, Ivan Yelizariev",
"license": "Other OSI approved licence", # MIT
"category": "Debranding",
"support": "[email protected]",
"website": "https://odoo-debranding.com",
"depends": ["point_of_sale"],
"data": ["template.xml"],
"qweb": ["static/src/xml/pos_debranding.xml"],
"installable": True,
}
| 31.8125 | 52 | 0.624754 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 403 | 0.791749 |
drafts/vanish_explode_gradients/freeze_lower_layers.py | quanhua92/deeplearning_tutorials | MIT | 335 bytes | Python | 1 star
import tensorflow as tf
# We will train all layers except hidden[12]. Therefore, Layers 1 and 2 are frozen
train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="hidden[34]|outputs")
loss = None # Your loss is here
train_op = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(loss, var_list=train_vars) | 47.857143 | 100 | 0.79403 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 121 | 0.361194 |
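# A sketch (not part of the original draft) of how this snippet is typically completed in
# TF 1.x graph mode. The scope names "hidden3"/"hidden4"/"outputs" and the tensors
# `labels`/`logits` are assumptions chosen only to match the scope regex above:
#
#     logits = ...  # network output built under tf.variable_scope("hidden1")..."outputs"
#     loss = tf.reduce_mean(
#         tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
#     print([v.name for v in train_vars])  # should list only hidden3/hidden4/outputs variables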
Statistics/PopulationMean.py | cadibemma/Statistical-Calculator | MIT | 371 bytes | Python | 1 star | 28 issues | 1 fork
# from Calculator.Addition import addition
from Calculator.Division import division
def populationmean(num):
try:
num_values = len(num)
total = sum(num)
return division(total, num_values)
except ZeroDivisionError:
print("Error: Enter values greater than 0")
except ValueError:
print("Error: insert correct data type") | 28.538462 | 51 | 0.684636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 111 | 0.299191 |
code/app/configuration/configuration.py | WesleyAdriann/discord_bot_dota | MIT | 585 bytes | Python
# -*- coding: utf-8 -*-
import os
class Configuration:
def __init__(self):
self.DOTA_BUFF_BASE_URL = 'https://pt.dotabuff.com'
self.HEROES = f'{self.DOTA_BUFF_BASE_URL}/heroes'
self.HERO_COUNTERS = '/counters'
self.HERO_COUNTERS_FULL = f'{self.DOTA_BUFF_BASE_URL}/hero-name/{self.HERO_COUNTERS}'
self.DISCORD_BOT_KEY = os.environ.get('DISCORD_BOT_KEY')
def configure_hero_counters_full(self, hero):
hero_name = '-'.join(hero.split()).lower()
self.HERO_COUNTERS_FULL = f'{self.HEROES}/{hero_name}{self.HERO_COUNTERS}'
| 34.411765 | 93 | 0.676923 | 547 | 0.935043 | 0 | 0 | 0 | 0 | 0 | 0 | 221 | 0.377778 |
grpc_examples/sending_image/client.py | iwmq/coding_notes | MIT | 908 bytes | Python
"""
The Python implementation of the GRPC image client.
Modified from grpc/examples/python/helloworld/greeting_client.py.
"""
from __future__ import print_function
import logging
from io import BytesIO
from PIL import Image
import grpc
import image_pb2
import image_pb2_grpc
def run():
# NOTE(gRPC Python Team): .close() is possible on a channel and should be
# used in circumstances in which the with statement does not fit the needs
# of the code.
with grpc.insecure_channel('localhost:50051') as channel:
stub = image_pb2_grpc.GreeterStub(channel)
response = stub.SayHello(image_pb2.HelloRequest(name='you'))
message = response.message
buf = BytesIO(response.image)
buf.seek(0)
image = Image.open(buf, formats=["JPEG"])
print(f"Greeter client received: {message}")
image.show()
if __name__ == '__main__':
logging.basicConfig()
run()
| 25.222222 | 78 | 0.718062 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 362 | 0.398678 |
bot/NFQ.py | cyber-meow/Robotic_state_repr_learning | MIT | 1,842 bytes | Python
import numpy as np
from sklearn.neural_network import MLPRegressor
from sklearn.exceptions import NotFittedError
from inter.interfaces import QLearning
from utility import set_all_args
class NFQ(QLearning):
gamma = 0.9
beta = 0.8
def __init__(self, **kwargs):
self.mlp = MLPRegressor(
hidden_layer_sizes=(5,5), activation='logistic', batch_size=400)
set_all_args(self, kwargs)
def fit(self, data, max_iter=300, intra_step=50):
"""
data is the triple (ss, as, rs)
"""
for _ in range(max_iter):
inputs, targets = self.compute_inputs_targets(data)
for _ in range(intra_step):
self.mlp.partial_fit(inputs, targets)
def compute_inputs_targets(self, data):
inputs, targets = [], []
for i in range(len(data[0])-1):
s, a, r = list(data[0][i]), data[1][i], data[2][i]
s_next = list(data[0][i+1])
inputs.append(s + [self.actions.index(a)])
to_prs = [s_next + [act] for act in range(len(self.actions))]
try:
q_values = self.mlp.predict(to_prs)
targets.append(r + self.gamma * np.max(q_values))
except NotFittedError:
targets.append(r)
return np.array(inputs), np.array(targets)
def score(self, data):
inputs, targes = self.compute_inputs_targets(data)
return self.mlp.score(inputs, targes)
def decision(self, state):
state = list(state)
to_prs = [state + [act] for act in range(len(self.actions))]
q_values = self.mlp.predict(to_prs)
ps = np.exp(self.beta * q_values)
a_num = np.random.choice(len(self.actions), p=ps/np.sum(ps))
return self.actions[a_num]
| 29.238095 | 76 | 0.58089 | 1,649 | 0.895223 | 0 | 0 | 0 | 0 | 0 | 0 | 65 | 0.035288 |
database/chemprop/endpoint_acceptable_parameters.py | mshobair/invitro_cheminformatics | MIT | 1,062 bytes | Python
import datetime
from database.chemprop.endpoints import Endpoints
from database.chemprop.parameters import Parameters
from database.database_schemas import Schemas
from sqlalchemy import Column, Integer, String, DateTime, ForeignKey
from sqlalchemy.orm import relationship
from database.base import Base
class EndpointAcceptableParameters(Base):
"""Maps to endpoint_acceptable_parameters table in chemprop databases."""
__tablename__ = "endpoint_acceptable_parameters"
__table_args__ = {'schema': Schemas.chemprop_schema}
id = Column(Integer, primary_key=True, nullable=False)
fk_endpoint_id = Column(ForeignKey(Endpoints.id))
fk_parameter_id = Column(ForeignKey(Parameters.id))
created_by = Column(String(255), nullable=False)
updated_by = Column(String(255), nullable=False)
created_at = Column(DateTime, default=datetime.datetime.now, nullable=False)
updated_at = Column(DateTime, default=datetime.datetime.now, nullable=False)
endpoint = relationship("Endpoints")
parameter = relationship("Parameters")
| 37.928571 | 80 | 0.785311 | 753 | 0.70904 | 0 | 0 | 0 | 0 | 0 | 0 | 136 | 0.12806 |
label_mapping.py | YoushaaMurhij/CenterPoint_Segmentation_Head | MIT | 2,009 bytes | Python
label_name_mapping = {
0: "unlabeled",
1: "outlier",
10: "car",
11: "bicycle",
13: "bus",
15: "motorcycle",
16: "on-rails",
18: "truck",
20: "other-vehicle",
30: "person",
31: "bicyclist",
32: "motorcyclist",
40: "road",
44: "parking",
48: "sidewalk",
49: "other-ground",
50: "building",
51: "fence",
52: "other-structure",
60: "lane-marking",
70: "vegetation",
71: "trunk",
72: "terrain",
80: "pole",
81: "traffic-sign",
99: "other-object",
252: "moving-car",
253: "moving-bicyclist",
254: "moving-person",
255: "moving-motorcyclist",
256: "moving-on-rails",
257: "moving-bus",
258: "moving-truck",
259: "moving-other-vehicle"
}
class2id = {
0: 0,
1: 1,
10: 2,
11: 3,
13: 4,
15: 5,
16: 6,
18: 7,
20: 8,
30: 9,
31: 10,
32: 11,
40: 12,
44: 13,
48: 14,
49: 15,
50: 16,
51: 17,
52: 18,
60: 19,
70: 20,
71: 21,
72: 22,
80: 23,
81: 24,
99: 25,
252: 2, #26,
253: 10, #27,
254: 9, #28,
255: 11, #29,
256: 6, #30,
257: 4, #31,
258: 7, #32,
259: 8 #33
}
id2class = {
0:0,
1:1,
2:10,
3:11,
4:13,
5:15,
6:16,
7:18,
8:20,
9:30,
10:31,
11:32,
12:40,
13:44,
14:48,
15:49,
16:50,
17:51,
18:52,
19:60,
20:70,
21:71,
22:72,
23:80,
24:81,
25:99,
26:252,
27:253,
28:254,
29:255,
30:256,
31:257,
32:258,
33:259
}
_kitti_labels = {
0: 'unlabeled',
1: 'car',
2: 'bicycle',
3: 'motorcycle',
4: 'truck',
5: 'other-vehicle',
6: 'person',
7: 'bicyclist',
8: 'motorcyclist',
9: 'road',
10: 'parking',
11: 'sidewalk',
12: 'other-ground',
13: 'building',
14: 'fence',
15: 'vegetation',
16: 'trunk',
17: 'terrain',
18: 'pole',
19: 'traffic-sign'
} | 14.992537 | 31 | 0.442509 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 623 | 0.310105 |
13fb266bbbe1b42018ffaaf4a8dc92668ab6d95f | 137 | py | Python | src/__init__.py | gitlabbin/zookeeper-kazoo-barrier-snippet | 2ab88fa3735b61dd9a94e0e0294e88a12c458ee3 | [
"MIT"
] | null | null | null | src/__init__.py | gitlabbin/zookeeper-kazoo-barrier-snippet | 2ab88fa3735b61dd9a94e0e0294e88a12c458ee3 | [
"MIT"
] | null | null | null | src/__init__.py | gitlabbin/zookeeper-kazoo-barrier-snippet | 2ab88fa3735b61dd9a94e0e0294e88a12c458ee3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import absolute_import
def main():
import src.barrier
if __name__ == '__main__':
main() | 13.7 | 38 | 0.693431 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.226277 |
13fca46c746999501de84f40b50ef047a4b2c610 | 295 | py | Python | archives/02/tootle/tests/test_state.py | asmodehn/caerbannog | 47bb1138190748041a4c0d02e522c0924a9af962 | [
"MIT"
] | 1 | 2019-01-22T20:57:45.000Z | 2019-01-22T20:57:45.000Z | archives/03/footle/tests/test_state.py | asmodehn/functional-python | 47bb1138190748041a4c0d02e522c0924a9af962 | [
"MIT"
] | 3 | 2019-03-14T17:33:39.000Z | 2019-03-14T18:09:42.000Z | archives/03/footle/tests/test_state.py | asmodehn/caerbannog | 47bb1138190748041a4c0d02e522c0924a9af962 | [
"MIT"
] | null | null | null | from state import TurtleState
# TODO : improve... but with useful things for illustrating the point...
def test_move(distance):
s = TurtleState()
s.move(distance)
def test_turn(angle):
s = TurtleState()
s.turn(angle)
def test_random_path():
s = TurtleState
# TODO
| 15.526316 | 72 | 0.671186 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 78 | 0.264407 |
TransitionListener/transitionFinder.py | tasicarl/TransitionListerner_public | MIT | 66,843 bytes | Python | 1 fork
"""
The transitionFinder module is used to calculate finite temperature
cosmological phase transitions: it contains functions to find the phase
structure as a function of temperature, and functions to find the transition
(bubble nucleation) temperature for each phase.
In contrast, :mod:`.pathDeformation` is useful for finding the tunneling
solution for a fixed potential or a potential at a fixed temperature.
The most directly used functions in this module will likely be
:func:`traceMultiMin` for finding the phase structure, and
:func:`findAllTransitions` and :func:`findCriticalTemperatures` for calculating
properties of the phase transitions.
"""
from collections import namedtuple
import numpy as np
from scipy import linalg, interpolate, optimize
from scipy.optimize import curve_fit
from scipy.interpolate import UnivariateSpline
import matplotlib.pyplot as plt
from . import pathDeformation
from . import tunneling1D
import sys
_traceMinimum_rval = namedtuple("traceMinimum_rval", "X T dXdT overX overT")
def traceMinimum(f, d2f_dxdt, d2f_dx2, x0, t0, tstop, dtstart, deltaX_target,
dtabsMax=20.0, dtfracMax=.25, dtmin=1e-3,
deltaX_tol=1.2, minratio=1e-2, verbose = False):
"""
Trace the minimum `xmin(t)` of the function `f(x,t)`, starting at `x0, t0`.
Parameters
----------
f : callable
The scalar function `f(x,t)` which needs to be minimized. The input will
be of the same type as `(x0,t0)`.
d2f_dxdt, d2f_dx2 : callable
Functions which return returns derivatives of `f(x)`. `d2f_dxdt` should
return the derivative of the gradient of `f(x)` with respect to `t`, and
`d2f_dx2` should return the Hessian matrix of `f(x)` evaluated at `t`.
Both should take as inputs `(x,t)`.
x0 : array_like
The initial starting point. Must be an array even if the potential is
one-dimensional (in which case the array should have length 1).
t0 : float
The initial starting parameter `t`.
tstop : float
Stop the trace when `t` reaches `tstop`.
dtstart : float
Initial stepsize.
deltaX_target : float
The target error in x at each step. Determines the
stepsize in t by extrapolation from last error.
dtabsMax : float, optional
dtfracMax : float, optional
The largest stepsize in t will be the LARGEST of
``abs(dtstart)*dtabsMax`` and ``t*dtfracMax``.
dtmin : float, optional
The smallest stepsize we'll allow before assuming the transition ends,
relative to `dtstart`
deltaX_tol : float, optional
``deltaX_tol*deltaX_target`` gives the maximum error in x
before we want to shrink the stepsize and recalculate the minimum.
minratio : float, optional
The smallest ratio between smallest and largest eigenvalues in the
Hessian matrix before treating the smallest eigenvalue as zero (and
thus signaling a saddle point and the end of the minimum).
Returns
-------
X, T, dXdT : array_like
Arrays of the minimum at different values of t, and
its derivative with respect to t.
overX : array_like
The point beyond which the phase seems to disappear.
overT : float
The t-value beyond which the phase seems to disappear.
Notes
-----
In prior versions, `d2f_dx2` was optional and called `d2f`, while `d2f_dxdt`
was calculated from an optional parameter `df` using finite differences. If
    neither of these was supplied, they would be calculated directly from
    `f(x,t)` using finite differences. This led to a messier calling signature,
since additional parameters were needed to find the finite differences. By
instead requiring that the derivatives be supplied, the task of creating the
derivative functions can be delegated to more general purpose routines
(see e.g. :class:`helper_functions.gradientFunction` and
:class:`helper_functions.hessianFunction`).
Also new in this version, `dtmin` and `dtabsMax` are now relative to
`dtstart`. The idea here is that there should be some required parameter
that sets the scale, and then optional parameters can set the tolerances
relative to this scale. `deltaX_target` is now not optional for the same
reasoning.
"""
if verbose:
print("traceMinimum t0 = %0.6g" % t0)
Ndim = len(x0)
M0 = d2f_dx2(x0,t0)
minratio *= min(abs(linalg.eigvalsh(M0)))/max(abs(linalg.eigvalsh(M0)))
def dxmindt(x,t):
M = d2f_dx2(x,t)
if abs(linalg.det(M)) < (1e-3*np.max(abs(M)))**Ndim:
# Assume matrix is singular
return None, False
b = -d2f_dxdt(x,t)
eigs = linalg.eigvalsh(M)
try:
dxdt = linalg.solve(M,b, overwrite_a=False, overwrite_b=False)
# dxdt = linalg.solve(M,b, overwrite_a=True, overwrite_b=True)
isneg = ((eigs <= 0).any() or min(eigs)/max(eigs) < minratio)
except:
dxdt = None
isneg = False
return dxdt, isneg
xeps = deltaX_target * 1e-2
def fmin(x,t):
return optimize.fmin(f, x, args=(t,), xtol=xeps, ftol=np.inf,
disp=False)
deltaX_tol = deltaX_tol * deltaX_target
tscale = abs(dtstart)
dtabsMax = dtabsMax * tscale
dtmin = dtmin * tscale
x,t,dt,xerr = x0,t0,dtstart,0.0
dxdt, negeig = dxmindt(x,t)
X,T,dXdT = [x],[t],[dxdt]
overX = overT = None
while dxdt is not None:
if verbose:
sys.stdout.write('.')
sys.stdout.flush()
# Get the values at the next step
tnext = t+dt
xnext = fmin(x+dxdt*dt, tnext)
dxdt_next, negeig = dxmindt(xnext,tnext)
if dxdt_next is None or negeig == True:
# We got stuck on a saddle, so there must be a phase transition
# there.
dt *= .5
overX, overT = xnext, tnext
else:
# The step might still be too big if it's outside of our error
# tolerance.
xerr = max(np.sum((x+dxdt*dt - xnext)**2),
np.sum((xnext-dxdt_next*dt - x)**2))**.5
if xerr < deltaX_tol: # Normal step, error is small
T.append(tnext)
X.append(xnext)
dXdT.append(dxdt_next)
if overT is None:
# change the stepsize only if the last step wasn't
# troublesome
dt *= deltaX_target/(xerr+1e-100)
x,t,dxdt = xnext, tnext, dxdt_next
overX = overT = None
else:
# Either stepsize was too big, or we hit a transition.
# Just cut the step in half.
dt *= .5
overX, overT = xnext, tnext
# Now do some checks on dt.
if abs(dt) < abs(dtmin):
# Found a transition! Or at least a point where the step is really
# small.
break
if dt > 0 and t >= tstop or dt < 0 and t <= tstop:
# Reached tstop, but we want to make sure we stop right at tstop.
dt = tstop-t
x = fmin(x+dxdt*dt, tstop)
dxdt,negeig = dxmindt(x,tstop)
t = tstop
X[-1], T[-1], dXdT[-1] = x,t,dxdt
break
dtmax = max(t*dtfracMax, dtabsMax)
if abs(dt) > dtmax:
dt = np.sign(dt)*dtmax
if overT is None:
overX, overT = X[-1], T[-1]
if verbose:
sys.stdout.write('\n')
sys.stdout.flush()
X = np.array(X)
T = np.array(T)
dXdT = np.array(dXdT)
return _traceMinimum_rval(X, T, dXdT, overX, overT)
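# Example (sketch) for a toy 1-D potential f(x,T) = (x[0]**2 - 1)**2 + T*x[0], with the
# derivative callables hand-coded for this particular f; tracing the x ~ +1 minimum
# upward in T should terminate near T ~ 8/(3*sqrt(3)) ~ 1.54, where that minimum
# disappears in a saddle-node:
#
#     f = lambda x, t: (x[0]**2 - 1)**2 + t*x[0]
#     d2f_dxdt = lambda x, t: np.array([1.0])
#     d2f_dx2 = lambda x, t: np.array([[12*x[0]**2 - 4]])
#     res = traceMinimum(f, d2f_dxdt, d2f_dx2, x0=np.array([1.0]), t0=0.0,
#                        tstop=2.0, dtstart=1e-3, deltaX_target=1e-3)
#     # res.X and res.T sample the minimum; res.overT flags where the trace ended.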
class Phase(object):
"""
Describes a temperature-dependent minimum, plus second-order transitions
to and from that minimum.
Attributes
----------
key : hashable
A unique identifier for the phase (usually an int).
X, T, dXdT : array_like
The minima and its derivative at different temperatures.
tck : tuple
Spline knots and coefficients, used in `interpolate.splev`.
low_trans : set
        Phases (identified by keys) which connect to this phase at its
        low-temperature end through a second-order transition.
    high_trans : set
        Phases (identified by keys) which connect to this phase at its
        high-temperature end through a second-order transition.
"""
def __init__(self, key, X, T, dXdT):
self.key = key
# We shouldn't ever really need to sort the array, but there must be
# some bug in the above code that makes it so that occasionally the last
# step goes backwards. This should fix that.
i = np.argsort(T)
T, X, dXdT = T[i], X[i], dXdT[i]
self.X = X
self.T = T
self.dXdT = dXdT
# Make the spline:
k = 3 if len(T) > 3 else 1
tck, u = interpolate.splprep(X.T, u=T, s=0, k=k)
self.tck = tck
# Make default connections
self.low_trans = set()
self.high_trans = set()
def valAt(self, T, deriv=0):
"""
Find the minimum at the value `T` using a spline.
Parameters
----------
T : float or array_like
deriv : int
If deriv > 0, instead return the derivative of the minimum with
respect to `T`. Can return up to the third derivative for cubic
splines (when ``len(X) > 3``) or first derivative for linear
splines.
"""
T = np.asanyarray(T).T
y = interpolate.splev(T, self.tck)
return np.asanyarray(y).T
def addLinkFrom(self, other_phase):
"""
Add a link from `other_phase` to this phase, checking to see if there
is a second-order transition.
"""
if np.min(self.T) >= np.max(other_phase.T):
self.low_trans.add(other_phase.key)
other_phase.high_trans.add(self.key)
if np.max(self.T) <= np.min(other_phase.T):
self.high_trans.add(other_phase.key)
other_phase.low_trans.add(self.key)
def __repr__(self):
popts = np.get_printoptions()
np.set_printoptions(formatter={'float': lambda x: "%0.4g" % x})
if len(self.X) > 1:
Xstr = "[%s, ..., %s]" % (self.X[0], self.X[-1])
else:
Xstr = "[%s]" % self.X[0]
if len(self.T) > 1:
Tstr = "[%0.4g, ..., %0.4g]" % (self.T[0], self.T[-1])
else:
Tstr = "[%0.4g]" % self.T[0]
if len(self.dXdT) > 1:
dXdTstr = "[%s, ..., %s]" % (self.dXdT[0], self.dXdT[-1])
else:
dXdTstr = "[%s]" % self.dXdT[0]
s = "Phase(key=%s, X=%s, T=%s, dXdT=%s" % (
self.key, Xstr, Tstr, dXdTstr)
np.set_printoptions(**popts)
return s
def traceMultiMin(f, d2f_dxdt, d2f_dx2,
points, tLow, tHigh, deltaX_target,
dtstart=1e-6, tjump=1e-5, forbidCrit=None, verbose = False,
single_trace_args={}, local_min_args={}):
"""
Trace multiple minima `xmin(t)` of the function `f(x,t)`.
This function will trace the minima starting from the initial `(x,t)` values
given in `points`. When a phase disappears, the function will search for
new nearby minima, and trace them as well. In this way, if each minimum
corresponds to a different phase, this function can find the (possibly)
complete phase structure of the potential.
Parameters
----------
f : callable
The scalar function `f(x,t)` which needs to be minimized. The input will
be of the same type as each entry in the `points` parameter.
d2f_dxdt, d2f_dx2 : callable
Functions which return returns derivatives of `f(x)`. `d2f_dxdt` should
return the derivative of the gradient of `f(x)` with respect to `t`, and
`d2f_dx2` should return the Hessian matrix of `f(x)` evaluated at `t`.
Both should take as inputs `(x,t)`.
points : list
A list of points [(x1,t1), (x2,t2),...] that we want to trace, where
`x1`, `x2`, etc. are each a one-dimensional array.
tLow, tHigh : float
Lowest and highest temperatures between which to trace.
deltaX_target : float
Passed to :func:`traceMinimum` and used to set the tolerance in
minimization.
dtstart : float, optional
The starting stepsize, relative to ``tHigh-tLow``.
tjump : float, optional
The jump in `t` from the end of one phase to the initial tracing point
in another. If this is too large, intermediate phases may be skipped.
Relative to ``tHigh-tLow``.
forbidCrit : callable or None, optional
A function that determines whether or not to forbid a phase with a given
starting point. Should take a point `x` as input, and return True (if
the phase should be discarded) or False (if the phase should be kept).
single_trace_args : dict, optional
Arguments to pass to :func:`traceMinimum`.
    local_min_args : dict, optional
        Arguments to pass to :func:`findApproxLocalMin`.
Returns
-------
phases : dict
A dictionary of :class:`Phase` instances. The keys in the dictionary
are integers corresponding to the order in which the phases were
constructed.
"""
#dtstart = 1e-6
#tjump = 1e-6
# We want the minimization here to be very accurate so that we don't get
# stuck on a saddle or something. This isn't much of a bottle neck.
xeps = deltaX_target*1e-2
def fmin(x,t):
return optimize.fmin(f, x+xeps, args=(t,), xtol=xeps*1e-3,
ftol=np.inf, disp=False)
dtstart = dtstart * (tHigh-tLow)
tjump = tjump * (tHigh-tLow)
phases = {}
nextPoint = []
for p in points:
x,t = p
nextPoint.append([t,dtstart,fmin(x,t),None])
while len(nextPoint) != 0:
t1,dt1,x1,linkedFrom = nextPoint.pop()
x1 = fmin(x1, t1) # make sure we start as accurately as possible.
# Check to see if this point is outside the bounds
if t1 < tLow or (t1 == tLow and dt1 < 0):
continue
if t1 > tHigh or (t1 == tHigh and dt1 > 0):
continue
if forbidCrit is not None and forbidCrit(x1) == True:
continue
# Check to see if it's redudant with another phase
for i in list(phases.keys()):
phase = phases[i]
if (t1 < min(phase.T[0], phase.T[-1]) or
t1 > max(phase.T[0], phase.T[-1])):
continue
x = fmin(phase.valAt(t1), t1)
if np.sum((x-x1)**2)**.5 < 2*deltaX_target:
# The point is already covered
# Skip this phase and change the linkage.
if linkedFrom != i and linkedFrom is not None:
phase.addLinkFrom(phases[linkedFrom])
break
else:
# The point is not already covered. Trace the phase.
if verbose:
print("Tracing phase starting at x =", x1, "; t =", t1)
phase_key = len(phases)
oldNumPoints = len(nextPoint)
if (t1 > tLow):
if verbose:
print("Tracing minimum down")
down_trace = traceMinimum(f, d2f_dxdt, d2f_dx2, x1,
t1, tLow, -dt1, deltaX_target, verbose = verbose,
**single_trace_args)
X_down, T_down, dXdT_down, nX, nT = down_trace
t2,dt2 = nT-tjump, .1*tjump
x2 = fmin(nX,t2)
nextPoint.append([t2,dt2,x2,phase_key])
if np.sum((X_down[-1]-x2)**2) > deltaX_target**2:
for point in findApproxLocalMin(f,X_down[-1],x2,(t2,)):
nextPoint.append([t2,dt2,fmin(point,t2),phase_key])
X_down = X_down[::-1]
T_down = T_down[::-1]
dXdT_down = dXdT_down[::-1]
if (t1 < tHigh):
if verbose:
print("Tracing minimum up")
up_trace = traceMinimum(f, d2f_dxdt, d2f_dx2, x1,
t1, tHigh, +dt1, deltaX_target, verbose = verbose,
**single_trace_args)
X_up, T_up, dXdT_up, nX, nT = up_trace
t2,dt2 = nT+tjump, .1*tjump
x2 = fmin(nX,t2)
nextPoint.append([t2,dt2,x2,phase_key])
if np.sum((X_up[-1]-x2)**2) > deltaX_target**2:
for point in findApproxLocalMin(f,X_up[-1],x2,(t2,)):
nextPoint.append([t2,dt2,fmin(point,t2),phase_key])
# Then join the two together
if (t1 <= tLow):
X,T,dXdT = X_up, T_up, dXdT_up
elif (t1 >= tHigh):
X,T,dXdT = X_down, T_down, dXdT_down
else:
X = np.append(X_down, X_up[1:], 0)
T = np.append(T_down, T_up[1:], 0)
dXdT = np.append(dXdT_down, dXdT_up[1:], 0)
if forbidCrit is not None and (forbidCrit(X[0]) or
forbidCrit(X[-1])):
# The phase is forbidden.
# Don't add it, and make it a dead-end.
nextPoint = nextPoint[:oldNumPoints]
elif len(X) > 1:
newphase = Phase(phase_key, X,T,dXdT)
if linkedFrom is not None:
newphase.addLinkFrom(phases[linkedFrom])
phases[phase_key] = newphase
else:
# The phase is just a single point.
# Don't add it, and make it a dead-end.
nextPoint = nextPoint[:oldNumPoints]
if verbose:
print(phases)
return phases
def findApproxLocalMin(f, x1, x2, args=(), n=100, edge=.05):
"""
Find minima on a straight line between two points.
When jumping between phases, we want to make sure that we
don't jump over an intermediate phase. This function does a rough
calculation to find any such intermediate phases.
Parameters
----------
f : callable
The function `f(x)` to minimize.
x1, x2 : array_like
The points between which to find minima.
args : tuple, optional
Extra arguments to pass to `f`.
n : int, optional
Number of points to test for local minima.
edge : float, optional
Don't test for minima directly next to the input points. If ``edge==0``,
the minima potentially go all the way to input points. If ``edge==0.5``,
the range of tested minima shrinks to a single point at the center of
the two points.
Returns
-------
list
A list of approximate minima, with each minimum having the same shape
as `x1` and `x2`.
"""
x1,x2 = np.array(x1), np.array(x2)
dx = np.sum((x1-x2)**2)**.5
#if dx < mindeltax:
# return np.array([]).reshape(0,len(x1))
x = x1 + (x2-x1)*np.linspace(edge,1-edge,n).reshape(n,1)
y = f(x,*args)
i = (y[2:] > y[1:-1]) & (y[:-2] > y[1:-1])
return x[1:-1][i]
def _removeRedundantPhase(phases, removed_phase, redundant_with_phase):
for key in removed_phase.low_trans:
if key != redundant_with_phase.key:
p = phases[key]
p.high_trans.discard(removed_phase.key)
redundant_with_phase.addLinkFrom(p)
for key in removed_phase.high_trans:
if key != redundant_with_phase.key:
p = phases[key]
p.low_trans.discard(removed_phase.key)
redundant_with_phase.addLinkFrom(p)
del phases[removed_phase.key]
def removeRedundantPhases(f, phases, xeps=1e-5, diftol=1e-2, verbose = False):
"""
Remove redundant phases from a dictionary output by :func:`traceMultiMin`.
Although :func:`traceMultiMin` attempts to only trace each phase once, there
are still instances where a single phase gets traced twice. If a phase is
included twice, the routines for finding transition regions and tunneling
get very confused. This attempts to avoid that problem.
Parameters
----------
f : callable
The function `f(x,t)` which was passed to :func:`traceMultiMin`.
phases : dict
The output of :func:`traceMultiMin`.
xeps : float, optional
Error tolerance in minimization.
diftol : float, optional
Maximum separation between two phases before they are considered to be
coincident.
Returns
-------
None
Notes
-----
If two phases are merged to get rid of redundancy, the resulting phase has
a key that is a string combination of the two prior keys.
.. todo:: Make sure to test removeRedundantPhases().
.. todo::
        Possibly add extra logic to account for phases which coincide
at one end but not the other.
Warning
-------
This hasn't been thoroughly tested yet.
"""
# I want to make the logic extremely simple at the cost of checking the
# same thing multiple times.
    # There's just no way this function is going to be the bottleneck.
def fmin(x,t):
return np.array(optimize.fmin(f, x, args=(t,),
xtol=xeps, ftol=np.inf, disp=False))
has_redundant_phase = True
while has_redundant_phase:
has_redundant_phase = False
for i in list(phases.keys()):
for j in list(phases.keys()):
if i == j:
continue
phase1, phase2 = phases[i], phases[j]
tmax = min(phase1.T[-1], phase2.T[-1])
tmin = max(phase1.T[0], phase2.T[0])
if tmin > tmax: # no overlap in the phases
continue
if tmax == phase1.T[-1]:
x1 = phase1.X[-1]
else:
x1 = fmin(phase1.valAt(tmax), tmax)
if tmax == phase2.T[-1]:
x2 = phase2.X[-1]
else:
x2 = fmin(phase2.valAt(tmax), tmax)
dif = np.sum((x1-x2)**2)**.5
same_at_tmax = (dif < diftol)
if tmin == phase1.T[0]:
x1 = phase1.X[0]
else:
x1 = fmin(phase1.valAt(tmin), tmin)
if tmin == phase2.T[0]:
x2 = phase2.X[0]
else:
x2 = fmin(phase2.valAt(tmin), tmin)
dif = np.sum((x1-x2)**2)**.5
same_at_tmin = (dif < diftol)
if same_at_tmin and same_at_tmax:
# Phases are redundant
has_redundant_phase = True
p_low = phase1 if phase1.T[0] < phase2.T[0] else phase2
p_high = phase1 if phase1.T[-1] > phase2.T[-1] else phase2
if p_low is p_high:
p_reject = phase1 if p_low is phase2 else phase2
_removeRedundantPhase(phases, p_reject, p_low)
else:
i = p_low.T <= tmax
T_low = p_low.T[i]
X_low = p_low.X[i]
dXdT_low = p_low.dXdT[i]
i = p_high.T > tmax
T_high = p_high.T[i]
X_high = p_high.X[i]
dXdT_high = p_high.dXdT[i]
T = np.append(T_low, T_high, axis=0)
X = np.append(X_low, X_high, axis=0)
dXdT = np.append(dXdT_low, dXdT_high, axis=0)
newkey = str(p_low.key) + "_" + str(p_high.key)
newphase = Phase(newkey, X, T, dXdT)
phases[newkey] = newphase
_removeRedundantPhase(phases, p_low, newphase)
_removeRedundantPhase(phases, p_high, newphase)
break
elif same_at_tmin or same_at_tmax:
if verbose:
print("ERROR, Two phases have been found and the necessary function to combine them is not implemented.")
# raise NotImplementedError(
# "Two phases have been found to coincide at one end "
# "but not the other. Ideally, this function would "
# "find where the two diverge, make a cut, and join them "
# "such there are no more phase redundancies.\n"
# "Instead, just raise an exception."
# )
if has_redundant_phase:
break
def getStartPhase(phases, V=None):
"""
Find the key for the high-T phase.
Parameters
----------
phases : dict
Output from :func:`traceMultiMin`.
V : callable
The potential V(x,T). Only necessary if there are
multiple phases with the same Tmax.
"""
startPhases = []
startPhase = None
Tmax = None
assert len(phases) > 0
for i in list(phases.keys()):
if phases[i].T[-1] == Tmax:
# add this to the startPhases list.
startPhases.append(i)
elif Tmax is None or phases[i].T[-1] > Tmax:
startPhases = [i]
Tmax = phases[i].T[-1]
if len(startPhases) == 1 or V is None:
startPhase = startPhases[0]
else:
# more than one phase have the same maximum temperature
# Pick the stable one at high temp.
Vmin = None
for i in startPhases:
V_ = V(phases[i].X[-1], phases[i].T[-1])
if Vmin is None or V_ < Vmin:
Vmin = V_
startPhase = i
assert startPhase in phases
return startPhase
def _tunnelFromPhaseAtT(T, phases, start_phase, V, dV,
phitol, overlapAngle, nuclCriterion,
fullTunneling_params, verbose, outdict):
"""
Find the lowest action tunneling solution.
Return ``nuclCriterion(S,T)``, and store a dictionary describing the
transition in outdict for key `T`.
"""
try:
T = T[0] # need this when the function is run from optimize.fmin
except:
pass
if T in outdict:
return nuclCriterion(outdict[T]['action'], T)
def fmin(x):
return optimize.fmin(V, x, args=(T,),
xtol=phitol, ftol=np.inf, disp=False)
# Loop through all the phases, adding acceptable minima
x0 = fmin(start_phase.valAt(T))
V0 = V(x0, T)
tunnel_list = []
for key in phases.keys():
if key == start_phase.key:
continue
p = phases[key]
if (p.T[0] > T or p.T[-1] < T):
continue
x1 = fmin(p.valAt(T))
V1 = V(x1, T)
if V1 >= V0:
continue
tdict = dict(low_vev=x1, high_vev=x0, Tnuc=T,
low_phase=key, high_phase=start_phase.key)
tunnel_list.append(tdict)
# Check for overlap
if overlapAngle > 0:
excluded = []
cos_overlap = np.cos(overlapAngle * np.pi/180)
for i in range(1, len(tunnel_list)):
for j in range(i):
xi = tunnel_list[i]['low_vev']
xj = tunnel_list[j]['low_vev']
xi2 = np.sum((xi-x0)**2)
xj2 = np.sum((xj-x0)**2)
dotij = np.sum((xj-x0)*(xi-x0))
if dotij >= np.sqrt(xi2*xj2) * cos_overlap:
excluded.append(i if xi2 > xj2 else j)
for i in sorted(excluded)[::-1]:
del tunnel_list[i]
# Get rid of the T parameter for V and dV
def V_(x,T=T,V=V): return V(x,T)
def dV_(x,T=T,dV=dV): return dV(x,T)
# For each item in tunnel_list, try tunneling
lowest_action = np.inf
lowest_tdict = dict(action=np.inf)
for tdict in tunnel_list:
x1 = tdict['low_vev']
try:
print("Tunneling from phase %s to phase %s at T=%0.4g"
% (tdict['high_phase'], tdict['low_phase'], T))
print("high_vev =", tdict['high_vev'])
print("low_vev =", tdict['low_vev'])
tobj = pathDeformation.fullTunneling(
[x1,x0], V_, dV_, callback_data=T,
**fullTunneling_params)
tdict['instanton'] = tobj
tdict['action'] = tobj.action
tdict['trantype'] = 1
except tunneling1D.PotentialError as err:
if err.args[1] == "no barrier":
tdict['trantype'] = 0
tdict['action'] = 0.0
elif err.args[1] == "stable, not metastable":
tdict['trantype'] = 0
tdict['action'] = np.inf
else:
print("Unexpected error message.")
raise
if tdict['action'] <= lowest_action:
lowest_action = tdict['action']
lowest_tdict = tdict
outdict[T] = lowest_tdict
return nuclCriterion(lowest_action, T)
def _tunnelFromPhaseAtTGW(T, phases, start_phase, V, dV, phitol, overlapAngle, nuclCriterion, fullTunneling_params, verbose, outdict, SEucl, TEucl):
"""
    Extends _tunnelFromPhaseAtT by defining the sign of the action in the
    case that it is calculated as np.inf. This is useful because otherwise the
    root finder brentq may not work in some cases: it requires different signs
    for f(a) and f(b), where a and b are the interval's boundaries. Additionally,
    all potentially interesting points S(T) for the calculation of the derivative
    of the action are collected in SEucl and TEucl. Potentially interesting
    points are defined as those at which the nucleation criterion is fulfilled
    up to np.abs(nuclCriterion(lowest_action, T)) < 1.
Attributes:
-----------
SEucl : list, float
        Calculated actions while executing the program. Used to calculate the
derivative of the action in tunnelFromPhaseGW.
TEucl : list, float
The temperatures at which the interesting actions SEucl have been
calculated.
"""
sign = 1
if T <= start_phase.T[0]:
# Set the sign to -1 for temperatures below Tmin
# where Tmin is the lowest t of the start_phase
sign = -1
try:
T = T[0] # need this when the function is run from optimize.fmin
except:
pass
if T in outdict:
return nuclCriterion(outdict[T]['action'], T)
def fmin(x):
return optimize.fmin(V, x, args=(T,), xtol=phitol, ftol=np.inf, disp=False)
# Loop through all the phases, adding acceptable minima
x0 = fmin(start_phase.valAt(T))
V0 = V(x0, T)
tunnel_list = []
for key in list(phases.keys()):
if key == start_phase.key:
continue
p = phases[key]
if (p.T[0] > T or p.T[-1] < T):
continue
x1 = fmin(p.valAt(T))
V1 = V(x1, T)
if V1 >= V0:
#if verbose:
# print(T, x1, x0, V1, V0)
continue
tdict = dict(low_vev=x1, high_vev=x0, Tnuc=T,
low_phase=key, high_phase=start_phase.key)
tunnel_list.append(tdict)
# Check for overlap
if overlapAngle > 0:
excluded = []
cos_overlap = np.cos(overlapAngle * np.pi/180)
for i in range(1, len(tunnel_list)):
for j in range(i):
xi = tunnel_list[i]['low_vev']
xj = tunnel_list[j]['low_vev']
xi2 = np.sum((xi-x0)**2)
xj2 = np.sum((xj-x0)**2)
dotij = np.sum((xj-x0)*(xi-x0))
if dotij >= np.sqrt(xi2*xj2) * cos_overlap:
excluded.append(i if xi2 > xj2 else j)
for i in sorted(excluded)[::-1]:
del tunnel_list[i]
# Get rid of the T parameter for V and dV
def V_(x,T=T,V=V): return V(x,T)
def dV_(x,T=T,dV=dV): return dV(x,T)
#print(T)
#lin = np.linspace(0, 200, 1000)
#V3 = np.zeros(lin.shape)
#for i in range(1000):
# V3[i] = V_([lin[i]])
#plt.plot(lin, V3)
#plt.show()
#print(tunnel_list)
# For each item in tunnel_list, try tunneling
# Set here the sign to get -np.inf if necessary.
lowest_action = sign*np.inf
lowest_tdict = dict(action=sign*np.inf)
for tdict in tunnel_list:
x1 = tdict['low_vev']
try:
if verbose:
print("Tunneling from phase %s to phase %s at T=%0.8g" % (tdict['high_phase'], tdict['low_phase'], T))
print("high_vev =", tdict['high_vev'])
print("low_vev =", tdict['low_vev'])
tobj = pathDeformation.fullTunneling(
[x1,x0], V_, dV_, callback_data=T,
**fullTunneling_params)
tdict['instanton'] = tobj
tdict['action'] = tobj.action
tdict['trantype'] = 1
except tunneling1D.PotentialError as err:
if err.args[1] == "no barrier":
if verbose:
print("No barrier!")
tdict['trantype'] = 0
tdict['action'] = 0.0
elif err.args[1] == "stable, not metastable":
if verbose:
print("Stable, not metastable!")
tdict['trantype'] = 0
tdict['action'] = np.inf
else:
if verbose:
print("Unexpected error message.")
raise
if tdict['action'] <= lowest_action:
lowest_action = tdict['action']
lowest_tdict = tdict
outdict[T] = lowest_tdict
if verbose:
# Print the currently calculated euclidean action, the temperature,
# their ratio and and the nucleation criterion at that point
print("S = ", lowest_action, ", T = ", T, ", S/T = ", lowest_action/(T + 1e-100), ", S/T - crit = ", nuclCriterion(lowest_action, T), "\n")
# Fill SEucl and TEucl to pass it over to tunnelFromPhaseGW
if np.abs(nuclCriterion(lowest_action, T)) < 1:
#if np.abs(lowest_action/(T + 1e-100)) != np.inf:
# activate lower 'if' for plot of nucleation criterion in function tunnelFromPhase
SEucl.append(lowest_action/(T + 1e-100))
TEucl.append(T + 1e-100)
return nuclCriterion(lowest_action, T)
def _potentialDiffForPhase(T, start_phase, other_phases, V):
"""
Returns the maximum difference between the other phases and `start_phase`.
Return value is positive/negative when `start_phase` is stable/unstable.
"""
V0 = V(start_phase.valAt(T),T)
delta_V = np.inf
for phase in other_phases:
V1 = V(phase.valAt(T),T)
if V1-V0 < delta_V:
delta_V = V1-V0
return delta_V
def _maxTCritForPhase(phases, start_phase, V, Ttol):
"""
Find the maximum temperature at which `start_phase` is degenerate with one
of the other phases.
"""
other_phases = []
for phase in list(phases.values()):
if phase.key != start_phase.key:
other_phases.append(phase)
if len(other_phases) == 0:
# No other phases, just return the lowest temperature
return start_phase.T[0]
Tmin = min([phase.T[0] for phase in other_phases])
Tmax = max([phase.T[-1] for phase in other_phases])
Tmin = max(Tmin, start_phase.T[0])
Tmax = min(Tmax, start_phase.T[-1])
DV_Tmin = _potentialDiffForPhase(Tmin, start_phase, other_phases, V)
DV_Tmax = _potentialDiffForPhase(Tmax, start_phase, other_phases, V)
if DV_Tmin >= 0: return Tmin # stable at Tmin
if DV_Tmax <= 0: return Tmax # unstable at Tmax
return optimize.brentq(
_potentialDiffForPhase, Tmin, Tmax,
args=(start_phase, other_phases, V),
xtol=Ttol, maxiter=200, disp=False)
def tunnelFromPhase(phases, start_phase, V, dV, Tmax,
Ttol=1e-3, maxiter=100, phitol=1e-8, overlapAngle=45.0,
nuclCriterion=lambda S,T: S/(T+1e-100) - 140.0,
verbose=True,
fullTunneling_params={}):
"""
    Find the instanton and nucleation temperature for tunneling from
`start_phase`.
Parameters
----------
phases : dict
Output from :func:`traceMultiMin`.
start_phase : Phase object
The metastable phase from which tunneling occurs.
V, dV : callable
The potential V(x,T) and its gradient.
Tmax : float
The highest temperature at which to try tunneling.
Ttol : float, optional
Tolerance for finding the nucleation temperature.
maxiter : int, optional
Maximum number of times to try tunneling.
phitol : float, optional
Tolerance for finding the minima.
overlapAngle : float, optional
If two phases are in the same direction, only try tunneling to the
closer one. Set to zero to always try tunneling to all available phases.
nuclCriterion : callable
Function of the action *S* and temperature *T*. Should return 0 for the
correct nucleation rate, > 0 for a low rate and < 0 for a high rate.
Defaults to ``S/T - 140``.
verbose : bool
If true, print a message before each attempted tunneling.
fullTunneling_params : dict
Parameters to pass to :func:`pathDeformation.fullTunneling`.
Returns
-------
dict or None
A description of the tunneling solution at the nucleation temperature,
or None if there is no found solution. Has the following keys:
- *Tnuc* : the nucleation temperature
- *low_vev, high_vev* : vevs for the low-T phase (the phase that the
instanton tunnels to) and high-T phase (the phase that the instanton
tunnels from).
- *low_phase, high_phase* : identifier keys for the low-T and high-T
phases.
- *action* : The Euclidean action of the instanton.
- *instanton* : Output from :func:`pathDeformation.fullTunneling`, or
None for a second-order transition.
- *trantype* : 1 or 2 for first or second-order transitions.
"""
outdict = {} # keys are T values
args = (phases, start_phase, V, dV,
phitol, overlapAngle, nuclCriterion,
fullTunneling_params, verbose, outdict)
Tmin = start_phase.T[0]
T_highest_other = Tmin
for phase in phases.values():
T_highest_other = max(T_highest_other, phase.T[-1])
Tmax = min(Tmax, T_highest_other)
assert Tmax >= Tmin
try:
Tnuc = optimize.brentq(_tunnelFromPhaseAtT, Tmin, Tmax, args=args,
xtol=Ttol, maxiter=maxiter, disp=False)
except ValueError as err:
if err.args[0] != "f(a) and f(b) must have different signs":
raise
if nuclCriterion(outdict[Tmax]['action'], Tmax) > 0:
            if nuclCriterion(outdict[Tmin]['action'], Tmin) < 0:
# tunneling *may* be possible. Find the minimum.
# It's important to make an appropriate initial guess;
# otherwise the minimization routine may get get stuck in a
# region where the action is infinite. Modify Tmax.
Tmax = _maxTCritForPhase(phases, start_phase, V, Ttol)
def abort_fmin(T, outdict=outdict, nc=nuclCriterion):
T = T[0] # T is an array of size 1
if nc(outdict[T]['action'], T) <= 0:
raise StopIteration(T)
try:
Tmin = optimize.fmin(_tunnelFromPhaseAtT, 0.5*(Tmin+Tmax),
args=args, xtol=Ttol*10, ftol=1.0,
maxiter=maxiter, disp=0,
callback=abort_fmin)[0]
except StopIteration as err:
Tmin = err.args[0]
if nuclCriterion(outdict[Tmin]['action'], Tmin) > 0:
# no tunneling possible
return None
Tnuc = optimize.brentq(
_tunnelFromPhaseAtT, Tmin, Tmax,
args=args, xtol=Ttol, maxiter=maxiter, disp=False)
else:
# no tunneling possible
return None
else:
# tunneling happens right away at Tmax
Tnuc = Tmax
rdict = outdict[Tnuc]
return rdict if rdict['trantype'] > 0 else None
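# Editor's sketch (not part of the original module): a typical call sequence for
# tunnelFromPhase, assuming `phases` is the dict produced by traceMultiMin (after
# removeRedundantPhases) and `V`, `dV` are the potential and its gradient.
def _demo_tunnelFromPhase(phases, V, dV):
    start_phase = phases[getStartPhase(phases, V)]
    Tmax = start_phase.T[-1]
    trans = tunnelFromPhase(phases, start_phase, V, dV, Tmax)
    if trans is not None:
        print("Nucleation temperature:", trans['Tnuc'])
    return trans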
def tunnelFromPhaseGW(phases, start_phase, V, dV, Tmax, Ttol = 1e-3, maxiter = 100, phitol = 1e-8, overlapAngle = 45.0,
nuclCriterion = lambda S, T: S/(T+1e-100) - 140, verbose = False, fullTunneling_params={}):
"""
Extends the function tunnelFromPhase by the calculation of the GW
parameter beta / H.
    Find the instanton and nucleation temperature for tunneling from
`start_phase`. Compute the derivative of the instanton action at
the nucleation temperature to calculate the GW spectrum parameter
beta / H. To do that, use _tunnelFromPhaseAtTGW and not
_tunnelFromPhaseAtT to pass over points of support of the
action S(T) as a function of temperature with which the necessary
derivative can be calculated by fitting a linear function to the
passed points. To avoid numerical errors, an upper and a lower fit
of the data (T > Tnuc, T < Tnuc) are calculated and then compared.
    If during this procedure an error occurs: return a dictionary with
a code explaining the reason and position of the error.
Parameters
----------
phases : dict
Output from :func:`traceMultiMin`.
start_phase : Phase object
The metastable phase from which tunneling occurs.
V, dV : callable
The potential V(x,T) and its gradient.
    Tmax : float
        The highest temperature at which to try tunneling. In
        :func:`findAllTransitionsGW` this is taken to be the critical
        temperature found by :func:`findCriticalTemperatures`.
Ttol : float, optional
Tolerance for finding the nucleation temperature.
maxiter : int, optional
Maximum number of times to try tunneling.
phitol : float, optional
Tolerance for finding the minima.
overlapAngle : float, optional
If two phases are in the same direction, only try tunneling to the
closer one. Set to zero to always try tunneling to all available phases.
nuclCriterion : callable
Function of the action *S* and temperature *T*. Should return 0 for the
correct nucleation rate, > 0 for a low rate and < 0 for a high rate.
Defaults to ``S/T - 140``.
verbose : bool
If true, print a message before each attempted tunneling.
fullTunneling_params : dict
Parameters to pass to :func:`pathDeformation.fullTunneling`.
Returns
-------
dict or None
A description of the tunneling solution at the nucleation temperature,
or None if there is no found solution. Has the following keys:
- *Tnuc* : the nucleation temperature
- *betaH*: the GW spectrum parameter beta/H
- *low_vev, high_vev* : vevs for the low-T phase (the phase that the
instanton tunnels to) and high-T phase (the phase that the instanton
tunnels from).
- *low_phase, high_phase* : identifier keys for the low-T and high-T
phases.
- *action* : The Euclidean action of the instanton.
- *instanton* : Output from :func:`pathDeformation.fullTunneling`, or
None for a second-order transition.
- *trantype* : 1 or 2 for first or second-order transitions.
"""
rel_slope_error = 0.01 # 0.05
N_first_try = 50 # 32 number of points of support generated for a fit above and below Tnuc
N_second_try = 100 # 15
N_cutted_fit = 15 # 10
Tmin = start_phase.T[0]
if Tmin >= Tmax:
if verbose:
print("Found that Tmin is bigger than Tmax: Tunneling cannot occurr.")
errordict = dict(error = 10)
return errordict
SEucl = []
TEucl = []
outdict = {} # keys are T values
args = (phases, start_phase, V, dV, phitol, overlapAngle, nuclCriterion, fullTunneling_params, verbose, outdict, SEucl, TEucl)
try:
# Calculate the nucleation temperature by finding the root of the nucleation criterion
if verbose:
print("\n# Brentq at Tmin = ", Tmin, " Tmax = ", Tmax)
Tnuc = optimize.brentq(_tunnelFromPhaseAtTGW, Tmin, Tmax, args=args, xtol=Ttol, maxiter=maxiter, disp=False)
if verbose:
print("# Brentq done, found Tnuc = ", Tnuc, "\n")
except:
if verbose:
print("No temperature at which the nucleation criterion can be fulfilled was found.")
errordict = dict(error = 7)
return errordict
def plot_nucl_crit(Tmin = Tmin, Tmax = Tmax, N = 100):
# Plot the value of the nucleation criterion with N points of support
# between Tmin and Tmax
Tminplot = Tmin
Tmaxplot = Tmax
T_list = np.linspace(Tminplot, Tmaxplot, N)
for T_extra in T_list:
try:
_tunnelFromPhaseAtTGW(T_extra, phases, start_phase, V, dV, phitol, overlapAngle, nuclCriterion, fullTunneling_params, verbose, outdict, SEucl, TEucl)
except:
if verbose:
print("Had a problem adding the temperature ", T_extra, " to the outdict.")
STEucl = list(zip(TEucl,SEucl))
STEucl.sort()
SEucl_srt = [x for T, x in STEucl]
TEucl_srt = [y for y, S in STEucl]
nucl_list = np.zeros(len(SEucl_srt))
nucl2_list = np.zeros(len(SEucl_srt))
plt.plot(TEucl_srt, SEucl_srt)
plt.title("S/T vs. temperature")
#plt.plot(TEucl_srt, SEucl_srt, "x")
plt.xlim(Tminplot, Tmaxplot)
plt.yscale("log")
plt.grid()
#plt.savefig("nuclcrit_T_g_is_085_l_is_001")
plt.show()
for i in range(len(SEucl_srt)):
nucl_list[i] = nuclCriterion(SEucl_srt[i]*TEucl_srt[i], TEucl_srt[i])
plt.plot(TEucl_srt, np.abs(nucl_list))
plt.title("|Nucleation criterion| vs. temperature")
#plt.plot(TEucl_srt, np.abs(nucl_list), "x")
plt.xlim(Tminplot, Tmaxplot)
plt.yscale("log")
plt.grid()
#plt.savefig("nuclcrit_T_g_is_085_l_is_001")
plt.show()
for i in range(len(SEucl_srt)):
nucl2_list[i] = -nuclCriterion(SEucl_srt[i]*TEucl_srt[i], TEucl_srt[i]) + SEucl_srt[i]
plt.plot(TEucl_srt, nucl2_list)
plt.plot(TEucl_srt, SEucl_srt)
plt.title("action and criterion vs. temperature")
#plt.plot(TEucl_srt, np.abs(nucl_list), "x")
plt.xlim(Tminplot, Tmaxplot)
plt.yscale("log")
plt.grid()
#plt.savefig("nuclcrit_T_g_is_085_l_is_001")
plt.show()
#np.savetxt("T_plot.txt", TEucl_srt)
#np.savetxt("S_plot.txt", SEucl_srt)
#np.savetxt("N_plot.txt", nucl_list)
#plot_nucl_crit(Tmin = 46, Tmax = 51.3, N = 100)
# Check, whether there are enough points of support to calculate the necessary derivative
# If not the case, calculate more points of support.
if len(TEucl) == 0:
if verbose:
print("Found a temperature which fulfills the nucleation criterion sufficiently well in brentq, but not good enough (|_tunnelFromPhaseAtT| < 1) to go on from here. I.e. most probably: a too-supercooled phase.")
errordict = dict(error = 0)
return errordict
if len(TEucl) == 1:
if verbose:
print("Found only one point that fulfills the nucleation criterion sufficiently well (|_tunnelFromPhaseAtT| < 1). This isn't suffient for a proper calculation of the beta parameter.")
errordict = dict(error = 1)
return errordict
if len(TEucl) <= N_first_try: #need more points for good regression!
N_to_add = N_first_try - len(TEucl)
if verbose:
print("Found only", len(TEucl), " points. Add", N_to_add, "more to calculate the derivative of the euclidean action properly.")
TEucl_min = min(TEucl)
TEucl_max = max(TEucl)
TEucl_between = np.linspace(TEucl_min, TEucl_max, N_to_add)
        # Add more points so that there are N_first_try points of support altogether
for T_extra in TEucl_between:
try:
_tunnelFromPhaseAtTGW(T_extra, phases, start_phase, V, dV, phitol, overlapAngle, nuclCriterion, fullTunneling_params, verbose, outdict, SEucl, TEucl)
except:
if verbose:
print("Had a problem adding the temperature ", T_extra, " to the outdict.")
# Calculation of beta / H
def lin_fit(x, a, b):
# General linear function
return a*x + b
def make_fit():
# Make a linear fit using TEucl and SEucl
        # return the slope, its error and whether the covariance matrix
# has an infinitely large element
STEucl = list(zip(TEucl,SEucl))
STEucl.sort()
SEucl_srt = [x for T, x in STEucl]
TEucl_srt = [y for y, S in STEucl]
if verbose:
print("\nFit through ", len(TEucl_srt), " points.")
np.seterr(over = 'ignore')
try:
params, covariance_matrix = curve_fit(lin_fit, TEucl_srt, SEucl_srt)
except:
if verbose:
print("Fit was not successfull.")
return -1, 1, True
errors = np.sqrt(np.diag(np.abs(covariance_matrix)))
np.seterr(over = 'warn')
def plot_fit():
plt.plot(TEucl_srt, SEucl_srt, "X")
a = np.zeros(len(TEucl_srt))
for i in range(len(TEucl_srt)):
a[i] = params[0]*TEucl_srt[i] + params[1]
plt.plot(TEucl_srt, a)
plt.axvline(x=Tnuc)
plt.show()
#plot_fit()
covisinf = np.any(covariance_matrix[covariance_matrix == np.inf])
if verbose:
print("Fit was successfull.")
return params[0], errors[0], covisinf
def make_cutted_fit():
# Make two linear fits above and below Tnuc using TEucl and SEucl.
        # If the slopes don't match up, fall back to the single linear
        # regression make_fit. If the slopes coincide within the relative
        # tolerance rel_slope_error, return a weighted mean of them (weights:
        # number of points of support of the respective fit). If one of the
        # fits is trustworthy, but the other one is not, take that one to
        # return the expected values.
STEucl = list(zip(TEucl,SEucl))
STEucl.sort()
SEucl_srt = [x for T, x in STEucl]
TEucl_srt = [y for y, S in STEucl]
SEucl_srt_lo = []
TEucl_srt_lo = []
SEucl_srt_hi = []
TEucl_srt_hi = []
for i in range(len(TEucl_srt)):
if TEucl_srt[i] < Tnuc:
TEucl_srt_lo.append(TEucl_srt[i])
SEucl_srt_lo.append(SEucl_srt[i])
else:
TEucl_srt_hi.append(TEucl_srt[i])
SEucl_srt_hi.append(SEucl_srt[i])
if verbose:
print("\nFit through ", len(TEucl_srt_lo), " points below and ", len(TEucl_srt_hi), " points above Tnuc.")
if len(TEucl_srt_lo) < N_cutted_fit or len(TEucl_srt_hi) < N_cutted_fit:
if verbose:
print("Not enough points for a fit below or above. Try fitting all points in one line.")
return make_fit()
np.seterr(over = 'ignore')
try:
params_hi, covariance_matrix_hi = curve_fit(lin_fit, TEucl_srt_hi, SEucl_srt_hi)
errors_hi = np.sqrt(np.diag(np.abs(covariance_matrix_hi)))
except:
if verbose:
print("Upper fit was not successfull.")
return -1, 1, True
try:
params_lo, covariance_matrix_lo = curve_fit(lin_fit, TEucl_srt_lo, SEucl_srt_lo)
errors_lo = np.sqrt(np.diag(np.abs(covariance_matrix_lo)))
except:
if verbose:
print("Lower fit was not successfull.")
return -1, 1, True
np.seterr(over = 'warn')
def plot_fit():
plt.plot(TEucl_srt_lo, SEucl_srt_lo, "X")
plt.plot(TEucl_srt_hi, SEucl_srt_hi, "X")
a = np.zeros(len(TEucl_srt))
b = np.zeros(len(TEucl_srt))
for i in range(len(TEucl_srt)):
a[i] = params_hi[0]*TEucl_srt[i] + params_hi[1]
b[i] = params_lo[0]*TEucl_srt[i] + params_lo[1]
plt.plot(TEucl_srt, a)
plt.plot(TEucl_srt, b)
plt.axvline(x=Tnuc)
plt.show()
#plot_fit()
if np.abs((params_lo[0] - params_hi[0])/ params_hi[0]) < rel_slope_error: # if upper and lower slopes are compatible
covisinf = np.any(covariance_matrix_hi[covariance_matrix_hi == np.inf])
covisinf = covisinf or np.any(covariance_matrix_lo[covariance_matrix_lo == np.inf])
slope_mean = (params_lo[0] * len(TEucl_srt_lo) + params_hi[0] * len(TEucl_srt_hi)) / len(TEucl_srt)
errors = max(errors_hi[0], errors_lo[0])
return slope_mean, errors, covisinf
elif np.abs(errors_hi[0]/params_hi[0]) < rel_slope_error:
covisinf = np.any(covariance_matrix_hi[covariance_matrix_hi == np.inf])
return params_hi[0], errors_hi[0], covisinf
elif np.abs(errors_lo[0]/params_lo[0]) < rel_slope_error:
covisinf = np.any(covariance_matrix_lo[covariance_matrix_lo == np.inf])
return params_lo[0], errors_lo[0], covisinf
else:
return make_fit()
slope, slope_error, covisinf = make_cutted_fit()
# Test if the calculation of the derivative is certain enough,
# else, calculate more points of support and retry.
if np.abs(slope_error/slope) > rel_slope_error:
if verbose:
print(r"Found a regression with an too high uncertainty. Calculate more points of support.")
TEucl_min = min(TEucl)
TEucl_max = max(TEucl)
TEucl_between = np.linspace(TEucl_min, TEucl_max, N_second_try)
for T_extra in TEucl_between:
try:
_tunnelFromPhaseAtTGW(T_extra, phases, start_phase, V, dV, phitol, overlapAngle, nuclCriterion, fullTunneling_params, verbose, outdict, SEucl, TEucl)
except:
if verbose:
print("Had a problem adding the temperature ", T_extra, " to the outdict.")
slope, slope_error, covisinf = make_cutted_fit()
if np.abs(slope_error/slope) > rel_slope_error: # check if still uncertain
if covisinf:
if verbose:
print("There were at least two same points when calculating the linear regression for calculating beta. This yielded an infinte covariance matrix.")
print("Slope:", slope, "Slope error: ", slope_error, "Relative slope error: ", slope_error/slope, "Covariance matrix is infinte:", covisinf)
errordict = dict(error = 8)
if verbose:
print(r"Adding more points of support to calculate the derivative of the euclidean action didn't yield a result with a relative error below 1 percent.")
print("Slope:", slope, "Slope error: ", slope_error, "Relative slope error: ", slope_error/slope, "Covariance matrix is infinte:", covisinf)
errordict = dict(error = 3)
return errordict
# Calculate beta/H as given by
# beta/ H = Tnuc * (dS/dT)_(T = Tnuc)
# and pass it to the dictionary
rdict = outdict[Tnuc]
rdict['betaH'] = Tnuc * slope
if rdict['betaH'] < 0:
if verbose:
print("Due to some strange reason a negative beta was calculated. This shouldn't happen at all.")
errordict = dict(error = 2)
return errordict
if not rdict['trantype'] > 0:
if verbose:
print("Due to some strange reason a neither first, nor second-order transition has been documented. This shouldn't happen at all.")
errordict = dict(error = 6)
return errordict
return rdict
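# Editor's sketch (not part of the original module): tunnelFromPhaseGW either
# returns a transition dictionary containing 'betaH', or a dictionary whose only
# meaningful key is an integer 'error' code; a caller might separate the two cases
# as below (the inputs are assumed to be the same as for tunnelFromPhase).
def _demo_tunnelFromPhaseGW(phases, start_phase, V, dV, Tcrit):
    result = tunnelFromPhaseGW(phases, start_phase, V, dV, Tcrit)
    if 'error' in result:
        print("beta/H could not be computed, error code:", result['error'])
        return None
    print("Tnuc =", result['Tnuc'], ", beta/H =", result['betaH'])
    return result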
def secondOrderTrans(high_phase, low_phase, Tstr='Tnuc'):
"""
Assemble a dictionary describing a second-order phase transition.
"""
rdict = {}
rdict[Tstr] = 0.5*(high_phase.T[0] + low_phase.T[-1])
rdict['low_vev'] = rdict['high_vev'] = high_phase.X[0]
rdict['low_phase'] = low_phase.key
rdict['high_phase'] = high_phase.key
rdict['action'] = 0.0
rdict['instanton'] = None
rdict['trantype'] = 2
# MANUALLY ADDED GW PARAMETERS
rdict['alpha'] = -2.
rdict['beta'] = -2.
return rdict
def findAllTransitions(phases, V, dV, tunnelFromPhase_args={}):
"""
Find the complete phase transition history for the potential `V`.
This functions uses :func:`tunnelFromPhase` to find the transition
temperature and instanton for each phase, starting at the highest phase
in the potential. Note that if there are multiple transitions that could
occur at the same minimum (if, for example, there is a Z2 symmetry or
a second-order transition breaks in multiple directions), only one of the
transitions will be used.
Parameters
----------
phases : dict
Output from :func:`traceMultiMin`.
V, dV : callable
The potential function and its gradient, each a function of field
value (which should be an array, not a scalar) and a temperature.
tunnelFromPhase_args : dict
Parameters to pass to :func:`tunnelFromPhase`.
Returns
-------
list of transitions
Each item is a dictionary describing the transition (see
:func:`tunnelFromPhase` for keys). The first transition is the one at
the highest temperature.
"""
phases = phases.copy()
start_phase = phases[getStartPhase(phases, V)]
Tmax = start_phase.T[-1]
transitions = []
while start_phase is not None:
del phases[start_phase.key]
trans = tunnelFromPhase(phases, start_phase, V, dV, Tmax,
**tunnelFromPhase_args)
if trans is None and not start_phase.low_trans:
start_phase = None
elif trans is None:
low_key = None
for key in start_phase.low_trans:
if key in phases:
low_key = key
break
if low_key is not None:
low_phase = phases[low_key]
transitions.append(secondOrderTrans(start_phase, low_phase))
start_phase = low_phase
Tmax = low_phase.T[-1]
else:
start_phase = None
else:
transitions.append(trans)
start_phase = phases[trans['low_phase']]
Tmax = trans['Tnuc']
return transitions
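# Editor's sketch (not part of the original module): the usual end-to-end flow once
# the phases have been traced. `f`/`df` are the potential and its gradient, and
# `phases` is assumed to be the dict produced by traceMultiMin for that potential.
def _demo_transition_history(f, df, phases):
    removeRedundantPhases(f, phases)
    full_trans = findAllTransitions(phases, f, df)
    crit_trans = findCriticalTemperatures(phases, f)
    addCritTempsForFullTransitions(phases, crit_trans, full_trans)
    return full_trans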
def findAllTransitionsGW(phases, V, dV, transC, verbose = False, tunnelFromPhase_args={}):
"""
    Works just like findAllTransitions, but calls tunnelFromPhaseGW instead of
    tunnelFromPhase to compute the tunneling parameters; tunnelFromPhaseGW also
    calculates beta/H and puts it into the dictionary that findAllTransitionsGW
    returns. The additional input transC provides the investigated model's
    critical temperature, which the original tunnelFromPhase didn't use.
Parameters
----------
phases : dict
Output from :func:`traceMultiMin`.
V, dV : callable
The potential function and its gradient, each a function of field
value (which should be an array, not a scalar) and a temperature.
transC : dict
Found transition (including a critical temperature) from findCriticalTemperature.
This is necessary to pass over the model's nucleation criterion
verbose : Boolean
Decides whether there will be an output or not during the following calculations.
tunnelFromPhase_args : dict
Parameters to pass to :func:`tunnelFromPhase`.
Returns
-------
list of transitions
Each item is a dictionary describing the transition (see
:func:`tunnelFromPhase` for keys). The first transition is the one at
the highest temperature. In the case of an error, the dictionary will
have the key 'error' in which an explanatory errorkey is saved.
"""
phases = phases.copy()
start_phase = phases[getStartPhase(phases, V)]
#start_phase = phases[1]
#Tmax = start_phase.T[-1]
Tmax = transC['Tcrit']
transitions = []
while start_phase is not None:
del phases[start_phase.key]
trans = tunnelFromPhaseGW(phases, start_phase, V, dV, Tmax, verbose = verbose, **tunnelFromPhase_args)
if 'error' in trans:
return trans
if trans is None and not start_phase.low_trans:
start_phase = None
elif trans is None:
low_key = None
for key in start_phase.low_trans:
if key in phases:
low_key = key
break
if low_key is not None:
low_phase = phases[low_key]
transitions.append(secondOrderTrans(start_phase, low_phase))
start_phase = low_phase
Tmax = low_phase.T[-1]
else:
start_phase = None
else:
if "low_phase" not in trans:
trans["error"] = 8
transitions.append(trans)
return transitions
transitions.append(trans)
start_phase = phases[trans['low_phase']]
Tmax = trans['Tnuc']
start_phase = None # to stop program from another run with T interval between 0 and old Tmin
return transitions
def findCriticalTemperatures(phases, V, start_high=False):
"""
Find all temperatures `Tcrit` such that there is degeneracy between any
two phases.
Parameters
----------
phases : dict
Output from :func:`traceMultiMin`.
V : callable
The potential function `V(x,T)`, where `x` is the field value (which
should be an array, not a scalar) and `T` is the temperature.
start_high : bool, optional
If True, only include those transitions which could be reached starting
from the high-T phase. NOT IMPLEMENTED YET.
Returns
-------
list of transitions
Transitions are sorted in decreasing temperature. Each transition is a
dictionary with the following keys:
- *Tcrit* : the critical temperature
- *low_vev, high_vev* : vevs for the low-T phase (the phase that the
model transitions to) and high-T phase (the phase that the model
transitions from).
- *low_phase, high_phase* : identifier keys for the low-T and high-T
phases.
- *trantype* : 1 or 2 for first or second-order transitions.
"""
transitions = []
for i in list(phases.keys()):
for j in list(phases.keys()):
if i == j:
continue
# Try going from i to j (phase1 -> phase2)
phase1, phase2 = phases[i], phases[j]
#print(phase1, "\n", phase2)
tmax = min(phase1.T[-1], phase2.T[-1])
tmin = max(phase1.T[0], phase2.T[0])
#print(tmin, tmax)
if tmin >= tmax:
# No overlap. Try for second-order.
if phase2.key in phase1.low_trans:
transitions.append(
secondOrderTrans(phase1, phase2, 'Tcrit'))
continue
def DV(T):
return V(phase1.valAt(T), T) - V(phase2.valAt(T), T)
if DV(tmin) < 0:
# phase1 is lower at tmin, no tunneling
continue
if DV(tmax) > 0:
#phase1 is higher even at tmax, no critical temperature
continue
Tcrit = optimize.brentq(DV, tmin, tmax, disp=False)
tdict = {}
tdict['Tcrit'] = Tcrit
tdict['high_vev'] = phase1.valAt(Tcrit)
tdict['high_phase'] = phase1.key
tdict['low_vev'] = phase2.valAt(Tcrit)
tdict['low_phase'] = phase2.key
tdict['trantype'] = 1
transitions.append(tdict)
if not start_high:
return sorted(transitions, key=lambda x: x['high_phase'])[::-1] # changed, before key=lambda x: x['Tcrit']
start_phase = getStartPhase(phases, V)
raise NotImplementedError("start_high=True not yet supported")
def addCritTempsForFullTransitions(phases, crit_trans, full_trans):
"""
For each transition dictionary in `full_trans`, find the corresponding
transition in `crit_trans` and add it to the dictionary for the key
`crit_trans`, or add None if no corresponding transition is found.
Notes
-----
The phases in the supercooled transitions might not be exactly
the same as the phases in the critical temperature transitions. This would
be the case, for example, if in `full_trans` the phase transitions go like
1 -> 2 -> 3, but in `crit_trans` they go like 1 -> (2 or 3).
Parameters
----------
phases : dict
crit_trans : list
full_trans : list
"""
parents_dict = {}
for i in list(phases.keys()):
parents = [i]
for tcdict in crit_trans[::-1]:
j = tcdict['high_phase']
if tcdict['low_phase'] in parents and j not in parents:
parents.append(j)
parents_dict[i] = parents
for tdict in full_trans:
low_parents = parents_dict[tdict['low_phase']]
high_parents = parents_dict[tdict['high_phase']]
common_parents = set.intersection(
set(low_parents), set(high_parents))
for p in common_parents:
# exclude the common parents
try:
k = low_parents.index(p)
low_parents = low_parents[:k]
except: pass
try:
k = high_parents.index(p)
high_parents = high_parents[:k+1]
except: pass
for tcdict in crit_trans[::-1]: # start at low-T
if tcdict['Tcrit'] < tdict['Tnuc']:
continue
if (tcdict['low_phase'] in low_parents
and tcdict['high_phase'] in high_parents):
tdict['crit_trans'] = tcdict
break
else:
tdict['crit_trans'] = None
| 40.412938 | 222 | 0.585536 | 3,100 | 0.046377 | 0 | 0 | 0 | 0 | 0 | 0 | 29,901 | 0.447332 |
13fecb8c46693f75faf20fe0071fb2ddb03a2ed2 | 3,720 | py | Python | red-scare/instance-generators/make-words.py | Sebastian-ba/DoDoBing | 6edcc18de22ad76505d2c13ac6a207a2c274cc95 | [
"MIT"
] | 3 | 2017-09-25T11:59:20.000Z | 2017-11-20T12:55:21.000Z | red-scare/instance-generators/make-words.py | ITU-2019/DoDoBing | 6edcc18de22ad76505d2c13ac6a207a2c274cc95 | [
"MIT"
] | 6 | 2017-09-25T12:04:51.000Z | 2017-11-13T07:51:40.000Z | red-scare/instance-generators/make-words.py | ITU-2019/DoDoBing | 6edcc18de22ad76505d2c13ac6a207a2c274cc95 | [
"MIT"
] | null | null | null | import sys
import random
import networkx as nx
from write_nx_graph import write_graph
uncommons = set() # Everything except the 3300 common words in SGB
f = open ('data/words.txt','r')
words = []
for line in f:
if len(line)>0 and line[0] == '*': continue
word = line.strip()[:5]
words.append(word)
if not (len(line.strip())>5 and line[5] == '*'): uncommons.add(word)
f.close()
def starredwords(word, numstars):
# given 'AWORD' returns ['*WORD', 'A*ORD',...,'AWOR*']
if numstars == 1: return [word[:i] + '*' +word[i+1:] for i in range(5)]
else: return [word[:i] + '*' + word[i+1:j] + '*' + word[j+1:] for j in range (1,5) for i in range(j)]
def _numvowels(word):
# returns the number of vowels in word
counter = 0
for c in word: counter += (c in 'aeiou')
return counter
def sorted(word):
letters = list(word)
letters.sort()
sorted = "".join(letters)
return sorted
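# Editor's sketch (not part of the original script): what the helpers above produce
# for one concrete word, which is handy when checking the adjacency rules used by
# the Words graph below.
def _demo_helpers():
    assert starredwords('rusty', 1) == ['*usty', 'r*sty', 'ru*ty', 'rus*y', 'rust*']
    assert sorted('rusty') == 'rstuy'
    assert _numvowels('rusty') == 1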
class Words(nx.Graph):
def __init__(self,L, numstars):
nx.Graph.__init__(self)
self.add_nodes_from(L)
N = dict()
for word in L:
for starred in starredwords(word,numstars):
if not starred in N:
N[starred] = set([word])
else: N[starred].add(word)
s = sorted(word)
if not s in N:
N[s] = set([word])
else:
N[s].add(word)
S = set(L)
for word in self.nodes():
for starred in starredwords(word,numstars):
for neighbour in N[starred]:
if word != neighbour:
self.add_edge(word, neighbour)
for neighbour in N[sorted(word)]:
if word != neighbour:
self.add_edge(word, neighbour)
def wordgraph(n, numstars, musthaves):
L = []
for word in words:
if word not in musthaves: L.append(word)
random.seed(0)
random.shuffle(L)
L = L[:n-len(musthaves)]
L.extend(musthaves)
return Words(L, numstars)
def write_rusties():
for n in [2000, 2500, 3000, 3500, 4000, 4500, 5000, 10000]:
for numstars in [1,2]:
G = wordgraph(n, numstars, ['begin','ender','rusty'])
name = "rusty-{0}-{1}".format(numstars, len(G))
write_graph(G, name, 'begin', 'ender', ['rusty'])
# write a small graph as well:
G = Words(words, 1)
V = set()
P = nx.all_shortest_paths(G, 'begin', 'rusty')
for L in P:
for w in L: V.add(w)
P = nx.all_shortest_paths(G, 'ender', 'rusty')
for L in P:
for w in L: V.add(w)
L = list(V)
for v in L:
V.add(v)
G = Words(V, 1)
name = "rusty-1-{0}".format(len(G))
write_graph(G, name, 'begin', 'ender', ['rusty'])
def write_commons():
for n in [20, 50, 100, 250, 500,1000,1500,2000, 2500, 3000, 3500, 4000, 4500, 5000, 10000]:
for numstars in [1,2]:
G = wordgraph(n, numstars, ['start', 'ender'])
name = "common-{0}-{1}".format(numstars,len(G))
R = [word for word in G.nodes() if word in uncommons]
write_graph(G, name, 'start', 'ender', R)
write_rusties()
write_commons()
| 36.116505 | 109 | 0.470968 | 1,177 | 0.316398 | 0 | 0 | 0 | 0 | 0 | 0 | 376 | 0.101075 |
13ff78cbd83636d6edec29d58b60fdaa0be4d91a | 7,490 | py | Python | scripts/strelka-2.9.2.centos6_x86_64/share/scoringModelTraining/somatic/bin/vcf_to_feature_csv.py | dongxuemin666/RNA-combine | 13e178aae585e16a9a8eda8151d0f34316de0475 | [
"Apache-2.0"
] | 7 | 2021-09-03T09:11:00.000Z | 2022-02-14T15:02:12.000Z | scripts/strelka-2.9.2.centos6_x86_64/share/scoringModelTraining/somatic/bin/vcf_to_feature_csv.py | dongxuemin666/RNA-combine | 13e178aae585e16a9a8eda8151d0f34316de0475 | [
"Apache-2.0"
] | null | null | null | scripts/strelka-2.9.2.centos6_x86_64/share/scoringModelTraining/somatic/bin/vcf_to_feature_csv.py | dongxuemin666/RNA-combine | 13e178aae585e16a9a8eda8151d0f34316de0475 | [
"Apache-2.0"
] | 2 | 2022-01-10T13:07:29.000Z | 2022-01-11T22:14:11.000Z | #!/usr/bin/env python2
#
# Strelka - Small Variant Caller
# Copyright (c) 2009-2018 Illumina, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
"""
Convert a Strelka somatic VCF to CSV format, annotate TP and FP given a
truth VCF and FP / ambiguous region bed files.
"""
__author__ = "Peter Krusche <[email protected]>"
import os
import sys
import pandas
scriptDir = os.path.abspath(os.path.dirname(__file__))
scriptName = os.path.basename(__file__)
workflowDir = os.path.abspath(os.path.join(scriptDir, "../lib"))
sys.path.append(workflowDir)
import evs
import evs.features
from evs.tools.bedintervaltree import BedIntervalTree
def parseArgs():
import argparse
parser = argparse.ArgumentParser(description="Converts somatic VCF to annotated CSV",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("input", help="Strelka VCF file", nargs=1)
parser.add_argument("-o", "--output", required=True,
help="Output CSV filename for training data")
parser.add_argument("--testSet", action='append', help="Chromosome (e.g. chr20) to hold out as test data (may be specified more than once; if omitted, all data will be used for training)")
parser.add_argument("--testOutput", help="Output CSV filename for test data")
parser.add_argument("--truth", help="Truth VCF file")
parser.add_argument("--fp-regions", dest="fpRegionsFile",
help="Bed file indicating regions where variants that are not true can be labeled as false positives. Outside of these regions variants will be labeled as unknown.")
parser.add_argument("--ambiguous", dest="ambiguousRegionsFiles", action='append',
help="Bed file conforming to the curium ambiguous region file format"
" (may be specified more than once)")
parser.add_argument("--features", required=True,
choices=evs.features.FeatureSet.sets.keys(),
help="Select a feature table to output.")
args = parser.parse_args()
def checkFile(filename, label) :
if not os.path.isfile(filename) :
raise Exception("Can't find input %s file: '%s'" % (label,filename))
def checkOptionalFile(filename, label) :
if filename is None : return
checkFile(filename, label)
checkOptionalFile(args.truth,"truth")
checkOptionalFile(args.fpRegionsFile,"false positive regions")
if args.ambiguousRegionsFiles is not None :
for ambiguousRegionsFile in args.ambiguousRegionsFiles :
checkFile(ambiguousRegionsFile,"ambiguous regions")
return args
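# Editor's note (illustrative only; all file names below are hypothetical): a typical
# invocation pairs the query VCF with a truth VCF and region files, for example
#   vcf_to_feature_csv.py somatic.snvs.vcf.gz --features posandalleles \
#       --truth truth.snvs.vcf.gz --fp-regions fp.bed --ambiguous ambi.bed \
#       --testSet chr20 --testOutput test.csv -o training.csv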
def main():
args = parseArgs()
fset = evs.features.FeatureSet.make(args.features)
featuretable = fset.collect(args.input[0])
featuretable["tag"] = "FP" # If no truth set is specified, label all variants as FP. Useful for normal-normal.
if args.truth:
fset2 = evs.features.FeatureSet.make("posandalleles")
truth_alleles = fset2.collect(args.truth)
truth_alleles["tag"] = "TP"
featuretable = pandas.merge(featuretable, truth_alleles, how="outer", on=["CHROM", "POS", "REF", "ALT"],
suffixes=(".query", ".truth"))
featuretable["tag.truth"].fillna("", inplace=True)
featuretable["tag.query"].fillna("", inplace=True)
featuretable.loc[(featuretable["tag.query"] == "FP") & (featuretable["tag.truth"] == "TP"), "tag"] = "TP"
featuretable.loc[(featuretable["tag.query"] == "") & (featuretable["tag.truth"] == "TP"), "tag"] = "FN"
featuretable.loc[(featuretable["tag.query"] == "FP") & (featuretable["tag.truth"] == ""), "tag"] = "FP"
to_keep = [x for x in list(featuretable) if not x.endswith(".query") and not x.endswith(".truth")]
featuretable = featuretable[to_keep]
if args.ambiguousRegionsFiles or args.fpRegionsFile:
#
# 1. Load all false positive and ambiguous region information into labeledIntervals
#
labeledIntervals = BedIntervalTree()
if args.fpRegionsFile:
labeledIntervals.addFromBed(args.fpRegionsFile, "FP")
if args.ambiguousRegionsFiles:
# can have multiple ambiguous BED files
for ambiguousRegionsFile in args.ambiguousRegionsFiles:
labeledIntervals.addFromBed(ambiguousRegionsFile, lambda xe: xe[4])
#
# 2. Resolve all interaction rules between truth sets, fp and amiguous regions to produce a final labeling
#
areFPRegionsProvided = (labeledIntervals.count("FP") > 0) or (labeledIntervals.count("fp") > 0 and args.ambiguousRegionsFiles)
def relabeller(xx):
"""
Resolve various rules regarding how variants should interact with the fp and ambiguous regions they
intersect.
Rules:
- All TP and FN calls are untouched -- even if they fall in a false positive or ambiguous region
- Otherwise...
- Any call intersecting an FP region is labeled as "FP", regardless of ambiguous region input
- Any call intersecting an ambiguous region gets a comma separated list of all ambiguous region labels
- Any call falling outside of an ambiguous or fp region will be labeled as:
                - FP if no fp regions are given and the ambiguous region files contain no false positive regions
- UNK otherwise.
"""
if xx["tag"] == "TP" or xx["tag"] == "FN":
return xx
chrom = xx["CHROM"]
start = xx["POS"]
stop = xx["POS"] + len(xx["REF"])
overlap = labeledIntervals.intersect(chrom, start, stop)
is_fp = False
is_ambi = False
classes_this_pos = set()
for o in overlap:
reason = o.value[0].upper()
classes_this_pos.add(reason)
if reason == "FP":
is_fp = True
else:
is_ambi = True
if is_fp:
xx["tag"] = "FP"
elif is_ambi:
xx["tag"] = ",".join(list(classes_this_pos))
elif not areFPRegionsProvided:
# when we don't have FP regions, unk stuff becomes FP
xx["tag"] = "FP"
else:
xx["tag"] = "UNK"
return xx
featuretable = featuretable.apply(relabeller, axis=1)
if args.testSet is not None:
if args.testOutput is not None:
featuretable[featuretable["CHROM"].isin(args.testSet)].to_csv(args.testOutput)
featuretable = featuretable[~featuretable["CHROM"].isin(args.testSet)]
featuretable.to_csv(args.output)
if __name__ == '__main__':
main()
| 41.153846 | 192 | 0.633111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,189 | 0.425768 |
cd0344b2d15b20e60fd3ab647958bb726ead940c | 2,750 | py | Python | qiskit_neko/backend_plugin.py | garrison/qiskit-neko | 50c6f0f6975425c7ff86417cedc094e984dc5d1c | [
"Apache-2.0"
] | 5 | 2022-01-11T16:07:48.000Z | 2022-02-01T22:05:34.000Z | qiskit_neko/backend_plugin.py | garrison/qiskit-neko | 50c6f0f6975425c7ff86417cedc094e984dc5d1c | [
"Apache-2.0"
] | 1 | 2022-02-03T14:10:57.000Z | 2022-02-03T14:10:57.000Z | qiskit_neko/backend_plugin.py | garrison/qiskit-neko | 50c6f0f6975425c7ff86417cedc094e984dc5d1c | [
"Apache-2.0"
] | 1 | 2022-03-07T15:06:21.000Z | 2022-03-07T15:06:21.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""Backend plugin interface."""
import abc
import logging
import stevedore
LOG = logging.getLogger(__name__)
class BackendPlugin(abc.ABC):
"""Abstract class for providing :class:`~qiskit.providers.Backend` objects to tests
This class is designed to be implemented by qiskit providers packages or any
other user that needs to provide custom backend objects to the test suite.
    In general, the authentication and initialization steps for working with
    backends from different vendors or with different simulators are all unique.
    qiskit-neko provides the backend plugin interface to offer a standard
    interface for returning backend objects while leaving the specifics of
    authentication and initialization to each provider plugin.
"""
@abc.abstractmethod
def get_backend(self, backend_selection=None):
"""Return the Backend object to run tests on.
:param str backend_selection: An optional user supplied value to select
a specific backend. The exact behavior of this option is up to
each individual plugin and should be clearly documented in the
plugin how this is used if at all. If the plugin doesn't support
a selection string a string should still be accepted and a warning
just logged. If a string is provided (and they're accepted) but
the string is invalid raising an exception is expected.
"""
pass
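# Editor's sketch (not part of qiskit-neko): a minimal plugin implementation.
# The use of qiskit-aer's AerSimulator is only an assumption for illustration;
# a real plugin would return whatever backend its provider exposes and would be
# registered under the ``qiskit_neko.backend_plugins`` entry point.
class _ExampleSimulatorPlugin(BackendPlugin):
    """Toy plugin that returns a local simulator backend."""
    def get_backend(self, backend_selection=None):
        from qiskit_aer import AerSimulator  # assumption: qiskit-aer is installed
        if backend_selection is not None:
            LOG.warning("backend_selection %r is ignored by this plugin",
                        backend_selection)
        return AerSimulator()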
class BackendPluginManager:
"""Class to manage installed backend plugins"""
def __init__(self):
self.ext_plugins = stevedore.ExtensionManager(
"qiskit_neko.backend_plugins",
invoke_on_load=True,
propagate_map_exceptions=True,
on_load_failure_callback=self.failure_hook,
)
@staticmethod
def failure_hook(_, ep, err):
"""Hook method to execute on import failure."""
LOG.error("Could not load %r: %s", ep.name, err)
raise err
def get_plugin_backends(self, backend_selection=None):
"""Return a dictionary of plugin names to backend objects."""
return {
plug.name: plug.obj.get_backend(backend_selection=backend_selection)
for plug in self.ext_plugins
}
| 36.184211 | 87 | 0.704 | 2,121 | 0.771273 | 0 | 0 | 858 | 0.312 | 0 | 0 | 1,916 | 0.696727 |
cd037892f04f2a0ec86c850579b001591ee90f88 | 1,381 | py | Python | libkol/request/clan_rumpus.py | danheath/pykol-lib | bdc9aa8dbae64ead07e7dbc36f9d6ba802f65ddc | [
"BSD-3-Clause"
] | 6 | 2019-06-11T19:25:32.000Z | 2022-01-21T17:05:01.000Z | libkol/request/clan_rumpus.py | danheath/pykol-lib | bdc9aa8dbae64ead07e7dbc36f9d6ba802f65ddc | [
"BSD-3-Clause"
] | 8 | 2019-06-17T11:41:14.000Z | 2019-08-07T17:28:50.000Z | libkol/request/clan_rumpus.py | python-kol/pykollib | bdc9aa8dbae64ead07e7dbc36f9d6ba802f65ddc | [
"BSD-3-Clause"
] | 9 | 2019-06-09T22:23:06.000Z | 2021-07-10T00:49:00.000Z | import re
from enum import Enum
from typing import List
import libkol
from .request import Request
class Furniture(Enum):
Nail = (1, 0)
GirlsCalendar = (1, 1)
BoyCalendar = (1, 2)
Painting = (1, 3)
MeatOrchid = (1, 4)
Bookshelf = (2, 0)
ArcaneTomes = (2, 1)
SportsMemorabilia = (2, 2)
SelfHelpBooks = (2, 3)
Outlet = (3, 0)
SodaMachine = (3, 1)
Jukebox = (3, 2)
MrKlaw = (3, 3)
Endtable = (4, 0)
Radio = (4, 1)
MeatBush = (4, 2)
InspirationalCalendar = (4, 3)
Rug = (5, 0)
WrestlingMat = (5, 1)
TanningBed = (5, 2)
ComfySofa = (5, 3)
Corner = (9, 0)
HoboFlex = (9, 1)
SnackMachine = (9, 2)
MeatTree = (9, 3)
@classmethod
def has_value(cls, value):
return any(value == item.value for item in cls)
furniture_pattern = re.compile(r"rump([0-9])_([0-9])\.gif")
class clan_rumpus(Request[List[Furniture]]):
def __init__(self, session: "libkol.Session"):
super().__init__(session)
self.request = session.request("clan_rumpus.php")
@staticmethod
async def parser(content: str, **kwargs) -> List[Furniture]:
return [
Furniture(coords)
for coords in (
                (int(f.group(1)), int(f.group(2))) for f in furniture_pattern.finditer(content)
)
if Furniture.has_value(coords)
]
| 23.40678 | 85 | 0.569153 | 1,212 | 0.877625 | 0 | 0 | 405 | 0.293266 | 288 | 0.208545 | 60 | 0.043447 |
cd03952161db20fd79bc08d5412273256911f00a | 2,155 | py | Python | utils/utils.py | ZhenqiSong/OCR_Pytorch | df4e8c53353b6c515509241d4c9af3b153224a10 | [
"MIT"
] | null | null | null | utils/utils.py | ZhenqiSong/OCR_Pytorch | df4e8c53353b6c515509241d4c9af3b153224a10 | [
"MIT"
] | null | null | null | utils/utils.py | ZhenqiSong/OCR_Pytorch | df4e8c53353b6c515509241d4c9af3b153224a10 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# __author__:Song Zhenqi
# 2021-01-20
import os
import sys
import yaml
import logging
import functools
logger_initialized = set()
def get_img_list(img_file):
img_lists = []
if img_file is None or not os.path.exists(img_file):
raise FileNotFoundError("file path: {} is not exist".format(img_file))
if os.path.isfile(img_file):
img_lists.append(img_file)
elif os.path.isdir(img_file):
for file_name in os.listdir(img_file):
file_path = os.path.join(img_file, file_name)
if os.path.isfile(file_path):
img_lists.append(file_name)
if len(img_lists) == 0:
raise Exception('not find any img file in {}'.format(img_file))
return img_lists
def get_config(file):
"""
    Read a yaml configuration file and get the network configuration.
    :param file: configuration file; only yaml/yml format is supported
    :return: configuration dict
"""
_, ext = os.path.splitext(file)
    assert ext in ['.yaml', '.yml'], "Only yaml/yml files are supported"
config = yaml.load(open(file, 'rb'), Loader=yaml.Loader)
return config
@functools.lru_cache()
def get_logger(name: str = 'root', file: str = None, level=logging.INFO) -> logging.Logger:
"""
    Initialize the logger and configure the logging settings.
    :param name: logger name
    :param file: local file to save the log to
    :param level: logging level to display
    :return: the Logger object to use
"""
logger = logging.getLogger(name)
if name in logger_initialized:
return logger
for logger_name in logger_initialized:
if name.startswith(logger_name):
return logger
    # Set the log message format
formatter = logging.Formatter('[%(asctime)s] %(name)s %(levelname)s: %(message)s',
datefmt="%Y/%m/%d %H:%M:%S")
    # Set up the stream handler
stream_handler = logging.StreamHandler(stream=sys.stdout)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
    # Set up the log file handler
if file is not None:
log_file_folder = os.path.split(file)[0]
os.makedirs(log_file_folder, exist_ok=True)
file_handle = logging.FileHandler(file, 'a')
file_handle.setFormatter(formatter)
logger.addHandler(file_handle)
    logger.setLevel(level)
    logger_initialized.add(name)
    return logger
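# Editor's sketch (not part of the original module): typical use of the helpers
# above; the file paths are placeholders.
def _demo_usage():
    config = get_config('configs/det.yml')
    logger = get_logger(name='ocr', file='output/train.log')
    logger.info('loaded config with %d top-level keys', len(config))
    return config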
| 27.278481 | 91 | 0.645476 | 0 | 0 | 0 | 0 | 1,226 | 0.523708 | 0 | 0 | 669 | 0.285775 |
cd0601891b2dad5746ac7c08ac9655b6e8d13ab9 | 2,130 | py | Python | monitoring/uss_qualifier/webapp/tasks.py | interuss/InterUSS-Platform | 099abaa1159c4c143f8f1fde6b88956c86608281 | [
"Apache-2.0"
] | null | null | null | monitoring/uss_qualifier/webapp/tasks.py | interuss/InterUSS-Platform | 099abaa1159c4c143f8f1fde6b88956c86608281 | [
"Apache-2.0"
] | 1 | 2021-11-29T21:53:39.000Z | 2021-11-29T21:53:39.000Z | monitoring/uss_qualifier/webapp/tasks.py | interuss/InterUSS-Platform | 099abaa1159c4c143f8f1fde6b88956c86608281 | [
"Apache-2.0"
] | null | null | null | from monitoring.uss_qualifier.test_data import test_report
from monitoring.uss_qualifier.utils import USSQualifierTestConfiguration
from monitoring.uss_qualifier.main import uss_test_executor
from monitoring.uss_qualifier.rid.simulator import flight_state_from_kml
from monitoring.uss_qualifier.rid.utils import FullFlightRecord
import json
from typing import List
import redis
import rq
import uuid
from . import resources
from monitoring.monitorlib.typing import ImplicitDict
def get_rq_job(job_id):
try:
rq_job = resources.qualifier_queue.fetch_job(job_id)
except (redis.exceptions.RedisError, rq.exceptions.NoSuchJobError):
return None
return rq_job
def remove_rq_job(job_id):
"""Removes a job from the queue."""
try:
rq_job = resources.qualifier_queue.remove(job_id)
except (redis.exceptions.RedisError, rq.exceptions.NoSuchJobError):
return None
return rq_job
def call_test_executor(
user_config_json: str,
auth_spec: str,
flight_record_jsons: List[str],
testruns_id,
debug=False,
scd_test_definitions_path=None,
):
config_json = json.loads(user_config_json)
config: USSQualifierTestConfiguration = ImplicitDict.parse(
config_json, USSQualifierTestConfiguration
)
flight_records: List[FullFlightRecord] = [
ImplicitDict.parse(json.loads(j), FullFlightRecord) for j in flight_record_jsons
]
if debug:
report = json.dumps(test_report.test_data)
else:
report = json.dumps(
uss_test_executor(
config, auth_spec, flight_records, scd_test_definitions_path
)
)
resources.redis_conn.hset(resources.REDIS_KEY_TEST_RUNS, testruns_id, report)
return report
def call_kml_processor(kml_content, output_path):
flight_states = flight_state_from_kml.main(
kml_content, output_path, from_string=True
)
resources.redis_conn.hset(
resources.REDIS_KEY_UPLOADED_KMLS, str(uuid.uuid4()), json.dumps(flight_states)
)
return flight_states
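# Editorial usage sketch (assumes a configured `resources.qualifier_queue`; the payload
# values are placeholders) showing how these helpers fit together with RQ:
#
#     job = resources.qualifier_queue.enqueue(
#         call_test_executor, user_config_json, auth_spec, flight_record_jsons, testruns_id
#     )
#     pending = get_rq_job(job.get_id())   # poll the job later
#     remove_rq_job(job.get_id())          # or drop it from the queue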
| 30.869565 | 88 | 0.746948 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.016432 |
cd06e20fb3a5b8f7301bbddc6604a232ac3d8294 | 11,853 | py | Python | grenades_services/modules/basket.py | Parveen3300/Reans | 6dfce046b01099284a8c945a04600ed83e5099a4 | [
"Apache-2.0"
] | null | null | null | grenades_services/modules/basket.py | Parveen3300/Reans | 6dfce046b01099284a8c945a04600ed83e5099a4 | [
"Apache-2.0"
] | null | null | null | grenades_services/modules/basket.py | Parveen3300/Reans | 6dfce046b01099284a8c945a04600ed83e5099a4 | [
"Apache-2.0"
] | null | null | null | """
BasketManagementRelated modules
"""
# import basket models
from basket.models import Basket
from basket.models import BasketProductLine
# import configuration models
from grenades_services.all_configuration_data import get_currency_instance
from grenades_services.all_configuration_data import get_customer_instance_from_request_user
from grenades_services.all_configuration_data import product_price_calculator
# import home modules
from grenades_services.modules.home import Home
# import serializers modules
from grenades_services.separate_serializers.basket_serializers import \
BasketProductSerializer
class UpdateProductsBasket:
"""
UpdateProductsBasket
"""
def __init__(self, **kwargs):
self.basket_data = kwargs
self._request = kwargs.get('request')
self.basket_id = None
self.customer_instance = None
self.filter_query_data = kwargs.get(
'filter_query_data', {'status': 'Open'})
@staticmethod
def _use_common_module(filter_input_data):
"""
        '_use_common_module' builds a Home instance from the given request filter data.
        The Home class wraps the shared filtering logic used by the product basket modules.
"""
return Home(**filter_input_data)
@staticmethod
def calculate_offer_value(product_offer_instance, product_price):
"""
        Calculate the product price after applying the given offer.
        product_offer_instance: provides the offer-related fields.
        Two offer price types are handled: RUPPPEES and PERCENTAGE.
"""
if product_offer_instance.offer_price_type == 'RUPPPEES':
if product_price > product_offer_instance.value:
return product_price - product_offer_instance.value
return product_price
if product_offer_instance.offer_price_type == 'PERCENTAGE':
if product_price > product_offer_instance.value:
return (product_offer_instance.value * product_price) / 100
return product_price
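    # Editorial worked example (hypothetical numbers) for calculate_offer_value:
    #   RUPPPEES offer, value=100, price=1000   -> returns 1000 - 100 = 900
    #   PERCENTAGE offer, value=10, price=1000  -> returns (10 * 1000) / 100 = 100
    #   When the price does not exceed the offer value, the original price is returned.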
def get_basket_instance(self, _filter_query_data=None):
"""
        This 'get_basket_instance' method returns the basket instance for the
        authenticated user and/or the session basket id.
"""
try:
print(self.filter_query_data)
return Basket.objects.get(**_filter_query_data) \
if _filter_query_data else Basket.objects.get(**self.filter_query_data)
except Exception as e:
print('Basket.DoesNotExist.Error')
print(e)
return None
def collect_basket_product_values(self):
"""collect_basket_product_values
        This 'collect_basket_product_values' method collects the product, category and
        payable-amount values needed to create a basket entry for the current customer/session.
"""
home_instance = self._use_common_module(dict(
product_get_data={
'product_alias_name': self.basket_data['product_alias_name']
}
)
)
product_instance = home_instance.get_product_instance()
if product_instance:
home_instance = self._use_common_module(
dict(filter_input_data={'mapped_products__id__in': [product_instance.id]}))
category_product_mapping_instance = \
home_instance.category_product_mapping_instance()
home_instance = self._use_common_module(
dict(filter_input_data={
'included_products__id__in': [product_instance.id],
'offer_type': 'offer'
})
)
product_offer_instance = home_instance.offer_products()
payable_amount = self.calculate_offer_value(
product_offer_instance,
product_instance.price) if product_offer_instance else product_instance.price
return (product_instance,
category_product_mapping_instance,
payable_amount)
@staticmethod
def create_basket_product_line(basket_create_data):
"""
        This 'create_basket_product_line' method creates a basket product line entry.
"""
create_basket_line = BasketProductLine.objects.create(
**basket_create_data)
return True if create_basket_line else False
def collect_basket_details(self, basket_instance):
"""
This 'collect_basket_details' method collect the basket common code details
"""
product_instance, category_product_mapping_instance, payable_amount = \
self.collect_basket_product_values()
return {
'basket': basket_instance,
'line_reference': str(product_instance.id),
'product': product_instance,
'category': category_product_mapping_instance.last(
).category if category_product_mapping_instance else None,
'quantity': self.basket_data.get('quantity', 1),
'price_currency': get_currency_instance(),
'price_excl_tax': None,
'price_incl_tax': None,
'payable_amount': payable_amount
}
def add_new_basket(self):
"""
This 'add_new_basket' method used to create a fresh basket for a customer or user
"""
if self.customer_instance:
self.filter_query_data['owner'] = self.customer_instance
create_basket = Basket.objects.create(**self.filter_query_data)
print("63546735435463543564", create_basket)
if create_basket:
if self.create_basket_product_line(self.collect_basket_details(create_basket)):
self._request.session['basket_id'] = create_basket.id
return True
return False
def update_product_basket(self):
"""
This 'update_product_basket' method used to update the product in the basket
"""
if self.basket_id:
self.filter_query_data['id'] = self.basket_id
if self.customer_instance:
self.filter_query_data['owner'] = self.customer_instance
basket_instance = self.get_basket_instance()
if basket_instance:
if self.create_basket_product_line(self.collect_basket_details(
basket_instance)):
return True
else:
return False
def add_to_basket(self):
"""
This 'add_to_basket' method used to add the product in the basket
"""
self.customer_instance = get_customer_instance_from_request_user(
self._request.user)
if 'basket_id' in self._request.session.keys():
self.basket_id = self._request.session['basket_id']
return self.update_product_basket()
else:
return self.add_new_basket()
class DisplayProductsBasket(UpdateProductsBasket):
"""
DisplayProductsBasket
return: {
'products_description': {
'id': 14,
'products_list': [],
'line_reference': '2',
'quantity': 1,
'price_currency': 'INR',
'price_excl_tax': None,
'price_incl_tax': None,
'payable_amount': '1000.00',
'date_created': '2021-11-01T10:29:50.091484Z',
'date_updated': '2021-11-01T10:29:50.091502Z',
'basket': 5,
'product': 2,
'category': 5,
'collection': None
},
'product_price_details': {'total_item': 0},
'random_products_list': <QuerySet [<Product: Instruments>]>
}
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._request = kwargs.get('request')
self.customer_instance = None
self.basket_id = None
self.products_description_data = []
self.product_price_details = {}
self.filter_data = {'status': 'Open'}
self.estimate_tax = 0
self.offer_name = '-'
self.coupon_name = '-'
@staticmethod
def get_basket_product_lines(filter_query_data=None):
"""get_basket_product_lines
This 'get_basket_product_lines' method is used to get the all instance of
products of basket
"""
_product_line_instance = BasketProductLine.objects.filter(
**filter_query_data)
if _product_line_instance:
return _product_line_instance
def basket_product_description(self):
"""basket_product_description
This 'basket_product_description' method used to get the all product description with
all products details from baskets
"""
if self.basket_id:
self.filter_data['id'] = self.basket_id
if self.customer_instance:
self.filter_data['owner'] = self.customer_instance
basket_instance = self.get_basket_instance(self.filter_data)
if basket_instance:
product_line_last_obj = self.get_basket_product_lines(
{'basket': basket_instance}).last()
self.products_description_data = BasketProductSerializer(
product_line_last_obj).data
def create_product_order_summary_dict(self, order_summary_dict):
"""
This 'create_product_order_summary_dict' method used to create dict for product order summary
total_price, coupon_price, offer_price
"""
self.product_price_details['total'] = order_summary_dict['total_price']
self.product_price_details['sub_total'] = order_summary_dict['total_price']
self.product_price_details['estimate_tax'] = self.estimate_tax
self.product_price_details['coupon_name'] = self.coupon_name
self.product_price_details['coupon_price'] = order_summary_dict['coupon_price']
self.product_price_details['offer_name'] = self.offer_name
self.product_price_details['offer_price'] = order_summary_dict['offer_price']
def order_product_price_details(self):
"""order_product_price_details
This 'order_product_price_details' method used to get the all product order summary with price calculation
and manage the all coupon and offers
"""
self.product_price_details['total_item'] = len(
self.products_description_data['products_list'])
for _products_details in self.products_description_data['products_list']:
order_summary_dict = product_price_calculator(_products_details,
self.coupon_details,
self.offer_details)
# create product order summary
# return total_price, coupon_price, offer_price
self.create_product_order_summary_dict(order_summary_dict)
def display_products(self):
"""
This 'display_products' method used to get the all session and customer related
basket products for help on display
"""
if 'basket_id' in self._request.session.keys():
self.basket_id = self._request.session.get('basket_id')
else:
self.basket_id = None
self.customer_instance = get_customer_instance_from_request_user(
self._request.user)
self.basket_product_description()
self.order_product_price_details()
home_instance = Home()
random_products_list = home_instance.random_products_list()
return {
'products_description': self.products_description_data,
'product_price_details': self.product_price_details,
'random_products_list': random_products_list if random_products_list else []
}
| 40.731959 | 114 | 0.651312 | 11,229 | 0.947355 | 0 | 0 | 1,883 | 0.158863 | 0 | 0 | 3,884 | 0.327681 |
cd08e29c15d2756e6bc4a870585c434ad2c07d7a | 2,935 | py | Python | plots.py | klowrey/speed_arch | edb002b6d57915fa5e2024b36eb66acf30a7130a | [
"MIT"
] | null | null | null | plots.py | klowrey/speed_arch | edb002b6d57915fa5e2024b36eb66acf30a7130a | [
"MIT"
] | null | null | null | plots.py | klowrey/speed_arch | edb002b6d57915fa5e2024b36eb66acf30a7130a | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
acc = np.array([7.95549917, 7.46641684, 8.16141701, 8.80025005, 7.29208231,
7.73391724, 8.16333294, 9.02033329, 7.60566664, 7.88175011,
7.77574968, 8.79116631, 8.24524975, 8.98549938, 7.3717494 ,
7.32324982, 8.14583302, 8.53608322, 9.30125046, 8.53458309,
8.01708317, 8.36941624, 8.23241711, 8.93550014, 8.73683262,
8.05008316, 8.68758297, 8.59083271, 9.0852499 , 9.07924938,
7.3904171 , 8.82283497, 9.41650009, 8.45791626, 8.04416656,
7.70391607, 9.05191612, 7.78883314, 8.56858349, 9.07366657,
8.77991581, 7.94008255, 8.1746664 , 8.28074932, 7.91550064,
7.4872508 , 8.59158325, 9.33758259, 8.21591663, 8.64350033,
9.00899982, 9.26983356, 8.7885828 , 9.43066692, 9.09299946,
8.55266666, 8.73725033, 7.50575018, 7.99300003, 8.16366673,
8.97633266, 8.19683361, 7.71091652, 8.65974998, 8.97108364,
8.03375053, 8.99700069, 9.18599987, 8.26491737, 8.64508343,
8.00825024, 7.80483294, 7.45008326, 8.23791695, 8.90425014,
9.47108269, 8.0963335 , 8.88658333, 7.99116659, 7.48541689,
8.23633289, 8.61583424, 7.75775003, 8.10883331, 8.57058334,
7.72616577, 7.29199982, 8.26725006, 7.80841637, 8.8257494 ,
9.35824871, 8.85208321, 7.50433302, 8.03266716, 8.77825069,
8.94516659, 8.56558323, 8.64266682, 8.70541668, 8.4321661 ])
spd = np.array([-15.733922 , -17.69332123, -15.09789562, -14.98722076,
-19.22259712, -20.7837429 , -19.90324211, -13.48655987,
-13.42676544, -10.76375103, -18.15335083, -9.28313065,
-11.35249805, -12.09126663, -13.63445187, -17.17600822,
-11.39536953, -13.01688385, -14.5902586 , -9.40825558,
-11.72452641, -9.74875546, -15.47906494, -17.58286476,
-13.81764889, -15.5894928 , -9.33745289, -11.58790493,
-12.6633606 , -12.95300007, -6.5169816 , -15.54349899,
-9.18311691, -11.59814739, -11.74293232, -18.68121147,
-12.44590282, -13.20860291, -8.75187683, -23.9044342 ,
-10.90840054, -11.39770985, -14.83057499, -13.2543335 ,
-13.18600559, -13.31662369, -12.91320515, -9.9495573 ,
-10.87206936, -11.35480595, -13.06026745, -10.52530384,
-13.57276917, -13.95710754, -9.0244627 , -12.21132755,
-9.00012493, -9.07794476, -12.50325108, -9.44294643,
-12.86182499, -8.95974827, -10.34585476, -16.70100594,
-7.63287163, -11.60797215, -11.73308086, -10.89833736,
-11.40105438, -8.59499645, -11.1452837 , -11.61797333,
-9.25040531, -9.30110741, -8.68466759, -10.68533611,
-11.68466282, -10.05351353, -11.61765003, -9.72268772,
-9.05587578, -10.88561535, -11.85619068, -12.46191692,
-8.43530369, -6.79801893, -9.91088772, -9.89115238,
-16.34910393, -12.32227421, -13.36759472, -17.33267021,
-10.66337585, -10.35019398, -11.29328632, -9.45415211,
-10.61021137, -14.06766415, -8.31783295, -11.77228069])
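# Editorial sketch using the pyplot import above; the axis labels are assumptions about
# what the acc/spd series represent.
plt.scatter(spd, acc)
plt.xlabel('spd')
plt.ylabel('acc')
plt.title('acc vs. spd')
plt.show()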
| 56.442308 | 79 | 0.65247 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
cd08e9d07146abb0712c59ab83d9a3d247ba38c2 | 27 | py | Python | tool/klint/bpf/__init__.py | kylerky/klint | 77be216ec3f4315a835b7bcdaef1b66ed3144603 | [
"MIT"
] | 2 | 2022-03-08T16:10:27.000Z | 2022-03-11T14:14:04.000Z | tool/klint/bpf/__init__.py | kylerky/klint | 77be216ec3f4315a835b7bcdaef1b66ed3144603 | [
"MIT"
] | null | null | null | tool/klint/bpf/__init__.py | kylerky/klint | 77be216ec3f4315a835b7bcdaef1b66ed3144603 | [
"MIT"
] | 1 | 2022-03-24T09:27:41.000Z | 2022-03-24T09:27:41.000Z | """
BPF-related stuff.
"""
| 6.75 | 18 | 0.555556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.962963 |
cd094ee5dcfd76a9bf766f06eb8cdcb0b8027094 | 4,485 | py | Python | tests/test_geojson.py | geographika/mappyfile-geojson | 9525bb840ae243a0f5072730f6432bf98dcacbe9 | [
"MIT"
] | 8 | 2018-08-08T06:47:38.000Z | 2022-01-30T13:25:35.000Z | tests/test_geojson.py | geographika/mappyfile-geojson | 9525bb840ae243a0f5072730f6432bf98dcacbe9 | [
"MIT"
] | 4 | 2020-09-24T05:28:19.000Z | 2022-03-29T22:18:13.000Z | tests/test_geojson.py | geographika/mappyfile-geojson | 9525bb840ae243a0f5072730f6432bf98dcacbe9 | [
"MIT"
] | 1 | 2018-08-08T06:47:42.000Z | 2018-08-08T06:47:42.000Z | import os
import json
import geojson
import mappyfile_geojson
import mappyfile
import pytest
def get_geojson(fn):
tests = os.path.dirname(os.path.realpath(__file__))
fn = os.path.join(tests, fn)
with open(fn) as f:
gj = geojson.load(f)
return gj
def test_point():
gj = get_geojson("Point.json")
layer = mappyfile_geojson.convert(gj)
s = mappyfile.dumps(layer)
print(s)
assert s == """LAYER
EXTENT 102 0.5 102 0.5
STATUS ON
TYPE POINT
PROCESSING "ITEMS=prop0"
FEATURE
POINTS
102.0 0.5
END
ITEMS "value0"
END
END"""
def test_pointZ():
"""
Z-values are simply removed as they are not supported by inline
MapServer Features
"""
gj = get_geojson("PointZ.json")
layer = mappyfile_geojson.convert(gj)
s = mappyfile.dumps(layer)
print(s)
assert s == """LAYER
EXTENT 102 0.5 102 0.5
STATUS ON
TYPE POINT
PROCESSING "ITEMS=prop0"
FEATURE
POINTS
102.0 0.5
END
ITEMS "value0"
END
END"""
def test_linestring():
gj = get_geojson("LineString.json")
layer = mappyfile_geojson.convert(gj)
s = mappyfile.dumps(layer)
print(s)
assert s == """LAYER
EXTENT 102 0 105 1
STATUS ON
TYPE LINE
PROCESSING "ITEMS=prop0,prop1"
FEATURE
POINTS
102.0 0.0
103.0 1.0
104.0 0.0
105.0 1.0
END
ITEMS "value0;0.0"
END
END"""
def test_polygon():
gj = get_geojson("Polygon.json")
layer = mappyfile_geojson.convert(gj)
print(json.dumps(layer, indent=4))
s = mappyfile.dumps(layer)
print(s)
assert s == """LAYER
EXTENT 100 0 101 1
STATUS ON
TYPE POLYGON
PROCESSING "ITEMS=prop0,prop1"
FEATURE
POINTS
100.0 0.0
101.0 0.0
101.0 1.0
100.0 1.0
100.0 0.0
END
ITEMS "value0;value1"
END
END"""
def test_multipoint():
gj = get_geojson("MultiPoint.json")
layer = mappyfile_geojson.convert(gj)
s = mappyfile.dumps(layer)
print(s)
assert s == """LAYER
EXTENT 10 10 40 40
STATUS ON
TYPE POINT
PROCESSING "ITEMS=prop0"
FEATURE
POINTS
10.0 40.0
40.0 30.0
20.0 20.0
30.0 10.0
END
ITEMS "value0"
END
END"""
def test_multilinestring():
gj = get_geojson("MultiLineString.json")
layer = mappyfile_geojson.convert(gj)
s = mappyfile.dumps(layer)
print(s)
assert s == """LAYER
EXTENT 10 10 40 40
STATUS ON
TYPE LINE
PROCESSING "ITEMS=prop0,prop1"
FEATURE
POINTS
10.0 10.0
20.0 20.0
10.0 40.0
END
POINTS
40.0 40.0
30.0 30.0
40.0 20.0
30.0 10.0
END
ITEMS "value0;0.0"
END
END"""
def test_multipolygon():
gj = get_geojson("MultiPolygon.json")
layer = mappyfile_geojson.convert(gj)
s = mappyfile.dumps(layer)
print(s)
assert s == """LAYER
EXTENT 5 5 45 40
STATUS ON
TYPE POLYGON
PROCESSING "ITEMS=prop0,prop1"
FEATURE
POINTS
30.0 20.0
45.0 40.0
10.0 40.0
30.0 20.0
END
POINTS
15.0 5.0
40.0 10.0
10.0 20.0
5.0 10.0
15.0 5.0
END
ITEMS "value0;value1"
END
END"""
def test_featurecollection():
gj = get_geojson("FeatureCollection.json")
layer = mappyfile_geojson.convert(gj)
print(json.dumps(layer, indent=4))
s = mappyfile.dumps(layer)
print(s)
assert s == """LAYER
EXTENT 102 0 105 1
STATUS ON
TYPE LINE
PROCESSING "ITEMS=prop0"
FEATURE
POINTS
102.0 0.0
103.0 1.0
104.0 0.0
105.0 1.0
END
ITEMS "value0"
END
FEATURE
POINTS
102.0 0.0
103.0 1.0
104.0 0.0
105.0 1.0
END
ITEMS "value1"
END
END"""
def run_tests():
pytest.main(["tests/test_geojson.py", "-vv"])
if __name__ == '__main__':
# test_multipolygon()
run_tests()
print("Done!")
| 19.933333 | 68 | 0.515942 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,733 | 0.609365 |
cd0b619e6db23ae007998ba9f088e9c319778c9d | 517 | py | Python | 230.py | BYOUINZAKA/LeetCodeNotes | 48e1b4522c1f769eeec4944cfbd57abf1281d09a | [
"MIT"
] | null | null | null | 230.py | BYOUINZAKA/LeetCodeNotes | 48e1b4522c1f769eeec4944cfbd57abf1281d09a | [
"MIT"
] | null | null | null | 230.py | BYOUINZAKA/LeetCodeNotes | 48e1b4522c1f769eeec4944cfbd57abf1281d09a | [
"MIT"
] | null | null | null | '''
@Author: Hata
@Date: 2020-05-24 15:30:19
@LastEditors: Hata
@LastEditTime: 2020-05-24 15:32:04
@FilePath: \LeetCode\230.py
@Description: https://leetcode-cn.com/problems/kth-smallest-element-in-a-bst/
'''
class Solution:
def kthSmallest(self, root, k):
def gen(r):
if r is not None:
yield from gen(r.left)
yield r.val
yield from gen(r.right)
it = gen(root)
for _ in range(k):
ans = next(it)
return ans
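# Editorial usage sketch. TreeNode is normally supplied by LeetCode; a minimal stand-in
# is defined here only so the snippet runs on its own.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
if __name__ == '__main__':
    # BST:   3
    #       / \
    #      1   4
    #       \
    #        2
    root = TreeNode(3, TreeNode(1, None, TreeNode(2)), TreeNode(4))
    print(Solution().kthSmallest(root, 1))  # -> 1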
| 22.478261 | 77 | 0.558994 | 305 | 0.589942 | 285 | 0.551257 | 0 | 0 | 0 | 0 | 208 | 0.402321 |
cd0c0c186a507173da38fb9c91812fd94be9043a | 3,430 | py | Python | Scripts/TestParsers/PyUnittestTestParser.py | davidbrownell/v3-Common_Environment | 8f42f256e573cbd83cbf9813db9958025ddf12f2 | [
"BSL-1.0"
] | null | null | null | Scripts/TestParsers/PyUnittestTestParser.py | davidbrownell/v3-Common_Environment | 8f42f256e573cbd83cbf9813db9958025ddf12f2 | [
"BSL-1.0"
] | 1 | 2018-06-08T06:45:16.000Z | 2018-06-08T06:45:16.000Z | Scripts/TestParsers/PyUnittestTestParser.py | davidbrownell/v3-Common_Environment | 8f42f256e573cbd83cbf9813db9958025ddf12f2 | [
"BSL-1.0"
] | 1 | 2018-06-08T04:15:17.000Z | 2018-06-08T04:15:17.000Z | # ----------------------------------------------------------------------
# |
# | PythonUnittestTestParser.py
# |
# | David Brownell <[email protected]>
# | 2018-05-22 07:59:46
# |
# ----------------------------------------------------------------------
# |
# | Copyright David Brownell 2018-22.
# | Distributed under the Boost Software License, Version 1.0.
# | (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# |
# ----------------------------------------------------------------------
"""Contains the TestParser object"""
import os
import re
import CommonEnvironment
from CommonEnvironment.Interface import staticderived, override, DerivedProperty
from CommonEnvironment.TestParserImpl import TestParserImpl
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
@staticderived
class TestParser(TestParserImpl):
"""Parses content produced by Python's unittest library"""
# ----------------------------------------------------------------------
# | Public Properties
Name = DerivedProperty("PyUnittest")
Description = DerivedProperty("Parses Python unittest output.")
# ----------------------------------------------------------------------
# | Public Methods
@staticmethod
@override
def IsSupportedCompiler(compiler):
# Supports any compiler that supports python; use this file as a test subject
return compiler.IsSupported(_script_fullpath if os.path.splitext(_script_name)[1] == ".py" else "{}.py".format(os.path.splitext(_script_fullpath)[0]))
# ----------------------------------------------------------------------
_IsSupportedTestItem_imports = [
re.compile("^\s*import unittest"),
re.compile("^\s*from unittest import"),
]
@classmethod
@override
def IsSupportedTestItem(cls, item):
# Use this parser for any python file that imports 'unittest'
assert os.path.isfile(item), item
with open(item) as f:
for line in f.readlines():
for regex in cls._IsSupportedTestItem_imports:
if regex.search(line):
return True
        return False
# ----------------------------------------------------------------------
_Parse_failed = re.compile(r"^FAILED", re.DOTALL | re.MULTILINE)
_Parse_ok = re.compile(r"^OK\s*", re.DOTALL | re.MULTILINE)
@classmethod
@override
def Parse(cls, test_data):
if cls._Parse_failed.search(test_data):
return -1
if cls._Parse_ok.search(test_data):
return 0
return 1
# ----------------------------------------------------------------------
@classmethod
@override
def CreateInvokeCommandLine(cls, context, debug_on_error):
command_line = super(TestParser, cls).CreateInvokeCommandLine(context, debug_on_error)
return 'python "{}"'.format(command_line)
| 38.539326 | 159 | 0.473178 | 2,285 | 0.666181 | 0 | 0 | 2,301 | 0.670845 | 0 | 0 | 1,536 | 0.447813 |
cd0c8d9af792a61f23cb21cb4b226023ec5c2f1f | 7,116 | py | Python | fairseq/models/transformer_xlm_iwslt_decoder.py | jm-glowienke/fairseq | ca45353322f92776e34a7308bf3fab75af9c1d50 | [
"MIT"
] | null | null | null | fairseq/models/transformer_xlm_iwslt_decoder.py | jm-glowienke/fairseq | ca45353322f92776e34a7308bf3fab75af9c1d50 | [
"MIT"
] | null | null | null | fairseq/models/transformer_xlm_iwslt_decoder.py | jm-glowienke/fairseq | ca45353322f92776e34a7308bf3fab75af9c1d50 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from typing import Any, Dict
from fairseq import checkpoint_utils
from fairseq.data.legacy.masked_lm_dictionary import MaskedLMDictionary
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import (
TransformerDecoder,
TransformerEncoder,
TransformerModel,
base_architecture as transformer_base_architecture,
)
@register_model("transformer_xlm_iwslt_decoder")
class TransformerFromPretrainedXLMModel(TransformerModel):
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
TransformerModel.add_args(parser)
parser.add_argument(
"--pretrained-xlm-checkpoint",
type=str,
metavar="STR",
help="XLM model to use for initializing transformer encoder "
"and/or decoder",
)
parser.add_argument(
"--init-encoder-only",
action="store_true",
help="if set, don't load the XLM weights and embeddings into "
"decoder",
)
parser.add_argument(
"--init-decoder-only",
action="store_true",
help="if set, don't load the XLM weights and embeddings into "
"encoder",
)
@classmethod
def build_model(self, args, task, cls_dictionary=MaskedLMDictionary):
assert hasattr(args, "pretrained_xlm_checkpoint"), (
"You must specify a path for --pretrained-xlm-checkpoint to use "
"--arch transformer_from_pretrained_xlm"
)
assert isinstance(task.source_dictionary,
cls_dictionary) and isinstance(
task.target_dictionary, cls_dictionary
), (
"You should use a MaskedLMDictionary when using --arch "
"transformer_from_pretrained_xlm because the pretrained XLM model "
"was trained using data binarized with MaskedLMDictionary. "
"For translation, you may want to use --task "
"translation_from_pretrained_xlm"
)
assert not (
getattr(args, "init_encoder_only", False)
and getattr(args, "init_decoder_only", False)
), "Only one of --init-encoder-only and --init-decoder-only can be set."
return super().build_model(args, task)
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return TransformerEncoderFromPretrainedXLM(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return TransformerDecoder(
args,
tgt_dict,
embed_tokens,
no_encoder_attn=getattr(args, "no_cross_attention", False),
)
def upgrade_state_dict_with_xlm_weights(
state_dict: Dict[str, Any], pretrained_xlm_checkpoint: str
) -> Dict[str, Any]:
"""
Load XLM weights into a Transformer encoder or decoder model.
Args:
state_dict: state dict for either TransformerEncoder or
TransformerDecoder
pretrained_xlm_checkpoint: checkpoint to load XLM weights from
Raises:
AssertionError: If architecture (num layers, attention heads, etc.)
does not match between the current Transformer encoder or
decoder and the pretrained_xlm_checkpoint
"""
if not os.path.exists(pretrained_xlm_checkpoint):
raise IOError(
"Model file not found: {}".format(pretrained_xlm_checkpoint))
state = checkpoint_utils.load_checkpoint_to_cpu(pretrained_xlm_checkpoint)
xlm_state_dict = state["model"]
for key in xlm_state_dict.keys():
for search_key in ["embed_tokens", "embed_positions", "layers"]:
if search_key in key:
subkey = key[key.find(search_key):]
if "in_proj_weight" in subkey or \
"in_proj_bias" in subkey:
continue
else:
assert subkey in state_dict, (
"{} \nTransformer encoder / decoder "
"state_dict does not contain {}. \nCannot "
"load {} from pretrained XLM checkpoint "
"{} into Transformer.".format(
str(state_dict.keys()), subkey, key,
pretrained_xlm_checkpoint
)
)
state_dict[subkey] = xlm_state_dict[key]
return state_dict
class TransformerEncoderFromPretrainedXLM(TransformerEncoder):
def __init__(self, args, dictionary, embed_tokens):
super().__init__(args, dictionary, embed_tokens)
if getattr(args, "init_decoder_only", False):
# Don't load XLM weights for encoder if --init-decoder-only
return
assert hasattr(args, "pretrained_xlm_checkpoint"), (
"--pretrained-xlm-checkpoint must be specified to load Transformer "
"encoder from pretrained XLM"
)
if args.pretrained_xlm_checkpoint != 'interactive':
xlm_loaded_state_dict = upgrade_state_dict_with_xlm_weights(
state_dict=self.state_dict(),
pretrained_xlm_checkpoint=args.pretrained_xlm_checkpoint,
)
self.load_state_dict(xlm_loaded_state_dict, strict=True)
# class TransformerDecoderFromPretrainedXLM(TransformerDecoder):
# def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
# super().__init__(args, dictionary, embed_tokens, no_encoder_attn)
# if getattr(args, "init_encoder_only", False):
# # Don't load XLM weights for decoder if --init-encoder-only
# return
# assert hasattr(args, "pretrained_xlm_checkpoint"), (
# "--pretrained-xlm-checkpoint must be specified to load Transformer "
# "decoder from pretrained XLM"
# )
#
# xlm_loaded_state_dict = upgrade_state_dict_with_xlm_weights(
# state_dict=self.state_dict(),
# pretrained_xlm_checkpoint=args.pretrained_xlm_checkpoint,
# )
# self.load_state_dict(xlm_loaded_state_dict, strict=True)
@register_model_architecture(
"transformer_xlm_iwslt_decoder", "transformer_xlm_iwslt_decoder")
def transformer_xlm_iwslt_decoder(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 3072)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.decoder_layers = getattr(args, "decoder_layers", 6)
transformer_base_architecture(args)
| 40.662857 | 82 | 0.647274 | 3,201 | 0.449831 | 0 | 0 | 3,075 | 0.432125 | 0 | 0 | 3,035 | 0.426504 |
cd0d1977c612b5942005c2d4eceddb8039516a10 | 7,249 | py | Python | test/unit/mysql_db_admin/process_request.py | mjpernot/mysql-mysql-db-admin | 4821d6923155a48362869a6f2bf8c69fe3e533d4 | [
"MIT"
] | null | null | null | test/unit/mysql_db_admin/process_request.py | mjpernot/mysql-mysql-db-admin | 4821d6923155a48362869a6f2bf8c69fe3e533d4 | [
"MIT"
] | null | null | null | test/unit/mysql_db_admin/process_request.py | mjpernot/mysql-mysql-db-admin | 4821d6923155a48362869a6f2bf8c69fe3e533d4 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# Classification (U)
"""Program: process_request.py
Description: Unit testing of process_request in mysql_db_admin.py.
Usage:
test/unit/mysql_db_admin/process_request.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
import mock
# Local
sys.path.append(os.getcwd())
import mysql_db_admin
import lib.gen_libs as gen_libs
import version
__version__ = version.__version__
def func_holder(server, dbs, tbl):
"""Method: func_holder
Description: Function stub holder for a generic function call.
Arguments:
server
dbs
tbl
"""
status = True
if server and dbs and tbl:
status = True
return status
class Server(object):
"""Class: Server
Description: Class stub holder for mysql_class.Server class.
Methods:
__init__
"""
def __init__(self):
"""Method: __init__
Description: Class initialization.
Arguments:
"""
pass
class UnitTest(unittest.TestCase):
"""Class: UnitTest
Description: Class which is a representation of a unit testing.
Methods:
setUp
test_mysql_80
test_pre_mysql_80
test_single_miss_tbl
test_single_tbl
test_all_tbls
test_all_dbs
"""
def setUp(self):
"""Function: setUp
Description: Initialization for unit testing.
Arguments:
"""
self.server = Server()
self.func_name = func_holder
self.db_name = None
self.db_name2 = ["db1"]
self.tbl_name = None
self.tbl_name2 = ["tbl1"]
self.tbl_name3 = ["tbl3"]
self.version = {"version": "5.7"}
self.version2 = {"version": "8.0"}
@mock.patch("mysql_db_admin.mysql_class.fetch_sys_var")
@mock.patch("mysql_db_admin.gen_libs.dict_2_list")
@mock.patch("mysql_db_admin.mysql_libs.fetch_tbl_dict")
@mock.patch("mysql_db_admin.mysql_libs.fetch_db_dict")
def test_mysql_80(self, mock_fetch_db, mock_fetch_tbl, mock_list,
mock_version):
"""Function: test_mysql_80
Description: Test with processing all databases.
Arguments:
"""
mock_version.return_value = self.version2
mock_fetch_db.return_value = True
mock_fetch_tbl.return_value = True
mock_list.side_effect = [["db1"], ["tbl1", "tbl2"]]
self.assertFalse(
mysql_db_admin.process_request(
self.server, self.func_name, self.db_name, self.tbl_name))
@mock.patch("mysql_db_admin.mysql_class.fetch_sys_var")
@mock.patch("mysql_db_admin.gen_libs.dict_2_list")
@mock.patch("mysql_db_admin.mysql_libs.fetch_tbl_dict")
@mock.patch("mysql_db_admin.mysql_libs.fetch_db_dict")
def test_pre_mysql_80(self, mock_fetch_db, mock_fetch_tbl, mock_list,
mock_version):
"""Function: test_pre_mysql_80
Description: Test with processing all databases.
Arguments:
"""
mock_version.return_value = self.version
mock_fetch_db.return_value = True
mock_fetch_tbl.return_value = True
mock_list.side_effect = [["db1"], ["tbl1", "tbl2"]]
self.assertFalse(
mysql_db_admin.process_request(
self.server, self.func_name, self.db_name, self.tbl_name))
@mock.patch("mysql_db_admin.mysql_class.fetch_sys_var")
@mock.patch("mysql_db_admin.detect_dbs")
@mock.patch("mysql_db_admin.gen_libs.dict_2_list")
@mock.patch("mysql_db_admin.mysql_libs.fetch_tbl_dict")
@mock.patch("mysql_db_admin.mysql_libs.fetch_db_dict")
def test_single_miss_tbl(self, mock_fetch_db, mock_fetch_tbl, mock_list,
mock_detect, mock_version):
"""Function: test_single_miss_tbl
Description: Test with single missing table in a database.
Arguments:
"""
mock_version.return_value = self.version
mock_fetch_db.return_value = True
mock_fetch_tbl.return_value = True
mock_list.side_effect = [["db1"], ["tbl1", "tbl2"]]
mock_detect.return_value = True
with gen_libs.no_std_out():
self.assertFalse(
mysql_db_admin.process_request(
self.server, self.func_name, self.db_name2,
self.tbl_name3))
@mock.patch("mysql_db_admin.mysql_class.fetch_sys_var")
@mock.patch("mysql_db_admin.detect_dbs")
@mock.patch("mysql_db_admin.gen_libs.dict_2_list")
@mock.patch("mysql_db_admin.mysql_libs.fetch_tbl_dict")
@mock.patch("mysql_db_admin.mysql_libs.fetch_db_dict")
def test_single_tbl(self, mock_fetch_db, mock_fetch_tbl, mock_list,
mock_detect, mock_version):
"""Function: test_single_tbl
Description: Test with single table in a database.
Arguments:
"""
mock_version.return_value = self.version
mock_fetch_db.return_value = True
mock_fetch_tbl.return_value = True
mock_list.side_effect = [["db1"], ["tbl1", "tbl2"]]
mock_detect.return_value = True
self.assertFalse(
mysql_db_admin.process_request(
self.server, self.func_name, self.db_name2, self.tbl_name2))
@mock.patch("mysql_db_admin.mysql_class.fetch_sys_var")
@mock.patch("mysql_db_admin.detect_dbs")
@mock.patch("mysql_db_admin.gen_libs.dict_2_list")
@mock.patch("mysql_db_admin.mysql_libs.fetch_tbl_dict")
@mock.patch("mysql_db_admin.mysql_libs.fetch_db_dict")
def test_all_tbls(self, mock_fetch_db, mock_fetch_tbl, mock_list,
mock_detect, mock_version):
"""Function: test_all_tbls
Description: Test with all tables in a database.
Arguments:
"""
mock_version.return_value = self.version
mock_fetch_db.return_value = True
mock_fetch_tbl.return_value = True
mock_list.side_effect = [["db1"], ["tbl1", "tbl2"]]
mock_detect.return_value = True
self.assertFalse(
mysql_db_admin.process_request(
self.server, self.func_name, self.db_name2, self.tbl_name))
@mock.patch("mysql_db_admin.mysql_class.fetch_sys_var")
@mock.patch("mysql_db_admin.gen_libs.dict_2_list")
@mock.patch("mysql_db_admin.mysql_libs.fetch_tbl_dict")
@mock.patch("mysql_db_admin.mysql_libs.fetch_db_dict")
def test_all_dbs(self, mock_fetch_db, mock_fetch_tbl, mock_list,
mock_version):
"""Function: test_all_dbs
Description: Test with processing all databases.
Arguments:
"""
mock_version.return_value = self.version
mock_fetch_db.return_value = True
mock_fetch_tbl.return_value = True
mock_list.side_effect = [["db1"], ["tbl1", "tbl2"]]
self.assertFalse(
mysql_db_admin.process_request(
self.server, self.func_name, self.db_name, self.tbl_name))
if __name__ == "__main__":
unittest.main()
| 26.36 | 76 | 0.646986 | 6,347 | 0.875569 | 0 | 0 | 5,252 | 0.724514 | 0 | 0 | 2,992 | 0.412747 |
cd0e89b4b693cd65319eaacf6298dcfed09dbd78 | 594 | py | Python | fsttest/__init__.py | eddieantonio/fsttest | 8ff71a9aa41a70a30832fa219b72e7478872c16f | [
"MIT"
] | null | null | null | fsttest/__init__.py | eddieantonio/fsttest | 8ff71a9aa41a70a30832fa219b72e7478872c16f | [
"MIT"
] | 1 | 2020-01-27T21:43:04.000Z | 2020-01-28T15:57:05.000Z | fsttest/__init__.py | eddieantonio/fsttest | 8ff71a9aa41a70a30832fa219b72e7478872c16f | [
"MIT"
] | 1 | 2021-04-26T17:46:19.000Z | 2021-04-26T17:46:19.000Z | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
FST test -- test your Foma finite-state transducers!
"""
from .__version__ import VERSION as __version__
from ._fst import FST
from ._results import FailedTestResult, PassedTestResult, TestResults
from ._run import execute_test_case, run_tests
from ._test_case import TestCase
from .exceptions import FSTTestError, TestCaseDefinitionError
__all__ = [
"FST",
"FSTTestError",
"FailedTestResult",
"PassedTestResult",
"TestCase",
"TestCaseDefinitionError",
"TestResults",
"execute_test_case",
"run_tests",
]
| 22.846154 | 69 | 0.725589 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 238 | 0.400673 |
cd0ff0154f3a2ed2059c34dae1964cf271d9a2e1 | 3,674 | py | Python | analysis/sharpness.py | sanketvmehta/lifelong-learning-pretraining-and-sam | 2fee18a4b13c918f6005f88c19089b86f4a8aae2 | [
"Apache-2.0"
] | null | null | null | analysis/sharpness.py | sanketvmehta/lifelong-learning-pretraining-and-sam | 2fee18a4b13c918f6005f88c19089b86f4a8aae2 | [
"Apache-2.0"
] | null | null | null | analysis/sharpness.py | sanketvmehta/lifelong-learning-pretraining-and-sam | 2fee18a4b13c918f6005f88c19089b86f4a8aae2 | [
"Apache-2.0"
] | null | null | null | import copy
import numpy as np
import torch
from scipy import optimize
import logging
def sharpness(model, criterion_fn, A, epsilon=1e-3, p=0, bounds=None):
"""Computes sharpness metric according to https://arxiv.org/abs/1609.04836.
Args:
model: Model on which to compute sharpness
criterion_fn: Function that takes in a model and returns the loss
value and gradients on the appropriate data that will be used in
the loss maximization done in the sharpness calculation.
A: Projection matrix that defines the subspace in which the loss
maximization will be done. If A=1, no projection will be done.
epsilon: Defines the size of the neighborhood that will be used in the
loss maximization.
p: The dimension of the random projection subspace in which maximization
will be done. If 0, assumed to be the full parameter space.
"""
run_fn = create_run_model(model, A, criterion_fn)
if bounds is None:
bounds = compute_bounds(model, A, epsilon)
dim = flatten_parameters(model).shape[0] if p == 0 else p
# Find the maximum loss in the neighborhood of the minima
y = optimize.minimize(
lambda x: run_fn(x),
np.zeros(dim),
method="L-BFGS-B",
bounds=bounds,
jac=True,
options={"maxiter": 10},
).x.astype(np.float32)
model_copy = copy.deepcopy(model)
    if np.isscalar(A) and A == 1:
flat_diffs = y
else:
flat_diffs = A @ y
apply_diffs(model_copy, flat_diffs)
maximum = criterion_fn(model_copy)["loss"]
loss_value = criterion_fn(model)["loss"]
sharpness = 100 * (maximum - loss_value) / (1 + loss_value)
return sharpness
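# Editorial usage sketch (assumptions: a toy torch model and data; criterion_fn must return
# a dict with a scalar "loss" and a flat numpy "gradients" vector, as consumed in run() below):
#
#     model = torch.nn.Linear(10, 1)
#     x, y = torch.randn(32, 10), torch.randn(32, 1)
#
#     def criterion_fn(m):
#         m.zero_grad()
#         loss = torch.nn.functional.mse_loss(m(x), y)
#         loss.backward()
#         grads = np.concatenate([p.grad.detach().cpu().numpy().ravel()
#                                 for p in m.parameters() if p.requires_grad])
#         return {"loss": loss.item(), "gradients": grads}
#
#     print(sharpness(model, criterion_fn, A=1))  # A=1 -> maximize over the full parameter space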
def flatten_parameters(model):
"""Returns a flattened numpy array with the parameters of the model."""
return np.concatenate(
[
param.detach().cpu().numpy().flatten()
for param in model.parameters()
if param.requires_grad
]
)
def compute_bounds(model, A, epsilon):
"""Computes the bounds in which to search for the maximum loss."""
x = flatten_parameters(model)
    if np.isscalar(A) and A == 1:
bounds = epsilon * (np.abs(x) + 1)
else:
b, _, _, _ = np.linalg.lstsq(A, x)
bounds = epsilon * (np.abs(b) + 1)
return optimize.Bounds(-bounds, bounds)
def create_run_model(model, A, criterion_fn):
"""Creates a run function that takes in parameters in the subspace that loss
maximization takes place in, and computes the loss and gradients
corresponding to those parameters.
"""
def run(y):
y = y.astype(np.float32)
model_copy = copy.deepcopy(model)
model_copy.zero_grad()
        if np.isscalar(A) and A == 1:
flat_diffs = y
else:
flat_diffs = A @ y
apply_diffs(model_copy, flat_diffs)
metrics = criterion_fn(model_copy)
objective = -metrics["loss"]
gradient = -metrics["gradients"]
logging.info("Loss: %f", objective)
        if not (np.isscalar(A) and A == 1):
gradient = gradient @ A
return objective, gradient.astype(np.float64)
return run
def apply_diffs(model, diffs):
"""Adds deltas to the parameters in the model corresponding to diffs."""
parameters = model.parameters()
idx = 0
for parameter in parameters:
if parameter.requires_grad:
n_elements = parameter.nelement()
cur_diff = diffs[idx : idx + n_elements]
parameter.data = parameter.data + torch.tensor(
cur_diff.reshape(parameter.shape)
).to(device=parameter.device)
idx += n_elements
| 32.803571 | 80 | 0.631464 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,292 | 0.35166 |
cd10ef939588bc49c75df0d3a2c4ba2f987aa04b | 1,164 | py | Python | benchmark_runner.py | mamrehn/simplification | cb43ccadcbe011b89845142910d844b0bf7ca510 | [
"MIT"
] | null | null | null | benchmark_runner.py | mamrehn/simplification | cb43ccadcbe011b89845142910d844b0bf7ca510 | [
"MIT"
] | null | null | null | benchmark_runner.py | mamrehn/simplification | cb43ccadcbe011b89845142910d844b0bf7ca510 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Standalone benchmark runner
"""
import cProfile
import pstats
import profile
import numpy as np
print("Running Rust + Cython benchmarks")
# calibrate
pr = profile.Profile()
calibration = np.mean([pr.calibrate(100000) for x in xrange(5)])
# add the bias
profile.Profile.bias = calibration
cProfile.run(open('simplification/test/cprofile_rust_cython.py', 'rb'), 'simplification/test/output_stats_rust_cython')
rust_cython = pstats.Stats('simplification/test/output_stats_rust_cython')
cProfile.run(open('simplification/test/cprofile_rust_cython_complex.py', 'rb'), 'simplification/test/output_stats_rust_cython_complex')
rust_cython_c = pstats.Stats('simplification/test/output_stats_rust_cython_complex')
cProfile.run(open('simplification/test/cprofile_rust_cython_shapely.py', 'rb'), 'simplification/test/output_stats_rust_cython_shapely')
shapely = pstats.Stats('simplification/test/output_stats_rust_cython_shapely')
print("Rust Cython Benchmarks\n")
rust_cython.sort_stats('cumulative').print_stats(5)
rust_cython_c.sort_stats('cumulative').print_stats(5)
shapely.sort_stats('cumulative').print_stats(5)
| 35.272727 | 135 | 0.803265 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 671 | 0.57646 |
cd13a01142ccf63d717a89caf8e588ed9c337f8d | 850 | py | Python | D_QuickS.py | rut999/Algo | 9180f66452597a758a31073cb2b8fa4a3e6a93fe | [
"MIT"
] | null | null | null | D_QuickS.py | rut999/Algo | 9180f66452597a758a31073cb2b8fa4a3e6a93fe | [
"MIT"
] | null | null | null | D_QuickS.py | rut999/Algo | 9180f66452597a758a31073cb2b8fa4a3e6a93fe | [
"MIT"
] | null | null | null | import time
from random import randint
def random_int(x):
value = []
for i in range(x):
value.append(randint(0, x))
return value
def Quick_sort(list1):
N = len(list1)
if N <=1:
return list1
pivot = list1.pop()
# mid = len(list1)//2
Left_H = []
Right_H = []
for i in range(len(list1)):
if(list1[i]>pivot):
Right_H.append(list1[i])
else:
Left_H.append(list1[i])
return (Quick_sort(Left_H)+[pivot]+Quick_sort(Right_H))
random_list = random_int(100000)
#list2 = [0,0,99,34,56,54,-1,-1,32,2.5,-1.1,1000,1000,-2,30,21,24,15,10,6]
t1 = time.time()
Quick_sort(random_list)
t2 = time.time()
print(t2-t1)
# def Quick_Sort(list1):
# if (list1[0]<list1[-1]):
# partition_index =partition(list1)
# quicksort(list1,)
# quicksort()
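# Editorial sketch of the in-place, partition-based variant hinted at in the stub above
# (Lomuto partition; the names are illustrative):
def quick_sort_inplace(arr, low=0, high=None):
    if high is None:
        high = len(arr) - 1
    if low < high:
        pivot = arr[high]
        i = low - 1
        for j in range(low, high):
            if arr[j] <= pivot:
                i += 1
                arr[i], arr[j] = arr[j], arr[i]
        arr[i + 1], arr[high] = arr[high], arr[i + 1]
        p = i + 1
        quick_sort_inplace(arr, low, p - 1)
        quick_sort_inplace(arr, p + 1, high)
    return arr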
| 22.368421 | 74 | 0.583529 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 240 | 0.282353 |
cd18f52d3cd9d807fe305ade001766cc89245405 | 1,929 | py | Python | cride/circles/views/circles.py | LhernerRemon/Rider | 30783cf58513698d23730f5fa477dfeddda8ee6b | [
"MIT"
] | null | null | null | cride/circles/views/circles.py | LhernerRemon/Rider | 30783cf58513698d23730f5fa477dfeddda8ee6b | [
"MIT"
] | null | null | null | cride/circles/views/circles.py | LhernerRemon/Rider | 30783cf58513698d23730f5fa477dfeddda8ee6b | [
"MIT"
] | null | null | null | #REST
from rest_framework import viewsets,mixins
from rest_framework.permissions import IsAuthenticated
#Filters
from rest_framework.filters import SearchFilter,OrderingFilter
from django_filters.rest_framework import DjangoFilterBackend
#Models, serializers
from cride.circles.models import Circle,Membership
from cride.circles.serializers import CircleModelSerializer
#Permission
from cride.circles.permissions import IsCircleAdmin
class CircleViewSet(mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.ListModelMixin,
viewsets.GenericViewSet):
"""
def destroy(self, request, pk=None):
raise MethodNotAllowed('DELETE')
Circle view set
"""
serializer_class=CircleModelSerializer
lookup_field="slug_name"
#permission_classes=(IsAuthenticated,)
filter_backends=(SearchFilter,OrderingFilter,DjangoFilterBackend)
search_fields=("slug_name","name")
ordering_fields=("rides_offered","rides_taken","name","created","member_limit")
ordering=("-members__count","-rides_offered","-rides_taken")
filter_fields=("verified","is_limit")
def get_permissions(self):
permissions=[IsAuthenticated]
if self.action in ["update","partial_update"]:
permissions.append(IsCircleAdmin)
return [permission() for permission in permissions]
def get_queryset(self):
queryset=Circle.objects.all()
if self.action=="list":
return queryset.filter(is_public=True)
return queryset
def perform_create(self,serializer):
circle=serializer.save()
user=self.request.user
profile=user.profile
Membership.objects.create(
user=user,
profile=profile,
circle=circle,
is_admin=True,
remaining_invitation=10
) | 32.694915 | 83 | 0.690513 | 1,491 | 0.772939 | 0 | 0 | 0 | 0 | 0 | 0 | 377 | 0.195438 |
cd18f82e759c1f805c2c156a96b2d6d4fe352c3d | 780 | py | Python | api/service/cidades_atendimento_service.py | FinotelliCarlos/ewipesimple-adminweb-python | 3bf779250efeb9f85b4283ffbf210bf227aa8e8c | [
"MIT"
] | 1 | 2021-06-17T06:13:33.000Z | 2021-06-17T06:13:33.000Z | api/service/cidades_atendimento_service.py | FinotelliCarlos/ewipesimple-adminweb-python | 3bf779250efeb9f85b4283ffbf210bf227aa8e8c | [
"MIT"
] | null | null | null | api/service/cidades_atendimento_service.py | FinotelliCarlos/ewipesimple-adminweb-python | 3bf779250efeb9f85b4283ffbf210bf227aa8e8c | [
"MIT"
] | null | null | null | from adminweb.services import cep_service
from adminweb.models import Profissional
from rest_framework import serializers
import json
def listar_profissionais_cidade(cep):
codigo_ibge = buscar_cidade_cep(cep)['ibge']
try:
profissionais = Profissional.objects.filter(codigo_ibge=codigo_ibge).order_by('id')
return profissionais
except Profissional.DoesNotExist:
return []
def buscar_cidade_cep(cep):
response = cep_service.buscar_cidade_cep(cep)
if response.status_code == 400:
raise serializers.ValidationError('O CEP informado está incorreto!')
cidade_api = json.loads(response.content)
if 'erro' in cidade_api:
raise serializers.ValidationError('O CEP informado não foi encontrado!')
return cidade_api
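# Editorial note (assumption): cep_service.buscar_cidade_cep is expected to return a
# ViaCEP-style payload, e.g. {"cep": "01001-000", "localidade": "São Paulo", "ibge": "3550308"},
# or {"erro": true} for an unknown CEP, which is what the 'ibge' lookup and the 'erro'
# check above rely on. Typical use:
#
#     profissionais = listar_profissionais_cidade('01001-000')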
| 32.5 | 91 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.112532 |
cd190e09b3c36d0f4700cf8693b8dfde027f164e | 6,680 | py | Python | 초보를 위한 셀레니움/#1 Google Screenshots Scrapping/main.py | donddog/Nomad_Academy_Online_Course_Codes | 391fde26052a67f7b533219ab0de6096830697b6 | [
"MIT"
] | 1 | 2021-02-11T16:45:22.000Z | 2021-02-11T16:45:22.000Z | 초보를 위한 셀레니움/#1 Google Screenshots Scrapping/main.py | donddog/Nomad_Academy_Online_Course_Codes | 391fde26052a67f7b533219ab0de6096830697b6 | [
"MIT"
] | null | null | null | 초보를 위한 셀레니움/#1 Google Screenshots Scrapping/main.py | donddog/Nomad_Academy_Online_Course_Codes | 391fde26052a67f7b533219ab0de6096830697b6 | [
"MIT"
] | null | null | null | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
import shutil
import os
class GoogleKeywordScreenshooter:
def __init__(self, keyword, screenshots_dir):
self.browser = webdriver.Chrome('../chromedriver.exe')
self.keyword = keyword
self.screenshots_dir = screenshots_dir
    def start(self):
        try:
            if not os.path.exists('screenshots'):
                os.makedirs('screenshots')
        except Exception:
            pass
        self.browser.get("https://google.com")
        search_bar = self.browser.find_element_by_class_name("gLFyf")
        search_bar.send_keys(self.keyword)
        search_bar.send_keys(Keys.ENTER)
        # Capture the organic results ("g" blocks) on the first ten result pages;
        # the "next page" links sit in td[3]..td[11] of the results footer table.
        for page in range(10):
            self.repetitive()
            search_results = self.browser.find_elements_by_class_name("g")
            for index, search_result in enumerate(search_results):
                search_result.screenshot(
                    f"{self.screenshots_dir}/{self.keyword}x{index + page * 10}.png"
                )
            if page == 9:
                break
            next_page_button = self.browser.find_element_by_xpath(
                f'//*[@id="xjs"]/div/table/tbody/tr/td[{page + 3}]/a'
            )
            next_page_button.click()
    def repetitive(self):
        # Remove Google's "g-blk" featured block so it is not captured in the screenshots
        try:
            shitty_element = WebDriverWait(self.browser, 10).until(
                EC.presence_of_element_located((By.CLASS_NAME, "g-blk"))
            )
            self.browser.execute_script(
                """
                const shitty = arguments[0];
                shitty.parentElement.removeChild(shitty)
                """,
                shitty_element,
            )
        except Exception:
            pass
def finish(self):
self.browser.quit()
def tozipfile(self):
shutil.make_archive('screentshotresults', 'zip', 'screenshots')
shutil.rmtree('screenshots/')
domain_competitors = GoogleKeywordScreenshooter("buy domain", "screenshots")
domain_competitors.start()
domain_competitors.finish()
domain_competitors.tozipfile()
# python_competitors = GoogleKeywordScreenshooter("python book", "screenshots")
# python_competitors.start()
# python_competitors.finish() | 38.171429 | 107 | 0.572156 | 6,107 | 0.914222 | 0 | 0 | 0 | 0 | 0 | 0 | 2,065 | 0.309132 |
cd1a66acf2cfd6c3c481c4c94e53d436215cbbe7 | 9,414 | py | Python | omicron/core/numpy_extensions.py | evimacs/omicron | abe77fd25a93cf3d0d17661ae957373474724535 | [
"MIT"
] | 4 | 2020-11-09T02:23:51.000Z | 2021-01-24T00:45:21.000Z | omicron/core/numpy_extensions.py | evimacs/omicron | abe77fd25a93cf3d0d17661ae957373474724535 | [
"MIT"
] | 14 | 2020-11-09T02:31:34.000Z | 2021-12-22T10:15:47.000Z | omicron/core/numpy_extensions.py | evimacs/omicron | abe77fd25a93cf3d0d17661ae957373474724535 | [
"MIT"
] | 2 | 2021-01-24T00:45:25.000Z | 2021-12-24T06:18:37.000Z | """Extension function related to numpy
"""
from __future__ import annotations
from typing import List, Tuple
import numpy as np
import pandas
from numpy.typing import ArrayLike
def dict_to_numpy_array(d: dict, dtype: List[Tuple]) -> np.array:
"""convert dictionary to numpy array
Examples:
>>> d = {"aaron": 5, "jack": 6}
>>> dtype = [("name", "S8"), ("score", "<i4")]
>>> dict_to_numpy_array(d, dtype)
array([(b'aaron', 5), (b'jack', 6)],
dtype=[('name', 'S8'), ('score', '<i4')])
Args:
        d (dict): the dictionary to convert
        dtype (List[Tuple]): structured dtype, one (name, format) tuple per field
Returns:
        np.array: a structured array with one record per dictionary item
"""
return np.fromiter(d.items(), dtype=dtype, count=len(d))
def dataframe_to_structured_array(
df: pandas.DataFrame, dtypes: List[Tuple] = None
) -> ArrayLike:
"""convert dataframe (with all columns, and index possibly) to numpy structured arrays
    `len(dtypes)` should be either equal to `len(df.columns)` or `len(df.columns) + 1`. In the latter case, `df.index` is included in the converted array.
Args:
        df: the DataFrame to be converted
        dtypes: Defaults to None. If `None`, the dtypes of `df` are used and the `index` of `df` is not converted.
Returns:
        ArrayLike: the converted structured array
"""
v = df
if dtypes is not None:
dtypes_in_dict = {key: value for key, value in dtypes}
col_len = len(df.columns)
if len(dtypes) == col_len + 1:
v = df.reset_index()
rename_index_to = set(dtypes_in_dict.keys()).difference(set(df.columns))
v.rename(columns={"index": list(rename_index_to)[0]}, inplace=True)
elif col_len != len(dtypes):
raise ValueError(
f"length of dtypes should be either {col_len} or {col_len + 1}, is {len(dtypes)}"
)
# re-arrange order of dtypes, in order to align with df.columns
dtypes = []
for name in v.columns:
dtypes.append((name, dtypes_in_dict[name]))
else:
dtypes = df.dtypes
return np.array(np.rec.fromrecords(v.values), dtype=dtypes)
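# Editorial usage sketch for dataframe_to_structured_array (illustrative values):
#
#     df = pandas.DataFrame({"open": [1.1, 2.2]}, index=[20050104, 20050105])
#     dtypes = [("frame", "i8"), ("open", "f8")]
#     arr = dataframe_to_structured_array(df, dtypes)  # len(dtypes) == len(df.columns) + 1,
#                                                      # so the index is included as "frame"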
def numpy_array_to_dict(arr: np.array, key: str, value: str) -> dict:
return {item[key]: item[value] for item in arr}
def find_runs(x):
"""Find runs of consecutive items in an array."""
# ensure array
x = np.asanyarray(x)
if x.ndim != 1:
raise ValueError("only 1D array supported")
n = x.shape[0]
# handle empty array
if n == 0:
return np.array([]), np.array([]), np.array([])
else:
# find run starts
loc_run_start = np.empty(n, dtype=bool)
loc_run_start[0] = True
np.not_equal(x[:-1], x[1:], out=loc_run_start[1:])
run_starts = np.nonzero(loc_run_start)[0]
# find run values
run_values = x[loc_run_start]
# find run lengths
run_lengths = np.diff(np.append(run_starts, n))
return run_values, run_starts, run_lengths
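# Editorial usage sketch for find_runs:
#
#     >>> find_runs([1, 1, 2, 2, 2, 3])
#     (array([1, 2, 3]), array([0, 2, 5]), array([2, 3, 1]))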
def count_between(arr, start, end):
"""计算数组中,`start`元素与`end`元素之间共有多少个元素
要求arr必须是已排序。计算结果会包含区间边界点。
Examples:
>>> arr = [20050104, 20050105, 20050106, 20050107, 20050110, 20050111]
>>> count_between(arr, 20050104, 20050111)
6
>>> count_between(arr, 20050104, 20050109)
4
"""
pos_start = np.searchsorted(arr, start, side="right")
pos_end = np.searchsorted(arr, end, side="right")
counter = pos_end - pos_start + 1
if start < arr[0]:
counter -= 1
if end > arr[-1]:
counter -= 1
return counter
def shift(arr, start, offset):
"""在numpy数组arr中,找到start(或者最接近的一个),取offset对应的元素。
要求`arr`已排序。`offset`为正,表明向后移位;`offset`为负,表明向前移位
Examples:
>>> arr = [20050104, 20050105, 20050106, 20050107, 20050110, 20050111]
>>> shift(arr, 20050104, 1)
20050105
>>> shift(arr, 20050105, -1)
20050104
        >>> # start is already beyond the right boundary, so shifting right returns start itself
>>> shift(arr, 20050120, 1)
20050120
Args:
        arr : a sorted array
        start : a value of any dtype accepted by numpy
        offset (int): number of positions to shift
Returns:
        the element found after shifting
"""
pos = np.searchsorted(arr, start, side="right")
if pos + offset - 1 >= len(arr):
return start
else:
return arr[pos + offset - 1]
def floor(arr, item):
"""
    In the array `arr`, find the value that is less than or equal to `item`. If `item` is smaller than every element
    of `arr`, return arr[0]; if `item` is larger than every element of `arr`, return arr[-1].
    Unlike `minute_frames_floor`, this function performs no wrap-around or carrying.
Examples:
>>> a = [3, 6, 9]
>>> floor(a, -1)
3
>>> floor(a, 9)
9
>>> floor(a, 10)
9
>>> floor(a, 4)
3
>>> floor(a,10)
9
Args:
arr:
item:
Returns:
"""
if item < arr[0]:
return arr[0]
index = np.searchsorted(arr, item, side="right")
return arr[index - 1]
def join_by_left(key, r1, r2, mask=True):
"""左连接 `r1`, `r2` by `key`
如果`r1`中存在`r2`中没有的行,则该行对应的`r2`中的那些字段的取值将使用`fill`来填充。如果
same as numpy.lib.recfunctions.join_by(key, r1, r2, jointype='leftouter'), but allows
    `r1` to have duplicate keys
[Reference: stackoverflow](https://stackoverflow.com/a/53261882/13395693)
Examples:
>>> # to join the following
>>> # [[ 1, 2],
>>> # [ 1, 3], x [[1, 5],
>>> # [ 2, 3]] [4, 7]]
>>> # only first two rows in left will be joined
>>> r1 = np.array([(1, 2), (1,3), (2,3)], dtype=[('seq', 'i4'), ('score', 'i4')])
>>> r2 = np.array([(1, 5), (4,7)], dtype=[('seq', 'i4'), ('age', 'i4')])
>>> joined = join_by_left('seq', r1, r2)
>>> print(joined)
[(1, 2, 5) (1, 3, 5) (2, 3, --)]
>>> print(joined.dtype)
(numpy.record, [('seq', '<i4'), ('score', '<i4'), ('age', '<i4')])
>>> joined[2][2]
masked
>>> joined.tolist()[2][2] == None
True
Args:
        key : the field name to join on
        r1 : dataset 1
        r2 : dataset 2
        fill : the value used to fill cells that have no match
Returns:
a numpy array
"""
# figure out the dtype of the result array
descr1 = r1.dtype.descr
descr2 = [d for d in r2.dtype.descr if d[0] not in r1.dtype.names]
descrm = descr1 + descr2
# figure out the fields we'll need from each array
f1 = [d[0] for d in descr1]
f2 = [d[0] for d in descr2]
# cache the number of columns in f1
ncol1 = len(f1)
# get a dict of the rows of r2 grouped by key
rows2 = {}
for row2 in r2:
rows2.setdefault(row2[key], []).append(row2)
# figure out how many rows will be in the result
nrowm = 0
for k1 in r1[key]:
if k1 in rows2:
nrowm += len(rows2[k1])
else:
nrowm += 1
# allocate the return array
# ret = np.full((nrowm, ), fill, dtype=descrm)
_ret = np.recarray(nrowm, dtype=descrm)
if mask:
ret = np.ma.array(_ret, mask=True)
else:
ret = _ret
# merge the data into the return array
i = 0
for row1 in r1:
if row1[key] in rows2:
for row2 in rows2[row1[key]]:
ret[i] = tuple(row1[f1]) + tuple(row2[f2])
i += 1
else:
for j in range(ncol1):
ret[i][j] = row1[j]
i += 1
return ret
def numpy_append_fields(base, names, data, dtypes):
"""给现有的数组`base`增加新的字段
实现了`numpy.lib.recfunctions.rec_append_fields`的功能。因为`rec_append_fields`不能处
理`data`元素的类型为Object的情况
Example:
        >>> # append a single field
>>> import numpy
>>> old = np.array([i for i in range(3)], dtype=[('col1', '<f4')])
>>> new_list = [2 * i for i in range(3)]
>>> res = numpy_append_fields(old, 'new_col', new_list, [('new_col', '<f4')])
>>> print(res)
... # doctest: +NORMALIZE_WHITESPACE
[(0., 0.) (1., 2.) (2., 4.)]
        >>> # append multiple fields
>>> data = [res['col1'].tolist(), res['new_col'].tolist()]
>>> print(numpy_append_fields(old, ('col3', 'col4'), data, [('col3', '<f4'), ('col4', '<f4')]))
... # doctest: +NORMALIZE_WHITESPACE
[(0., 0., 0.) (1., 1., 2.) (2., 2., 4.)]
Args:
        base ([numpy.array]): the base array
        names ([type]): name(s) of the new field(s); either a single string or a list of strings
        data (list): data of the new field(s), given as a list
        dtypes ([type]): dtype(s) of the new field(s)
"""
if isinstance(names, str):
names = [
names,
]
data = [
data,
]
result = np.empty(base.shape, dtype=base.dtype.descr + dtypes)
for col in base.dtype.names:
result[col] = base[col]
for i in range(len(names)):
result[names[i]] = data[i]
return result
def ffill_na(s: np.array) -> np.array:
"""前向替换一维数组中的np.NaN
如果s以np.NaN起头,则起头处的np.NaN将无法被替换。
Examples:
>>> arr = np.arange(6, dtype=np.float32)
>>> arr[3:5] = np.NaN
>>> ffill_na(arr)
... # doctest: +NORMALIZE_WHITESPACE
array([0., 1., 2., 2., 2., 5.], dtype=float32)
>>> arr[0:2] = np.nan
>>> ffill_na(arr)
... # doctest: +NORMALIZE_WHITESPACE
array([nan, nan, 2., 2., 2., 5.], dtype=float32)
Args:
        s (np.array): the one-dimensional array to forward-fill
Returns:
        np.array: the forward-filled array
"""
mask = np.isnan(s)
idx = np.where(~mask, np.arange(len(mask)), 0)
np.maximum.accumulate(idx, out=idx)
return s[idx]
| 26.222841 | 161 | 0.546633 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,242 | 0.613766 |
cd1bfaec0e66cc493fec447100454ceabadeff14 | 838 | py | Python | pepy/domain/read_model.py | daghan/pepy | 11e15e0a7af922cf72647dde95b6cc26760ee8ab | [
"MIT"
] | null | null | null | pepy/domain/read_model.py | daghan/pepy | 11e15e0a7af922cf72647dde95b6cc26760ee8ab | [
"MIT"
] | null | null | null | pepy/domain/read_model.py | daghan/pepy | 11e15e0a7af922cf72647dde95b6cc26760ee8ab | [
"MIT"
] | null | null | null | from datetime import date
from typing import List
from attr import attrs, attrib
@attrs()
class ProjectListProjection:
name: str = attrib()
total_downloads: int = attrib()
@attrs()
class DownloadProjection:
date: date = attrib()
downloads: int = attrib()
@attrs()
class ProjectProjection:
name: str = attrib()
total_downloads: int = attrib()
last_downloads: List[DownloadProjection] = attrib() # the last 30 days downloads
@property
def total_downloads_last_30_days(self) -> int:
downloads = 0
for d in self.last_downloads:
downloads += d.downloads
return downloads
@property
def total_downloads_last_7_days(self) -> int:
downloads = 0
for d in self.last_downloads[:7]:
downloads += d.downloads
return downloads
| 22.052632 | 85 | 0.657518 | 720 | 0.859189 | 0 | 0 | 747 | 0.891408 | 0 | 0 | 28 | 0.033413 |
cd1c390db89d68211aa13e58ba3a2a89676c5247 | 3,039 | py | Python | finetuning/pretrain_scripts/create_sentiment_mask.py | tatsu-lab/mlm_inductive_bias | 2d99e2477293036949ba356c88513729244dc1f9 | [
"MIT"
] | 10 | 2021-04-14T22:06:19.000Z | 2022-01-12T19:41:12.000Z | finetuning/pretrain_scripts/create_sentiment_mask.py | tatsu-lab/mlm_inductive_bias | 2d99e2477293036949ba356c88513729244dc1f9 | [
"MIT"
] | null | null | null | finetuning/pretrain_scripts/create_sentiment_mask.py | tatsu-lab/mlm_inductive_bias | 2d99e2477293036949ba356c88513729244dc1f9 | [
"MIT"
] | 3 | 2021-06-06T09:43:14.000Z | 2022-02-20T00:40:42.000Z | """
This script computes word masks based on sentiment lexicons
"""
import os
import torch
import argparse
from tqdm import tqdm
from transformers import AutoTokenizer
from transformers import GlueDataTrainingArguments as DataTrainingArguments
from transformers import GlueDataset as Dataset
parser = argparse.ArgumentParser()
parser.add_argument("--data-dir", type=str, default="./data/SST-2", help="path to the dir containing lm data.")
parser.add_argument("--lexicon-dir", type=str, default="./data/sentiment_lexicon", help="path to the dir containing sentiment lexicon.")
parser.add_argument("--tokenizer-name", type=str, default="bert-base-uncased", help="name of the tokenizer to use.")
parser.add_argument("--block_size", type=int, default=72, help="maximum length of the mask")
args = parser.parse_args()
positive_words = set()
with open(os.path.join(args.lexicon_dir, "positive-words.txt"), "r", encoding="ISO-8859-1") as f:
for line in f:
line = line.strip()
# skip the initial comments with ; and empty lines
if not line.startswith(";") and len(line) > 0:
positive_words.add(line.lower())
negative_words = set()
with open(os.path.join(args.lexicon_dir, "negative-words.txt"), "r", encoding="ISO-8859-1") as f:
for line in f:
line = line.strip()
# skip the initial comments with ; and empty lines
if not line.startswith(";") and len(line) > 0:
negative_words.add(line.lower())
salient_words = positive_words | negative_words
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)
splits = ["train", "dev", "test"]
for split in splits:
with open(os.path.join(args.data_dir, f"{split}.lm"), "r") as f:
all_sens = [s.strip() for s in f.readlines()]
salient_word_masks = torch.zeros(len(all_sens), args.block_size, dtype=torch.bool)
total_word_count = 0
salient_word_count = 0
# Main loop that handles subword tokenization
for i, sen in tqdm(enumerate(all_sens), total=len(all_sens)):
words = sen.split()
curr_idx = 1 # skip the [CLS] token
total_word_count += len(words)
for word in words:
tokens = tokenizer.tokenize(word)
# Need to truncate SQuAD
if curr_idx + len(tokens) > args.block_size:
raise ValueError("Encountered examples longer than block size.")
if word in salient_words:
salient_word_count += 1
for j in range(len(tokens)):
salient_word_masks[i, curr_idx + j] = 1
curr_idx += len(tokens)
print(f"{(salient_word_count/total_word_count):.2%} salient words")
salient_pct = salient_word_masks.any(dim=1).sum().float() / len(all_sens)
print(f"{split} {salient_pct:.2%} documents have salient words")
torch.save(
salient_word_masks,
os.path.join(
args.data_dir,
f"cached_{split}_{args.tokenizer_name.replace('-', '_')}_{args.block_size}.sentiment_mask",
),
)
| 37.518519 | 136 | 0.66535 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 882 | 0.290227 |
cd1f80834765c75ab8a5bfc49335f1d5e1f2a008 | 456 | py | Python | Leetcode/443. String Compression/solution2.py | asanoviskhak/Outtalent | c500e8ad498f76d57eb87a9776a04af7bdda913d | [
"MIT"
] | 51 | 2020-07-12T21:27:47.000Z | 2022-02-11T19:25:36.000Z | Leetcode/443. String Compression/solution2.py | CrazySquirrel/Outtalent | 8a10b23335d8e9f080e5c39715b38bcc2916ff00 | [
"MIT"
] | null | null | null | Leetcode/443. String Compression/solution2.py | CrazySquirrel/Outtalent | 8a10b23335d8e9f080e5c39715b38bcc2916ff00 | [
"MIT"
] | 32 | 2020-07-27T13:54:24.000Z | 2021-12-25T18:12:50.000Z | class Solution:
def compress(self, chars: List[str]) -> int:
l = 0
while l < len(chars):
r = l + 1
while r < len(chars) and chars[l] == chars[r]: r += 1
num = r - l
for k in range(r - l, 1, -1): chars.pop(l)
if num > 1:
for i, v in enumerate(str(num)): chars.insert(l + i + 1, v)
l += len(str(num))
l += 1
return len(chars)
| 32.571429 | 75 | 0.41886 | 455 | 0.997807 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
cd26104c6eb130ab45214eda4f1934869ef8a4f2 | 3,524 | py | Python | src/data_reader.py | jazzsewera/mops-projekt | 75924546eb73c266ba81e8e22c68ad939dea19d6 | [
"MIT"
] | null | null | null | src/data_reader.py | jazzsewera/mops-projekt | 75924546eb73c266ba81e8e22c68ad939dea19d6 | [
"MIT"
] | null | null | null | src/data_reader.py | jazzsewera/mops-projekt | 75924546eb73c266ba81e8e22c68ad939dea19d6 | [
"MIT"
] | null | null | null | from logger import Logger
from numpy import average
log = Logger(None)
def show_queue_length_average(number_of_packets):
timestamps = []
vals = []
if len(number_of_packets) == 0:
log.info(f"Average number of packets: NO DATA")
return 0
for k, v in number_of_packets.items():
timestamps.append(float(k))
vals.append(v)
vals.pop()
timedeltas = []
for i in range(len(timestamps) - 1):
timedeltas.append(timestamps[i + 1] - timestamps[i])
av = average(vals, weights=timedeltas)
log.info(f"Average number of packets: {av}")
return av
def show_average_queue_waiting_time_Q1(sent_packets):
ts = []
if len(sent_packets) == 0:
log.info(f"Average waiting time: NO DATA")
return 0
for packet in sent_packets:
ts.append(packet.out_of_queue_time - packet.in_queue_time)
av = sum(ts) / len(ts)
log.info(f"Average waiting time: {av}")
return av
def show_average_delay_Q1(sent_packets):
ts = []
if len(sent_packets) == 0:
log.info(f"Average delay time: NO DATA")
return 0
for packet in sent_packets:
ts.append(packet.in_second_queue_time - packet.in_queue_time)
av = sum(ts) / len(ts)
log.info(f"Average delay time: {av}")
return av
def show_average_server_load_Q1(sent_packets):
if len(sent_packets) == 0:
log.info(f"Average server load: NO DATA")
return 0
av_service_time = (
sent_packets[0].in_second_queue_time - sent_packets[0].out_of_queue_time
)
vals = []
for i in range(len(sent_packets) - 1):
vals.append(sent_packets[i + 1].in_queue_time - sent_packets[i].in_queue_time)
if sum(vals) == 0 or len(vals) == 0:
log.info(f"Average server load: NO DATA")
return 0
av_time_between_in_queue = sum(vals) / len(vals)
influx = 1 / av_time_between_in_queue
outflow = 1 / av_service_time
av = influx / outflow
log.info(f"Average server load: {av}")
return av
def show_average_queue_waiting_time_Q2(sent_packets):
ts = []
if len(sent_packets) == 0:
log.info(f"Average waiting time: NO DATA")
return 0
for packet in sent_packets:
ts.append(packet.out_of_second_queue - packet.in_second_queue_time)
av = sum(ts) / len(ts)
log.info(f"Average waiting time: {av}")
return av
def show_average_delay_Q2(sent_packets):
ts = []
if len(sent_packets) == 0:
log.info(f"Average delay time: NO DATA")
return 0
for packet in sent_packets:
ts.append(packet.out_of_system_time - packet.in_second_queue_time)
av = sum(ts) / len(ts)
log.info(f"Average delay time: {av}")
return av
def show_average_server_load_Q2(sent_packets):
if len(sent_packets) == 0:
log.info(f"Average server load: NO DATA")
return 0
av_service_time = (
sent_packets[0].out_of_system_time - sent_packets[0].out_of_second_queue
)
vals = []
for i in range(len(sent_packets) - 1):
vals.append(
sent_packets[i + 1].in_second_queue_time
- sent_packets[i].in_second_queue_time
)
if sum(vals) == 0 or len(vals) == 0:
log.info(f"Average server load: NO DATA")
return 0
av_time_between_in_queue = sum(vals) / len(vals)
influx = 1 / av_time_between_in_queue
outflow = 1 / av_service_time
av = influx / outflow
log.info(f"Average server load: {av}")
return av
| 24.136986 | 86 | 0.638763 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 487 | 0.138195 |
cd2798a9ad4d90fcc9bb40c5df39c9d1117edd80 | 5,946 | py | Python | fetch.py | kirillvarn/grocerycomparator-stat | 861f90a2d5b4c2b52d89b6cdb574b722eae2327d | [
"MIT"
] | null | null | null | fetch.py | kirillvarn/grocerycomparator-stat | 861f90a2d5b4c2b52d89b6cdb574b722eae2327d | [
"MIT"
] | null | null | null | fetch.py | kirillvarn/grocerycomparator-stat | 861f90a2d5b4c2b52d89b6cdb574b722eae2327d | [
"MIT"
] | null | null | null | import repo
import export.csv as csv
# CONSTANTS
milk_q = "SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%1l%%' OR name ILIKE '%%1 l%%') AND (name ILIKE '%%piim %%' OR name ILIKE '%%piim,%%') AND name NOT ILIKE '%%juust%%' AND name NOT ILIKE '%%kohupiim%%' AND name NOT ILIKE '%%laktoos%%' AND name NOT ILIKE '%%täis%%' AND name NOT ILIKE '%%kookos%%' AND name NOT ILIKE '%%latte%%'"
wheat_kilos = 1
query_to_parse: dict = {
"milk": milk_q,
"cookies": "SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%küpsised %%' OR name ILIKE '%%küpsis %%') AND name NOT ILIKE '%%koer%%';",
"sugar": "SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND name ILIKE '%%suhkur%%'",
#"rimi milk": f"{milk_q} AND shop ILIKE '%%rimi%%'",
#"other shop milk": f"{milk_q} AND shop NOT ILIKE '%%rimi%%'",
#"eggs": f"SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%munad %%' OR name ILIKE '%%munad, %%' OR name ILIKE '%%muna,%%') AND name NOT ilike '%%salvrät%%' AND name NOT ILIKE '%%Šokolaad%%' AND name NOT ILIKE '%%Martsipani%%' AND name NOT ILIKE '%%SELVERI KÖÖK%%' AND name NOT ILIKE '%%kitkat%%'" ,
"wheat": f"SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%{wheat_kilos}kg%%' OR name ILIKE '%%{wheat_kilos} kg%%') AND (name ILIKE '%%nisujahu %%' OR name ILIKE '%%nisujahu,%%')",
"beef": f"SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%veise %%' OR name ILIKE '%%veisepraad%%' OR name ILIKE '%%lihaveise%%') AND name NOT ILIKE '%%koera%%' AND name NOT ILIKE '%%pelmeen%%' AND name NOT ILIKE '%%põltsama%%' AND name NOT ILIKE '%%sink%%'",
"tomatoes": "SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%tomat %%' OR name ILIKE '%%tomat, %%') AND name NOT ILIKE '%%pasta%%' AND name NOT ILIKE '%%0g%%' AND name NOT ILIKE '%%0 g%%' AND name NOT ILIKE '%%harilik%%' AND name NOT ILIKE '%%krõpsud%%' AND name NOT ILIKE '%%marinaad%%' AND name NOT ILIKE '%%eine%%'",
#"cucumber": "SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND name ILIKE '%%kg%%' AND (name ILIKE '%%kurk %%' OR name ILIKE '%%kurk,%%')",
#"banana": "SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%kg%%' OR name ILIKE '%%chiq%%') AND (name ILIKE '%%banaan %%' OR name ILIKE '%%banaan,%%')",
"apple": "SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND name ILIKE '%%kg%%' AND (name ILIKE '%%õun %%' OR name ILIKE '%%õun,%%')",
"pear": "SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND name ILIKE '%%kg%%' AND (name ILIKE '%%pirn %%' OR name ILIKE '%%pirn,%%')",
"pizza": "SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%pizza%%' OR name ILIKE '%%pitsa%%' AND name NOT ILIKE '%%pitsamaitseline%%')",
"pig meat": f"SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%sea kaela%%' OR name ILIKE '%%sea välisfilee%%' OR name ILIKE '%%sea sisefilee%%')",
"cake": f"SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%kook,%%' OR name ILIKE '%%kook%%') AND name NOT ILIKE '%%van kook%%' AND name NOT ILIKE '%%selveri köök%%' AND name NOT ILIKE '%%kookos%%' AND name NOT LIKE '%%smuuti%%' AND name NOT ILIKE '%%pannkook%%'",
"chicken": "SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%broileri rinnafilee%%' OR name ILIKE '%%pooltiivad%%' OR name ILIKE '%%poolkoivad%%' OR name ILIKE '%%kanafilee%%' OR name ILIKE '%%broilerifilee%%') AND name NOT ILIKE '%%HAU-HAU%%'"
}
def get_products():
return repo.get_prices(repo.connect(db="naive_products"))[1]
def get_products_by_name(name: str = "", query: str = ""):
if len(name) != 0:
return repo.get_prices(repo.connect(db="naive_products"), search_string=name)[1]
else:
return repo.get_prices(repo.connect(db="naive_products"), query=query)[1]
def get_normalized_price(data: list) -> list:
new_data = list()
for index, item in enumerate(data):
if index == 0 and item == None:
new_data.append(next(item for item in data if item is not None))
elif index != 0 and data[index] == None:
new_data.append(new_data[index - 1])
else:
new_data.append(item)
return new_data
def get_trend(data: list) -> list:
new_data = list()
for index, item in enumerate(data):
if index != 0:
trend = "still"
if data[index - 1] != None:
if item > data[index - 1]:
trend = "up"
elif item < data[index - 1]:
trend = "down"
new_data.append({"value": item, "trend": trend})
return new_data
# def save_to_excel(dataset, sheet_name: str = "Sheet") -> None:
# tables = [i[0] for i in main.get_tables(main.connect(db="naive_products"))]
# # tables.remove("initial_products")
# header = ["Product name", "Shop name"] + tables
# data = []
# for item in dataset:
# prices = get_normalized_price(
# [dataset[item]["prices"][value]
# for value in dataset[item]["prices"]]
# )
# prices = get_trend(prices)
# value = [item, dataset[item]["shop"]] + prices
# data.append(value)
# table.append_header(header, sheet_name)
# table.put_data(data, sheet_name)
def save_to_csv(filename, dataset) -> None:
data = []
for item in dataset:
prices = get_normalized_price(
[dataset[item]["prices"][value]
for value in dataset[item]["prices"]]
)
value = [item] + prices
data.append(value)
csv.write_to_csv(f"datasets/{filename}.csv", zip(*data))
for i in query_to_parse:
products = get_products_by_name(query=query_to_parse[i])
save_to_csv(i, products)
| 58.871287 | 365 | 0.601245 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,205 | 0.705537 |
cd27a3a7d166518d8d7678101792de0e23b578ef | 1,755 | py | Python | code1.py | roshangol/executed-path-visualize | 1759c12b0048fe117205990b151d2f5f57ad9616 | [
"MIT"
] | null | null | null | code1.py | roshangol/executed-path-visualize | 1759c12b0048fe117205990b151d2f5f57ad9616 | [
"MIT"
] | null | null | null | code1.py | roshangol/executed-path-visualize | 1759c12b0048fe117205990b151d2f5f57ad9616 | [
"MIT"
] | null | null | null | # EX1
# if x < y:
# y = 0
# x = x + 1
# else:
# x = y
def max(a, b, c):
if a > b and a > c:
print(a,' is maximum among all')
elif b > a and b > c:
print(b, ' is maximum among all')
else:
print(c, ' is maximum among all')
max(30, 28, 18)
# def triangleType(a, b, c):
# isATriangle = False
# if (a < b + c) and\
# (b < a + c) and\
# (c < a + b):
# isATriangle = True
# if isATriangle:
# if (a == b) and (b == c):
# print("the triangle was a EQUILATERAL")
# elif (a != b) and \
# (a != c) and \
# (b != c):
# print("the triangle was a SCALENE")
# else:
# print("invalid")
#
# triangleType(3, 5, 8)
# def testfunc(x, y):
# if x >= 0 and y >= 0:
# if y*y >= x*10 and y <= math.sin(math.radians(x*30))*25:
# if y >= math.cos(math.radians(x*40))*15:
# print('oooookk')
# testfunc(2, 3)
# EX2
# if (x < y):
# y = 0
# x = x + 1
# EX3
# if x < y:
# return
# print(x)
# return
# EX4
# x = 0
# while (x < y):
# y = f(x,y)
# x = x + 1
# EX5
# for x in range(10):
# y = f(x,y)
# a = [2 * x for x in y if x > 0 for y in z if y[0] < 3]
#
# digits = [0, 1, 5]
# a = 0
#
# for i in digits:
# a += i
# if i == 5:
# print("5 in list")
# break
# else:
# print("out of the loop")
# try:
# b = b + 5
# except KeyError:
# a += 1
# except ZeroDivisionError:
# a += 2
# else:
# a += 3
# finally:
# b += 1
# a = a - b
#
# x = 0
# while(x < y):
# y = f(x, y)
# if(y == 0):
# break
# elif(y < 0):
# y = y * 2
# continue
# x = x + 1
| 16.25 | 66 | 0.4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,506 | 0.85812 |
cd28f531641b97aa10ded06e3c6b7fdb2de0d2e7 | 1,193 | py | Python | GameProject/dice.py | CreativeUsernameThatWontInsultAnyone/GameProject | 998274e4587d93ff0564af174f4fc1e3a3e60174 | [
"CC0-1.0"
] | 1 | 2021-11-13T17:14:03.000Z | 2021-11-13T17:14:03.000Z | GameProject/dice.py | CreativeUsernameThatWontInsultAnyone/GameProject | 998274e4587d93ff0564af174f4fc1e3a3e60174 | [
"CC0-1.0"
] | null | null | null | GameProject/dice.py | CreativeUsernameThatWontInsultAnyone/GameProject | 998274e4587d93ff0564af174f4fc1e3a3e60174 | [
"CC0-1.0"
] | null | null | null | import random
import time
while (1):
def clear(): ##Placeholder code
time.sleep(1)
clearConsole = lambda: print('\n' * 150) ##
clearConsole()
wmsg = "Good morning!"
events = {
1 : "calm",
2 : "calm",
3 : "rainy",
4 : "rainy",
5 : "rainy",
6 : "thunder",
}
array = [1,2,3,4,5,6] ## Array used to get events or smth
output = random.choice(array)
defevent = events[output]
if defevent == "calm":
print(wmsg ,"It's a sunny day outside.")
clear()
elif defevent == "rainy":
print(wmsg, "You can hear the droplets falling onto your tent.")
clear()
else:
print(wmsg,"You hear thunder rumbling outside")
clear()
del array[output - 1]
if len(array) == 0: ##Array reset
array.append('1','2','3','4','5','6')
##Actually, we could throw out them specifics outta window and use it's skelly as
##our primary dice. def could take out the variables from other files and juggle them to our delight
break
| 28.404762 | 105 | 0.506287 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 460 | 0.385583 |
cd2c1598eaae27b2b8504f6e96bc81711b260dde | 774 | py | Python | multivision/oa_image_io.py | olaals/tpktools | 50416ca554809e3d2f364b25531c78cf4751311c | [
"MIT"
] | null | null | null | multivision/oa_image_io.py | olaals/tpktools | 50416ca554809e3d2f364b25531c78cf4751311c | [
"MIT"
] | null | null | null | multivision/oa_image_io.py | olaals/tpktools | 50416ca554809e3d2f364b25531c78cf4751311c | [
"MIT"
] | null | null | null | import numpy as np
import OpenEXR as exr
import cv2
import Imath
import matplotlib.pyplot as plt
def readEXR(filename):
exrfile = exr.InputFile(filename)
header = exrfile.header()
dw = header['dataWindow']
isize = (dw.max.y - dw.min.y + 1, dw.max.x - dw.min.x + 1)
channelData = dict()
# convert all channels in the image to numpy arrays
for c in header['channels']:
C = exrfile.channel(c, Imath.PixelType(Imath.PixelType.FLOAT))
C = np.frombuffer(C, dtype=np.float32)
C = np.reshape(C, isize)
channelData[c] = C
colorChannels = ['R', 'G', 'B', 'A'] if 'A' in header['channels'] else ['R', 'G', 'B']
img = np.concatenate([channelData[c][...,np.newaxis] for c in colorChannels], axis=2)
return img
| 29.769231 | 90 | 0.630491 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.138243 |
cd314573d937025d1a50953b27cb47b89f485e85 | 2,972 | py | Python | yggdrasil/serialize/FunctionalSerialize.py | astro-friedel/yggdrasil | 5ecbfd083240965c20c502b4795b6dc93d94b020 | [
"BSD-3-Clause"
] | 22 | 2019-02-05T15:20:07.000Z | 2022-02-25T09:00:40.000Z | yggdrasil/serialize/FunctionalSerialize.py | astro-friedel/yggdrasil | 5ecbfd083240965c20c502b4795b6dc93d94b020 | [
"BSD-3-Clause"
] | 48 | 2019-02-15T20:41:24.000Z | 2022-03-16T20:52:02.000Z | yggdrasil/serialize/FunctionalSerialize.py | astro-friedel/yggdrasil | 5ecbfd083240965c20c502b4795b6dc93d94b020 | [
"BSD-3-Clause"
] | 16 | 2019-04-27T03:36:40.000Z | 2021-12-02T09:47:06.000Z | from yggdrasil.serialize.SerializeBase import SerializeBase
class FunctionalSerialize(SerializeBase):
r"""Class for serializing/deserializing a Python object into/from a bytes
message using defined functions.
Args:
encoded_datatype (schema, optional): JSON schema describing the type
that serialized objects should conform to. Defaults to the class
attribute default_encoded_datatype. If either func_serialize or
func_deserialize are not provided, this needs to be specified in
order to serialize non-bytes objects.
func_serialize (func, optional): Callable object that takes Python
objects as input and returns a representation that conforms to
encoded_datatype. Defaults to None and the default serialization
for encoded_datatype will be used.
func_deserialize (func, optional): Callable object that takes objects
of a type that conforms to encoded_datatype and returns a
deserialized Python object. Defaults to None and the default
deserialization for encoded_datatype will be used.
**kwargs: Additional keyword args are passed to the parent class's
constructor.
"""
_seritype = 'functional'
_schema_subtype_description = ('Serializer that uses provied function to '
'serialize messages.')
_schema_requried = []
_schema_properties = {
'encoded_datatype': {'type': 'schema'},
'func_serialize': {'type': 'function'},
'func_deserialize': {'type': 'function'}}
func_serialize = None
func_deserialize = None
def __init__(self, **kwargs):
if isinstance(kwargs.get('func_serialize', None), SerializeBase):
kwargs['func_serialize'] = kwargs['func_serialize'].func_serialize
if isinstance(kwargs.get('func_deserialize', None), SerializeBase):
kwargs['func_deserialize'] = kwargs['func_deserialize'].func_deserialize
super(FunctionalSerialize, self).__init__(**kwargs)
# @property
# def base_class(self):
# r"""DefaultSerialize: Default version of serialization."""
# if getattr(self, '_base_class', None) is None:
# self._base_class = DefaultSerialize(datatype=self.typedef,
# **self.serializer_info)
# return self._base_class
# TODO: In some cases this should be the object typedef
# @property
# def typedef(self):
# r"""dict: Type definition."""
# return self.encoded_typedef
@property
def serializer_info(self):
r"""dict: Serializer info."""
raise RuntimeError("Cannot define serializer information for user "
+ "supplied functions.")
@property
def empty_msg(self):
r"""obj: Object indicating empty message."""
return self.encoded_datatype._empty_msg
| 43.072464 | 84 | 0.654778 | 2,909 | 0.978802 | 0 | 0 | 341 | 0.114738 | 0 | 0 | 2,033 | 0.684051 |
cd3156dd0e4a0a15e50945d8d1d506c7eefae69c | 2,046 | py | Python | wordy_chat.py | thecodingchannel/wordy-discord-bot-tutorial | be70d237abcb302b6516f985ae900c61b598296a | [
"Apache-2.0"
] | null | null | null | wordy_chat.py | thecodingchannel/wordy-discord-bot-tutorial | be70d237abcb302b6516f985ae900c61b598296a | [
"Apache-2.0"
] | null | null | null | wordy_chat.py | thecodingchannel/wordy-discord-bot-tutorial | be70d237abcb302b6516f985ae900c61b598296a | [
"Apache-2.0"
] | 1 | 2022-03-09T04:55:56.000Z | 2022-03-09T04:55:56.000Z | '''
This file is the glue between the Discord bot and the game logic.
'''
from wordle_logic import evaluate_guess, generate_new_word
from wordy_types import ActiveGame, EndResult, LetterState
def begin_game() -> ActiveGame:
"""
Begin a game for a user.
"""
# Select a word
answer = generate_new_word()
# Create and store new game state
new_game = ActiveGame(answer=answer)
return new_game
def enter_guess(guess: str, game: ActiveGame) -> EndResult:
"""
Enter a guess for a user's game, updating the game state.
>>> game=ActiveGame(answer="abcd")
>>> enter_guess("aaaa", game) == EndResult.PLAYING
True
>>> render_result(game.results[-1])
'🟩⬛⬛⬛'
>>> game=ActiveGame(answer="abca")
>>> enter_guess("aaaz", game) == EndResult.PLAYING
True
>>> render_result(game.results[-1])
'🟩🟨⬛⬛'
>>> game=ActiveGame(answer="abca")
>>> enter_guess("aaab", game) == EndResult.PLAYING
True
>>> render_result(game.results[-1])
'🟩🟨⬛🟨'
"""
if game.state != EndResult.PLAYING:
return game.state
# Evaluate guess
result = tuple(evaluate_guess(guess, game.answer))
# Update game state
game.board_state.append(guess)
game.results.append(result)
# Check if game is over
if result == (LetterState.CORRECT,)*len(game.answer):
game.state = EndResult.WIN
elif len(game.board_state) > len(game.answer):
game.state = EndResult.LOSE
return game.state
def render_result(result: tuple[LetterState]) -> str:
"""
Render a result to a string.
>>> render_result((LetterState.ABSENT, LetterState.PRESENT, LetterState.CORRECT))
'⬛🟨🟩'
>>> render_result((LetterState.ABSENT,)*4)
'⬛⬛⬛⬛'
"""
absent, present, correct = '⬛', '🟨', '🟩'
return "".join(
absent if state == LetterState.ABSENT else
present if state == LetterState.PRESENT else correct
for state in result
)
| 26.230769 | 86 | 0.608504 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,053 | 0.501429 |
cd318b68f4231a08be74b1a2c64d0b4969b29c51 | 2,422 | py | Python | NNet/utils/readNNet.py | noyahoch/Marabou | 03eb551498287e5372d462e3c2ad4fcc3210a5fa | [
"BSD-3-Clause"
] | 7 | 2020-01-27T21:25:49.000Z | 2022-01-07T04:37:37.000Z | NNet/utils/readNNet.py | noyahoch/Marabou | 03eb551498287e5372d462e3c2ad4fcc3210a5fa | [
"BSD-3-Clause"
] | 1 | 2022-01-25T17:41:54.000Z | 2022-01-26T02:27:51.000Z | NNet/utils/readNNet.py | noyahoch/Marabou | 03eb551498287e5372d462e3c2ad4fcc3210a5fa | [
"BSD-3-Clause"
] | 3 | 2020-03-14T17:12:17.000Z | 2022-03-16T09:50:46.000Z | import numpy as np
def readNNet(nnetFile, withNorm=False):
'''
Read a .nnet file and return list of weight matrices and bias vectors
Inputs:
nnetFile: (string) .nnet file to read
withNorm: (bool) If true, return normalization parameters
Returns:
weights: List of weight matrices for fully connected network
biases: List of bias vectors for fully connected network
'''
# Open NNet file
f = open(nnetFile,'r')
# Skip header lines
line = f.readline()
while line[:2]=="//":
line = f.readline()
# Extract information about network architecture
record = line.split(',')
numLayers = int(record[0])
inputSize = int(record[1])
line = f.readline()
record = line.split(',')
layerSizes = np.zeros(numLayers+1,'int')
for i in range(numLayers+1):
layerSizes[i]=int(record[i])
# Skip extra obsolete parameter line
f.readline()
# Read the normalization information
line = f.readline()
inputMins = [float(x) for x in line.strip().split(",")[:-1]]
line = f.readline()
inputMaxes = [float(x) for x in line.strip().split(",")[:-1]]
line = f.readline()
means = [float(x) for x in line.strip().split(",")[:-1]]
line = f.readline()
ranges = [float(x) for x in line.strip().split(",")[:-1]]
# Initialize list of weights and biases
weights = [np.zeros((layerSizes[i],layerSizes[i+1])) for i in range(numLayers)]
biases = [np.zeros(layerSizes[i+1]) for i in range(numLayers)]
# Read remainder of file and place each value in the correct spot in a weight matrix or bias vector
layer=0
i=0
j=0
line = f.readline()
record = line.split(',')
while layer+1 < len(layerSizes):
while i<layerSizes[layer+1]:
while record[j]!="\n":
weights[layer][j,i] = float(record[j])
j+=1
j=0
i+=1
line = f.readline()
record = line.split(',')
i=0
while i<layerSizes[layer+1]:
biases[layer][i] = float(record[0])
i+=1
line = f.readline()
record = line.split(',')
layer+=1
i=0
j=0
f.close()
if withNorm:
return weights, biases, inputMins, inputMaxes, means, ranges
return weights, biases | 27.83908 | 103 | 0.562758 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 707 | 0.291908 |
cd336f08882633e139c7b8cf8e6bbf9503123d24 | 13,668 | py | Python | models/model.py | hearai/hearai | 2f2bc2923fa2bb170d9ed895c3f638e99811442f | [
"MIT"
] | 16 | 2021-12-16T20:19:31.000Z | 2022-03-19T15:59:23.000Z | models/model.py | hearai/hearai | 2f2bc2923fa2bb170d9ed895c3f638e99811442f | [
"MIT"
] | 34 | 2021-12-21T19:33:31.000Z | 2022-03-31T19:04:39.000Z | models/model.py | hearai/hearai | 2f2bc2923fa2bb170d9ed895c3f638e99811442f | [
"MIT"
] | 5 | 2021-12-18T22:35:20.000Z | 2022-02-20T12:26:39.000Z | from typing import Dict
import neptune.new as neptune
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
from config import NEPTUNE_API_TOKEN, NEPTUNE_PROJECT_NAME
from sklearn.metrics import classification_report, f1_score
from utils.summary_loss import SummaryLoss
from math import ceil
from models.feature_extractors.multi_frame_feature_extractor import (
MultiFrameFeatureExtractor,
)
from models.model_loader import ModelLoader
from models.common.simple_sequential_model import SimpleSequentialModel
from models.landmarks_models.lanmdarks_sequential_model import LandmarksSequentialModel
from models.head_models.head_sequential_model import HeadClassificationSequentialModel
# initialize neptune logging
def initialize_neptun(tags):
return neptune.init(
api_token=NEPTUNE_API_TOKEN,
project=NEPTUNE_PROJECT_NAME,
tags=tags,
capture_stdout=False,
capture_stderr=False,
)
class GlossTranslationModel(pl.LightningModule):
"""Awesome model for Gloss Translation"""
def __init__(
self,
general_parameters: Dict = None,
train_parameters: Dict = None,
feature_extractor_parameters: Dict = None,
transformer_parameters: Dict = None,
heads: Dict = None,
freeze_scheduler: Dict = None,
loss_function=nn.BCEWithLogitsLoss,
steps_per_epoch: int = 1000
):
"""
Args:
general_parameters (Dict): Dict containing general parameters not parameterizing training process.
[Warning] Must contain fields:
- path_to_save (str)
- neptune (bool)
feature_extractor_parameters (Dict): Dict containing parameters regarding currently used feature extractor.
[Warning] Must contain fields:
- "name" (str)
- "model_path" (str)
- "representation_size" (int)
transformer_parameters (Dict): Dict containing parameters regarding currently used transformer.
[Warning] Must contain fields:
- "name" (str)
- "output_size" (int)
- "feedforward_size" (int)
- "num_encoder_layers" (int)
- "num_attention_heads" (int)
- "dropout_rate" (float)
train_parameters (Dict): Dict containing parameters parameterizing the training process.
[Warning] Must contain fields:
- "num_segments" (int)
- "lr" (float)
- "multiply_lr_step" (float)
- "warmup_steps" (float)
- "classification_mode" (str)
heads (Dict): Dict containg information describing structure of output heads for specific tasks (gloss/hamnosys).
freeze_scheduler (Dict): Dict containing information describing feature_extractor & transformer freezing/unfreezing process.
loss_function (torch.nn.Module): Loss function.
"""
super().__init__()
if general_parameters["neptune"]:
tags = [train_parameters["classification_mode"], feature_extractor_parameters["name"], transformer_parameters["name"]]
self.run = initialize_neptun(tags)
self.run["parameters"] = {
"general_parameters": general_parameters,
"train_parameters": train_parameters,
"feature_extractor_parameters": feature_extractor_parameters,
"transformer_parameters": transformer_parameters,
"heads": heads,
"freeze_scheduler": freeze_scheduler,
"loss_function": loss_function
}
else:
self.run = None
# parameters
self.lr = train_parameters["lr"]
self.model_save_dir = general_parameters["path_to_save"]
self.warmup_steps = train_parameters["warmup_steps"]
self.multiply_lr_step = train_parameters["multiply_lr_step"]
self.use_frames = train_parameters["use_frames"]
self.use_landmarks = train_parameters["use_landmarks"]
self.classification_heads = heads[train_parameters['classification_mode']]
self.cls_head = nn.ModuleList()
self.loss_weights = []
for value in self.classification_heads.values():
self.cls_head.append(
HeadClassificationSequentialModel(
classes_number=value["num_class"],
representation_size=3 * value["num_class"],
additional_layers=1,
dropout_rate=heads["model"]["dropout_rate"]
)
)
self.loss_weights.append(value["loss_weight"])
# losses
self.summary_loss = SummaryLoss(loss_function, self.loss_weights)
# models-parts
self.model_loader = ModelLoader()
representation_size = feature_extractor_parameters["representation_size"]
self.adjustment_to_representatios_size = nn.LazyLinear(out_features=representation_size)
if self.use_frames:
self.multi_frame_feature_extractor = MultiFrameFeatureExtractor(
self.model_loader.load_feature_extractor(
feature_extractor_name=feature_extractor_parameters["name"],
representation_size=representation_size,
model_path=feature_extractor_parameters["model_path"],
)
)
else:
self.multi_frame_feature_extractor = None
self.transformer = self.model_loader.load_transformer(
transformer_name=transformer_parameters["name"],
feature_extractor_parameters=feature_extractor_parameters,
transformer_parameters=transformer_parameters,
train_parameters=train_parameters
)
self.steps_per_epoch = steps_per_epoch
if freeze_scheduler is not None:
self.freeze_scheduler = freeze_scheduler
self.configure_freeze_scheduler()
def forward(self, input, **kwargs):
predictions = []
frames, landmarks = input
if self.use_frames:
x = self.multi_frame_feature_extractor(frames.to(self.device))
if self.use_landmarks:
x_landmarks = self._prepare_landmarks_tensor(landmarks)
if self.use_frames:
x = torch.concat([x, x_landmarks], dim=-1)
else:
x = x_landmarks
x = self.adjustment_to_representatios_size(x)
x = self.transformer(x)
for head in self.cls_head:
predictions.append(head(x))
return predictions
def _prepare_landmarks_tensor(self, landmarks):
concatenated_landmarks = np.concatenate(
[landmarks[landmarks_name] for landmarks_name in landmarks.keys()],
axis=-1
)
return torch.as_tensor(concatenated_landmarks, dtype=torch.float32, device=self.device)
def training_step(self, batch, batch_idx):
targets, predictions, losses = self._process_batch(batch)
self.scheduler.step()
if self.global_step < 2:
for name, child in self.named_children():
for param in child.parameters():
param.requires_grad = True
if self.freeze_scheduler["freeze_mode"] == "step":
self.freeze_step()
if self.run:
self.run["metrics/batch/training_loss"].log(losses)
return {"loss": losses}
def validation_step(self, batch, batch_idx):
targets, predictions, losses = self._process_batch(batch)
if self.run:
self.run["metrics/batch/validation_loss"].log(losses)
return {"val_loss": losses, "targets": targets, "predictions": predictions}
def _process_batch(self, batch):
frames, landmarks, targets = batch
predictions = self((frames, landmarks))
losses = self.summary_loss(predictions, targets)
return targets, predictions, losses
def validation_epoch_end(self, out):
head_names = list(self.classification_heads.keys())
# initialize empty list with list per head
all_targets = [[] for name in head_names]
all_predictions = [[] for name in head_names]
for single_batch in out:
targets, predictions = single_batch["targets"], single_batch["predictions"]
# append predictions and targets for every head
for nr_head, head_targets in enumerate(targets):
all_targets[nr_head] += list(torch.argmax(targets[nr_head], dim=1).cpu().detach().numpy())
all_predictions[nr_head] += list(torch.argmax(predictions[nr_head], dim=1).cpu().detach().numpy())
for nr_head, targets_for_head in enumerate(all_targets):
head_name = head_names[nr_head]
predictions_for_head = all_predictions[nr_head]
head_report = "\n".join(
[
head_name,
classification_report(
targets_for_head, predictions_for_head, zero_division=0
),
]
)
print(head_report)
f1 = f1_score(targets_for_head, predictions_for_head,
average='macro', zero_division=0)
if self.run:
log_path = "/".join(["metrics/epoch/", head_name])
self.run[log_path].log(head_report)
self.run[f'/metrics/epoch/f1/{head_name}'].log(f1)
if self.trainer.global_step > 0:
print("Saving model...")
torch.save(self.state_dict(), self.model_save_dir)
self.scheduler.step()
if (self.freeze_scheduler is not None) and self.freeze_scheduler["freeze_mode"] == "epoch":
self.freeze_step()
def configure_optimizers(self):
optimizer = torch.optim.RAdam(self.parameters(), lr=self.lr)
self.scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer,
max_lr=self.lr,
div_factor=100,
final_div_factor=10,
pct_start=0.2,
total_steps=self.trainer.max_epochs * self.steps_per_epoch + 2)
return [optimizer], [self.scheduler]
def optimizer_step(
self,
epoch,
batch_idx,
optimizer,
optimizer_idx,
optimizer_closure,
on_tpu=False,
using_native_amp=False,
using_lbfgs=False,
):
optimizer.step(closure=optimizer_closure)
if self.run:
self.run["params/lr"].log(optimizer.param_groups[0]["lr"])
def configure_freeze_scheduler(self):
### TO-DO check if all params are correctly set
# e.g. check if all lists are the same length
# check if values are bools
self.freeze_scheduler["current_pattern"] = 0
self.freeze_scheduler["current_counter"] = 0
self.freeze_step()
def freeze_step(self):
### TO- DO
# If the `freeze_pattern_repeats` is set as an integer isntead of a list,
# e.g. `freeze_pattern_repeats = 3`, it is equal to a pattern
# `feature_extractor = [True, False] * freeze_pattern_repeats`,
# hence it is exactly the same as:
# ```
# "model_params": {
# "feature_extractor": [True, False, True, False, True, False],
# "transformer": [False, True,False, True, False, True],
# }
# ```
if self.freeze_scheduler is not None:
self.freeze_update()
for params_to_freeze in list(self.freeze_scheduler["model_params"].keys()):
if self.freeze_scheduler["current_pattern"] >= len(
self.freeze_scheduler["model_params"][params_to_freeze]
):
current_pattern = True
else:
current_pattern = self.freeze_scheduler["model_params"][
params_to_freeze
][self.freeze_scheduler["current_pattern"]]
for name, child in self.named_children():
if params_to_freeze in name:
for param in child.parameters():
param.requires_grad = not current_pattern
if self.freeze_scheduler["verbose"]:
print(
"Freeze status:",
params_to_freeze,
"set to",
str(current_pattern),
)
def freeze_update(self):
if self.freeze_scheduler["current_pattern"] >= len(
self.freeze_scheduler["model_params"][
list(self.freeze_scheduler["model_params"].keys())[0]
]
):
return
if (
self.freeze_scheduler["current_counter"]
>= self.freeze_scheduler["freeze_pattern_repeats"][
self.freeze_scheduler["current_pattern"]
]
):
self.freeze_scheduler["current_pattern"] += 1
self.freeze_scheduler["current_counter"] = 0
self.freeze_scheduler["current_counter"] += 1
| 41.92638 | 136 | 0.592479 | 12,701 | 0.929251 | 0 | 0 | 0 | 0 | 0 | 0 | 3,339 | 0.244293 |
cd33abe036b992ac7ac194a0541c5439617437c4 | 2,305 | py | Python | solutions/day09/solution.py | dbjohnson/advent-of-code-2021 | 2ed1d30362afa0a73c890730cea46de3291be21f | [
"MIT"
] | null | null | null | solutions/day09/solution.py | dbjohnson/advent-of-code-2021 | 2ed1d30362afa0a73c890730cea46de3291be21f | [
"MIT"
] | null | null | null | solutions/day09/solution.py | dbjohnson/advent-of-code-2021 | 2ed1d30362afa0a73c890730cea46de3291be21f | [
"MIT"
] | null | null | null | from functools import lru_cache
from collections import defaultdict
import pandas as pd
import numpy as np
with open('input.txt') as fh:
depthmap = pd.DataFrame([{
'row': row,
'col': col,
'height': int(d)
}
for row, line in enumerate(fh)
for col, d in enumerate(line.strip())
]).pivot_table(
index='row',
columns='col',
values='height'
).values
idx = (
# right neighbor
np.pad(
depthmap[:, :-1] < depthmap[:, 1:],
((0, 0), (0, 1)),
'constant',
constant_values=1
) &
# left neighbor
np.pad(
depthmap[:, 1:] < depthmap[:, :-1],
((0, 0), (1, 0)),
'constant',
constant_values=1
) &
# lower neighbor
np.pad(
depthmap[:-1, :] < depthmap[1:, :],
((0, 1), (0, 0)),
'constant',
constant_values=1
) &
# upper neighbor
np.pad(
depthmap[1:, :] < depthmap[:-1, :],
((1, 0), (0, 0)),
'constant',
constant_values=1
)
)
print('part 1', (depthmap[np.where(idx)] + 1).sum())
# lru_cache here is essentially cheap DP - once we've calculated
# the basin for any point A, we know the basin for any point B that
# flows through point A
@lru_cache(maxsize=None)
def lowpoint(row, col):
if depthmap[row, col] == 9:
return None
drains = {(row, col)}
for r, c in (
(row - 1, col),
(row + 1, col),
(row, col - 1),
(row, col + 1)
):
if (
0 <= r < depthmap.shape[0]
and 0 <= c < depthmap.shape[1]
and depthmap[r, c] < depthmap[row, col]
):
drains.add(lowpoint(r, c))
return min(
drains,
key=lambda rowcol: depthmap[rowcol]
)
lowpoint_to_basin = defaultdict(list)
for r in range(depthmap.shape[0]):
for c in range(depthmap.shape[1]):
lowpoint_to_basin[lowpoint(r, c)].append((r, c))
print(
'part 2',
np.prod(sorted([
len(points)
for basin, points in lowpoint_to_basin.items()
if basin
])[-3:])
)
# part 1 now that we solved part 2...
print(
'part 1 redux',
sum([
depthmap[lowpoint] + 1
for lowpoint in lowpoint_to_basin
if lowpoint
])
) | 21.342593 | 67 | 0.516269 | 0 | 0 | 0 | 0 | 527 | 0.228633 | 0 | 0 | 371 | 0.160954 |
cd3470135bfe7a2b8866c6a268c9e629dad7a8b7 | 3,467 | py | Python | docs/conf.py | ocefpaf/pystac-client | ddf0e0566b2b1783a4d32d3d77f9f51b80270df3 | [
"Apache-2.0"
] | 52 | 2021-04-15T23:24:12.000Z | 2022-03-09T23:02:27.000Z | docs/conf.py | ocefpaf/pystac-client | ddf0e0566b2b1783a4d32d3d77f9f51b80270df3 | [
"Apache-2.0"
] | 119 | 2021-04-13T11:42:01.000Z | 2022-02-24T10:02:35.000Z | docs/conf.py | ocefpaf/pystac-client | ddf0e0566b2b1783a4d32d3d77f9f51b80270df3 | [
"Apache-2.0"
] | 14 | 2021-04-13T19:00:19.000Z | 2022-02-23T09:17:30.000Z | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
import re
import subprocess
import sys
from pathlib import Path
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
sys.path.insert(0, str(Path(__file__).parent.parent.parent.resolve()))
from pystac_client import __version__ # noqa: E402
git_branch = (
subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"])
.decode("utf-8")
.strip()
)
# -- Project information -----------------------------------------------------
project = 'pystac-client'
copyright = '2021, Jon Duckworth'
author = 'Matthew Hanson, Jon Duckworth'
github_user = 'stac-utils'
github_repo = 'pystac-client'
package_description = 'A Python client for the STAC and STAC-API specs'
# The full version, including alpha/beta/rc tags
version = re.fullmatch(r'^(\d+\.\d+\.\d).*$', __version__).group(1)
release = __version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.intersphinx', 'sphinx.ext.napoleon',
'sphinx.ext.extlinks', 'sphinxcontrib.fulltoc', 'nbsphinx', 'myst_parser'
]
extlinks = {
"tutorial": (
"https://github.com/stac-utils/pystac-client/"
"tree/{}/docs/tutorials/%s".format(git_branch),
"tutorial",
)
}
nbsphinx_allow_errors = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
source_suffix = [".rst", "*.md", "*.ipynb"]
exclude_patterns = ['build/*']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
html_theme_options = {
# 'sidebar_collapse': False,
'fixed_sidebar': True,
'github_button': True,
'github_user': github_user,
'github_repo': github_repo,
'description': package_description
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# -- Options for intersphinx extension ---------------------------------------
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'requests': ('https://requests.readthedocs.io/en/master', None),
'pystac': ('https://pystac.readthedocs.io/en/latest', None),
'dateutil': ('https://dateutil.readthedocs.io/en/stable/', None),
}
# -- Options for autodoc extension -------------------------------------------
autodoc_typehints = "none"
| 33.660194 | 97 | 0.654168 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,527 | 0.728872 |
cd358b914861d3a881968cfd805aae9c0f7bed42 | 1,345 | py | Python | modules/lexer/token.py | DavidMacDonald11/sea-to-c-transpiler-python-based | 20c41931346b13d4bf2a12e96037f44b1add8145 | [
"MIT"
] | null | null | null | modules/lexer/token.py | DavidMacDonald11/sea-to-c-transpiler-python-based | 20c41931346b13d4bf2a12e96037f44b1add8145 | [
"MIT"
] | 11 | 2021-04-22T13:09:34.000Z | 2022-01-29T22:53:58.000Z | modules/lexer/token.py | DavidMacDonald11/sea-to-c-transpiler-python-based | 20c41931346b13d4bf2a12e96037f44b1add8145 | [
"MIT"
] | null | null | null | from .token_types import TT
from .token_types import BadTT
from .position import Position
from .keywords import is_keyword
from .keywords import keyword_declared_type
from ..lexer import errors
class Token:
def __init__(self, token_type, value = None, position = None):
self.type = token_type
self.value = value
self.position = Position() if position is None else position
def __repr__(self):
return f"{self.type}" + ("" if self.value is None else f":{self.value}")
def matches(self, token_type, value = None):
if value is None:
return self.type is token_type
return self.type is token_type and self.value == value
def matches_type_keyword(self):
return self.type is TT.KEYWORD and keyword_declared_type(self.value)
@classmethod
def match_type(cls, token_string):
for token_type in TT:
if token_type.value.fullmatch(token_string) is not None:
if token_type not in (TT.KEYWORD, TT.IDENTIFIER):
return token_type
return TT.KEYWORD if is_keyword(token_string) else TT.IDENTIFIER
for bad_type in BadTT:
if bad_type.value[0].fullmatch(token_string) is not None:
raise bad_type.value[1]()
raise errors.UnknownTokenError(token_string)
| 33.625 | 80 | 0.665428 | 1,149 | 0.854275 | 0 | 0 | 534 | 0.397026 | 0 | 0 | 32 | 0.023792 |
cd36eb6513428b0c0f981f91eaea0aa21154992a | 689 | py | Python | cb_scripts/nums_square_cube.py | christopher-burke/python-scripts | bdbea2456130e0958b6a6ab8d138f4f19b39b934 | [
"MIT"
] | 1 | 2022-02-05T06:39:05.000Z | 2022-02-05T06:39:05.000Z | cb_scripts/nums_square_cube.py | christopher-burke/python-scripts | bdbea2456130e0958b6a6ab8d138f4f19b39b934 | [
"MIT"
] | null | null | null | cb_scripts/nums_square_cube.py | christopher-burke/python-scripts | bdbea2456130e0958b6a6ab8d138f4f19b39b934 | [
"MIT"
] | 1 | 2021-06-10T22:04:35.000Z | 2021-06-10T22:04:35.000Z | #!/usr/bin/env python3
"""Squares and Cubes for a range of numbers.
Given a start and end, calucate the Square x**2 and
the Cube x**3 for all numbers.
Example of generator and functools.partial.
"""
from functools import partial
def power(base, exponent):
"""Raise a base to the exponent."""
return base ** exponent
square = partial(power, exponent=2)
cube = partial(power, exponent=3)
def main(start, end):
"""Square and cube all numbers in range of start to end."""
for i in range(start, end+1):
yield i, square(i), cube(i)
if __name__ == "__main__":
print("number\tsquare\tcube")
for x in main(1, 10):
print("{}\t{}\t{}".format(*x))
| 20.264706 | 63 | 0.651669 | 0 | 0 | 155 | 0.224964 | 0 | 0 | 0 | 0 | 337 | 0.489115 |
cd36ecd76329e8d74ce6fdd1bc24ac05a02cc921 | 101 | py | Python | Darlington/phase2/LIST/day 41 solution/qtn2.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 6 | 2020-05-23T19:53:25.000Z | 2021-05-08T20:21:30.000Z | Darlington/phase2/LIST/day 41 solution/qtn2.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 8 | 2020-05-14T18:53:12.000Z | 2020-07-03T00:06:20.000Z | Darlington/phase2/LIST/day 41 solution/qtn2.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 39 | 2020-05-10T20:55:02.000Z | 2020-09-12T17:40:59.000Z | #program to find the index of an item in a specified list.
num =[10, 30, 4, -6]
print(num.index(30))
| 25.25 | 58 | 0.683168 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 58 | 0.574257 |
cd36fd075f7cd95707b64e346e7a7db96e365eac | 1,748 | py | Python | mozdns/txt/tests.py | jlin/inventory | c098c98e570c3bf9fadfd811eb75e1213f6ea428 | [
"BSD-3-Clause"
] | 22 | 2015-01-16T01:36:32.000Z | 2020-06-08T00:46:18.000Z | mozdns/txt/tests.py | jlin/inventory | c098c98e570c3bf9fadfd811eb75e1213f6ea428 | [
"BSD-3-Clause"
] | 8 | 2015-12-28T18:56:19.000Z | 2019-04-01T17:33:48.000Z | mozdns/txt/tests.py | jlin/inventory | c098c98e570c3bf9fadfd811eb75e1213f6ea428 | [
"BSD-3-Clause"
] | 13 | 2015-01-13T20:56:22.000Z | 2022-02-23T06:01:17.000Z | from django.test import TestCase
from django.core.exceptions import ValidationError
from mozdns.txt.models import TXT
from mozdns.domain.models import Domain
class TXTTests(TestCase):
def setUp(self):
self.o = Domain(name="org")
self.o.save()
self.o_e = Domain(name="oregonstate.org")
self.o_e.save()
def do_generic_add(self, data):
txt = TXT(**data)
txt.__repr__()
txt.save()
self.assertTrue(txt.details())
self.assertTrue(txt.get_absolute_url())
self.assertTrue(txt.get_edit_url())
self.assertTrue(txt.get_delete_url())
rtxt = TXT.objects.filter(**data)
self.assertTrue(len(rtxt) == 1)
return txt
def do_remove(self, data):
txt = self.do_generic_add(data)
txt.delete()
rmx = TXT.objects.filter(**data)
self.assertTrue(len(rmx) == 0)
def test_add_remove_txt(self):
label = "asdf"
data = "asdf"
data = {'label': label, 'txt_data': data, 'domain': self.o_e}
self.do_generic_add(data)
label = "asdf"
data = "asdfasfd"
data = {'label': label, 'txt_data': data, 'domain': self.o_e}
self.do_generic_add(data)
label = "df"
data = "aasdf"
data = {'label': label, 'txt_data': data, 'domain': self.o_e}
self.do_generic_add(data)
label = "12314"
data = "dd"
data = {'label': label, 'txt_data': data, 'domain': self.o}
self.do_generic_add(data)
def test_bad_data(self):
label = "asdf"
data = '"dfa f'
data = {'label': label, 'txt_data': data, 'domain': self.o_e}
self.assertRaises(ValidationError, self.do_generic_add, data)
| 29.627119 | 69 | 0.587529 | 1,586 | 0.907323 | 0 | 0 | 0 | 0 | 0 | 0 | 211 | 0.120709 |
cd39f1397ad328542fed8bb62d6c47dc4c191597 | 6,698 | py | Python | xtesting/tests/unit/core/test_behaveframework.py | collivier/functest-xtesting | 17739d718901a10f7ec0aaf9a6d53141294a347d | [
"Apache-2.0"
] | 1 | 2020-05-15T12:58:58.000Z | 2020-05-15T12:58:58.000Z | xtesting/tests/unit/core/test_behaveframework.py | collivier/functest-xtesting | 17739d718901a10f7ec0aaf9a6d53141294a347d | [
"Apache-2.0"
] | null | null | null | xtesting/tests/unit/core/test_behaveframework.py | collivier/functest-xtesting | 17739d718901a10f7ec0aaf9a6d53141294a347d | [
"Apache-2.0"
] | 3 | 2018-02-28T15:55:14.000Z | 2022-02-24T15:46:12.000Z | #!/usr/bin/env python
# Copyright (c) 2019 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
"""Define the classes required to fully cover behave."""
import logging
import os
import unittest
import mock
from xtesting.core import behaveframework
__author__ = "Deepak Chandella <[email protected]>"
class ParseResultTesting(unittest.TestCase):
"""The class testing BehaveFramework.parse_results()."""
# pylint: disable=missing-docstring
_response = [{'status': 'passed'}]
def setUp(self):
self.test = behaveframework.BehaveFramework(
case_name='behave', project_name='xtesting')
@mock.patch('builtins.open', side_effect=OSError)
def test_raises_exc_open(self, *args): # pylint: disable=unused-argument
with self.assertRaises(OSError):
self.test.parse_results()
@mock.patch('json.load', return_value=[{'foo': 'bar'}])
@mock.patch('builtins.open', mock.mock_open())
def test_raises_exc_key(self, *args): # pylint: disable=unused-argument
with self.assertRaises(KeyError):
self.test.parse_results()
@mock.patch('json.load', return_value=[])
@mock.patch('builtins.open', mock.mock_open())
def test_raises_exe_zerodivision(self, *args):
# pylint: disable=unused-argument
with self.assertRaises(ZeroDivisionError):
self.test.parse_results()
def _test_result(self, response, result):
with mock.patch('builtins.open', mock.mock_open()), \
mock.patch('json.load', return_value=response):
self.test.parse_results()
self.assertEqual(self.test.result, result)
def test_null_passed(self):
data = [{'status': 'dummy'}]
self._test_result(data, 0)
def test_half_success(self):
data = [{'status': 'passed'}, {'status': 'failed'}]
self._test_result(data, 50)
def test_success(self):
data = [{'status': 'passed'}, {'status': 'passed'}]
self._test_result(data, 100)
@mock.patch('builtins.open', mock.mock_open())
def test_count(self, *args): # pylint: disable=unused-argument
self._response.extend([{'status': 'failed'}, {'status': 'skipped'}])
with mock.patch('json.load', mock.Mock(return_value=self._response)):
self.test.parse_results()
self.assertEqual(self.test.details['pass_tests'], 1)
self.assertEqual(self.test.details['fail_tests'], 1)
self.assertEqual(self.test.details['skip_tests'], 1)
self.assertEqual(self.test.details['total_tests'], 3)
class RunTesting(unittest.TestCase):
"""The class testing BehaveFramework.run()."""
# pylint: disable=missing-docstring
suites = ["foo"]
tags = ["bar"]
def setUp(self):
self.test = behaveframework.BehaveFramework(
case_name='behave', project_name='xtesting')
def test_exc_key_error(self):
self.assertEqual(self.test.run(), self.test.EX_RUN_ERROR)
@mock.patch('xtesting.core.behaveframework.behave_main')
def _test_makedirs_exc(self, *args):
with mock.patch.object(self.test, 'parse_results') as mock_method:
self.assertEqual(
self.test.run(
suites=self.suites, tags=self.tags),
self.test.EX_RUN_ERROR)
args[0].assert_not_called()
mock_method.assert_not_called()
@mock.patch('os.makedirs', side_effect=Exception)
@mock.patch('os.path.exists', return_value=False)
def test_makedirs_exc(self, *args):
self._test_makedirs_exc()
args[0].assert_called_once_with(self.test.res_dir)
args[1].assert_called_once_with(self.test.res_dir)
@mock.patch('xtesting.core.behaveframework.behave_main')
def _test_makedirs(self, *args):
with mock.patch.object(self.test, 'parse_results') as mock_method:
self.assertEqual(
self.test.run(suites=self.suites, tags=self.tags),
self.test.EX_OK)
html_file = os.path.join(self.test.res_dir, 'output.html')
args_list = [
'--junit',
f'--junit-directory={self.test.res_dir}',
'--format=json', f'--outfile={self.test.json_file}',
'--format=behave_html_formatter:HTMLFormatter',
f'--outfile={html_file}',
'--tags='+','.join(self.tags)]
args_list.append('foo')
args[0].assert_called_once_with(args_list)
mock_method.assert_called_once_with()
@mock.patch('os.makedirs')
@mock.patch('os.path.exists', return_value=False)
def test_makedirs(self, *args):
self._test_makedirs()
args[0].assert_called_once_with(self.test.res_dir)
args[1].assert_called_once_with(self.test.res_dir)
@mock.patch('os.makedirs')
@mock.patch('os.path.exists', return_value=True)
def test_makedirs_oserror17(self, *args):
self._test_makedirs()
args[0].assert_called_once_with(self.test.res_dir)
args[1].assert_not_called()
@mock.patch('os.makedirs')
@mock.patch('xtesting.core.behaveframework.behave_main')
def _test_parse_results(self, status, console, *args):
self.assertEqual(
self.test.run(
suites=self.suites, tags=self.tags, console=console),
status)
html_file = os.path.join(self.test.res_dir, 'output.html')
args_list = [
'--junit',
f'--junit-directory={self.test.res_dir}',
'--format=json', f'--outfile={self.test.json_file}',
'--format=behave_html_formatter:HTMLFormatter',
f'--outfile={html_file}',
'--tags='+','.join(self.tags)]
if console:
args_list += ['--format=pretty', '--outfile=-']
args_list.append('foo')
args[0].assert_called_once_with(args_list)
args[1].assert_called_once_with(self.test.res_dir)
def test_parse_results_exc(self, console=False):
with mock.patch.object(self.test, 'parse_results',
side_effect=Exception) as mock_method:
self._test_parse_results(self.test.EX_RUN_ERROR, console)
mock_method.assert_called_once_with()
def test_parse_results_exc_console(self):
self.test_parse_results_exc(console=True)
if __name__ == "__main__":
logging.disable(logging.CRITICAL)
unittest.main(verbosity=2)
| 37.418994 | 77 | 0.640042 | 6,067 | 0.905793 | 0 | 0 | 4,260 | 0.636011 | 0 | 0 | 1,770 | 0.264258 |
cd3a28ba018f4c08dd5b0ec2fb2ba69c859e803c | 963 | py | Python | data/test/python/cd3a28ba018f4c08dd5b0ec2fb2ba69c859e803cdjango.py | harshp8l/deep-learning-lang-detection | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | [
"MIT"
] | 84 | 2017-10-25T15:49:21.000Z | 2021-11-28T21:25:54.000Z | data/test/python/cd3a28ba018f4c08dd5b0ec2fb2ba69c859e803cdjango.py | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | [
"MIT"
] | 5 | 2018-03-29T11:50:46.000Z | 2021-04-26T13:33:18.000Z | data/test/python/cd3a28ba018f4c08dd5b0ec2fb2ba69c859e803cdjango.py | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | [
"MIT"
] | 24 | 2017-11-22T08:31:00.000Z | 2022-03-27T01:22:31.000Z | # coding=utf-8
from fabric.api import env, run
COMMAND_COLLECTSTATIC = 'collectstatic'
COMMAND_SYNCDB = 'syncdb'
COMMAND_MIGRATE = 'migrate'
_default_command = '{python} {manage} {command}'
_commands_list = {
COMMAND_COLLECTSTATIC: 'yes yes | {python} {manage} {command}',
COMMAND_MIGRATE: '{python} {manage} {command} --noinput',
}
def django_commands(os_environment=None):
for command in env.django_commands:
_django_command(command, os_environment)
def _django_command(command, os_environment):
command_to_run = _commands_list.get(command, _default_command)
command_to_run = command_to_run.format(
python=env.server_python,
manage=env.server_manage,
command=command
)
if os_environment is None:
run(command_to_run)
return
prefix = ' '.join([
'{}={}'.format(k, v)
for k, v in os_environment.items()
])
run('{} {}'.format(prefix, command_to_run))
| 24.075 | 67 | 0.677051 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 170 | 0.176532 |
cd3da08c421072d75aa5562437930fcd09889489 | 8,820 | py | Python | commercialoperator/components/bookings/utils.py | wilsonc86/ledger | a60a681e547f37e4ac81cb93dffaf90aea8c8151 | [
"Apache-2.0"
] | null | null | null | commercialoperator/components/bookings/utils.py | wilsonc86/ledger | a60a681e547f37e4ac81cb93dffaf90aea8c8151 | [
"Apache-2.0"
] | null | null | null | commercialoperator/components/bookings/utils.py | wilsonc86/ledger | a60a681e547f37e4ac81cb93dffaf90aea8c8151 | [
"Apache-2.0"
] | null | null | null | from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.conf import settings
from django.core.exceptions import ValidationError
from datetime import datetime, timedelta
from commercialoperator.components.main.models import Park
from commercialoperator.components.proposals.models import Proposal
from ledger.checkout.utils import create_basket_session, create_checkout_session, calculate_excl_gst
from ledger.payments.models import Invoice
from ledger.payments.utils import oracle_parser
import json
from decimal import Decimal
from commercialoperator.components.bookings.models import Booking, ParkBooking, ApplicationFee
import logging
logger = logging.getLogger('payment_checkout')
def create_booking(request, proposal_id):
""" Create the ledger lines - line items for invoice sent to payment system """
#import ipdb; ipdb.set_trace()
booking = Booking.objects.create(proposal_id=proposal_id)
tbody = json.loads(request.POST['payment'])['tbody']
    park_booking = None
    for row in tbody:
park_id = row[0]['value']
arrival = row[1]
no_adults = int(row[2]) if row[2] else 0
no_children = int(row[3]) if row[3] else 0
no_free_of_charge = int(row[4]) if row[4] else 0
park = Park.objects.get(id=park_id)
        if any([no_adults, no_children, no_free_of_charge]):
park_booking = ParkBooking.objects.create(
booking = booking,
park_id = park_id,
arrival = datetime.strptime(arrival, '%Y-%m-%d').date(),
no_adults = no_adults,
no_children = no_children,
no_free_of_charge = no_free_of_charge,
cost = no_adults*park.adult_price + no_children*park.child_price
)
if not park_booking:
raise ValidationError('Must have at least one person visiting the park')
return booking
def get_session_booking(session):
if 'cols_booking' in session:
booking_id = session['cols_booking']
else:
raise Exception('Booking not in Session')
try:
return Booking.objects.get(id=booking_id)
except Booking.DoesNotExist:
raise Exception('Booking not found for booking_id {}'.format(booking_id))
def set_session_booking(session, booking):
session['cols_booking'] = booking.id
session.modified = True
def delete_session_booking(session):
if 'cols_booking' in session:
del session['cols_booking']
session.modified = True
def get_session_application_invoice(session):
""" Application Fee session ID """
if 'cols_app_invoice' in session:
application_fee_id = session['cols_app_invoice']
else:
raise Exception('Application not in Session')
try:
#return Invoice.objects.get(id=application_invoice_id)
#return Proposal.objects.get(id=proposal_id)
return ApplicationFee.objects.get(id=application_fee_id)
    except ApplicationFee.DoesNotExist:
raise Exception('Application not found for application {}'.format(application_fee_id))
def set_session_application_invoice(session, application_fee):
""" Application Fee session ID """
session['cols_app_invoice'] = application_fee.id
session.modified = True
def delete_session_application_invoice(session):
""" Application Fee session ID """
if 'cols_app_invoice' in session:
del session['cols_app_invoice']
session.modified = True
def create_fee_lines(proposal, invoice_text=None, vouchers=[], internal=False):
""" Create the ledger lines - line item for application fee sent to payment system """
#import ipdb; ipdb.set_trace()
now = datetime.now().strftime('%Y-%m-%d %H:%M')
price = proposal.application_type.application_fee
line_items = [{
'ledger_description': 'Application Fee - {} - {}'.format(now, proposal.lodgement_number),
'oracle_code': proposal.application_type.oracle_code,
'price_incl_tax': price,
'price_excl_tax': price if proposal.application_type.is_gst_exempt else calculate_excl_gst(price),
'quantity': 1,
}]
logger.info('{}'.format(line_items))
return line_items
def create_lines(request, invoice_text=None, vouchers=[], internal=False):
""" Create the ledger lines - line items for invoice sent to payment system """
#import ipdb; ipdb.set_trace()
def add_line_item(park, arrival, age_group, price, no_persons):
price = Decimal(price)
if no_persons > 0:
return {
'ledger_description': '{} - {} - {}'.format(park.name, arrival, age_group),
'oracle_code': park.oracle_code,
'price_incl_tax': price,
'price_excl_tax': price if park.is_gst_exempt else calculate_excl_gst(price),
'quantity': no_persons,
}
return None
lines = []
tbody = json.loads(request.POST['payment'])['tbody']
for row in tbody:
park_id = row[0]['value']
arrival = row[1]
no_adults = int(row[2]) if row[2] else 0
no_children = int(row[3]) if row[3] else 0
no_free_of_charge = int(row[4]) if row[4] else 0
park= Park.objects.get(id=park_id)
if no_adults > 0:
lines.append(add_line_item(park, arrival, 'Adult', price=park.adult_price, no_persons=no_adults))
if no_children > 0:
lines.append(add_line_item(park, arrival, 'Child', price=park.child_price, no_persons=no_children))
if no_free_of_charge > 0:
lines.append(add_line_item(park, arrival, 'Free', price=0.0, no_persons=no_free_of_charge))
return lines
def checkout(request, proposal, lines, return_url_ns='public_booking_success', return_preload_url_ns='public_booking_success', invoice_text=None, vouchers=[], internal=False):
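    """ Build a ledger basket and checkout session for the given invoice lines, then redirect the user to the payment checkout page """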
#import ipdb; ipdb.set_trace()
basket_params = {
'products': lines,
'vouchers': vouchers,
'system': settings.PS_PAYMENT_SYSTEM_ID,
'custom_basket': True,
}
basket, basket_hash = create_basket_session(request, basket_params)
#fallback_url = request.build_absolute_uri('/')
checkout_params = {
'system': settings.PS_PAYMENT_SYSTEM_ID,
'fallback_url': request.build_absolute_uri('/'), # 'http://mooring-ria-jm.dbca.wa.gov.au/'
'return_url': request.build_absolute_uri(reverse(return_url_ns)), # 'http://mooring-ria-jm.dbca.wa.gov.au/success/'
'return_preload_url': request.build_absolute_uri(reverse(return_url_ns)), # 'http://mooring-ria-jm.dbca.wa.gov.au/success/'
#'fallback_url': fallback_url,
#'return_url': fallback_url,
#'return_preload_url': fallback_url,
'force_redirect': True,
'proxy': True if internal else False,
'invoice_text': invoice_text, # 'Reservation for Jawaid Mushtaq from 2019-05-17 to 2019-05-19 at RIA 005'
}
# if not internal:
# checkout_params['check_url'] = request.build_absolute_uri('/api/booking/{}/booking_checkout_status.json'.format(booking.id))
if internal or request.user.is_anonymous():
#checkout_params['basket_owner'] = booking.customer.id
checkout_params['basket_owner'] = proposal.submitter_id
create_checkout_session(request, checkout_params)
# if internal:
# response = place_order_submission(request)
# else:
response = HttpResponseRedirect(reverse('checkout:index'))
# inject the current basket into the redirect response cookies
# or else, anonymous users will be directionless
response.set_cookie(
settings.OSCAR_BASKET_COOKIE_OPEN, basket_hash,
max_age=settings.OSCAR_BASKET_COOKIE_LIFETIME,
secure=settings.OSCAR_BASKET_COOKIE_SECURE, httponly=True
)
# if booking.cost_total < 0:
# response = HttpResponseRedirect('/refund-payment')
# response.set_cookie(
# settings.OSCAR_BASKET_COOKIE_OPEN, basket_hash,
# max_age=settings.OSCAR_BASKET_COOKIE_LIFETIME,
# secure=settings.OSCAR_BASKET_COOKIE_SECURE, httponly=True
# )
#
# # Zero booking costs
# if booking.cost_total < 1 and booking.cost_total > -1:
# response = HttpResponseRedirect('/no-payment')
# response.set_cookie(
# settings.OSCAR_BASKET_COOKIE_OPEN, basket_hash,
# max_age=settings.OSCAR_BASKET_COOKIE_LIFETIME,
# secure=settings.OSCAR_BASKET_COOKIE_SECURE, httponly=True
# )
return response
def oracle_integration(date,override):
system = '0557'
oracle_codes = oracle_parser(date, system, 'Commercial Operator Licensing', override=override)
| 40.090909 | 175 | 0.675057 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,898 | 0.328571 |
cd3eb5a68afae376fb0cdba9c6455dc19c74e74f | 3,552 | py | Python | src/application/models.py | Chitrank-Dixit/WikiG | 74d99a16afc635a991c17de8d237eb4f6eccbe86 | [
"CNRI-Python"
] | 1 | 2015-11-05T03:51:44.000Z | 2015-11-05T03:51:44.000Z | src/application/models.py | Chitrank-Dixit/WikiG | 74d99a16afc635a991c17de8d237eb4f6eccbe86 | [
"CNRI-Python"
] | null | null | null | src/application/models.py | Chitrank-Dixit/WikiG | 74d99a16afc635a991c17de8d237eb4f6eccbe86 | [
"CNRI-Python"
] | null | null | null | """
models.py
App Engine datastore models
Documentation: https://developers.google.com/appengine/docs/python/ndb/entities
"""
from google.appengine.ext import ndb
from google.appengine.ext import blobstore
from google.appengine.api import users
import functools
import flask
from flaskext import login
from flaskext.login import current_user
from flaskext import oauth
from hashlib import md5
import util
import model
import config
from application import app
import urls
# from application.metadata import Session, Base
################################################################################
# Flaskext Login
################################################################################
login_manager = login.LoginManager()
class AnonymousUser(login.AnonymousUserMixin):
id = 0
admin = False
name = 'Anonymous'
user_db = None
def key(self):
return None
login_manager.anonymous_user = AnonymousUser
class FlaskUser(AnonymousUser):
def __init__(self, user_db):
self.user_db = user_db
self.id = user_db.key.id()
self.name = user_db.name
self.email = user_db.email
self.followed = user_db.followed
self.admin = user_db.admin
def get_name(self):
return self.user_db.name
def key(self):
return self.user_db.key.urlsafe()
def get_id(self):
return self.user_db.key.urlsafe()
def is_authenticated(self):
return True
def is_active(self):
return self.user_db.active
def is_anonymous(self):
return False
def avatar(self, size):
return 'http://www.gravatar.com/avatar/' + md5(self.email).hexdigest() + '?d=mm&s=' + str(size)
def follow(self, user):
if not self.is_following(user):
self.followed.put(user)
return self
def unfollow(self, user):
if self.is_following(user):
self.followed.remove(user)
return self
def is_following(self, user):
model_ex = model.Followers.query()
for entry in model_ex:
if entry.follower_name.string_id() == current_user.name and entry.followed_name.string_id() == user.name:
return True
return False
#return (cur_user.string_id() == current_user.name and to_follow.string_id() == user.name)
def has_follower(self,user):
model_ex = model.Followers.query()
for entry in model_ex:
if entry.follower_name.string_id() == user.name and entry.followed_name.string_id() == current_user.name:
return True
return False
def followed_posts(self):
return Post.query.join(followers, (followers.c.followed_id == Post.user_id)).filter(followers.c.follower_id == self.id).order_by(Post.timestamp.desc())
def __repr__(self): # pragma: no cover
return '<User %r>' % (self.name)
@login_manager.user_loader
def load_user(key):
user_db = ndb.Key(urlsafe=key).get()
if user_db:
return FlaskUser(user_db)
return None
login_manager.init_app(app)
login_manager.login_view = 'signin'
def current_user_id():
return login.current_user.id
def current_user_key():
return login.current_user.user_db.key if login.current_user.user_db else None
def current_user_db():
return login.current_user.user_db
def is_logged_in():
return login.current_user.id != 0
class EventData(model.Event):
def __init__(self, event_db):
self.event_db = event_db
self.id = event_db.key.id()
self.name = event_db.name
self.email = event_db.email
def avatar(self, size):
return 'http://www.gravatar.com/avatar/' + md5(self.email).hexdigest() + '?d=mm&s=' + str(size)
| 23.368421 | 155 | 0.677928 | 2,249 | 0.633164 | 0 | 0 | 143 | 0.040259 | 0 | 0 | 575 | 0.161881 |
cd3ebb35376a9ad6bb35907b043a70f74ff3d06d | 2,488 | py | Python | driver.py | Nobregaigor/Robot-path-tracking-and-obstacle-avoidance-simulation | 23ab060316c5978724b3f109d851ea33206d0e10 | [
"MIT"
] | 6 | 2020-05-01T23:33:13.000Z | 2021-12-18T08:13:50.000Z | driver.py | Nobregaigor/Robot-path-tracking-and-obstacle-avoidance-simulation--Python | 23ab060316c5978724b3f109d851ea33206d0e10 | [
"MIT"
] | null | null | null | driver.py | Nobregaigor/Robot-path-tracking-and-obstacle-avoidance-simulation--Python | 23ab060316c5978724b3f109d851ea33206d0e10 | [
"MIT"
] | 2 | 2020-05-06T11:54:10.000Z | 2020-07-30T01:58:06.000Z | import pygame
import math
import path_planning as pp
class Driver():
def __init__(self, vehicle, path, settings):
""" Driver """
#_______main objects references_______
#reference to driver vehicle object:
self.vehicle = vehicle
#creating a plan object:
self.plan = pp.path_plan(path)
#___________Settings_________
#initial velocity
self.velocity = settings._velocity_
#allowed error range for angle
self.angle_allowed_error = math.radians(settings._angle_allowed_error_)
#_______Class variables_______
#amount of degrees that it needs to turn to match desired path
self.angle_to_turn = None
        #direction that it needs to turn the wheel
self.direction_to_turn = None
#Boolean to indicate when to stop
self.safe_to_drive = False
self.settings = settings
#######################################################
def update_settings(self):
self.angle_allowed_error = math.radians(self.settings._angle_allowed_error_)
self.velocity = self.settings._velocity_
def update_driving_condition(self,conditions):
self.safe_to_drive = conditions['safe_to_drive']
self.angle_to_turn = conditions['angle_to_turn']
self.direction_to_turn = conditions['direction_to_turn']
def turn_wheel(self):
if self.angle_to_turn > self.angle_allowed_error:
if self.direction_to_turn == 'CCW': #opposite to match screens coordinates
self.vehicle.turn_left(self.velocity)
return "Turning left"
elif self.direction_to_turn == 'CW': #opposite to match screens coordinates
self.vehicle.turn_right(self.velocity)
return "Turning Right"
else:
print("I am confused!")
return "I am confused!"
else:
self.vehicle.move_forward(self.velocity)
return "Moving forward"
def drive(self, win, draw_plan=False, draw_sensor=False, debug=False):
self.update_settings()
conditions = self.plan.update_plan(win,self.vehicle.position,self.vehicle.direction, draw_plan, draw_sensor, debug)
self.update_driving_condition(conditions)
# print(conditions)
if self.safe_to_drive == True:
response = self.turn_wheel()
else:
response = "Not safe to drive"
return response
| 35.542857 | 123 | 0.63545 | 2,432 | 0.977492 | 0 | 0 | 0 | 0 | 0 | 0 | 658 | 0.264469 |
cd423af6c5271daa0eac7f6a8ca5e2cf87ffc2fe | 2,752 | py | Python | test/test_api_v1_module.py | feizhihui/deepnlp | cc6647d65ec39aadd35e4a4748da92df5b79bd48 | [
"MIT"
] | null | null | null | test/test_api_v1_module.py | feizhihui/deepnlp | cc6647d65ec39aadd35e4a4748da92df5b79bd48 | [
"MIT"
] | null | null | null | test/test_api_v1_module.py | feizhihui/deepnlp | cc6647d65ec39aadd35e4a4748da92df5b79bd48 | [
"MIT"
] | 1 | 2019-05-13T14:24:15.000Z | 2019-05-13T14:24:15.000Z | #coding:utf-8
'''
Demo for calling API of deepnlp.org web service
Anonymous user of this package have limited access on the number of API calling 100/day
Please Register and Login Your Account to deepnlp.org to get unlimited access to fully support
api_service API module, now supports both windows and linux platforms.
'''
from __future__ import unicode_literals
import json, requests, sys, os
if (sys.version_info>(3,0)): from urllib.parse import quote
else : from urllib import quote
from deepnlp import api_service
login = api_service.init() # registration, if failed, load default empty login {} with limited access
login = {} # use your personal login {'username': 'your_user_name' , 'password': 'your_password'}
conn = api_service.connect(login) # save the connection with login cookies
# API Setting
text = ("我爱吃北京烤鸭").encode('utf-8') # convert text from unicode to utf-8 bytes, quote() function
# Segmentation
url_segment = "http://www.deepnlp.org/api/v1.0/segment/?" + "lang=zh" + "&text=" + quote(text)
web = requests.get(url_segment, cookies = conn)
tuples = json.loads(web.text)
wordsList = tuples['words'] # segmentation json {'words', [w1, w2,...]} return list
print ("Segmentation API:")
print (" ".join(wordsList).encode("utf-8"))
# POS tagging
url_pos = "http://www.deepnlp.org/api/v1.0/pos/?"+ "lang=zh" + "&text=" + quote(text)
web = requests.get(url_pos, cookies = conn)
tuples = json.loads(web.text)
pos_str = tuples['pos_str'] # POS json {'pos_str', 'w1/t1 w2/t2'} return string
print ("POS API:")
print (pos_str.encode("utf-8"))
# NER tagging
url_ner = "http://www.deepnlp.org/api/v1.0/ner/?" + "lang=zh" + "&text=" + quote(text)
web = requests.get(url_ner, cookies = conn)
tuples = json.loads(web.text)
ner_str = tuples['ner_str'] # NER json {'ner_str', 'w1/t1 w2/t2'} return string
print ("NER API:")
print (ner_str.encode("utf-8"))
# Pipeline
annotators = "segment,pos,ner"
url_pipeline = "http://www.deepnlp.org/api/v1.0/pipeline/?" + "lang=zh" + "&text=" + quote(text) + "&annotators=" + quote(annotators)
web = requests.get(url_pipeline, cookies = conn)
tuples = json.loads(web.text)
segment_str = tuples['segment_str'] # segment module
pos_str = tuples['pos_str'] # pos module
ner_str = tuples['ner_str'] # ner module
ner_json = tuples['ner_json'] # ner result in json
# output
def json_to_str(json_dict):
json_str = ""
for k, v in json_dict.items():
json_str += ("'" + k + "'" + ":" + "'" + v + "'" + ",")
json_str = "{" + json_str + "}"
return json_str
print ("Pipeline API:")
print (segment_str.encode("utf-8"))
print (pos_str.encode("utf-8"))
print (ner_str.encode("utf-8"))
print ("NER JSON:")
print (json_to_str(ner_json).encode("utf-8"))
| 38.222222 | 133 | 0.682776 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,371 | 0.495662 |
cd448e3854b74fee56a6672cdae1ce1e148e593d | 1,195 | py | Python | spring_cloud/commons/client/loadbalancer/round_robin.py | haribo0915/Spring-Cloud-in-Python | 0bcd7093869c797df14428bf2d1b0a779f96e573 | [
"Apache-2.0"
] | 5 | 2020-10-06T09:48:23.000Z | 2020-10-07T13:19:46.000Z | spring_cloud/commons/client/loadbalancer/round_robin.py | haribo0915/Spring-Cloud-in-Python | 0bcd7093869c797df14428bf2d1b0a779f96e573 | [
"Apache-2.0"
] | 5 | 2020-10-05T09:57:01.000Z | 2020-10-12T19:52:48.000Z | spring_cloud/commons/client/loadbalancer/round_robin.py | haribo0915/Spring-Cloud-in-Python | 0bcd7093869c797df14428bf2d1b0a779f96e573 | [
"Apache-2.0"
] | 8 | 2020-10-05T06:34:49.000Z | 2020-10-07T13:19:46.000Z | # -*- coding: utf-8 -*-
"""
The built-in Round-Robin algorithm.
"""
# standard library
from typing import Union
# scip plugin
from spring_cloud.commons.client.service_instance import ServiceInstance
from spring_cloud.utils.atomic import AtomicInteger
from .loadbalancer import LoadBalancer
from .supplier import ServiceInstanceListSupplier
__author__ = "Waterball ([email protected])"
__license__ = "Apache 2.0"
class RoundRobinLoadBalancer(LoadBalancer):
"""
    A simple, thread-safe implementation of the round-robin (RR) algorithm.
"""
def __init__(self, instances_supplier: ServiceInstanceListSupplier, service_id):
assert instances_supplier.service_id == service_id, "Inconsistent service's id."
self.__instances_supplier = instances_supplier
self.__service_id = service_id
self.__position = AtomicInteger(-1)
@property
def service_id(self) -> str:
return self.__service_id
def choose(self, request=None) -> Union[ServiceInstance, None]:
instances = self.__instances_supplier.get(request=request)
pos = abs(self.__position.increment_and_get())
return instances[pos % len(instances)]
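# Minimal usage sketch (hypothetical supplier stub; the real ServiceInstanceListSupplier API may differ):
#   supplier = StaticSupplier()                     # exposes .service_id and .get(request=None) -> [ServiceInstance, ...]
#   lb = RoundRobinLoadBalancer(supplier, supplier.service_id)
#   lb.choose()                                     # returns instances in rotation: i0, i1, ..., i0, i1, ...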
| 30.641026 | 88 | 0.738075 | 771 | 0.645188 | 0 | 0 | 75 | 0.062762 | 0 | 0 | 264 | 0.220921 |
cd46541bba89d45678808a7b911ed3c9f61dd510 | 4,245 | py | Python | utils/dataset_utils.py | dpaiton/DeepSparseCoding | 5ea01fa8770794df5e13743aa3f2d85297c27eb1 | [
"MIT"
] | 12 | 2017-04-27T17:19:31.000Z | 2021-11-07T03:37:59.000Z | utils/dataset_utils.py | dpaiton/DeepSparseCoding | 5ea01fa8770794df5e13743aa3f2d85297c27eb1 | [
"MIT"
] | 12 | 2018-03-21T01:16:25.000Z | 2022-02-10T00:21:58.000Z | utils/dataset_utils.py | dpaiton/DeepSparseCoding | 5ea01fa8770794df5e13743aa3f2d85297c27eb1 | [
"MIT"
] | 12 | 2017-02-01T19:49:57.000Z | 2021-12-08T03:16:58.000Z | import os
import sys
import numpy as np
import torch
from torchvision import datasets, transforms
ROOT_DIR = os.path.dirname(os.getcwd())
if ROOT_DIR not in sys.path: sys.path.append(ROOT_DIR)
import DeepSparseCoding.utils.data_processing as dp
import DeepSparseCoding.datasets.synthetic as synthetic
class CustomTensorDataset(torch.utils.data.Dataset):
def __init__(self, data_tensor):
self.data_tensor = data_tensor
def __getitem__(self, index):
return self.data_tensor[index], self.data_tensor[index]
def __len__(self):
return self.data_tensor.size(0)
def load_dataset(params):
new_params = {}
if(params.dataset.lower() == 'mnist'):
preprocessing_pipeline = [
transforms.ToTensor(),
transforms.Lambda(lambda x: x.permute(1, 2, 0)) # channels last
]
if params.standardize_data:
preprocessing_pipeline.append(
transforms.Lambda(lambda x: dp.standardize(x, eps=params.eps)[0]))
if params.rescale_data_to_one:
preprocessing_pipeline.append(
transforms.Lambda(lambda x: dp.rescale_data_to_one(x, eps=params.eps, samplewise=True)[0]))
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(root=params.data_dir, train=True, download=True,
transform=transforms.Compose(preprocessing_pipeline)),
batch_size=params.batch_size, shuffle=params.shuffle_data,
num_workers=0, pin_memory=False)
val_loader = None
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(root=params.data_dir, train=False, download=True,
transform=transforms.Compose(preprocessing_pipeline)),
batch_size=params.batch_size, shuffle=params.shuffle_data,
num_workers=0, pin_memory=False)
elif(params.dataset.lower() == 'dsprites'):
root = os.path.join(*[params.data_dir])
dsprites_file = os.path.join(*[root, 'dsprites/dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz'])
if not os.path.exists(dsprites_file):
import subprocess
print(f'Now downloading the dsprites-dataset to {root}/dsprites')
subprocess.call(['./scripts/download_dsprites.sh', f'{root}'])
print('Finished')
data = np.load(dsprites_file, encoding='bytes')
data = torch.from_numpy(data['imgs']).unsqueeze(1).float()
train_kwargs = {'data_tensor':data}
dset = CustomTensorDataset
train_data = dset(**train_kwargs)
train_loader = torch.utils.data.DataLoader(train_data,
batch_size=params.batch_size,
shuffle=params.shuffle_data,
num_workers=0,
pin_memory=False)
val_loader = None
test_loader = None
elif(params.dataset.lower() == 'synthetic'):
preprocessing_pipeline = [transforms.ToTensor(),
transforms.Lambda(lambda x: x.permute(1, 2, 0)) # channels last
]
train_loader = torch.utils.data.DataLoader(
synthetic.SyntheticImages(params.epoch_size, params.data_edge_size, params.dist_type,
params.rand_state, params.num_classes,
transform=transforms.Compose(preprocessing_pipeline)),
batch_size=params.batch_size, shuffle=params.shuffle_data,
num_workers=0, pin_memory=False)
val_loader = None
test_loader = None
new_params["num_pixels"] = params.data_edge_size**2
else:
        assert False, (f'Supported datasets are ["mnist", "dsprites", "synthetic"], not {params.dataset}')
new_params['epoch_size'] = len(train_loader.dataset)
if(not hasattr(params, 'num_val_images')):
if val_loader is None:
new_params['num_val_images'] = 0
else:
new_params['num_val_images'] = len(val_loader.dataset)
if(not hasattr(params, 'num_test_images')):
if test_loader is None:
new_params['num_test_images'] = 0
else:
new_params['num_test_images'] = len(test_loader.dataset)
new_params['data_shape'] = list(next(iter(train_loader))[0].shape)[1:]
return (train_loader, val_loader, test_loader, new_params)
| 42.878788 | 107 | 0.660306 | 291 | 0.068551 | 0 | 0 | 0 | 0 | 0 | 0 | 465 | 0.109541 |
cd4856841cf209c6c31d8cf4b1d4a02e1669fe87 | 1,051 | py | Python | adi_study_watch/nrf5_sdk_15.2.0/adi_study_watch/cli/m2m2/inc/master_definitions/led_interface.py | ArrowElectronics/Vital-Signs-Monitoring | ba43fe9a116d94170561433910fd7bffba5726e7 | [
"Unlicense"
] | 5 | 2021-06-13T17:11:19.000Z | 2021-12-01T18:20:38.000Z | adi_study_watch/nrf5_sdk_15.2.0/adi_study_watch/cli/m2m2/inc/master_definitions/led_interface.py | ArrowElectronics/Vital-Signs-Monitoring | ba43fe9a116d94170561433910fd7bffba5726e7 | [
"Unlicense"
] | null | null | null | adi_study_watch/nrf5_sdk_15.2.0/adi_study_watch/cli/m2m2/inc/master_definitions/led_interface.py | ArrowElectronics/Vital-Signs-Monitoring | ba43fe9a116d94170561433910fd7bffba5726e7 | [
"Unlicense"
] | 1 | 2022-01-08T15:01:44.000Z | 2022-01-08T15:01:44.000Z | #!/usr/bin/env python3
from ctypes import *
import m2m2_core
class M2M2_LED_COMMAND_ENUM_t(c_uint8):
M2M2_LED_COMMAND_GET = 0x0
M2M2_LED_COMMAND_SET = 0x1
class M2M2_LED_PATTERN_ENUM_t(c_uint8):
M2M2_LED_PATTERN_OFF = 0x0
M2M2_LED_PATTERN_SLOW_BLINK_DC_12 = 0x1
M2M2_LED_PATTERN_SLOW_BLINK_DC_12_N = 0xFE
M2M2_LED_PATTERN_FAST_BLINK_DC_50 = 0xAA
M2M2_LED_PATTERN_FAST_BLINK_DC_50_N = 0x55
M2M2_LED_PATTERN_MED_BLINK_DC_50 = 0xCC
M2M2_LED_PATTERN_SLOW_BLINK_DC_50 = 0xF0
M2M2_LED_PATTERN_ON = 0xFF
class M2M2_LED_PRIORITY_ENUM_t(c_uint8):
M2M2_LED_PRIORITY_LOW = 0x0
M2M2_LED_PRIORITY_MED = 0x1
M2M2_LED_PRIORITY_HIGH = 0x2
M2M2_LED_PRIORITY_CRITICAL = 0x3
class m2m2_led_ctrl_t(Structure):
    _fields_ = [
("command", M2M2_LED_COMMAND_ENUM_t),
("priority", M2M2_LED_PRIORITY_ENUM_t),
("r_pattern", M2M2_LED_PATTERN_ENUM_t),
("g_pattern", M2M2_LED_PATTERN_ENUM_t),
("b_pattern", M2M2_LED_PATTERN_ENUM_t),
]
| 30.028571 | 53 | 0.726927 | 980 | 0.932445 | 0 | 0 | 0 | 0 | 0 | 0 | 74 | 0.070409 |
cd485ea8847607e1b8262b17b33a7d95c7b05c48 | 2,327 | py | Python | src/empirical_study.py | arshajithwolverine/Recommentation-System_KGNN-LS | 82ad10633a56794bbc38dc7e6c40a3636c7d570a | [
"MIT"
] | 133 | 2019-06-20T08:38:04.000Z | 2022-03-30T07:57:14.000Z | src/empirical_study.py | piaofu110/KGNN-LS | 3afd76361b623e9e38b822861c79bcd61dae41aa | [
"MIT"
] | 10 | 2019-07-06T12:53:01.000Z | 2021-11-10T12:58:50.000Z | src/empirical_study.py | piaofu110/KGNN-LS | 3afd76361b623e9e38b822861c79bcd61dae41aa | [
"MIT"
] | 40 | 2019-08-07T06:02:31.000Z | 2022-01-05T15:19:29.000Z | import networkx as nx
import numpy as np
import argparse
if __name__ == '__main__':
np.random.seed(555)
NUM = 10000
parser = argparse.ArgumentParser()
parser.add_argument('-d', type=str, default='music')
args = parser.parse_args()
DATASET = args.d
kg_np = np.load('../data/' + DATASET + '/kg_final.npy')
kg = nx.Graph()
kg.add_edges_from([(triple[0], triple[2]) for triple in kg_np]) # construct knowledge graph
rating_np = np.load('../data/' + DATASET + '/ratings_final.npy')
item_history = dict()
item_set = set()
for record in rating_np:
user = record[0]
item = record[1]
rating = record[2]
if rating == 1:
if item not in item_history:
item_history[item] = set()
item_history[item].add(user)
item_set.add(item)
item_pair_num_no_common_rater = 0
item_pair_num_with_common_rater = 0
sp_no_common_rater = dict()
sp_with_common_rater = dict()
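    # Sample random item pairs until NUM pairs with no common rater and NUM pairs with at least one
    # common rater are collected, recording the knowledge-graph shortest-path distribution of each group.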
while True:
item1, item2 = np.random.choice(list(item_set), size=2, replace=False)
if item_pair_num_no_common_rater == NUM and item_pair_num_with_common_rater == NUM:
break
if item_pair_num_no_common_rater < NUM and len(item_history[item1] & item_history[item2]) == 0:
item_pair_num_no_common_rater += 1
if not nx.has_path(kg, item1, item2):
sp = 'infinity'
else:
sp = nx.shortest_path_length(kg, item1, item2)
if sp not in sp_no_common_rater:
sp_no_common_rater[sp] = 0
sp_no_common_rater[sp] += 1
print(item_pair_num_no_common_rater, item_pair_num_with_common_rater)
if item_pair_num_with_common_rater < NUM and len(item_history[item1] & item_history[item2]) > 0:
item_pair_num_with_common_rater += 1
if not nx.has_path(kg, item1, item2):
sp = 'infinity'
else:
sp = nx.shortest_path_length(kg, item1, item2)
if sp not in sp_with_common_rater:
sp_with_common_rater[sp] = 0
sp_with_common_rater[sp] += 1
print(item_pair_num_no_common_rater, item_pair_num_with_common_rater)
print(sp_no_common_rater)
print(sp_with_common_rater)
| 36.359375 | 104 | 0.621401 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 123 | 0.052858 |
cd48bacc37dd1b8304c3c30daa2f346ee7aa4309 | 6,317 | py | Python | Survival_Pygame/data.py | Lily-Li828/effective-octo-fiesta | 4dbfeaec6158141bb03005aa25240dd337694ee3 | [
"Apache-2.0"
] | null | null | null | Survival_Pygame/data.py | Lily-Li828/effective-octo-fiesta | 4dbfeaec6158141bb03005aa25240dd337694ee3 | [
"Apache-2.0"
] | null | null | null | Survival_Pygame/data.py | Lily-Li828/effective-octo-fiesta | 4dbfeaec6158141bb03005aa25240dd337694ee3 | [
"Apache-2.0"
] | null | null | null | import pygame
from pygame.locals import*
from pygame import mixer
pygame.init()
# loading in background image
backgroundClassic_image=pygame.image.load('image/WallPaper.png')
backgroundAncient_image=pygame.image.load('image/WallPaper2.png')
# loading in player image
player_imageClassic=pygame.image.load('image/player.png')
player_imageAncient=pygame.image.load('image/player2.png')
player_imageClassicR=pygame.image.load('image/playerR.png')
player_imageAncientR=pygame.image.load('image/player2R.png')
#loading sound for bullet
BulletSound=mixer.Sound('sound/bullet.wav')
#Loading sound for collision with enemy:
CollidewithEnemy=mixer.Sound('sound/Collide.wav')
#Loading sound for opening of game:
Opening_Sound=mixer.Sound('sound/opening.wav')
Mouse_Sound=mixer.Sound('sound/mouseclick.wav')
Selection_Sound=mixer.Sound('sound/selection.wav')
#loading sound for end of game:
End_GameSound=mixer.Sound('sound/gameover.wav')
#loading sound for win game:
Win_GameSound=mixer.Sound('sound/wingame.wav')
Door_GameSound=mixer.Sound('sound/doorappear.wav')
#Loading in image for opening animation:
Opening_Image= [pygame.image.load('image/opening.png'),pygame.image.load('image/opening.png'),
pygame.image.load('image/opening.png'),pygame.image.load('image/opening.png'),
pygame.image.load('image/opening.png'),pygame.image.load('image/opening.png'),
pygame.image.load('image/opening.png'),pygame.image.load('image/opening.png'),
pygame.image.load('image/opening.png'),pygame.image.load('image/opening.png'),
pygame.image.load('image/opening.png'),pygame.image.load('image/opening.png'),
pygame.image.load('image/opening.png'),pygame.image.load('image/opening.png'),
pygame.image.load('image/opening.png'),pygame.image.load('image/opening.png'),
pygame.image.load('image/opening.png'),pygame.image.load('image/opening1.png'),
pygame.image.load('image/opening1.png'),pygame.image.load('image/opening1.png'),
pygame.image.load('image/opening.png')]
#loading in image for opening game mode selection:
OpeningSelect_BG=pygame.image.load('image/ModeSelection.png')
ClassicMode_image=pygame.image.load('image/ClassicMode.png')
AncientMode_image=pygame.image.load('image/AncientMode.png')
Glow_image=pygame.image.load('image/glow.png')
#Loading image for win game:
Won_Light=pygame.image.load('image/light.png')
Won_Door=pygame.image.load('image/door.png')
#Loading win game page:
Escape_image=pygame.image.load('image/Wingame.png')
#loading in image:
direction_key=pygame.image.load('image/direction1.png')
direction_arrow=pygame.image.load('image/direction2.png')
#loading in endgame page:
End_image=pygame.image.load('image/gameover.png')
# load in image of platform
platformClassic_img= pygame.image.load('image/icicle.png')
platformAncient_img=pygame.image.load('image/brickwall.png')
#Game map for two different game modes:
Classic_map = [[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0],
[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0],
[1,1,1,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,1,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,1,1,1,0,0,0,0,0],
[0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]
Ancient_map=[[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[1,1,0,0,1,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,1,1,1,0,0,0],
[1,1,0,0,1,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,0,0,0,1,0,0,1,1,1,0,1,0,0,1,0,0,0,0,1,1,1,1,0,0,0],
[1,1,0,0,1,1,1,0,0,1,1,0,0,1,0,0,1,0,0,0,0,1,1,0,1,0,1,0,0,1,1,1,0,1,0,0,1,0,0,0,0,1,1,0,0,0,0,0],
[1,1,0,0,1,1,1,0,0,1,1,0,0,1,0,0,1,0,0,0,0,1,1,0,1,0,1,0,0,1,1,1,0,1,0,0,1,0,1,0,0,1,1,0,0,0,0,0],
[1,1,0,0,1,1,1,0,0,1,1,0,0,1,0,0,1,0,0,0,0,1,1,0,1,0,1,0,0,1,1,1,0,1,0,0,1,0,1,0,0,1,1,0,0,0,0,0]]
#Upload font type:
fontd1= pygame.font.Font('font/Pieces.ttf',32)
fontd2= pygame.font.Font('font/OldeEnglish.ttf',18)
fontdO= pygame.font.Font('font/Opening.ttf',28) # Font (Opening)
fontdS= pygame.font.Font('font/Pieces.ttf',30) # Font (For Game Mode Selection)
| 54.930435 | 114 | 0.625139 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,544 | 0.24442 |
cd4c852bd50c3ecb65653b4479673255f18bc5fa | 8,170 | py | Python | test/filters_iso_test.py | todofixthis/filters-iso | da6052b49a2f71a3b3d6b66e2633debbb64f5b16 | [
"MIT"
] | null | null | null | test/filters_iso_test.py | todofixthis/filters-iso | da6052b49a2f71a3b3d6b66e2633debbb64f5b16 | [
"MIT"
] | null | null | null | test/filters_iso_test.py | todofixthis/filters-iso | da6052b49a2f71a3b3d6b66e2633debbb64f5b16 | [
"MIT"
] | null | null | null | import filters as f
from filters.test import BaseFilterTestCase
# noinspection PyProtectedMember
from iso3166 import Country, countries_by_alpha3
from language_tags import tags
from language_tags.Tag import Tag
from moneyed import Currency, get_currency
class CountryTestCase(BaseFilterTestCase):
filter_type = f.ext.Country
def test_pass_none(self):
"""
``None`` always passes this filter.
Use ``Required | Country`` if you want to reject ``None``.
"""
self.assertFilterPasses(None)
def test_pass_valid_alpha_3(self):
"""
The incoming value is a valid ISO-3316-1 alpha-3 country code.
"""
filtered = self._filter('FRA')
self.assertFilterPasses(filtered, self.skip_value_check)
country = filtered.cleaned_data
self.assertIsInstance(country, Country)
self.assertEqual(country.name, 'France')
def test_pass_valid_alpha_2(self):
"""
The incoming value is a valid ISO-3166-1 alpha-2 country code.
"""
filtered = self._filter('IE')
self.assertFilterPasses(filtered, self.skip_value_check)
country = filtered.cleaned_data
self.assertIsInstance(country, Country)
self.assertEqual(country.name, 'Ireland')
def test_pass_case_insensitive(self):
"""
The incoming value is basically valid, but it has the wrong
case.
"""
filtered = self._filter('arg')
self.assertFilterPasses(filtered, self.skip_value_check)
country = filtered.cleaned_data
self.assertIsInstance(country, Country)
self.assertEqual(country.name, 'Argentina')
def test_fail_invalid_code(self):
"""
The incoming value is not a valid ISO-3316-1 country code.
"""
# Surrender is not an option!
self.assertFilterErrors('\u2690', [f.ext.Country.CODE_INVALID])
def test_fail_subdivision(self):
"""
Subdivisions are not accepted, even though certain ones are
technically part of ISO-3166-1.
After all, the filter is named ``Country``, not ``ISO_3166_1``!
"""
self.assertFilterErrors('IE-L', [f.ext.Country.CODE_INVALID])
def test_fail_wrong_type(self):
"""
The incoming value is not a string.
"""
self.assertFilterErrors(['CHN', 'JPN'], [f.Type.CODE_WRONG_TYPE])
def test_pass_country_object(self):
"""
The incoming value is already a :py:class:`Country` object.
"""
self.assertFilterPasses(countries_by_alpha3.get('USA'))
class CurrencyTestCase(BaseFilterTestCase):
filter_type = f.ext.Currency
def test_pass_none(self):
"""
``None`` always passes this filter.
Use ``Required | Currency`` if you do want to reject ``None``.
"""
self.assertFilterPasses(None)
def test_pass_valid_code(self):
"""
The incoming value is a valid ISO-4217 currency code.
"""
filtered = self._filter('PEN')
self.assertFilterPasses(filtered, self.skip_value_check)
currency = filtered.cleaned_data
self.assertIsInstance(currency, Currency)
self.assertEqual(currency.name, 'Nuevo Sol')
def test_pass_case_insensitive(self):
"""
The incoming value is basically valid, but it has the wrong
case.
"""
filtered = self._filter('ars')
self.assertFilterPasses(filtered, self.skip_value_check)
currency = filtered.cleaned_data
self.assertIsInstance(currency, Currency)
self.assertEqual(currency.name, 'Argentine Peso')
def test_fail_invalid_code(self):
"""
The incoming value is not a valid ISO-4217 currency code.
"""
# You can't use the currency symbol, silly!
self.assertFilterErrors('\u00a3', [f.ext.Currency.CODE_INVALID])
def test_fail_wrong_type(self):
"""
The incoming value is not a string.
"""
self.assertFilterErrors(['USD', 'CNY'], [f.Type.CODE_WRONG_TYPE])
def test_pass_currency_object(self):
"""
The incoming value is already a :py:class:`moneyed.Currency`
object.
"""
self.assertFilterPasses(get_currency(code='USD'))
class LocaleTestCase(BaseFilterTestCase):
"""
Note that unit tests will focus on the functionality of the Filter
rather than the underlying library; the variety of formats and
values that the Locale Filter accepts FAR exceeds the scope
demonstrated in these tests.
References:
- http://r12a.github.io/apps/subtags/
- https://pypi.python.org/pypi/language-tags
- https://github.com/mattcg/language-tags
"""
filter_type = f.ext.Locale
def test_pass_none(self):
"""
``None`` always passes this filter.
Use `Required | Locale` if you want to reject `None`.
"""
self.assertFilterPasses(None)
def test_valid_locale(self):
"""
Valid locale string is valid.
"""
# There are a LOT of possible values that can go here.
# http://r12a.github.io/apps/subtags/
filtered = self._filter('en-cmn-Hant-HK')
self.assertFilterPasses(filtered, self.skip_value_check)
tag = filtered.cleaned_data
self.assertIsInstance(tag, Tag)
self.assertTrue(tag.valid)
#
# Language tags have LOTS of attributes.
# We will check a few of them to make sure the Filter returned
# the correct tag, but you should be aware that there is a
# LOT of information available in the value returned by the
# Locale Filter.
#
# For more information, check out the repo for the Javascript
# version of the underlying `language_tags` library (the
# Python version is a port with the same API... and no usage
# documentation of its own).
# https://github.com/mattcg/language-tags
#
self.assertEqual(str(tag), 'en-cmn-Hant-HK')
self.assertEqual(str(tag.language), 'en')
self.assertEqual(str(tag.region), 'HK')
self.assertEqual(str(tag.script), 'Hant')
def test_pass_case_insensitive(self):
"""
The incoming value is basically valid, except it uses the wrong
case.
"""
filtered = self._filter('Az-ArAb-Ir')
self.assertFilterPasses(filtered, self.skip_value_check)
tag = filtered.cleaned_data
self.assertIsInstance(tag, Tag)
self.assertTrue(tag.valid)
self.assertEqual(str(tag), 'az-Arab-IR')
self.assertEqual(str(tag.language), 'az')
self.assertEqual(str(tag.region), 'IR')
self.assertEqual(str(tag.script), 'Arab')
def test_fail_invalid_value(self):
"""
The incoming value generates parsing errors.
"""
# noinspection SpellCheckingInspection
filtered = self._filter(
'sl-Cyrl-YU-rozaj-solba-1994-b-1234-a-Foobar-x-b-1234-a-Foobar'
)
self.assertFilterErrors(filtered, [f.ext.Locale.CODE_INVALID])
# Parse errors included here for demonstration purposes.
self.assertListEqual(
filtered.filter_messages[''][0].context.get('parse_errors'),
[
# Sorry about the magic values.
# These are defined in the Tag initializer, so they're
# a bit tricky to get at without complicating the
# test.
# :py:meth:`Tag.__init__`
(11, "The subtag 'YU' is deprecated."),
(8, "Duplicate variant subtag 'solba' found."),
(8, "Duplicate variant subtag '1994' found."),
],
)
def test_fail_wrong_type(self):
"""
The incoming value is not a string.
"""
self.assertFilterErrors(['en', 'US'], [f.Type.CODE_WRONG_TYPE])
def test_pass_tag_object(self):
"""
The incoming value is already a Tag object.
"""
self.assertFilterPasses(tags.tag('en-cmn-Hant-HK'))
| 31.914063 | 75 | 0.623745 | 7,907 | 0.967809 | 0 | 0 | 0 | 0 | 0 | 0 | 3,607 | 0.441493 |
cd4d5dd7883050a254679a4b1f93de18a8465561 | 1,179 | py | Python | datacamp-master/22-introduction-to-time-series-analysis-in-python/04-moving-average-ma-and-arma-models/08-equivalance-of-ar(1)-and-ma(infinity).py | vitthal10/datacamp | 522d2b192656f7f6563bf6fc33471b048f1cf029 | [
"MIT"
] | 1 | 2020-06-11T01:32:36.000Z | 2020-06-11T01:32:36.000Z | 22-introduction-to-time-series-analysis-in-python/04-moving-average-ma-and-arma-models/08-equivalance-of-ar(1)-and-ma(infinity).py | AndreasFerox/DataCamp | 41525d7252f574111f4929158da1498ee1e73a84 | [
"MIT"
] | null | null | null | 22-introduction-to-time-series-analysis-in-python/04-moving-average-ma-and-arma-models/08-equivalance-of-ar(1)-and-ma(infinity).py | AndreasFerox/DataCamp | 41525d7252f574111f4929158da1498ee1e73a84 | [
"MIT"
] | 1 | 2021-08-08T05:09:52.000Z | 2021-08-08T05:09:52.000Z | '''
Equivalence of AR(1) and MA(infinity)
To better understand the relationship between MA models and AR models, you will demonstrate that an AR(1) model is equivalent to an MA(∞) model with the appropriate parameters.
You will simulate an MA model with parameters 0.8, 0.8^2, 0.8^3, … for a large number (30) of lags and show that it has the same Autocorrelation Function as an AR(1) model with ϕ = 0.8.
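A quick sketch of why this works: repeatedly substituting the AR(1) recursion into itself gives
    X_t = ϕ X_{t-1} + ε_t = ε_t + ϕ ε_{t-1} + ϕ^2 ε_{t-2} + … = Σ_{k≥0} ϕ^k ε_{t-k},
so an AR(1) with ϕ = 0.8 is an MA(∞) whose k-th coefficient is 0.8^k, and the two ACFs should therefore match.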
INSTRUCTIONS
100XP
Import the modules for simulating data and plotting the ACF from statsmodels
Use a list comprehension to build a list with exponentially decaying MA parameters: 1, 0.8, 0.8^2, 0.8^3, …
Simulate 5000 observations of the MA(30) model
Plot the ACF of the simulated series
'''
# import the modules for simulating data and plotting the ACF
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.tsa.arima_process import ArmaProcess
from statsmodels.graphics.tsaplots import plot_acf
# Build a list MA parameters
ma = [0.8**i for i in range(30)]
# Simulate the MA(30) model
ar = np.array([1])
AR_object = ArmaProcess(ar, ma)
simulated_data = AR_object.generate_sample(nsample=5000)
# Plot the ACF
plot_acf(simulated_data, lags=30)
plt.show() | 19.983051 | 136 | 0.74894 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 892 | 0.747695 |
cd4e4c3a86cc4a31b024c46ddddde1fa3e66e93b | 3,752 | py | Python | imutils.py | shimoda-uec/ssdd | 564c3e08fae7a158516cdbd9f3599a74dc748aff | [
"MIT"
] | 33 | 2019-11-05T07:15:36.000Z | 2021-04-27T06:33:47.000Z | imutils.py | shimoda-uec/ssdd | 564c3e08fae7a158516cdbd9f3599a74dc748aff | [
"MIT"
] | 1 | 2019-11-18T13:02:40.000Z | 2019-11-18T13:02:54.000Z | imutils.py | shimoda-uec/ssdd | 564c3e08fae7a158516cdbd9f3599a74dc748aff | [
"MIT"
] | 3 | 2019-11-25T11:00:39.000Z | 2021-03-27T06:53:21.000Z | import PIL.Image
import random
import numpy as np
import cv2
class RandomHorizontalFlip():
def __init__(self):
return
def __call__(self, inputs):
if bool(random.getrandbits(1)):
outputs=[]
for inp in inputs:
out = np.fliplr(inp).copy()
outputs.append(out)
return outputs
else:
return inputs
class RandomResizeLong():
def __init__(self, min_long, max_long):
self.min_long = min_long
self.max_long = max_long
def __call__(self, inputs):
img=inputs[0]
target_long = random.randint(self.min_long, self.max_long)
#w, h = img.size
h, w, c = img.shape
target_shape = (target_long, target_long)
"""
if w > h:
target_shape = (int(round(w * target_long / h)), target_long)
else:
target_shape = (target_long, int(round(h * target_long / w)))
"""
outputs=[]
for inp in inputs:
out = cv2.resize(inp, target_shape)
if len(out.shape)==2:
out=np.expand_dims(out,2)
outputs.append(out)
return outputs
class RandomCrop():
def __init__(self, cropsize):
self.cropsize = cropsize
def __call__(self, inputs):
imgarr = np.concatenate(inputs, axis=-1)
h, w, c = imgarr.shape
ch = min(self.cropsize, h)
cw = min(self.cropsize, w)
w_space = w - self.cropsize
h_space = h - self.cropsize
if w_space > 0:
cont_left = 0
img_left = random.randrange(w_space+1)
else:
cont_left = random.randrange(-w_space+1)
img_left = 0
if h_space > 0:
cont_top = 0
img_top = random.randrange(h_space+1)
else:
cont_top = random.randrange(-h_space+1)
img_top = 0
outputs=[]
for inp in inputs:
container = np.zeros((self.cropsize, self.cropsize, inp.shape[-1]), np.float32)
container[cont_top:cont_top+ch, cont_left:cont_left+cw] = \
inp[img_top:img_top+ch, img_left:img_left+cw]
outputs.append(container)
return outputs
class Normalize():
def __init__(self, mean = (0.485, 0.456, 0.406), std = (0.229, 0.224, 0.225)):
self.mean = mean
self.std = std
def __call__(self, img):
imgarr = np.asarray(img)
proc_img = np.empty_like(imgarr, np.float32)
proc_img[..., 0] = (imgarr[..., 0] / 255. - self.mean[0]) / self.std[0]
proc_img[..., 1] = (imgarr[..., 1] / 255. - self.mean[1]) / self.std[1]
proc_img[..., 2] = (imgarr[..., 2] / 255. - self.mean[2]) / self.std[2]
return proc_img
def HWC_to_CHW(img):
return np.transpose(img, (2, 0, 1))
class Rescale():
def __init__(self, scale):
self.scale=scale
def __call__(self, inputs):
outputs=[]
for inp in inputs:
out = cv2.resize(inp, self.scale)
if len(out.shape)==2:
out=np.expand_dims(out,2)
outputs.append(out)
return outputs
def crf_inference(img, probs, t=3, scale_factor=1, labels=21):
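    # DenseCRF post-processing: refine softmax class probabilities with Gaussian and bilateral pairwise
    # terms over the RGB image for t inference steps, returning an array of shape (labels, h, w).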
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import unary_from_softmax
h, w = img.shape[:2]
n_labels = labels
d = dcrf.DenseCRF2D(w, h, n_labels)
unary = unary_from_softmax(probs)
unary = np.ascontiguousarray(unary)
d.setUnaryEnergy(unary)
d.addPairwiseGaussian(sxy=3/scale_factor, compat=3)
d.addPairwiseBilateral(sxy=80/scale_factor, srgb=13, rgbim=np.copy(img), compat=10)
Q = d.inference(t)
return np.array(Q).reshape((n_labels, h, w)) | 32.068376 | 91 | 0.567697 | 3,053 | 0.813699 | 0 | 0 | 0 | 0 | 0 | 0 | 211 | 0.056237 |
cd503144da89b34c7f7e0c6f7d30f63249106454 | 398 | py | Python | dfmt/svg/run.py | wangrl2016/coding | fd6cd342cade42379c4a0447d83e17c6596fd3a3 | [
"MIT"
] | 4 | 2021-02-20T03:47:48.000Z | 2021-11-09T17:25:43.000Z | dfmt/svg/run.py | wangrl2016/coding | fd6cd342cade42379c4a0447d83e17c6596fd3a3 | [
"MIT"
] | null | null | null | dfmt/svg/run.py | wangrl2016/coding | fd6cd342cade42379c4a0447d83e17c6596fd3a3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import subprocess
if __name__ == '__main__':
out_dir = 'out'
if not os.path.exists(out_dir):
os.mkdir(out_dir)
subprocess.run(['cargo', 'build', '--release'])
exe = 'target/release/svg'
subprocess.run([exe, '-i', 'test/simple-text.svg', '-o', 'out/simple-text.png', '--perf',
'--dump-svg', 'out/simple-text.svg'])
| 26.533333 | 93 | 0.585427 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 174 | 0.437186 |
cd517441104b9bb84c95422e46c5a618c55415fc | 79 | py | Python | project/enums/string_types_enum.py | vinibiavatti1/PythonFlaskCms | e43a4db84d1f77a5f66b1f8fcb9dc96e05e6c023 | [
"MIT"
] | null | null | null | project/enums/string_types_enum.py | vinibiavatti1/PythonFlaskCms | e43a4db84d1f77a5f66b1f8fcb9dc96e05e6c023 | [
"MIT"
] | null | null | null | project/enums/string_types_enum.py | vinibiavatti1/PythonFlaskCms | e43a4db84d1f77a5f66b1f8fcb9dc96e05e6c023 | [
"MIT"
] | null | null | null | """
String format type value enumeration.
"""
TRUE = '1'
FALSE = '0'
NONE = ''
| 11.285714 | 37 | 0.594937 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 0.670886 |
cd52a473787d5199c37a49a98543ea8b45caa074 | 90 | py | Python | LTA/museums/admin.py | valeriimartsyshyn/lviv_tourist_adviser | e8ce0c7ba97262b2d181e3373eb806f4dcc9bbf1 | [
"MIT"
] | null | null | null | LTA/museums/admin.py | valeriimartsyshyn/lviv_tourist_adviser | e8ce0c7ba97262b2d181e3373eb806f4dcc9bbf1 | [
"MIT"
] | 1 | 2021-09-27T06:33:26.000Z | 2021-09-27T06:33:26.000Z | LTA/museums/admin.py | valeriimartsyshyn/lviv_tourist_adviser | e8ce0c7ba97262b2d181e3373eb806f4dcc9bbf1 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Museums
admin.site.register(Museums) | 22.5 | 32 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
cd537e30b909d9612963bfa8b8f1c4d920b60f98 | 1,584 | py | Python | grailapp-osc-6a60f9376f69/setup.py | yuan7407/TD_OpenCV_PythonOSC | f4424b1f7155f7942397212b97183cb749612f50 | [
"MIT"
] | 20 | 2018-12-06T21:35:10.000Z | 2022-02-08T23:22:35.000Z | grailapp-osc-6a60f9376f69/setup.py | phoebezhung/TD_OpenCV_PythonOSC | f4424b1f7155f7942397212b97183cb749612f50 | [
"MIT"
] | null | null | null | grailapp-osc-6a60f9376f69/setup.py | phoebezhung/TD_OpenCV_PythonOSC | f4424b1f7155f7942397212b97183cb749612f50 | [
"MIT"
] | 4 | 2019-02-27T08:13:45.000Z | 2021-11-02T15:14:41.000Z | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
setup
~~~~~
Setup Script
Run the build process by running the command 'python setup.py build'
:copyright: (c) 2018 by Oleksii Lytvyn.
:license: MIT, see LICENSE for more details.
"""
import osc.osc as osc
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='osc',
version=osc.__version__,
author='Oleksii Lytvyn',
author_email='[email protected]',
description='OSC implementation in pure Python',
long_description=open('README.rst').read(),
url='https://bitbucket.org/grailapp/osc',
download_url='https://bitbucket.org/grailapp/osc/get/default.zip',
platforms='any',
packages=['osc'],
keywords=['osc', 'protocol', 'utilities', 'osc-1.0', 'network', 'communication', 'udp'],
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3',
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Multimedia :: Sound/Audio',
'Topic :: System :: Networking',
'License :: OSI Approved :: MIT License'
],
install_requires=[]
)
| 31.058824 | 92 | 0.621212 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,037 | 0.654672 |
cd5534b9b393b4ca6ad72c44a3438fcc6e74b3d0 | 2,501 | py | Python | socketshark/utils.py | Play2Live/socketshark | 9b1e40654bf629c593079fb44c548911d4c864af | [
"MIT"
] | null | null | null | socketshark/utils.py | Play2Live/socketshark | 9b1e40654bf629c593079fb44c548911d4c864af | [
"MIT"
] | null | null | null | socketshark/utils.py | Play2Live/socketshark | 9b1e40654bf629c593079fb44c548911d4c864af | [
"MIT"
] | null | null | null | import asyncio
import ssl
import aiohttp
from . import constants as c
def _get_rate_limit_wait(log, resp, opts):
"""
Returns the number of seconds we should wait given a 429 HTTP response and
HTTP options.
"""
max_wait = 3600
wait = opts['wait']
header_name = opts['rate_limit_reset_header_name']
if header_name and header_name in resp.headers:
header_value = resp.headers[header_name]
try:
new_wait = float(header_value)
# Make sure we have a valid value (not negative, NaN, or Inf)
if 0 <= new_wait <= max_wait:
wait = new_wait
elif new_wait > max_wait:
log.warn('rate reset value too high',
name=header_name, value=header_value)
wait = max_wait
else:
log.warn('invalid rate reset value',
name=header_name, value=header_value)
except ValueError:
log.warn('invalid rate reset value',
name=header_name, value=header_value)
return wait
async def http_post(shark, url, data):
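    """POST `data` as JSON to `url`, retrying up to the configured number of tries
    and backing off on 429 responses via _get_rate_limit_wait()."""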
log = shark.log.bind(url=url)
opts = shark.config['HTTP']
if opts.get('ssl_cafile'):
ssl_context = ssl.create_default_context(cafile=opts['ssl_cafile'])
else:
ssl_context = None
conn = aiohttp.TCPConnector(ssl_context=ssl_context)
async with aiohttp.ClientSession(connector=conn) as session:
wait = opts['wait']
for n in range(opts['tries']):
if n > 0:
await asyncio.sleep(wait)
try:
log.debug('http request', data=data)
async with session.post(url, json=data,
timeout=opts['timeout']) as resp:
if resp.status == 429: # Too many requests.
wait = _get_rate_limit_wait(log, resp, opts)
continue
else:
wait = opts['wait']
resp.raise_for_status()
data = await resp.json()
log.debug('http response', data=data)
return data
except aiohttp.ClientError:
log.exception('unhandled exception in http_post')
except asyncio.TimeoutError:
log.exception('timeout in http_post')
return {'status': 'error', 'error': c.ERR_SERVICE_UNAVAILABLE}
| 35.225352 | 78 | 0.551779 | 0 | 0 | 0 | 0 | 0 | 0 | 1,389 | 0.555378 | 469 | 0.187525 |
cd592812165ebec71f40378868573e5f9eda72b9 | 252 | py | Python | download_and_create_reference_datasets/v02/create_ht__clinvar.py | NLSVTN/hail-elasticsearch-pipelines | 8b895a2e46a33d347dd2a1024101a6d515027a03 | [
"MIT"
] | 15 | 2017-11-22T14:48:04.000Z | 2020-10-05T18:22:24.000Z | download_and_create_reference_datasets/v02/create_ht__clinvar.py | NLSVTN/hail-elasticsearch-pipelines | 8b895a2e46a33d347dd2a1024101a6d515027a03 | [
"MIT"
] | 94 | 2020-10-21T17:37:57.000Z | 2022-03-29T14:59:46.000Z | download_and_create_reference_datasets/v02/create_ht__clinvar.py | NLSVTN/hail-elasticsearch-pipelines | 8b895a2e46a33d347dd2a1024101a6d515027a03 | [
"MIT"
] | 7 | 2019-01-29T09:08:10.000Z | 2020-02-25T16:22:57.000Z | #!/usr/bin/env python3
from kubernetes.shell_utils import simple_run as run
run((
"python3 gcloud_dataproc/v02/run_script.py "
"--cluster create-ht-clinvar "
"download_and_create_reference_datasets/v02/hail_scripts/write_clinvar_ht.py"))
| 28 | 83 | 0.77381 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 173 | 0.686508 |
cd597b04327e251c7079f983fdc1e98e38cf4a8a | 4,324 | py | Python | cogs/member_.py | himo1101/NFlegel | 7621f5d71b41b71faaf44d142f3b903b0471873a | [
"MIT"
] | null | null | null | cogs/member_.py | himo1101/NFlegel | 7621f5d71b41b71faaf44d142f3b903b0471873a | [
"MIT"
] | null | null | null | cogs/member_.py | himo1101/NFlegel | 7621f5d71b41b71faaf44d142f3b903b0471873a | [
"MIT"
] | null | null | null | from discord.ext import commands
from flegelapi.pg import default, server
from distutils.util import strtobool
import discord
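# NOTE: the `embed` helper used in the commands below is not imported in this file;
# it is assumed to be provided elsewhere by the bot framework.
# Per-guild settings table, registered with the bot via bot.add_table() in setup().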
member_table = """ member_(
    id serial PRIMARY KEY,
    server_id integer NOT NULL,
    role_id integer,
    channel_id integer,
    custom_mes character varying DEFAULT 'が入出しました。',
    on_off boolean DEFAULT FALSE)"""
class Member_(commands.Cog):
    def __init__(self, bot):
        self.bot = bot
        self.pool = bot.pool
        # Assumed timestamp display format; the original never defines self.fmt,
        # although on_member_join formats datetimes with it.
        self.fmt = '{:%Y-%m-%d %H:%M:%S}'
@commands.command()
    async def enable(self, ctx, enable: str = 'on'):
        try:
            result_enable = strtobool(enable)
        except ValueError:
            return await ctx.send(f'{enable}は正常な値ではありません')
        before_content = await server.fetch(self.pool, 'member_', ctx.guild)
        await default.update(self.pool, 'member_', 'on_off', result_enable, 'server_id', ctx.guild.id)
        await embed.default(ctx, 'enable change', f'{before_content} -> {"有効" if result_enable else "無効"}化')
@commands.command()
async def add_mrole(self, ctx, role: discord.Role=None):
if role is None:
return await ctx.send('役職が指定されていません')
        before_content = await server.fetch(self.pool, 'member_', ctx.guild)
await default.update(self.pool, 'member_', 'role_id', role.id, 'server_id', ctx.guild.id)
await embed.default(ctx, 'role change', f'{before_content} -> {role.name}')
@commands.command()
async def add_channel(self, ctx, channel:discord.TextChannel=None):
if channel is None:
return await ctx.send('チャンネルが指定されていません')
before_content=await server.fetch(self.pool, 'member_', ctx.guild)
        await default.update(self.pool, 'member_', 'channel_id', channel.id, 'server_id', ctx.guild.id)
await embed.default(ctx, 'channel change', f'{before_content} -> {channel.mention}')
@commands.command()
async def add_mes(self, ctx, mes:str=None):
if mes is None:
return await ctx.send('メッセージが指定されていません')
before_content=await server.fetch(self.pool, 'member_', ctx.guild)
await default.update(self.pool, 'member_', 'custom_mes', mes, 'server_id', ctx.guild.id)
await embed.default(ctx, 'custom message change', f'{before_content} -> {mes}')
@commands.Cog.listener()
async def on_member_join(self, member):
        server_date = await server.fetch(self.pool, 'member_', member.guild)
        if not server_date['on_off']:
return
role= member.guild.get_role(int(server_date['role_id']))
await member.add_roles(role)
status = str(member.status)
if status == 'online':
status = 'オンライン'
elif status == 'offline':
status = 'オフライン'
elif status == 'idle':
status = '退席中'
elif status == 'dnd':
status = '起こさないで'
roles = [role.name for role in member.roles if role.name != '@everyone']
roles = ', '.join(roles) if roles != [] else 'なし'
e = discord.Embed(
title = '新しい人が来ました。',
            description=f'ユーザー情報: {member.display_name}',
colour=discord.Colour.purple()
)
e.set_author(name=member.name, icon_url=member.avatar_url)
e.set_thumbnail(url=member.avatar_url)
e.add_field(
name='ステータス',
value=status
)
e.add_field(
name='サーバー参加日時',
value=self.fmt.format(member.joined_at)
)
e.add_field(
name='アカウント作成日時',
value=self.fmt.format(member.created_at)
)
e.add_field(
name='役職',
value=roles
)
        if server_date['custom_mes'] is None:
            e.set_footer(
                text=f'ID: {member.id} '
            )
        else:
            e.set_footer(
                text=server_date['custom_mes']
            )
channel= self.bot.get_channel(int(server_date['channel_id']))
await channel.send(embed=e)
def setup(bot):
bot.add_cog(Member_(bot))
bot.add_table(member_table)
| 32.757576 | 102 | 0.56568 | 4,104 | 0.89607 | 0 | 0 | 3,916 | 0.855022 | 3,791 | 0.827729 | 1,142 | 0.249345 |
cd59d9b93bd906d8d50478926274bfcb5696cb98 | 4,388 | py | Python | old/policy_grads2.py | DarkElement75/cartpole-policy-gradients | ca6b7fb826fa023e2d845408d3d16d8032b07508 | [
"MIT"
] | null | null | null | old/policy_grads2.py | DarkElement75/cartpole-policy-gradients | ca6b7fb826fa023e2d845408d3d16d8032b07508 | [
"MIT"
] | null | null | null | old/policy_grads2.py | DarkElement75/cartpole-policy-gradients | ca6b7fb826fa023e2d845408d3d16d8032b07508 | [
"MIT"
] | null | null | null | import gym
import numpy as np
import sys
import theano
import theano.tensor as T
import layers
from layers import FullyConnectedLayer, SoftmaxLayer
env = gym.make('CartPole-v0')
#Number of actions
action_n = env.action_space.n
#Number of features observed
feature_n = env.observation_space.shape[0]
epochs = 100
mini_batch_size = 10
timesteps = 100
learning_rate = 1.0
epsilon_decay_rate = -0.04
initial_epsilon = 1.0
#avg_solved_perc = 97.5
#avg_solved_threshold = (avg_solved_perc/100*timesteps)
render = False
def exp_decay(initial, rate, iteration):
#Do our k*e^(r*t) exponential decay
return initial*np.exp(rate*iteration)
def epsilon_greedy(epsilon):
#Return True if exploring, False if exploiting
r = np.random.rand(1)[0]
if r < epsilon:
return True
else:
return False
def get_action(observation):
    # Note: adapted from a class method; uses the module-level `layers` and `x`
    # defined below (this helper is not called anywhere in this script).
    #test_x, test_y = test_data
    i = T.lscalar() # mini-batch index
    test_mb_predictions = theano.function(
        [i], layers[-1].y_out,
        givens={
            x: observation
        }, on_unused_input='warn')
    return test_mb_predictions(0)
#Initialize network
layers = [
FullyConnectedLayer(n_in=4, n_out=10),
FullyConnectedLayer(n_in=10, n_out=10),
SoftmaxLayer(n_in=10, n_out=2)
]
params = [param for layer in layers for param in layer.params]
iterations = mini_batch_size
x = T.vector("x")
y = T.ivector("y")
init_layer = layers[0]
init_layer.set_inpt(x, 1)
for j in xrange(1, len(layers)):
prev_layer, layer = layers[j-1], layers[j]
layer.set_inpt(
prev_layer.output, 1)
cost = T.argmax(T.log(layers[-1].output))
R = 0
#iter_grads = [theano.shared([np.zeros(shape=param.get_value().shape, dtype=theano.config.floatX) for param in params])]
#grads = [theano.shared([np.zeros(shape=param.get_value().shape, dtype=theano.config.floatX) for param in params])]
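# REINFORCE-style policy gradient: accumulate grad(log pi(a|s)) over an episode,
# scale by the episode return R, and average across the mini-batch before updating.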
grads = T.grad(cost, params)
iter_grads = [T.zeros_like(grad) for grad in grads]
t_updates = []
iter_updates = []
mb_updates = []
#t_updates.append((iter_grads, iter_grads+T.grad(cost, params)))
#iter_updates.append((iter_grads, T.dot(T.dot(iter_grads, R), 1/mini_batch_size)))
#iter_updates.append((grads, grads+iter_grads))
#mb_updates.append((params, params+learning_rate*grads))
for param, grad in zip(params, grads):
mb_updates.append((param, param+learning_rate*grad))#Update our params as we were
#To execute our updates when necessary
exec_t_updates = theano.function([], None, updates=t_updates)
exec_iter_updates = theano.function([], None, updates=iter_updates)
#exec_mb_updates = theano.function([], None, updates=mb_updates)
"""
mb = T.iscalar()
train_mb = theano.function(
[], cost, updates=mb_updates)
"""
#To get our action a possibilities from state s
s = T.vector()
NN_output = theano.function(
[s], layers[-1].output,
givens={
x: s
})
for e in range(epochs):
#grads = T.set_subtensor(grads, T.zeros_like(grads))
grads = grads * 0
epsilon = exp_decay(initial_epsilon, epsilon_decay_rate, e)
for mb in range(mini_batch_size):
s = env.reset()
R = 0
#iter_grads = T.set_subtensor(iter_grads, T.zeros_like(iter_grads))
iter_grads = grads * 0
for t in range(timesteps):
if render:
env.render()
if epsilon_greedy(epsilon):
#Random action
action = env.action_space.sample()
tmp = T.scalar("tmp")
max_action = T.ones_like(tmp)
else:
#Policy Action
a = NN_output(s)
action = np.argmax(a, axis=1)[0]
max_action = T.max(a)
#exec_t_update()
iter_grads = iter_grads + T.grad(max_action, params)
s, r, done, info = env.step(action)
R += r
if done:
break
#exec_iter_update()
iter_grads = [iter_grad * R / mini_batch_size for iter_grad in iter_grads]
grads += iter_grads
print "Epoch: %i, Reward: %i, Epsilon: %f" % (e, R, epsilon)
#exec_mb_updates()
#cost_asdf = train_mb()
#print "Updating params..."
for param, grad in zip(params, grads):
param = param + learning_rate * grad
| 26.756098 | 120 | 0.635369 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,337 | 0.304695 |
cd5a19f0cbafdf639c273ea9eebb620d7cbc509e | 7,720 | py | Python | client.py | andreidorin13/cs544-messaging-protocol | 40d26cb20234a4ad58095150795946aceaf9e4d4 | [
"MIT"
] | null | null | null | client.py | andreidorin13/cs544-messaging-protocol | 40d26cb20234a4ad58095150795946aceaf9e4d4 | [
"MIT"
] | null | null | null | client.py | andreidorin13/cs544-messaging-protocol | 40d26cb20234a4ad58095150795946aceaf9e4d4 | [
"MIT"
] | null | null | null | #!/usr/bin/python
'''
Andrei Dorin
06/10/2018
User interface for WISP chat implementation
'''
import argparse
import logging
import signal
import sys
import time
import queue
import select
import getpass
from wisp_client import WispClient
from wisp_common import State, WispRequest, WispResponse, WispMessage, WISP_DEFAULT_PORT
class Client():
'''
Chat client class
Handles drawing menu and buttons
Uses inner WispClient for networking
'''
def __init__(self):
self._logger = logging.getLogger()
self._wclient = WispClient()
self._friend_list = None
# ---- Public Functions ----
def connect(self, host, port):
'''Connect to chat server'''
if not self._wclient.connect(host, port):
self._logger.error('Client exiting')
sys.exit(1)
def discover(self):
'''Attempt to discover IP of chat server on network'''
if not self._wclient.discover(WISP_DEFAULT_PORT):
self._logger.error('Client exiting')
sys.exit(1)
def start(self):
'''
Start WispClient event loop
Begin authentication procedure
'''
self._wclient.start()
response = self._auth()
while response.code != WispResponse.OK:
print(f'\033[31m{response.data}\033[39m')
response = self._auth()
self._wclient.state = State.Command
self._draw_main_menu()
def _draw_main_menu(self):
'''
UI
Main application menu
Delegates to sub-menus
'''
user_cmd = None
cmd_list = [
self._search,
self._delete,
self._talk,
self._quit
]
while user_cmd != len(cmd_list):
self._friends()
print(f'Commands:\n1. Search for Friends\n2. Delete Friend\n3. Talk to Friend\n4. Exit')
user_cmd = self._get_user_input(1, len(cmd_list))
cmd_list[user_cmd-1]()
# ---- Client Commands + Menus ----
def _auth(self):
'''Gather username and password, attempt blocking authentication call'''
username = input('Username: ')
while len(username) > 16:
username = input('Username too long, try again: ')
password = getpass.getpass()
while len(password) > 16:
password = input('Password too long, try again: ')
return self._blocking_request(WispRequest.AUTH, username, password)
def _search(self):
'''
Query server for users containting search phrase
Offer option of adding them as friends
'''
phrase = input('Search phrase: ')
while len(phrase) > 16:
phrase = input('Phrase too long, try again: ')
# Display search results
results = self._blocking_request(WispRequest.SEARCH, phrase).data
index = self._draw_menu('Results', results)
if index == -1:
return
# If here then make friend request
response = self._blocking_request(WispRequest.ADD, results[index])
if response.code == WispResponse.OK:
print(f'Friend added succesfully!')
else:
print(f'\033[31m{response.data}\033[39m')
def _friends(self):
'''Retrieve and draw friend list'''
self._friend_list = self._blocking_request(WispRequest.LIST).data
print('Friends:')
for i, friend in enumerate(self._friend_list):
print(f'{i+1}. {friend}')
def _delete(self):
'''Delete a friend'''
index = self._draw_menu('Deleting Friend:', self._friend_list)
if index == -1:
return
self._blocking_request(WispRequest.DEL, self._friend_list[index])
def _talk(self):
'''Start a conversation with a friend'''
index = self._draw_menu('Select Friend to talk to: ', self._friend_list)
if index == -1:
return
response = self._blocking_request(WispRequest.CONV, self._friend_list[index])
if response.code == WispResponse.OK:
self._wclient.state = State.Conversation
self._async_conv()
self._wclient.state = State.Command
else:
print(f'\033[31m{response.data}\033[39m')
def _quit(self):
'''Nicely close connection to server'''
print('Sending goodbye message')
self._wclient.reqq.put(WispRequest(WispRequest.QUIT))
        time.sleep(.250) # make sure the request gets processed before exiting
sys.exit(0)
# ----- Helper Functions -----
def _blocking_request(self, cmd, arg1=None, arg2=None):
'''Sends command to server and awaits response'''
res = None
self._wclient.reqq.put(WispRequest(cmd, arg1, arg2))
while res is None:
try:
res = self._wclient.resq.get(block=False)
except queue.Empty:
pass
time.sleep(0.01)
return res
    def _async_conv(self):
        '''Relay stdin lines to the peer and print incoming messages until an empty line is entered'''
        print('New conversation! Empty line to return to menu')
line = None
while line != '':
read, _, _ = select.select([sys.stdin], [], [], 0)
if read:
line = sys.stdin.readline().rstrip()
if len(line) > 127:
for batch in [line[i:i+127] for i in range(0, len(line), 127)]:
self._wclient.reqq.put(WispMessage(batch))
else:
self._wclient.reqq.put(WispMessage(line))
try:
res = self._wclient.resq.get(block=False)
print(f'\033[92m{res}\033[39m')
except queue.Empty:
pass
time.sleep(0.01)
print('Returning to menu!')
@classmethod
def _draw_menu(cls, header, options):
'''Draws menu based on list of options'''
upper = len(options)+1
print(header)
for i, opt in enumerate(options):
print(f'{i+1}. {opt}')
print(f'Press {upper} to go back')
index = cls._get_user_input(1, upper)
return -1 if index == upper else index-1
@classmethod
def _get_user_input(cls, lower, upper):
'''Gets user input as int within lower/upper bounds'''
user_cmd = -1
while not lower <= user_cmd <= upper:
try:
user_cmd = int(input('Choose Number: '))
except (ValueError, EOFError):
continue
return user_cmd
def signal_sigint(_, __):
'''
Signal handler for KeyboardInterrupt or SIGINT
'''
print('SIGINT Received, shutting down')
sys.exit(0)
def main():
'''
Main entry point of client
Argument parsing and initializing client
'''
parser = argparse.ArgumentParser(description='WISP protocol chat client')
parser.add_argument('-H', '--host', type=str,
help='IP of server, if none is specified, service discovery will be attempted')
parser.add_argument('-p', '--port', type=int, default=32500,
help='Port of server to connect, if none is specified, protocol default 32500 will be used')
parser.add_argument('-v', '--verbosity', type=int, default=4, choices=[4, 3, 2, 1],
help='Verbosity of logger, 4: Error, 3: Warning, 2: Info, 1: Debug')
args = parser.parse_args()
logging.basicConfig()
logging.getLogger().setLevel(args.verbosity * 10)
signal.signal(signal.SIGINT, signal_sigint)
# CLIENT
client = Client()
if args.host:
client.connect(args.host, args.port)
else:
client.discover()
client.start()
if __name__ == '__main__':
main()
| 32.166667 | 116 | 0.584197 | 6,178 | 0.800259 | 0 | 0 | 725 | 0.093912 | 0 | 0 | 2,244 | 0.290674 |
cd5a2073c9ceff87b49af728a52895c0f1961f0b | 61 | py | Python | newpy/loggers/__init__.py | janithPet/newpy | feb264f4e3da371c3f2ddc7633f3fdd5a25db661 | [
"MIT"
] | null | null | null | newpy/loggers/__init__.py | janithPet/newpy | feb264f4e3da371c3f2ddc7633f3fdd5a25db661 | [
"MIT"
] | 4 | 2021-09-03T06:18:29.000Z | 2021-09-03T08:36:25.000Z | newpy/loggers/__init__.py | janithPet/newpy | feb264f4e3da371c3f2ddc7633f3fdd5a25db661 | [
"MIT"
] | null | null | null | from newpy.loggers.colored_formatter import ColoredFormatter
| 30.5 | 60 | 0.901639 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
cd5d8710df3d01c40879c8b39d50c6ffb79da254 | 181 | py | Python | putao/source/__init__.py | ongyx/putao | e901402308b9b4c3c9acf8dae15eb4781ddfcede | [
"MIT"
] | 7 | 2021-06-29T00:50:46.000Z | 2021-10-14T23:31:12.000Z | putao/source/__init__.py | ongyx/putao | e901402308b9b4c3c9acf8dae15eb4781ddfcede | [
"MIT"
] | 2 | 2021-08-28T05:34:01.000Z | 2021-08-29T05:03:34.000Z | putao/source/__init__.py | ongyx/putao | e901402308b9b4c3c9acf8dae15eb4781ddfcede | [
"MIT"
] | null | null | null | # coding: utf8
"""Sources provide an abstraction between a source of music notes and putao projects."""
from . import mml # noqa
from .reg import formats, loads, register # noqa
| 30.166667 | 88 | 0.734807 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 114 | 0.629834 |
cd5dd7dda160122dc7f0149e4f5abf4d8e95ebe4 | 206 | py | Python | parser/src/test/test-data/math_csc.py | luciansmith/sedml-script | d891645d0b3f89ff190fc7e719659c8e229c07da | [
"MIT"
] | null | null | null | parser/src/test/test-data/math_csc.py | luciansmith/sedml-script | d891645d0b3f89ff190fc7e719659c8e229c07da | [
"MIT"
] | null | null | null | parser/src/test/test-data/math_csc.py | luciansmith/sedml-script | d891645d0b3f89ff190fc7e719659c8e229c07da | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created by libsedmlscript v0.0.1
"""
from sed_roadrunner import model, task, plot
from mpmath import csc
#----------------------------------------------
csc(0.5)
| 15.846154 | 48 | 0.475728 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 116 | 0.563107 |