Dataset schema (one row per source file; ⌀ marks nullable columns):

| column | dtype | range / values |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 5 – 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3–248 |
| max_stars_repo_name | string | lengths 5–125 |
| max_stars_repo_head_hexsha | string | lengths 40–78 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 | 1 – 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24–24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24–24 ⌀ |
| max_issues_repo_path | string | lengths 3–248 |
| max_issues_repo_name | string | lengths 5–125 |
| max_issues_repo_head_hexsha | string | lengths 40–78 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 | 1 – 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24–24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24–24 ⌀ |
| max_forks_repo_path | string | lengths 3–248 |
| max_forks_repo_name | string | lengths 5–125 |
| max_forks_repo_head_hexsha | string | lengths 40–78 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 | 1 – 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24–24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24–24 ⌀ |
| content | string | lengths 5 – 2.06M |
| avg_line_length | float64 | 1 – 1.02M |
| max_line_length | int64 | 3 – 1.03M |
| alphanum_fraction | float64 | 0 – 1 |
| count_classes | int64 | 0 – 1.6M |
| score_classes | float64 | 0 – 1 |
| count_generators | int64 | 0 – 651k |
| score_generators | float64 | 0 – 1 |
| count_decorators | int64 | 0 – 990k |
| score_decorators | float64 | 0 – 1 |
| count_async_functions | int64 | 0 – 235k |
| score_async_functions | float64 | 0 – 1 |
| count_documentation | int64 | 0 – 1.04M |
| score_documentation | float64 | 0 – 1 |

Each record below is shown as one pipe-delimited metadata line (all columns up to `content`), followed by the file content, followed by a pipe-delimited line with the per-file statistics (`avg_line_length` through `score_documentation`).
2f4adf626e0639100f39276c7a36ef5fa92541f9 | 1,185 | py | Python | parse_xlsx.py | UoA-eResearch/OPIMD | 63d2279eea8de7db53b01c50e8e35b483ab572c4 | ["MIT"] | null | null | null | parse_xlsx.py | UoA-eResearch/OPIMD | 63d2279eea8de7db53b01c50e8e35b483ab572c4 | ["MIT"] | 2 | 2021-03-03T06:11:30.000Z | 2021-03-05T02:57:02.000Z | parse_xlsx.py | UoA-eResearch/OPIMD | 63d2279eea8de7db53b01c50e8e35b483ab572c4 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
import pandas as pd
import json
df = pd.read_excel("OPIMD Calc_checked_03Feb21AL.xlsx", sheet_name=None)
obj = {}
dz = df["OPIMD15ACCESSDATAZONERANK"]
dz = dz.dropna(subset=["datazone"])
dz.datazone = dz.datazone.astype(int)
dz.index = dz.datazone
obj["dz"] = dz.OPIMDAccPopRank_AL.to_dict()
hlth = df["HEALTHCALC"]
hlth.index = hlth.HlthPattern
obj["hlth"] = hlth.HlthRank.to_dict()
inc = df["INCOMECALC"]
inc.index = inc.IncPattern
obj["inc"] = inc.IncRank.to_dict()
house = df["HOUSECALC"]
house.index = house.HouPattern
obj["house"] = house.HouRank.to_dict()
con = df["CONNECTCALC"]
con = con.dropna(subset=["ConPattern"])
con.ConPattern = con.ConPattern.astype(int)
con.index = con.ConPattern
obj["con"] = con.ConRank.to_dict()
assets = df["ASSETSCALC"]
assets = assets.dropna(subset=["AsPattern"])
assets.AsPattern = assets.AsPattern.astype(int)
assets.index = assets.AsPattern
obj["assets"] = assets.AsRank.to_dict()
breaks = df["OPIMDRankDecile"]
breaks = breaks.iloc[3:13,0:3]
breaks.columns = ["min", "max", "decile"]
obj["breaks"] = breaks.to_dict(orient='records')
with open("data.json", "w") as f:
json.dump(obj, f)
print("Saved")
| 25.212766 | 72 | 0.709705 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 285 | 0.240506 |
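The script above flattens several Excel sheets into lookup dictionaries and dumps them as JSON. A minimal sketch of consuming that output (assumed usage, not part of the repository): note that `json.dump` coerces the integer datazone keys to strings on the way out.

```python
import json

# Load the lookup tables written by parse_xlsx.py above.
with open("data.json") as f:
    lookups = json.load(f)

# JSON object keys are always strings, so the integer datazone codes
# written by pandas come back as string keys after json.load.
datazone = "101"  # hypothetical datazone code
print(lookups["dz"].get(datazone))
```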
2f4c73bdc5e9b2d9296a2574b70a67727f97ee93 | 1,806 | py | Python | forvo.py | edoput/ForvoDownloader | d2d034ee5d9f22cd4faad76444172490a0ff23e2 | ["MIT"] | 9 | 2018-09-03T22:36:53.000Z | 2021-11-09T11:59:14.000Z | forvo.py | EdoPut/ForvoDownloader | d2d034ee5d9f22cd4faad76444172490a0ff23e2 | ["MIT"] | null | null | null | forvo.py | EdoPut/ForvoDownloader | d2d034ee5d9f22cd4faad76444172490a0ff23e2 | ["MIT"] | 2 | 2015-01-27T15:09:26.000Z | 2017-01-29T04:39:22.000Z |
import requests
from urllib.parse import quote


def ForvoRequest(QUERY, LANG, apikey, ACT='word-pronunciations', FORMAT='mp3', free=True):
    """Return a list of links to pronunciations of the word QUERY in language LANG.

    ACT defaults to 'word-pronunciations'. FORMAT='ogg' returns links to ogg
    pronunciations instead of mp3. free=True (default) targets the free API;
    free=False targets the commercial API.
    """
    if free:  # default
        base_url = 'http://apifree.forvo.com/'
    else:
        # TODO: confirm the non-free base url
        base_url = 'http://api.forvo.com/'
    key = [
        ('action', ACT),
        ('format', 'json'),
        ('word', quote(QUERY)),
        ('language', LANG),
        ('key', apikey),
    ]
    url = base_url + '/'.join('%s/%s' % a for a in key if a[1]) + '/'
    r = requests.get(url)
    data = r.json()
    if data.get('items'):
        # We retrieved a non-empty JSON response. It is a dictionary with two
        # keys: 'attributes' (info about the request we made) and 'items'
        # (a list with one dictionary per pronunciation). Collect the path
        # for the requested audio format from each pronunciation.
        audio_format = 'path' + FORMAT
        return [item[audio_format] for item in data['items']]
    # No 'items' key in the response, or it was empty.
    return None
| 31.137931 | 121 | 0.516058 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 818 | 0.452935 |
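A hedged usage sketch for `ForvoRequest` as defined above; `"your-api-key"` is a placeholder, and a valid Forvo key is required for the request to succeed.

```python
# Hypothetical invocation; replace the placeholder with a real Forvo API key.
if __name__ == "__main__":
    links = ForvoRequest("hello", "en", "your-api-key")
    if links:
        for url in links:
            print(url)
    else:
        print("No pronunciations found.")
```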
2f4c98ccb6eda3b00afce725dc8baea135ce04d6 | 422 | py | Python | fibonacci/fibonacci.py | nateGeorge/whiteboard_practice_problems | c54f9bcf5f743b3f3e9cd4e34fff3b6afcef6c36 | ["MIT"] | null | null | null | fibonacci/fibonacci.py | nateGeorge/whiteboard_practice_problems | c54f9bcf5f743b3f3e9cd4e34fff3b6afcef6c36 | ["MIT"] | null | null | null | fibonacci/fibonacci.py | nateGeorge/whiteboard_practice_problems | c54f9bcf5f743b3f3e9cd4e34fff3b6afcef6c36 | ["MIT"] | null | null | null |
def fib(x):
if not isinstance(x, int):
print "argument must be an integer"
return
if x == 0:
return 0
a, b = 0, 1
for i in range(x-1):
a, b = b, a + b
return b
def fib_rec(x):
if not isinstance(x, int):
raise ValueError("argument must be an integer") # another way to alert the user
if x > 1:
return fib_rec(x - 1) + fib_rec(x - 2)
return x
| 23.444444 | 87 | 0.537915 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 89 | 0.2109 |
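A few sanity checks for the two implementations above (added as an illustration, not part of the original file); both agree on small inputs, but the naive recursion takes exponential time while the loop is linear.

```python
assert fib(0) == 0 and fib(1) == 1
assert fib(10) == 55
assert fib_rec(10) == fib(10)  # same result; O(2^n) recursion vs O(n) loop
```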
2f4cad023005927c7b37c2c98bbb63ef5319fadc | 1,336 | py | Python | python/src/main/python/pyalink/alink/common/sql/sql_query_utils.py | wenwei8268/Alink | c00702538c95a32403985ebd344eb6aeb81749a7 | ["Apache-2.0"] | null | null | null | python/src/main/python/pyalink/alink/common/sql/sql_query_utils.py | wenwei8268/Alink | c00702538c95a32403985ebd344eb6aeb81749a7 | ["Apache-2.0"] | null | null | null | python/src/main/python/pyalink/alink/common/sql/sql_query_utils.py | wenwei8268/Alink | c00702538c95a32403985ebd344eb6aeb81749a7 | ["Apache-2.0"] | null | null | null |
import re
__all__ = ['register_table_name', 'sql_query']
batch_table_name_map = dict()
stream_table_name_map = dict()
def register_table_name(op, name: str, op_type: str):
if op_type == "batch":
batch_table_name_map[name] = op
elif op_type == "stream":
stream_table_name_map[name] = op
else:
raise Exception("op_type should be 'batch' or 'stream'.")
def clear_table_names():
batch_table_name_map.clear()
stream_table_name_map.clear()
def sql_query(query: str, op_type: str):
if op_type == "batch":
from pyalink.alink.batch.common import PySqlCmdBatchOp
table_name_map = batch_table_name_map
sql_cmd_op_cls = PySqlCmdBatchOp
elif op_type == "stream":
table_name_map = stream_table_name_map
from pyalink.alink.stream.common import PySqlCmdStreamOp
sql_cmd_op_cls = PySqlCmdStreamOp
else:
raise Exception("op_type should be 'batch' or 'stream'.")
counter = 0
ops = []
for (name, op) in table_name_map.items():
pattern = "\\b" + name + "\\b"
match = re.findall(pattern, query)
if match is None or len(match) == 0:
continue
ops.append(op)
counter += 1
sql_cmd_op = sql_cmd_op_cls().setCommand(query)
sql_cmd_op.linkFrom(*ops)
return sql_cmd_op
| 28.425532 | 65 | 0.654192 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 152 | 0.113772 |
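A sketch of how these helpers are meant to be chained in a PyAlink session: register a source op under a table name, then run SQL that references it. The `CsvSourceBatchOp` source, its settings, and the `useLocalEnv` setup are assumptions for illustration, not part of the file above.

```python
from pyalink.alink import *  # assumed PyAlink entry point

useLocalEnv(1)  # assumed local execution environment

source = CsvSourceBatchOp() \
    .setFilePath("data.csv") \
    .setSchemaStr("f0 double, f1 string")
register_table_name(source, "t", "batch")

# sql_query scans the query text for registered table names and links the
# matching ops into a PySqlCmdBatchOp before executing the SQL.
result = sql_query("SELECT f1, COUNT(*) AS cnt FROM t GROUP BY f1", "batch")
result.print()
```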
2f4d2891267d928eb5b2260208cbd4b134295605 | 3,790 | py | Python | salt/utils/win_chcp.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | ["Apache-2.0"] | 9,425 | 2015-01-01T05:59:24.000Z | 2022-03-31T20:44:05.000Z | salt/utils/win_chcp.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | ["Apache-2.0"] | 33,507 | 2015-01-01T00:19:56.000Z | 2022-03-31T23:48:20.000Z | salt/utils/win_chcp.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | ["Apache-2.0"] | 5,810 | 2015-01-01T19:11:45.000Z | 2022-03-31T02:37:20.000Z |
"""
Functions for working with the codepage on Windows systems
"""
import logging
from contextlib import contextmanager
from salt.exceptions import CodePageError
log = logging.getLogger(__name__)
try:
import pywintypes
import win32console
HAS_WIN32 = True
except ImportError:
HAS_WIN32 = False
# Although utils are often directly imported, it is also possible to use the loader.
def __virtual__():
"""
Only load if Win32 Libraries are installed
"""
if not HAS_WIN32:
return False, "This utility requires pywin32"
return "win_chcp"
@contextmanager
def chcp(page_id, raise_error=False):
"""
Gets or sets the codepage of the shell.
Args:
page_id (str, int):
A number representing the codepage.
raise_error (bool):
``True`` will raise an error if the codepage fails to change.
``False`` will suppress the error
Returns:
int: A number representing the codepage
Raises:
CodePageError: On unsuccessful codepage change
"""
if not isinstance(page_id, int):
try:
page_id = int(page_id)
except ValueError:
error = "The `page_id` needs to be an integer, not {}".format(type(page_id))
if raise_error:
raise CodePageError(error)
log.error(error)
return -1
previous_page_id = get_codepage_id(raise_error=raise_error)
if page_id and previous_page_id and page_id != previous_page_id:
set_code_page = True
else:
set_code_page = False
try:
if set_code_page:
set_codepage_id(page_id, raise_error=raise_error)
# Subprocesses started from now will use the set code page id
yield
finally:
if set_code_page:
# Reset to the old code page
set_codepage_id(previous_page_id, raise_error=raise_error)
def get_codepage_id(raise_error=False):
"""
Get the currently set code page on windows
Args:
raise_error (bool):
``True`` will raise an error if the codepage fails to change.
``False`` will suppress the error
Returns:
int: A number representing the codepage
Raises:
CodePageError: On unsuccessful codepage change
"""
try:
return win32console.GetConsoleCP()
except pywintypes.error as exc:
_, _, msg = exc.args
error = "Failed to get the windows code page: {}".format(msg)
if raise_error:
raise CodePageError(error)
else:
log.error(error)
return -1
def set_codepage_id(page_id, raise_error=False):
"""
Set the code page on windows
Args:
page_id (str, int):
A number representing the codepage.
raise_error (bool):
``True`` will raise an error if the codepage fails to change.
``False`` will suppress the error
Returns:
int: A number representing the codepage
Raises:
CodePageError: On unsuccessful codepage change
"""
if not isinstance(page_id, int):
try:
page_id = int(page_id)
except ValueError:
error = "The `page_id` needs to be an integer, not {}".format(type(page_id))
if raise_error:
raise CodePageError(error)
log.error(error)
return -1
try:
win32console.SetConsoleCP(page_id)
return get_codepage_id(raise_error=raise_error)
except pywintypes.error as exc:
_, _, msg = exc.args
error = "Failed to set the windows code page: {}".format(msg)
if raise_error:
raise CodePageError(error)
else:
log.error(error)
return -1
| 25.608108 | 88 | 0.61715 | 0 | 0 | 1,325 | 0.349604 | 1,341 | 0.353826 | 0 | 0 | 1,693 | 0.446702 |
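Assumed usage of the `chcp` context manager above (Windows only; 65001 is the UTF-8 code page):

```python
import subprocess

# Subprocesses launched inside the block inherit the UTF-8 console code page;
# the previous code page is restored on exit, even if an exception is raised.
with chcp(65001, raise_error=True):
    subprocess.run(["cmd", "/c", "chcp"], check=False)
```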
2f4d57d728b00fc588f9af5da19650e009e95339 | 827 | py | Python | application/server.py | comov/fucked-up_schedule | 3e6a2972f46686829b655798cd641cd82559db24 | ["MIT"] | null | null | null | application/server.py | comov/fucked-up_schedule | 3e6a2972f46686829b655798cd641cd82559db24 | ["MIT"] | null | null | null | application/server.py | comov/fucked-up_schedule | 3e6a2972f46686829b655798cd641cd82559db24 | ["MIT"] | null | null | null |
from flask import Flask, render_template
from application.settings import STATIC
from application.storage import storage
app = Flask(__name__, static_url_path=STATIC)
@app.route('/')
def hello_world():
storage_dataset = storage.load_data()
labels = []
datasets = {}
for label, dataset in storage_dataset.items():
labels.append(label)
for name, data in dataset.items():
            d = datasets.get(name)
if d is None:
d = data
d['data'] = [d['value']]
d['name'] = name
else:
d['data'].append(data['value'])
datasets[name] = d
return render_template('index.html', **{
'country': 'Kyrgyzstan',
'labels': labels,
'dataset': list(datasets.values()),
})
| 25.84375 | 50 | 0.562273 | 0 | 0 | 0 | 0 | 655 | 0.792019 | 0 | 0 | 85 | 0.102781 |
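The module exposes `app` but no entry point, so it is presumably launched through the Flask CLI. A minimal sketch of a local development entry point (an assumption, not present in the original file):

```python
if __name__ == "__main__":
    app.run(debug=True)  # development server only
```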
2f4e64d9de5293438f0fe185689a4d11efc8c4c9 | 1,857 | py | Python | cli_fun/commands/fun.py | e4r7hbug/cli-fun | 43f9a1bf788745783a24f315d80ceb969ff853e4 | ["MIT"] | null | null | null | cli_fun/commands/fun.py | e4r7hbug/cli-fun | 43f9a1bf788745783a24f315d80ceb969ff853e4 | ["MIT"] | null | null | null | cli_fun/commands/fun.py | e4r7hbug/cli-fun | 43f9a1bf788745783a24f315d80ceb969ff853e4 | ["MIT"] | null | null | null |
"""Fun section of CLI command."""
import json
import logging
import time
from pprint import pformat, pprint
import click
from fabric.colors import red
@click.group()
def cli():
"""My fun program!"""
pass
@cli.command()
def progress():
"""Sample progress bar."""
i = range(0, 200)
logging.debug('%s -> %s', i[0], i[-1])
with click.progressbar(i, width=0, fill_char=red('#')) as items:
for _ in items:
time.sleep(.01)
@cli.command('open')
def fun_open():
"""Trying out click.launch."""
sites = {
'Google': 'https://google.com',
'The Verge': 'https://theverge.com',
'Liliputing': 'https://liliputing.com'
}
    sites_keys = list(sites.keys())  # list() so the keys can be indexed by the prompt choice
for index, site in enumerate(sites_keys):
click.echo('%i %s' % (index, site))
choice = click.prompt('Which site to open?', default=0, type=int)
click.launch(sites[sites_keys[choice]])
@cli.command()
def party():
"""Get this party started!"""
for i in range(10):
click.echo('Wub wub wub')
logging.debug(i)
@cli.command('to')
@click.option('-d', '--destination', prompt=True)
def fun_to(destination):
"""Connecting fun to stuffs!"""
click.echo('Apparently you are going to ' + destination)
@cli.command('max')
def fun_max():
"""Maximum levels achieved."""
click.echo('You found the highest peak!')
@cli.command()
def hop():
"""The hopping function."""
click.echo('Hop hop hop, \'til you just can stop!')
@cli.command()
def j():
"""Example JSON."""
test_object = {'this': 'that', 'up': 'down', 'sub': {'can': 'do'}}
print(json.dumps(test_object, indent=2))
pprint(test_object, indent=2)
print(pformat(test_object, indent=2))
print(pformat(test_object, indent=2, depth=1))
print(test_object.items())
print(test_object.values())
| 22.925926 | 70 | 0.611739 | 0 | 0 | 0 | 0 | 1,681 | 0.905223 | 0 | 0 | 565 | 0.304254 |
2f4ee2585931ea1270d6eb83cfe79d8eaf1f4d33 | 1,851 | py | Python | tests/algorithms/descriptor_generator/test_colordescriptor.py | joshanderson-kw/SMQTK | 594e7c733fe7f4e514a1a08a7343293a883a41fc | ["BSD-3-Clause"] | 82 | 2015-01-07T15:33:29.000Z | 2021-08-11T18:34:05.000Z | tests/algorithms/descriptor_generator/test_colordescriptor.py | joshanderson-kw/SMQTK | 594e7c733fe7f4e514a1a08a7343293a883a41fc | ["BSD-3-Clause"] | 230 | 2015-04-08T14:36:51.000Z | 2022-03-14T17:55:30.000Z | tests/algorithms/descriptor_generator/test_colordescriptor.py | joshanderson-kw/SMQTK | 594e7c733fe7f4e514a1a08a7343293a883a41fc | ["BSD-3-Clause"] | 65 | 2015-01-04T15:00:16.000Z | 2021-11-19T18:09:11.000Z |
import unittest
import unittest.mock as mock
import pytest
from smqtk.algorithms.descriptor_generator import DescriptorGenerator
from smqtk.algorithms.descriptor_generator.colordescriptor.colordescriptor \
import ColorDescriptor_Image_csift # arbitrary leaf class
from smqtk.utils.configuration import configuration_test_helper
@pytest.mark.skipif(not ColorDescriptor_Image_csift.is_usable(),
reason="ColorDescriptor generator is not currently usable")
class TestColorDescriptor (unittest.TestCase):
def test_impl_findable(self):
self.assertIn(ColorDescriptor_Image_csift.__name__,
DescriptorGenerator.get_impls())
@mock.patch('smqtk.algorithms.descriptor_generator'
'.colordescriptor.colordescriptor.safe_create_dir')
def test_configuration(self, _mock_scd):
i = ColorDescriptor_Image_csift(
model_directory='test model dir',
work_directory='test work dir',
model_gen_descriptor_limit=123764,
kmeans_k=42, flann_distance_metric='hik',
flann_target_precision=0.92, flann_sample_fraction=0.71,
flann_autotune=True, random_seed=7, use_spatial_pyramid=True,
parallel=3,
)
for inst in configuration_test_helper(i):
assert inst._model_dir == 'test model dir'
assert inst._work_dir == 'test work dir'
assert inst._model_gen_descriptor_limit == 123764
assert inst._kmeans_k == 42
assert inst._flann_distance_metric == 'hik'
assert inst._flann_target_precision == 0.92
assert inst._flann_sample_fraction == 0.71
assert inst._flann_autotune is True
assert inst._rand_seed == 7
assert inst._use_sp is True
assert inst.parallel == 3
| 42.068182 | 79 | 0.690438 | 1,368 | 0.73906 | 0 | 0 | 1,513 | 0.817396 | 0 | 0 | 234 | 0.126418 |
2f54a4c20f5d809def78444ea740f895640d9cbe | 557 | py | Python | conservation/migrations/0020_auto_20190418_1715.py | ropable/wastd | 295c60760548d177859de9c0bebdae93342767d0 | ["MIT"] | 3 | 2020-07-23T06:37:43.000Z | 2022-01-27T09:40:40.000Z | conservation/migrations/0020_auto_20190418_1715.py | ropable/wastd | 295c60760548d177859de9c0bebdae93342767d0 | ["MIT"] | 337 | 2018-07-12T05:56:29.000Z | 2022-03-30T02:40:41.000Z | conservation/migrations/0020_auto_20190418_1715.py | ropable/wastd | 295c60760548d177859de9c0bebdae93342767d0 | ["MIT"] | 2 | 2020-02-24T00:05:46.000Z | 2020-07-15T07:02:29.000Z |
# Generated by Django 2.1.7 on 2019-04-18 09:15
from django.db import migrations, models
import django.db.models.deletion
import django_fsm
class Migration(migrations.Migration):
dependencies = [
('conservation', '0019_auto_20190410_1329'),
]
operations = [
migrations.RenameModel(
old_name='TaxonGazettal',
new_name='TaxonConservationListing',
),
migrations.RenameModel(
old_name='CommunityGazettal',
new_name='CommunityConservationListing',
),
]
| 23.208333 | 52 | 0.642729 | 413 | 0.741472 | 0 | 0 | 0 | 0 | 0 | 0 | 176 | 0.315978 |
2f57b78b84caa4984e3516eb70876b6001368c78 | 1,141 | py | Python | src/news/migrations/0005_news_base_fields.py | Little-Pogchamp-Team/kinopoisk_on_django | 06e1b5ee14c7e77dd5b69140732461a02bf44566 | ["MIT"] | 10 | 2021-01-10T09:39:16.000Z | 2022-02-05T06:40:47.000Z | src/news/migrations/0005_news_base_fields.py | Little-Pogchamp-Team/kinopoisk_on_django | 06e1b5ee14c7e77dd5b69140732461a02bf44566 | ["MIT"] | null | null | null | src/news/migrations/0005_news_base_fields.py | Little-Pogchamp-Team/kinopoisk_on_django | 06e1b5ee14c7e77dd5b69140732461a02bf44566 | ["MIT"] | 1 | 2021-01-11T17:04:06.000Z | 2021-01-11T17:04:06.000Z |
# Generated by Django 3.1.5 on 2021-04-28 16:22
import ckeditor.fields
from django.db import migrations, models
import django_minio_backend.models
class Migration(migrations.Migration):
dependencies = [
('news', '0004_profile_info'),
]
operations = [
migrations.AddField(
model_name='news',
name='image',
field=models.ImageField(null=True, storage=django_minio_backend.models.MinioBackend(bucket_name='news-images'), upload_to=django_minio_backend.models.iso_date_prefix),
),
migrations.AddField(
model_name='news',
name='preview_title',
field=models.CharField(default='-', max_length=300),
preserve_default=False,
),
migrations.AlterField(
model_name='news',
name='content',
field=ckeditor.fields.RichTextField(),
),
migrations.AlterField(
model_name='news',
name='title',
field=models.CharField(max_length=300),
),
migrations.DeleteModel(
name='NewsPhoto',
),
]
| 28.525 | 179 | 0.595092 | 990 | 0.86766 | 0 | 0 | 0 | 0 | 0 | 0 | 161 | 0.141104 |
2f57fea89f96d0ae41ee6422418756f6c9d832f5 | 1,433 | py | Python | src/utils/common.py | Conni2461/admission_handler | 6ea2a696100c046fd5d5ede468febd9072f3763f | ["MIT"] | 1 | 2022-02-11T04:29:18.000Z | 2022-02-11T04:29:18.000Z | src/utils/common.py | Conni2461/admission_handler | 6ea2a696100c046fd5d5ede468febd9072f3763f | ["MIT"] | null | null | null | src/utils/common.py | Conni2461/admission_handler | 6ea2a696100c046fd5d5ede468febd9072f3763f | ["MIT"] | null | null | null |
import socket
from threading import Thread, Timer
class Invokeable:
def __init__(self, signal, *args, **kwargs):
self._signal = signal
self._args = args
self._kwargs = kwargs
@property
def signal(self):
return self._signal
@property
def kwargs(self):
return self._kwargs
class SocketThread(Thread):
def __init__(self, queue):
super().__init__()
self.stopped = False
self._queue = queue
def emit(self, **kwargs):
signal = kwargs.pop("signal")
self._queue.put(Invokeable(signal, **kwargs))
def join(self):
self.stopped = True
super().join()
def start(self):
self.stopped = False
super().start()
class CircularList(list):
def __init__(self, *args):
super().__init__(*args)
self.i = 0
def next(self, index=None):
if index is not None:
self.i = index
if (self.i + 1) <= (len(self) - 1):
self.i += 1
else:
self.i = 0
return self[self.i]
class RepeatTimer(Timer):
def run(self):
while not self.finished.wait(self.interval):
self.function(*self.args, **self.kwargs)
def get_real_ip():
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect(('1.1.1.1', 1))
return sock.getsockname()[0]
def get_hostname():
return socket.gethostname()
| 22.390625 | 59 | 0.5806 | 1,174 | 0.81926 | 0 | 0 | 118 | 0.082345 | 0 | 0 | 17 | 0.011863 |
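Assumed usage of the helpers above: `CircularList.next()` wraps past the end of the list, and `RepeatTimer` keeps firing until cancelled, unlike `threading.Timer`, which fires once.

```python
ring = CircularList(["a", "b", "c"])
print(ring.next(), ring.next(), ring.next(), ring.next())  # b c a b

timer = RepeatTimer(1.0, lambda: print("tick"))
timer.start()
# ... later, stop the periodic callback:
timer.cancel()
```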
2f585f0414875528a6779f295b93677e10c21cf1 | 5,206 | py | Python | app/models.py | lilianwaweru/Oa_Online | 58644f5dd4ae1f396b43a2da980a9c464a9bfdd4 | ["MIT"] | null | null | null | app/models.py | lilianwaweru/Oa_Online | 58644f5dd4ae1f396b43a2da980a9c464a9bfdd4 | ["MIT"] | null | null | null | app/models.py | lilianwaweru/Oa_Online | 58644f5dd4ae1f396b43a2da980a9c464a9bfdd4 | ["MIT"] | 4 | 2019-04-30T09:07:22.000Z | 2019-07-02T08:51:22.000Z |
from . import db
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from . import login_manager
from datetime import datetime
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key = True)
husband_name = db.Column(db.String(255))
husband_email = db.Column(db.String(255), unique = True, index = True)
husband_ID = db.Column(db.String(255), unique = True, index = True)
wife_name = db.Column(db.String(255))
wife_email = db.Column(db.String(255), unique = True, index = True)
wife_ID = db.Column(db.String(255), unique = True, index = True)
password_hash = db.Column(db.String(255))
husband_pic_path = db.Column(db.String(255))
wife_pic_path = db.Column(db.String(255))
notices = db.relationship("Notice", backref= "user", lazy="dynamic")
certificates = db.relationship("Certificate", backref= "user", lazy="dynamic")
impediments = db.relationship("Impediment", backref= "user", lazy="dynamic")
agreements = db.relationship("Agreement", backref= "user", lazy="dynamic")
@property
def password(self):
raise AttributeError('You cannot read the password attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def __repr__(self):
return f'User {self.husband_name} and {self.wife_name}'
class Notice(db.Model):
__tablename__ = 'notices'
id = db.Column(db.Integer, primary_key = True)
district = db.Column(db.String(255))
spouse = db.Column(db.String(255))
g_name = db.Column(db.String(255))
g_condition = db.Column(db.String(255))
g_occupation = db.Column(db.String(255))
g_age = db.Column(db.Integer)
g_residence = db.Column(db.String(255))
g_consent = db.Column(db.String(255))
b_name = db.Column(db.String(255))
b_condition = db.Column(db.String(255))
b_occupation = db.Column(db.String(255))
b_age = db.Column(db.Integer)
b_residence = db.Column(db.String(255))
b_consent = db.Column(db.String(255))
dd = db.Column(db.Integer)
mm = db.Column(db.String(255))
yy = db.Column(db.Integer)
signature = db.Column(db.String(255))
user_id = db.Column(db.Integer,db.ForeignKey("users.id"))
class Certificate(db.Model):
__tablename__ = 'certificates'
id = db.Column(db.Integer, primary_key = True)
g_date = db.Column(db.Date())
g_name = db.Column(db.String(255))
g_condition = db.Column(db.String(255))
g_occupation = db.Column(db.String(255))
g_age = db.Column(db.Integer)
g_residence = db.Column(db.String(255))
g_fname = db.Column(db.String(255))
g_foccupation = db.Column(db.String(255))
b_date = db.Column(db.Date())
b_name = db.Column(db.String(255))
b_condition = db.Column(db.String(255))
b_occupation = db.Column(db.String(255))
b_age = db.Column(db.Integer)
b_residence = db.Column(db.String(255))
    b_fname = db.Column(db.String(255))
    b_foccupation = db.Column(db.String(255))
groom = db.Column(db.String(255))
bride = db.Column(db.String(255))
witness1 = db.Column(db.String(255))
witness2 = db.Column(db.String(255))
date = db.Column(db.Date())
user_id = db.Column(db.Integer,db.ForeignKey("users.id"))
class Impediment(db.Model):
__tablename__ = 'impediments'
id = db.Column(db.Integer, primary_key = True)
spouse = db.Column(db.String(255))
at = db.Column(db.String(255))
in_input = db.Column(db.String(255))
surname = db.Column(db.String(255))
forename = db.Column(db.String(255))
country = db.Column(db.String(255))
date = db.Column(db.Date())
father = db.Column(db.String(255))
sex = db.Column(db.String(255))
race = db.Column(db.String(255))
religion = db.Column(db.String(255))
residence = db.Column(db.String(255))
condition = db.Column(db.String(255))
occupation = db.Column(db.String(255))
dd = db.Column(db.Integer)
mm = db.Column(db.String(255))
yy = db.Column(db.Integer)
signature = db.Column(db.String(255))
user_id = db.Column(db.Integer,db.ForeignKey("users.id"))
class Agreement(db.Model):
__tablename__ = 'agreements'
id = db.Column(db.Integer, primary_key = True)
husband_vows = db.Column(db.String(255))
wife_vows = db.Column(db.String(255))
dowry_agreement = db.Column(db.String(255))
other_agreements = db.Column(db.String(255))
user_id = db.Column(db.Integer,db.ForeignKey("users.id"))
class Witness(db.Model):
__tablename__ = 'witnesses'
id = db.Column(db.Integer, primary_key = True)
witness1_name = db.Column(db.String(255))
witness2_name = db.Column(db.String(255))
witness1_id = db.Column(db.String(255))
witness2_id = db.Column(db.String(255))
witness1_dob = db.Column(db.Date())
witness2_dob = db.Column(db.Date())
user_id = db.Column(db.Integer,db.ForeignKey("users.id"))
| 37.185714 | 82 | 0.680177 | 4,918 | 0.944679 | 0 | 0 | 306 | 0.058778 | 0 | 0 | 308 | 0.059163 |
2f59a50ee0f4047fe095b3e0f94aa7691fc20820 | 2,139 | py | Python | tests/server/datasets/test_dao.py | davidkartchner/rubrix | 33faa006d7498a806a9fd594036d4a42c7d70da2 | ["Apache-2.0"] | 1 | 2022-01-06T09:05:06.000Z | 2022-01-06T09:05:06.000Z | tests/server/datasets/test_dao.py | davidkartchner/rubrix | 33faa006d7498a806a9fd594036d4a42c7d70da2 | ["Apache-2.0"] | null | null | null | tests/server/datasets/test_dao.py | davidkartchner/rubrix | 33faa006d7498a806a9fd594036d4a42c7d70da2 | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# Copyright 2021-present, the Recognai S.L. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from rubrix.server.commons.errors import ClosedDatasetError
from rubrix.server.commons.es_wrapper import create_es_wrapper
from rubrix.server.datasets.dao import DatasetsDAO
from rubrix.server.datasets.model import DatasetDB
from rubrix.server.tasks.commons import TaskType
from rubrix.server.tasks.commons.dao.dao import dataset_records_dao
from rubrix.server.tasks.text_classification.dao.es_config import (
text_classification_mappings,
)
es_wrapper = create_es_wrapper()
records = dataset_records_dao(es_wrapper)
records.register_task_mappings(
TaskType.text_classification, text_classification_mappings()
)
dao = DatasetsDAO.get_instance(es_wrapper, records)
def test_retrieve_ownered_dataset_for_no_owner_user():
dataset = "test_retrieve_ownered_dataset_for_no_owner_user"
created = dao.create_dataset(
DatasetDB(name=dataset, owner="other", task=TaskType.text_classification)
)
assert dao.find_by_name(created.name, owner=created.owner) == created
assert dao.find_by_name(created.name, owner=None) == created
assert dao.find_by_name(created.name, owner="me") is None
def test_close_dataset():
dataset = "test_close_dataset"
created = dao.create_dataset(
DatasetDB(name=dataset, owner="other", task=TaskType.text_classification)
)
dao.close(created)
with pytest.raises(ClosedDatasetError, match=dataset):
records.search_records(dataset=created)
dao.open(created)
records.search_records(dataset=created)
| 36.254237 | 81 | 0.777466 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 695 | 0.324918 |
2f59b443158d106a76a2bebe88570da44bbc0fe9 | 5,838 | py | Python | tests/tests_rotated_array_search.py | quervernetzt/find-value-in-rotated-sorted-array | b391b1502fd326a57973621500e984bf6f7df44a | ["MIT"] | null | null | null | tests/tests_rotated_array_search.py | quervernetzt/find-value-in-rotated-sorted-array | b391b1502fd326a57973621500e984bf6f7df44a | ["MIT"] | null | null | null | tests/tests_rotated_array_search.py | quervernetzt/find-value-in-rotated-sorted-array | b391b1502fd326a57973621500e984bf6f7df44a | ["MIT"] | null | null | null |
import unittest
from solution.rotated_array_search import RotatedArraySearch
class TestCasesRotatedArraySearch(unittest.TestCase):
def input_list_is_none_return_minus_one(self: object) -> None:
# Arrange
rotated_array_search: RotatedArraySearch = RotatedArraySearch()
input_list: list = None
target: int = 1
# Act
index: int = rotated_array_search.main(input_list, target)
# Assert
self.assertEqual(index, -1)
def input_list_is_empty_return_minus_one(self: object) -> None:
# Arrange
rotated_array_search: RotatedArraySearch = RotatedArraySearch()
input_list: list = []
target: int = 1
# Act
index: int = rotated_array_search.main(input_list, target)
# Assert
self.assertEqual(index, -1)
def input_list_has_one_element_nonmatching_return_minus_one(self: object) -> None:
# Arrange
rotated_array_search: RotatedArraySearch = RotatedArraySearch()
input_list: list = [2]
target: int = 1
# Act
index: int = rotated_array_search.main(input_list, target)
# Assert
self.assertEqual(index, -1)
def input_list_has_one_element_matching_return_zero(self: object) -> None:
# Arrange
rotated_array_search: RotatedArraySearch = RotatedArraySearch()
input_list: list = [2]
target: int = 2
# Act
index: int = rotated_array_search.main(input_list, target)
# Assert
self.assertEqual(index, 0)
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
def input_list_with_multiple_elements_no_pivot_nonmatching_even_return_minus_one(self: object) -> None:
# Arrange
rotated_array_search: RotatedArraySearch = RotatedArraySearch()
input_list: list = [1,4,5,8,23,50]
target: int = 15
# Act
index: int = rotated_array_search.main(input_list, target)
# Assert
self.assertEqual(index, -1)
def input_list_with_multiple_elements_no_pivot_matching_even_return_index(self: object) -> None:
# Arrange
rotated_array_search: RotatedArraySearch = RotatedArraySearch()
input_list: list = [1,4,5,8,23,50]
target: int = 23
# Act
index: int = rotated_array_search.main(input_list, target)
# Assert
self.assertEqual(index, 4)
def input_list_with_multiple_elements_no_pivot_nonmatching_odd_return_minus_one(self: object) -> None:
# Arrange
rotated_array_search: RotatedArraySearch = RotatedArraySearch()
input_list: list = [1,4,5,8,23,50,51]
target: int = 15
# Act
index: int = rotated_array_search.main(input_list, target)
# Assert
self.assertEqual(index, -1)
def input_list_with_multiple_elements_no_pivot_matching_odd_return_index(self: object) -> None:
# Arrange
rotated_array_search: RotatedArraySearch = RotatedArraySearch()
input_list: list = [1,4,5,8,23,50,51]
target: int = 23
# Act
index: int = rotated_array_search.main(input_list, target)
# Assert
self.assertEqual(index, 4)
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
def input_list_with_multiple_elements_pivot_nonmatching_even_return_minus_one(self: object) -> None:
# Arrange
rotated_array_search: RotatedArraySearch = RotatedArraySearch()
input_list: list = [6,8,23,50,-10,-8,0,1,4,5]
target: int = 15
# Act
index: int = rotated_array_search.main(input_list, target)
# Assert
self.assertEqual(index, -1)
def input_list_with_multiple_elements_pivot_matching_even_return_index(self: object) -> None:
# Arrange
rotated_array_search: RotatedArraySearch = RotatedArraySearch()
input_list: list = [6,8,23,50,-10,-8,0,1,4,5]
target: int = 1
# Act
index: int = rotated_array_search.main(input_list, target)
# Assert
self.assertEqual(index, 7)
def input_list_with_multiple_elements_pivot_nonmatching_odd_return_minus_one(self: object) -> None:
# Arrange
rotated_array_search: RotatedArraySearch = RotatedArraySearch()
input_list: list = [6,8,23,50,-10,-8,0,1,4]
target: int = 15
# Act
index: int = rotated_array_search.main(input_list, target)
# Assert
self.assertEqual(index, -1)
def input_list_with_multiple_elements_pivot_matching_odd_return_index(self: object) -> None:
# Arrange
rotated_array_search: RotatedArraySearch = RotatedArraySearch()
input_list: list = [6,8,23,50,-10,-8,0,1,4]
target: int = 1
# Act
index: int = rotated_array_search.main(input_list, target)
# Assert
self.assertEqual(index, 7)
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
def input_list_with_multiple_elements_pivot_matching_edge_cases_return_index(self: object) -> None:
# Arrange
rotated_array_search: RotatedArraySearch = RotatedArraySearch()
input_list_0: list = [-8,0,1,4,6,8,23,50,-10]
input_list_1: list = [50,-10,-8,0,1,4,6,8,23]
target: int = 1
# Act
index_0: int = rotated_array_search.main(input_list_0, target)
index_1: int = rotated_array_search.main(input_list_1, target)
# Assert
self.assertEqual(index_0, 2)
self.assertEqual(index_1, 4)
| 33.94186 | 107 | 0.601918 | 5,759 | 0.986468 | 0 | 0 | 0 | 0 | 0 | 0 | 712 | 0.12196 |
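The `RotatedArraySearch` implementation itself is not included in this row. The tests above are consistent with the standard O(log n) approach sketched below (an assumption for illustration, not the repository's actual code): at each step, one half of the array is guaranteed to be sorted, so the target can be bracketed against that half.

```python
def rotated_search(nums, target):
    """Binary search in a sorted array rotated around an unknown pivot."""
    if not nums:
        return -1
    lo, hi = 0, len(nums) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if nums[mid] == target:
            return mid
        if nums[lo] <= nums[mid]:  # left half is sorted
            if nums[lo] <= target < nums[mid]:
                hi = mid - 1
            else:
                lo = mid + 1
        else:  # right half is sorted
            if nums[mid] < target <= nums[hi]:
                lo = mid + 1
            else:
                hi = mid - 1
    return -1

# Matches the expectation of the pivot test cases above.
assert rotated_search([6, 8, 23, 50, -10, -8, 0, 1, 4, 5], 1) == 7
```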
2f5b87677f26662c1ce0c7a5ee5aaf173034c184 | 3,637 | py | Python | tests/1_local/test_keygroup.py | aporlowski/cloudmesh-cloud | 247479361300f97bbb8b7b1f4c99308358e9e2b2 | ["Apache-2.0"] | 5 | 2019-05-06T01:27:55.000Z | 2020-03-12T09:50:08.000Z | tests/1_local/test_keygroup.py | aporlowski/cloudmesh-cloud | 247479361300f97bbb8b7b1f4c99308358e9e2b2 | ["Apache-2.0"] | 137 | 2019-04-06T12:35:29.000Z | 2020-05-05T10:02:36.000Z | tests/1_local/test_keygroup.py | aporlowski/cloudmesh-cloud | 247479361300f97bbb8b7b1f4c99308358e9e2b2 | ["Apache-2.0"] | 27 | 2019-04-05T22:03:41.000Z | 2021-03-05T00:05:00.000Z |
###############################################################
# pytest -v --capture=no tests/test_keygroup.py
# pytest -v tests/test_keygroup.py
###############################################################
# import pytest
import os

from cloudmesh.common.Benchmark import Benchmark
from cloudmesh.common.variables import Variables
from cloudmesh.compute.vm.Provider import Provider
from cloudmesh.configuration.Config import Config
from cloudmesh.mongo.CmDatabase import CmDatabase
Benchmark.debug()
user = Config()["cloudmesh.profile.user"]
variables = Variables()
KEY = "test-keygroup"
cloud = variables.parameter('cloud')
print(f"Test run for {cloud} on key {KEY}")
if cloud is None:
raise ValueError("cloud is not not set")
cm = CmDatabase()
provider = Provider(name=cloud)
#@pytest.mark.incremental
class Test_Keygroup:
def test_create_keys(self):
n = 5
for i in range(0, n):
name = f"test_id_rsa{i}"
command = f"ssh-keygen -f $HOME/.ssh/{name}"
os.system(command)
# assert os.path.isfile(name)
# create test for all other functions
# create test for adding key to group
def test_add_key_to_keygroup_database(self):
n = 5
keygroup = "testKeyGroup"
for i in range(0, n):
name = f"test_id_rsa{i}"
command = f"cms keygroup add $HOME/.ssh/{name}"
os.system(command)
"""
def test_upload_key_to_database(self):
HEADING()
local = Key()
pprint(local)
Benchmark.Start()
local.add(KEY, "ssh")
Benchmark.Stop()
key = cm.find_name(KEY, "key")[0]
key['name'] == KEY
def test_upload_key_to_cloud(self):
HEADING()
if cloud == 'azure':
# todo: implement this
return
if cloud == 'aws':
all_keys = cm.find_all_by_name(KEY, "key")
for k in all_keys:
if 'public_key' in k.keys():
key = k
break
else:
key = cm.find_name(KEY, "key")[0]
pprint(key)
Benchmark.Start()
r = provider.key_upload(key)
Benchmark.Stop()
# print ("PPP", r)
def test_list_key_from_cloud(self):
HEADING()
Benchmark.Start()
keys = provider.keys()
Benchmark.Stop()
if cloud in ['azure', 'oracle']:
VERBOSE(f"{cloud} does not support key list!")
return
found = False
for key in keys:
if key['name'] == KEY:
found = True
break
assert found
def test_delete_key_from_cloud(self):
HEADING()
try:
Benchmark.Start()
r = provider.key_delete(KEY)
Benchmark.Stop()
print(r)
except Exception as e:
print(e)
def test_get_key_from_cloud(self):
HEADING()
pass
def test_key_delete(self):
HEADING()
cm.clear(collection=f"local-key")
try:
r = provider.key_delete(KEY)
except:
pass
def test_benchmark(self):
Benchmark.print(sysinfo=False, csv=True, tag=cloud)
"""
def test_list(self):
#os.system("cms keygroup add ")
os.system("cms key group list")
| 25.978571 | 63 | 0.56915 | 2,539 | 0.698103 | 0 | 0 | 0 | 0 | 0 | 0 | 2,462 | 0.676932 |
2f5cb793e2e748f1c572ea256bcf2c1a860ee543 | 2,344 | py | Python | blinpy/tests/test_models.py | solbes/blinpy | 89b4f26066c383fc07ca6b1cbfdc8a61397f3f08 | ["MIT"] | 3 | 2021-02-11T14:00:08.000Z | 2021-10-13T20:41:21.000Z | blinpy/tests/test_models.py | solbes/blinpy | 89b4f26066c383fc07ca6b1cbfdc8a61397f3f08 | ["MIT"] | null | null | null | blinpy/tests/test_models.py | solbes/blinpy | 89b4f26066c383fc07ca6b1cbfdc8a61397f3f08 | ["MIT"] | null | null | null |
import pytest
import pandas as pd
import numpy as np
from blinpy import models
data = pd.DataFrame(
{'x': np.array(
[0.0, 1.0, 1.0, 2.0, 1.8, 3.0, 4.0, 5.2, 6.5, 8.0, 10.0]),
'y': np.array([5.0, 5.0, 5.1, 5.3, 5.5, 5.7, 6.0, 6.3, 6.7, 7.1, 7.5])}
)
def test_linear_model():
# 1) linear model, no priors
lm = models.LinearModel(
output_col='y',
input_cols=['x'],
bias=True,
theta_names=['th1'],
).fit(data)
np.testing.assert_allclose(
np.array([4.883977, 0.270029]),
lm.post_mu,
rtol=1e-5
)
# 2) partial prior
lm = models.LinearModel(
output_col='y',
input_cols=['x'],
bias=True,
theta_names=['th1'],
pri_cols=['th1']
).fit(data, pri_mu=[0.35], pri_cov=0.001)
np.testing.assert_allclose(
np.array([4.603935457929664, 0.34251082265349875]),
lm.post_mu,
rtol=1e-5
)
# prior for both parameters
lm = models.LinearModel(
output_col='y',
input_cols=['x'],
bias=True,
theta_names=['th1'],
).fit(data, pri_mu=[4.0, 0.35], pri_cov=[1.0, 0.001])
np.testing.assert_allclose(
np.array([4.546825637808106, 0.34442570226594676]),
lm.post_mu,
rtol=1e-5
)
def test_gam_line_fit():
# 1) line fit, no priors
gam_specs = [{
'fun': lambda df: df['x'].values[:, np.newaxis],
'name': 'slope'
},
{
'fun': lambda df: np.ones((len(df),1)),
'name': 'bias'
}
]
post_mu = models.GamModel('y', gam_specs).fit(data).post_mu
np.testing.assert_allclose(
np.array([0.270029, 4.883977]),
post_mu,
rtol=1e-5
)
# 2) partial prior
gam_specs = [{
'fun': lambda df: df['x'].values[:, np.newaxis],
'name': 'slope',
'prior': {
'B': np.eye(1),
'mu': np.array([0.35]),
'cov': np.array([0.001])
}
},
{
'fun': lambda df: np.ones((len(df), 1)),
'name': 'bias'
}
]
post_mu = models.GamModel('y', gam_specs).fit(data).post_mu
np.testing.assert_allclose(
np.array([0.34251082265349875, 4.603935457929664]),
post_mu,
rtol=1e-5
)
| 22.980392 | 80 | 0.50128 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 260 | 0.110922 |
2f5daa6050352bd82f556af83e14e2830de366ac | 3,226 | py | Python | packages/gtmapi/lmsrvlabbook/tests/test_jobstatus_queries.py | gigabackup/gigantum-client | 70fe6b39b87b1c56351f2b4c551b6f1693813e4f | ["MIT"] | 60 | 2018-09-26T15:46:00.000Z | 2021-10-10T02:37:14.000Z | packages/gtmapi/lmsrvlabbook/tests/test_jobstatus_queries.py | gigabackup/gigantum-client | 70fe6b39b87b1c56351f2b4c551b6f1693813e4f | ["MIT"] | 1,706 | 2018-09-26T16:11:22.000Z | 2021-08-20T13:37:59.000Z | packages/gtmapi/lmsrvlabbook/tests/test_jobstatus_queries.py | griffinmilsap/gigantum-client | 70fe6b39b87b1c56351f2b4c551b6f1693813e4f | ["MIT"] | 11 | 2019-03-14T13:23:51.000Z | 2022-01-25T01:29:16.000Z |
import pprint
import time
import json
from gtmcore.dispatcher import Dispatcher, jobs
from lmsrvlabbook.tests.fixtures import fixture_working_dir
class TestLabBookServiceQueries(object):
def test_query_finished_task(self, fixture_working_dir):
"""Test listing labbooks"""
d = Dispatcher()
job_id = d.dispatch_task(jobs.test_exit_success)
time.sleep(1)
query = """
{
jobStatus(jobId: "%s") {
result
status
jobMetadata
failureMessage
startedAt
finishedAt
}
}
""" % job_id.key_str
r = fixture_working_dir[2].execute(query)
assert 'errors' not in r
assert int(r['data']['jobStatus']['result']) == 0
assert r['data']['jobStatus']['status'] == 'finished'
assert r['data']['jobStatus']['startedAt'] is not None
assert r['data']['jobStatus']['failureMessage'] is None
assert r['data']['jobStatus']['finishedAt']
assert r['data']['jobStatus']['jobMetadata'] == '{}'
def test_query_failed_task(self, fixture_working_dir):
"""Test listing labbooks"""
d = Dispatcher()
job_id = d.dispatch_task(jobs.test_exit_fail)
time.sleep(1)
query = """
{
jobStatus(jobId: "%s") {
result
status
jobMetadata
failureMessage
startedAt
finishedAt
}
}
""" % job_id
r = fixture_working_dir[2].execute(query)
assert 'errors' not in r
assert r['data']['jobStatus']['result'] is None
assert r['data']['jobStatus']['status'] == 'failed'
assert r['data']['jobStatus']['failureMessage'] == \
'Exception: Intentional Exception from job `test_exit_fail`'
assert r['data']['jobStatus']['startedAt'] is not None
assert r['data']['jobStatus']['finishedAt'] is not None
# Assert the following dict is empty
assert not json.loads(r['data']['jobStatus']['jobMetadata'])
def test_query_started_task(self, fixture_working_dir):
"""Test listing labbooks"""
d = Dispatcher()
job_id = d.dispatch_task(jobs.test_sleep, args=(2,))
time.sleep(1)
query = """
{
jobStatus(jobId: "%s") {
result
status
jobMetadata
failureMessage
startedAt
finishedAt
}
}
""" % job_id
try:
r = fixture_working_dir[2].execute(query)
pprint.pprint(r)
assert 'errors' not in r
assert r['data']['jobStatus']['result'] is None
assert r['data']['jobStatus']['status'] == 'started'
assert r['data']['jobStatus']['failureMessage'] is None
assert r['data']['jobStatus']['startedAt'] is not None
assert json.loads(r['data']['jobStatus']['jobMetadata'])['sample'] == 'test_sleep metadata'
finally:
# Make sure all the jobs finish.
time.sleep(3)
| 31.320388 | 103 | 0.532548 | 3,076 | 0.953503 | 0 | 0 | 0 | 0 | 0 | 0 | 1,506 | 0.466832 |
2f5e89412b184aa3f2abac3805b9bf927e055845 | 204 | py | Python | valid.py | whitereaper25/test_2 | 47212fc977bcd36e8879ada22f319691073accb1 | ["Apache-2.0"] | null | null | null | valid.py | whitereaper25/test_2 | 47212fc977bcd36e8879ada22f319691073accb1 | ["Apache-2.0"] | null | null | null | valid.py | whitereaper25/test_2 | 47212fc977bcd36e8879ada22f319691073accb1 | ["Apache-2.0"] | null | null | null |
import re
def verify(phn_no):
design = "[789]\d{9}$"
if re.match(design,phn_no):
return "yes"
else:
return "No"
n = int(input())
for i in range(n):
print(verify(input()))
| 18.545455 | 31 | 0.553922 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.107843 |
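Assumed behaviour of the validator above: it accepts 10-digit numbers starting with 7, 8 or 9 (the Indian mobile-number pattern) and rejects everything else.

```python
print(verify("9876543210"))  # yes
print(verify("1234567890"))  # No
```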
2f6096fad4d8b4fcb9ab49eab731fdc3465207c6 | 1,632 | py | Python | MAPLEAF/Rocket/sampleStatefulRocketComponent.py | henrystoldt/MAPLEAF | af970d3e8200832f5e70d537b15ad38dd74fa551 | ["MIT"] | 15 | 2020-09-11T19:25:07.000Z | 2022-03-12T16:34:53.000Z | MAPLEAF/Rocket/sampleStatefulRocketComponent.py | henrystoldt/MAPLEAF | af970d3e8200832f5e70d537b15ad38dd74fa551 | ["MIT"] | null | null | null | MAPLEAF/Rocket/sampleStatefulRocketComponent.py | henrystoldt/MAPLEAF | af970d3e8200832f5e70d537b15ad38dd74fa551 | ["MIT"] | 3 | 2021-12-24T19:39:53.000Z | 2022-03-29T01:06:28.000Z |
from MAPLEAF.Motion import ForceMomentSystem, Inertia, Vector
from MAPLEAF.Rocket import RocketComponent
__all__ = [ "SampleStatefulComponent" ]
class SampleStatefulComponent(RocketComponent):
def __init__(self, componentDictReader, rocket, stage):
self.rocket = rocket
self.stage = stage
self.name = componentDictReader.getDictName()
def getExtraParametersToIntegrate(self):
# Examples below for a single parameter to be integrated, can put as many as required in these lists
paramNames = [ "tankLevel" ]
initValues = [ 1.0 ]
derivativeFunctions = [ self.getTankLevelDerivative ]
return paramNames, initValues, derivativeFunctions
def getTankLevelDerivative(self, time, rocketState):
return -2*rocketState.tankLevel # tankLevel will asymptotically approach 0
def getAppliedForce(self, rocketState, time, envConditions, rocketCG):
mag = -2000*self.getTankLevelDerivative(time, rocketState) # Force magnitude proportional to flow rate out of the tank
forceVector = Vector(0, 0, mag)
self.rocket.appendToForceLogLine(" {:>6.4f}".format(mag)) # This will end up in the log file, in the SampleZForce column
return ForceMomentSystem(forceVector)
def getInertia(self, time, rocketState):
mass = 5 + rocketState.tankLevel*4.56 # Fixed Mass + fluid mass
MOI = Vector(mass, mass, mass*0.05) # Related to current mass
CGz = -3 + rocketState.tankLevel # Moves depending on current tank level
CG = Vector(0, 0, CGz)
return Inertia(MOI, CG, mass)
| 42.947368 | 128 | 0.694853 | 1,485 | 0.909926 | 0 | 0 | 0 | 0 | 0 | 0 | 399 | 0.244485 |
2f610ddfbb4015ca897145b09e2fa1a4b5263289 | 866 | py | Python | Array/Final450/Sort_Array_Of_0s_1s_2s.py | prash-kr-meena/GoogleR | 27aca71e51cc2442e604e07ab00406a98d8d63a4 | ["Apache-2.0"] | null | null | null | Array/Final450/Sort_Array_Of_0s_1s_2s.py | prash-kr-meena/GoogleR | 27aca71e51cc2442e604e07ab00406a98d8d63a4 | ["Apache-2.0"] | null | null | null | Array/Final450/Sort_Array_Of_0s_1s_2s.py | prash-kr-meena/GoogleR | 27aca71e51cc2442e604e07ab00406a98d8d63a4 | ["Apache-2.0"] | null | null | null |
from Utils.Array import input_array
ZERO, ONE, TWO = 0, 1, 2
# Time -> O(n)
# Space -> O(1) inplace
def sort_by_counting(A):
cnt_0 = cnt_1 = cnt_2 = 0
# Count the number of 0s, 1s and 2s in the array
for num in A:
if num == ZERO:
cnt_0 += 1
elif num == ONE:
cnt_1 += 1
elif num == TWO:
cnt_2 += 1
# Update the array
i = 0
# Store all the 0s in the beginning
while cnt_0 > 0:
A[i] = 0
i += 1
cnt_0 -= 1
# Then all the 1s
while cnt_1 > 0:
A[i] = 1
i += 1
cnt_1 -= 1
# Finally all the 2s
while cnt_2 > 0:
A[i] = 2
i += 1
cnt_2 -= 1
if __name__ == "__main__":
A = input_array()
sort_by_counting(A)
print(A)
"""
2 1 0 1 2 0 0 0 1 2 2 2 1 1
1 1 1 1
2 1 0 2 1 0
2 1 0
"""
| 16.339623 | 52 | 0.469977 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 248 | 0.286374 |
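The counting approach above takes two passes over the array. A single-pass, in-place alternative is the Dutch National Flag partitioning, sketched here as an illustration (not part of the original file):

```python
def sort_by_partitioning(A):
    """One pass: 0s are swapped to the front, 2s to the back, 1s stay put."""
    low, mid, high = 0, 0, len(A) - 1
    while mid <= high:
        if A[mid] == 0:
            A[low], A[mid] = A[mid], A[low]
            low += 1
            mid += 1
        elif A[mid] == 1:
            mid += 1
        else:  # A[mid] == 2
            A[mid], A[high] = A[high], A[mid]
            high -= 1
```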
2f6154e27302aac990c76151fe6022aab6340e63 | 8,601 | py | Python | public/yum-3.2.28/callback.py | chillaxor/blogbin | 211202d513fa80a3d22fb3963f36a01a8dec5b68 | ["MIT"] | 8 | 2021-11-26T06:19:06.000Z | 2022-01-11T01:30:11.000Z | initrd/usr/share/yum-cli/callback.py | OpenCloudOS/OpenCloudOS-tools | 06b12aab3182f4207d78a5d8733be03f0d7b69a4 | ["MulanPSL-1.0"] | 5 | 2021-02-02T08:17:10.000Z | 2022-02-27T06:53:42.000Z | public/yum-3.2.28/callback.py | chillaxor/blogbin | 211202d513fa80a3d22fb3963f36a01a8dec5b68 | ["MIT"] | 2 | 2021-12-21T08:36:02.000Z | 2021-12-21T08:55:38.000Z |
#!/usr/bin/python -t
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Copyright 2005 Duke University
"""
Progress display callback classes for the yum command line.
"""
import rpm
import os
import sys
import logging
from yum import _
from yum.constants import *
class RPMInstallCallback:
"""
Yum command line callback class for callbacks from the RPM library.
"""
def __init__(self, output=1):
self.output = output
self.callbackfilehandles = {}
self.total_actions = 0
self.total_installed = 0
self.installed_pkg_names = []
self.total_removed = 0
self.mark = "#"
self.marks = 27
self.lastmsg = None
self.logger = logging.getLogger('yum.filelogging.RPMInstallCallback')
self.filelog = False
self.myprocess = { TS_UPDATE : _('Updating'),
TS_ERASE: _('Erasing'),
TS_INSTALL: _('Installing'),
TS_TRUEINSTALL : _('Installing'),
TS_OBSOLETED: _('Obsoleted'),
TS_OBSOLETING: _('Installing')}
self.mypostprocess = { TS_UPDATE: _('Updated'),
TS_ERASE: _('Erased'),
TS_INSTALL: _('Installed'),
TS_TRUEINSTALL: _('Installed'),
TS_OBSOLETED: _('Obsoleted'),
TS_OBSOLETING: _('Installed')}
self.tsInfo = None # this needs to be set for anything else to work
def _dopkgtup(self, hdr):
tmpepoch = hdr['epoch']
if tmpepoch is None: epoch = '0'
else: epoch = str(tmpepoch)
return (hdr['name'], hdr['arch'], epoch, hdr['version'], hdr['release'])
def _makeHandle(self, hdr):
handle = '%s:%s.%s-%s-%s' % (hdr['epoch'], hdr['name'], hdr['version'],
hdr['release'], hdr['arch'])
return handle
def _localprint(self, msg):
if self.output:
print msg
def _makefmt(self, percent, progress = True):
l = len(str(self.total_actions))
size = "%s.%s" % (l, l)
fmt_done = "[%" + size + "s/%" + size + "s]"
done = fmt_done % (self.total_installed + self.total_removed,
self.total_actions)
marks = self.marks - (2 * l)
width = "%s.%s" % (marks, marks)
fmt_bar = "%-" + width + "s"
if progress:
bar = fmt_bar % (self.mark * int(marks * (percent / 100.0)), )
fmt = "\r %-10.10s: %-28.28s " + bar + " " + done
else:
bar = fmt_bar % (self.mark * marks, )
fmt = " %-10.10s: %-28.28s " + bar + " " + done
return fmt
def _logPkgString(self, hdr):
"""return nice representation of the package for the log"""
(n,a,e,v,r) = self._dopkgtup(hdr)
if e == '0':
pkg = '%s.%s %s-%s' % (n, a, v, r)
else:
pkg = '%s.%s %s:%s-%s' % (n, a, e, v, r)
return pkg
def callback(self, what, bytes, total, h, user):
if what == rpm.RPMCALLBACK_TRANS_START:
if bytes == 6:
self.total_actions = total
elif what == rpm.RPMCALLBACK_TRANS_PROGRESS:
pass
elif what == rpm.RPMCALLBACK_TRANS_STOP:
pass
elif what == rpm.RPMCALLBACK_INST_OPEN_FILE:
self.lastmsg = None
hdr = None
if h is not None:
hdr, rpmloc = h
handle = self._makeHandle(hdr)
fd = os.open(rpmloc, os.O_RDONLY)
self.callbackfilehandles[handle]=fd
self.total_installed += 1
self.installed_pkg_names.append(hdr['name'])
return fd
else:
self._localprint(_("No header - huh?"))
elif what == rpm.RPMCALLBACK_INST_CLOSE_FILE:
hdr = None
if h is not None:
hdr, rpmloc = h
handle = self._makeHandle(hdr)
os.close(self.callbackfilehandles[handle])
fd = 0
# log stuff
pkgtup = self._dopkgtup(hdr)
txmbrs = self.tsInfo.getMembers(pkgtup=pkgtup)
for txmbr in txmbrs:
try:
process = self.myprocess[txmbr.output_state]
processed = self.mypostprocess[txmbr.output_state]
except KeyError:
pass
if self.filelog:
pkgrep = self._logPkgString(hdr)
msg = '%s: %s' % (processed, pkgrep)
self.logger.info(msg)
elif what == rpm.RPMCALLBACK_INST_PROGRESS:
if h is not None:
# If h is a string, we're repackaging.
# Why the RPMCALLBACK_REPACKAGE_PROGRESS flag isn't set, I have no idea
if type(h) == type(""):
if total == 0:
percent = 0
else:
percent = (bytes*100L)/total
if self.output and sys.stdout.isatty():
fmt = self._makefmt(percent)
msg = fmt % (_('Repackage'), h)
if bytes == total:
msg = msg + "\n"
if msg != self.lastmsg:
sys.stdout.write(msg)
sys.stdout.flush()
self.lastmsg = msg
else:
hdr, rpmloc = h
if total == 0:
percent = 0
else:
percent = (bytes*100L)/total
pkgtup = self._dopkgtup(hdr)
txmbrs = self.tsInfo.getMembers(pkgtup=pkgtup)
for txmbr in txmbrs:
try:
process = self.myprocess[txmbr.output_state]
except KeyError, e:
print _("Error: invalid output state: %s for %s") % \
(txmbr.output_state, hdr['name'])
else:
if self.output and (sys.stdout.isatty() or bytes == total):
fmt = self._makefmt(percent)
msg = fmt % (process, hdr['name'])
if msg != self.lastmsg:
sys.stdout.write(msg)
sys.stdout.flush()
self.lastmsg = msg
if bytes == total:
print " "
elif what == rpm.RPMCALLBACK_UNINST_START:
pass
elif what == rpm.RPMCALLBACK_UNINST_PROGRESS:
pass
elif what == rpm.RPMCALLBACK_UNINST_STOP:
self.total_removed += 1
if self.filelog and h not in self.installed_pkg_names:
logmsg = _('Erased: %s' % (h))
self.logger.info(logmsg)
if self.output and sys.stdout.isatty():
if h not in self.installed_pkg_names:
process = _("Removing")
else:
process = _("Cleanup")
percent = 100
fmt = self._makefmt(percent, False)
msg = fmt % (process, h)
sys.stdout.write(msg + "\n")
sys.stdout.flush()
elif what == rpm.RPMCALLBACK_REPACKAGE_START:
pass
elif what == rpm.RPMCALLBACK_REPACKAGE_STOP:
pass
elif what == rpm.RPMCALLBACK_REPACKAGE_PROGRESS:
pass
| 36.914163 | 87 | 0.482153 | 7,671 | 0.891873 | 0 | 0 | 0 | 0 | 0 | 0 | 1,643 | 0.191024 |
2f61d9c0592b835198eb2ed4703fc9cefded5f37 | 1,911 | py | Python | miradar_node/scripts/ppi_visualizer.py | QibiTechInc/miradar_ros1_pkgs | 65b339147c2a1a990696d77e75b58f5fba84dc22 | ["Apache-2.0"] | null | null | null | miradar_node/scripts/ppi_visualizer.py | QibiTechInc/miradar_ros1_pkgs | 65b339147c2a1a990696d77e75b58f5fba84dc22 | ["Apache-2.0"] | null | null | null | miradar_node/scripts/ppi_visualizer.py | QibiTechInc/miradar_ros1_pkgs | 65b339147c2a1a990696d77e75b58f5fba84dc22 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python3
import rospy
from miradar_node.msg import PPI, PPIData
from visualization_msgs.msg import MarkerArray, Marker
from geometry_msgs.msg import Point
import dynamic_reconfigure.client
class PPIVisualizer:
def __init__(self):
self.pub = rospy.Publisher("/miradar/markers", MarkerArray, queue_size=20)
self.sub = rospy.Subscriber("/miradar/ppidata", PPIData, self.visualizePPI)
def visualizePPI(self, data):
markerArraydel = MarkerArray()
marker = Marker()
marker.header.frame_id = "miradar"
marker.action = marker.DELETEALL
markerArraydel.markers.append(marker)
self.pub.publish(markerArraydel)
cli = dynamic_reconfigure.client.Client("miradar_node")
dynparam = cli.get_configuration()
markerArray = MarkerArray()
mindb = dynparam["min_dB"]
maxdb = dynparam["max_dB"]
for i in range(len(data.data)):
marker = Marker()
marker.header.frame_id = "miradar"
marker.type = marker.SPHERE
marker.action = marker.ADD
marker.scale.x = 0.2
marker.scale.y = 0.2
marker.scale.z = 0.2
marker.color.a = 1.0
a = 1.0/(float(maxdb) - float(mindb))
b = - (float(mindb)/(float(maxdb) - float(mindb)))
print("a : {0}, b : {1}".format(a, b))
marker.color.r = data.data[i].db * a + b
marker.color.b = 1.0 - marker.color.r
marker.color.g = 0.0
marker.pose.orientation.w = 1.0
marker.pose.position = data.data[i].position
marker.id = i
markerArray.markers.append(marker)
self.pub.publish(markerArray)
if __name__ == "__main__":
rospy.init_node("ppi_visualizer")
ppiVisualizer = PPIVisualizer()
rospy.spin()
| 32.948276 | 83 | 0.591837 | 1,574 | 0.823653 | 0 | 0 | 0 | 0 | 0 | 0 | 150 | 0.078493 |
2f62350af98cfe5e5bc543e35cb2ce81345228a2 | 3,561 | py | Python | app/dashboard.py | nidheesh6/earlyearthquake | d0ab976629f126206afcd3dc15a76c66992f8a9e | ["Apache-2.0"] | null | null | null | app/dashboard.py | nidheesh6/earlyearthquake | d0ab976629f126206afcd3dc15a76c66992f8a9e | ["Apache-2.0"] | null | null | null | app/dashboard.py | nidheesh6/earlyearthquake | d0ab976629f126206afcd3dc15a76c66992f8a9e | ["Apache-2.0"] | null | null | null |
import dash
from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_html_components as html
import psycopg2
import json
import pandas as pd
import time
app = dash.Dash(__name__)
#app.css.config.serve_locally=False
#app.css.append_css(
# {'external_url': 'https://codepen.io/amyoshino/pen/jzXypZ.css'})
conn = psycopg2.connect(host='ec2-18-232-24-132.compute-1.amazonaws.com',database='earthquake', user='postgres', password='********')
cur = conn.cursor()
location = pd.read_csv("data_file.csv")
location=location.astype(str)
app.layout = html.Div([
html.Div([
html.Div([
dcc.Graph(id='graph', style={'margin-top': '20'})], className="six columns"),
html.Div([
dcc.Graph(
id='bar-graph'
)
], className='twelve columns'
),
dcc.Interval(
id='interval-component',
interval=5*1000, # in milliseconds
n_intervals=0)
], className="row")
], className="ten columns offset-by-one")
@app.callback(Output('graph', 'figure'), [Input('interval-component', 'n_intervals')])
def update_map(n):
"""
Args n: int
:rtype: dict
"""
try:
latest_reading = "select * from ereadings limit 90;"
df_map = pd.read_sql(latest_reading, conn)
map_data = df_map.merge(location, how='left', left_on=["device_id", "country_code"], right_on=["device_id","country_code"])
clrred = 'rgb(222,0,0)'
clrgrn = 'rgb(0,222,0)'
def SetColor(gal):
if gal >= .17:
return clrred
else:
return clrgrn
layout = {
'autosize': True,
'height': 500,
'font': dict(color="#191A1A"),
'titlefont': dict(color="#191A1A", size='18'),
'margin': {
'l': 35,
'r': 35,
'b': 35,
't': 45
},
'hovermode': "closest",
'plot_bgcolor': '#fffcfc',
'paper_bgcolor': '#fffcfc',
'showlegend': False,
'legend': dict(font=dict(size=10), orientation='h', x=0, y=1),
'name': map_data['country_code'],
'title': 'earthquake activity for the last 3 seconds',
'mapbox': {
'accesstoken':'*********************************',
'center': {
'lon':-98.49,
'lat':18.29
},
'zoom': 5,
'style': "dark"
}
}
return {
"data": [{
"type": "scattermapbox",
"lat": list(location['latitude']),
"lon": list(location['longitude']),
"hoverinfo": "text",
"hovertext": [["sensor_id: {} <br>country_code: {} <br>gal: {} <br>x: {} <br>y: {}".format(i, j, k, l, m)]
for i, j, k, l, m in zip(location['device_id'],location['country_code'].tolist(),map_data['gal'].tolist(),map_data['avg_x'].tolist(), map_data['avg_y'].tolist())],
"mode": "markers",
"marker": {
"size": 10,
"opacity": 1,
"color": list(map(SetColor, map_data['gal']))
}
}],
"layout": layout
}
except Exception as e:
print("Error: Couldn't update map")
print(e)
if __name__ == '__main__':
app.run_server(debug=False)
| 30.965217 | 193 | 0.48975 | 0 | 0 | 0 | 0 | 2,443 | 0.686043 | 0 | 0 | 1,184 | 0.332491 |
2f63b5c96ba7f532ebe24f05547c39f51d21dc62
| 1,291 |
py
|
Python
|
ddlc/database.py
|
UltiRequiem/ddlc_api
|
ab542b6b9b1979421531dcce636cfe30d18d3a9d
|
[
"MIT"
] | 5 |
2021-11-19T18:57:27.000Z
|
2022-03-19T23:53:45.000Z
|
ddlc/database.py
|
UltiRequiem/ddlc_api
|
ab542b6b9b1979421531dcce636cfe30d18d3a9d
|
[
"MIT"
] | 5 |
2021-11-07T02:43:26.000Z
|
2022-03-06T03:16:28.000Z
|
ddlc/database.py
|
UltiRequiem/ddlc_api
|
ab542b6b9b1979421531dcce636cfe30d18d3a9d
|
[
"MIT"
] | 3 |
2021-11-19T18:57:28.000Z
|
2021-11-19T19:02:39.000Z
|
import pymongo
from .config import DB_PASSWORD, DB_USER, CLUSTER_NAME, SUBDOMAIN, DB_NAME
from .exceptions import CharacterNotFound, PoemAuthorNotFound
class DatabaseService:
def __init__(self):
client = pymongo.MongoClient(
f"mongodb+srv://{DB_USER}:{DB_PASSWORD}@{CLUSTER_NAME}.{SUBDOMAIN}.mongodb.net/{DB_NAME}"
)
self.db = client[DB_NAME]
def get_collection(self, collection_name: str):
return self.db[collection_name].find({}, {"_id": False})
def get_characters(self):
return list(self.get_collection("characters"))
    def get_character_by_name(self, name: str):
        for character in self.get_characters():
            # Case-insensitive match on both sides.
            if character["name"].lower() == name.lower():
                return character
        raise CharacterNotFound()
def new_character(self, character):
self.db.characters.insert_one(character)
def get_poems(self):
return list(self.get_collection("poems"))
    def get_poem_by_author(self, author):
        for poem in self.get_poems():
            if poem["author"] == author:
                return poem
        raise PoemAuthorNotFound()
def new_poem(self, poem):
self.db.poems.insert_one(poem)
DBService = DatabaseService()
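# Usage sketch (assumes the MongoDB credentials imported from .config are set):
#   DBService.get_character_by_name("monika")  -> character dict, or raises CharacterNotFound
#   DBService.get_poem_by_author("Yuri")       -> poem dict, or raises PoemAuthorNotFound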
| 28.065217 | 101 | 0.652982 | 1,103 | 0.854376 | 0 | 0 | 0 | 0 | 0 | 0 | 127 | 0.098373 |
2f6638f61b3058472b08244c7bbaf61f509b9975
| 4,525 |
py
|
Python
|
scripts/main_experiment.py
|
wsavran/relm_pycsep_reproducibility
|
29294dc37627e74b4fcc4d05add1efc5950ded82
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/main_experiment.py
|
wsavran/relm_pycsep_reproducibility
|
29294dc37627e74b4fcc4d05add1efc5950ded82
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/main_experiment.py
|
wsavran/relm_pycsep_reproducibility
|
29294dc37627e74b4fcc4d05add1efc5950ded82
|
[
"BSD-3-Clause"
] | null | null | null |
# imports
from collections import defaultdict
# pycsep imports
import csep
from csep.utils import stats, plots
# experiment imports
from experiment_utilities import (
load_zechar_catalog,
plot_consistency_test_comparison,
read_zechar_csv_to_dict
)
from experiment_config import config
# runtime flags
show_target_event_rates = True
plot = False
compute_evaluations = True
# catalog from manuscript
catalog = csep.load_catalog('./data/evaluation_catalog_zechar2013_merge.txt', loader=load_zechar_catalog)
evaluation_results = defaultdict(list)
# load results from zechar
zechar_dict = read_zechar_csv_to_dict('./data/consistency_quantile_scores_from_zechar.csv')
# main evaluation loop
for name, path in config['forecasts'].items():
# load forecast
fore = csep.load_gridded_forecast(
config['forecasts'][name],
start_date=config['start_date'],
end_date=config['end_date'],
name=name
)
# assign region of forecast to catalog
catalog.region = fore.region
cat_filt = catalog.filter_spatial(in_place=False)
# assign region to new catalog
cat_filt.region = fore.region
# compute likelihood and expected number of events
spatial_magnitude_counts = cat_filt.spatial_magnitude_counts()
ll = stats.poisson_log_likelihood(spatial_magnitude_counts, fore.data).sum()
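    # Per-bin Poisson log-likelihood: ln P(k; lam) = k*ln(lam) - lam - ln(k!),
    # summed here over all spatial/magnitude bins of the forecast.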
# print summary statistics
print(f"{name}\n==========================")
print(f"Nfore: {fore.sum()}\nNobs: {cat_filt.event_count}\nLL/Nobs: {ll / cat_filt.event_count}")
print("")
if show_target_event_rates:
print("Target event rates")
for lon, lat, mag in zip(cat_filt.get_longitudes(), cat_filt.get_latitudes(), cat_filt.get_magnitudes()):
try:
rate = fore.get_rates([lon], [lat], [mag])
print(lon, lat, mag, rate[0])
except ValueError:
print(lon, lat, mag, "ERROR")
print("")
# n-test
if compute_evaluations:
n_test_result = csep.poisson_evaluations.number_test(
fore,
cat_filt
)
evaluation_results['n-test'].append(n_test_result)
print(f"N-test result: {n_test_result.quantile}")
# m-test
m_test_result = csep.poisson_evaluations.magnitude_test(
fore,
cat_filt,
num_simulations=config['nsims'],
seed=config['seed']
)
evaluation_results['m-test'].append(m_test_result)
print(f"M-test result: {m_test_result.quantile}")
# s-test
s_test_result = csep.poisson_evaluations.spatial_test(
fore,
cat_filt,
num_simulations=config['nsims'],
seed=config['seed'],
)
evaluation_results['s-test'].append(s_test_result)
print(f"S-test result: {s_test_result.quantile}")
# l-test
l_test_result = csep.poisson_evaluations.likelihood_test(
fore,
cat_filt,
num_simulations=config['nsims'],
seed=config['seed'],
)
evaluation_results['l-test'].append(l_test_result)
print(f"L-test result: {l_test_result.quantile}")
print("")
# plot and save results
ax = plot_consistency_test_comparison(evaluation_results, zechar_dict)
ax.get_figure().savefig('./output/pycsep_zechar_comparison.pdf')
# visualizations
if plot:
ax = plots.plot_poisson_consistency_test(
evaluation_results['n-test'],
plot_args={'xlabel': 'Observed earthquakes'}
)
ax.set_xlim([0,100])
ax.get_figure().savefig('./output/number_test_pycsep.pdf')
ax = plots.plot_poisson_consistency_test(
evaluation_results['l-test'],
plot_args={'xlabel': 'log-likelihood'},
one_sided_lower=True
)
ax.set_xlim([-600,0])
ax.get_figure().savefig('./output/likelihood_test_pycsep.pdf')
ax = plots.plot_poisson_consistency_test(
evaluation_results['s-test'],
plot_args={'xlabel': 'log-likelihood'},
one_sided_lower=True
)
ax.set_xlim([-220, -100])
ax.get_figure().savefig('./output/spatial_test_pycsep.pdf')
ax = plots.plot_poisson_consistency_test(
evaluation_results['m-test'],
plot_args={'xlabel': 'log-likelihood'},
one_sided_lower=True
)
ax.set_xlim([-35, -10])
ax.get_figure().savefig('./output/magnitude_test_pycsep.pdf')
| 31.206897 | 113 | 0.651271 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,219 | 0.269392 |
2f677c0ab09b208476f962949003f247df030535
| 10,284 |
py
|
Python
|
kakuro.py
|
PanPapag/Kakuro
|
c2de75fff059fdb479c6c435205cf864bd057510
|
[
"MIT"
] | 5 |
2020-01-01T19:12:34.000Z
|
2020-05-16T08:57:08.000Z
|
kakuro.py
|
PanPapag/Kakuro
|
c2de75fff059fdb479c6c435205cf864bd057510
|
[
"MIT"
] | 1 |
2020-04-26T09:51:55.000Z
|
2020-04-26T10:41:25.000Z
|
kakuro.py
|
PanPapag/Kakuro
|
c2de75fff059fdb479c6c435205cf864bd057510
|
[
"MIT"
] | null | null | null |
import os
import re
import sys
import time
import puzzles
from csp import *
from search import *
from utils import *
class Kakuro(CSP):
def __init__(self, puzzle):
self.puzzle = puzzle
self.rows_size = len(puzzle)
self.cols_size = len(puzzle[0])
self.variables = self.get_variables()
self.domain = self.get_domain()
self.neighbors = self.get_neighbors()
self.sums = self.get_sums()
self.constraints = self.get_constraints
CSP.__init__(self, self.variables, self.domain, self.neighbors, self.constraints)
def get_variables(self):
variables = []
for i, row in enumerate(self.puzzle):
for j, cell in enumerate(row):
if cell == 'W':
variables.append('x' + '_' + str(i) + '_' + str(j))
return variables
def get_domain(self):
domain = {}
for variable in self.variables:
domain[variable] = []
for i in range(1,10):
domain[variable].append(i)
return domain
def get_neighbors(self):
neighbors = {}
for variable in self.variables:
neighbors[variable] = []
# Get row and col of current variable
row = int(re.search('_(.*)_', variable).group(1))
col = int(variable.rsplit('_', 1)[-1])
# Check same row for neighbors
for i in range(self.cols_size):
if i < col - 1 or i > col + 1:
continue
if isinstance(self.puzzle[row][i], str):
if self.puzzle[row][i] == 'W':
neighbor_variable = 'x' + '_' + str(row) + '_' + str(i)
if neighbor_variable in self.variables and neighbor_variable != variable:
neighbors[variable].append(neighbor_variable)
# Check same col for neighbors
for i in range(self.rows_size):
if i < row -1 or i > row + 1:
continue
if isinstance(self.puzzle[i][col], str):
if self.puzzle[i][col] == 'W':
neighbor_variable = 'x' + '_' + str(i) + '_' + str(col)
if neighbor_variable in self.variables and neighbor_variable != variable:
neighbors[variable].append(neighbor_variable)
return neighbors
def get_constraints(self, A, a, B, b):
        # if two neighboring variables share the same value, the constraint fails
if a == b:
return False
# store assignments that have been made so far
assignment = self.infer_assignment()
        # If a equals the value already assigned to any neighbor of A,
        # the all-different constraint is violated.
for var in self.neighbors[A]:
if var in assignment:
if assignment[var] == a:
return False
# Similarly to B
for var in self.neighbors[B]:
if var in assignment:
if assignment[var] == b:
return False
# Check if neighbors A and B satisfy their common constraints
for sum in self.sums:
if (A in sum[1]) and (B in sum[1]):
sum_of_neighbors = 0
assigned_neighbors = 0
for var in sum[1]:
if var in assignment:
if (var != A) and (var != B):
sum_of_neighbors += assignment[var]
assigned_neighbors += 1
sum_of_neighbors += a + b
assigned_neighbors += 2
if (len(sum[1]) > assigned_neighbors) and (sum_of_neighbors >= sum[0]):
return False
if (len(sum[1]) == assigned_neighbors) and (sum_of_neighbors != sum[0]):
return False
# Check if A's constraints are being satisfied
for sum in self.sums:
if (A in sum[1]) and (B not in sum[1]):
sum_of_neighbors = 0
assigned_neighbors = 0
for variable in sum[1]:
if variable in assignment:
if variable != A:
sum_of_neighbors += assignment[variable]
assigned_neighbors += 1
sum_of_neighbors += a
assigned_neighbors += 1
if (len(sum[1]) > assigned_neighbors) and (sum_of_neighbors >= sum[0]):
return False
if (len(sum[1]) == assigned_neighbors) and (sum_of_neighbors != sum[0]):
return False
# Check if B's constraints are being satisfied
for sum in self.sums:
if (A not in sum[1]) and (B in sum[1]):
sum_of_neighbors = 0
assigned_neighbors = 0
for variable in sum[1]:
if variable in assignment:
if variable != B:
sum_of_neighbors += assignment[variable]
assigned_neighbors += 1
sum_of_neighbors += b
assigned_neighbors += 1
if (len(sum[1]) > assigned_neighbors) and (sum_of_neighbors >= sum[0]):
return False
if (len(sum[1]) == assigned_neighbors) and (sum_of_neighbors != sum[0]):
return False
        # Everything ok: all constraints are satisfied, so return True
return True
def get_sums(self):
sums = []
for i, row in enumerate(self.puzzle):
for j, cell in enumerate(row):
if (cell != 'W' and cell != 'B'):
# down - column
if (cell[0] != ''):
x = []
for k in range(i + 1, self.rows_size):
if (self.puzzle[k][j] != 'W'):
break
x.append('x' + '_' + str(k) + '_' + str(j))
sums.append((cell[0], x))
# right - row
if (cell[1] != ''):
x = []
for k in range(j + 1, len(self.puzzle[i])):
if (self.puzzle[i][k] != 'W'):
break
x.append('x' + '_' + str(i) + '_' + str(k))
sums.append((cell[1], x))
return sums
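    # Solver wrappers below: BT = chronological backtracking; MRV = minimum-
    # remaining-values variable ordering; FC = forward checking; MAC = maintaining
    # arc consistency. Each returns a (solution-or-None, elapsed-seconds) tuple.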
def BT(self):
start = time.time()
result = backtracking_search(self)
end = time.time()
return (result, end - start)
def BT_MRV(self):
start = time.time()
result = backtracking_search(self, select_unassigned_variable=mrv)
end = time.time()
return (result, end - start)
def FC(self):
start = time.time()
result = (backtracking_search(self, inference=forward_checking))
end = time.time()
return (result, end - start)
def FC_MRV(self):
start = time.time()
result = (backtracking_search(self, select_unassigned_variable=mrv, inference=forward_checking))
end = time.time()
return (result, end - start)
def MAC(self):
start = time.time()
result = (backtracking_search(self, select_unassigned_variable=mrv, inference=mac))
end = time.time()
return (result, end - start)
def display_grid(self, grid):
for i in range(self.rows_size):
for j in range(self.cols_size):
if isinstance(self.puzzle[i][j], list):
if grid[i][j][0] == '':
print('B\{}'.format(grid[i][j][1]).ljust(4), end='\t')
elif grid[i][j][1] == '':
print('{}\B'.format(grid[i][j][0]).ljust(4), end='\t')
else:
print('{}\{}'.format(grid[i][j][0], grid[i][j][1]).ljust(4), end='\t')
else:
print(grid[i][j].ljust(4), end='\t')
print()
def display_solution(self, grid, solution, time_elapsed, assigns):
if solution != None:
for variable in self.variables:
# Get row and col of current variable
row = int(re.search('_(.*)_', variable).group(1))
col = int(variable.rsplit('_', 1)[-1])
# Get value
value = solution[variable]
# Assign value of the variable to the grid
grid[row][col] = str(value)
# display assigned grid
self.display_grid(grid)
print("Number of assigns: {}".format(assigns))
print("Total time elapsed: {:.4f} seconds".format(time_elapsed))
else:
print("No solution found!")
if __name__ == "__main__":
# Get all puzzles from puzzle.py
kakuro_puzzles = []
for item in vars(puzzles).keys():
if not item.startswith("__"):
kakuro_puzzles.append((item,vars(puzzles)[item]))
for puzzle_name, puzzle in kakuro_puzzles:
print("\n----------------------------- {} Kakuro puzzle -----------------------------".format(puzzle_name))
kakuro = Kakuro(puzzle)
kakuro.display_grid(kakuro.puzzle)
# BT algorithm
print("\n> Solution using BT algorithm")
kakuro.display_solution(kakuro.puzzle, *kakuro.BT(), kakuro.nassigns)
# BT + MRV algorithm
print("\n> Solution using BT and MRV algorithm")
kakuro.display_solution(kakuro.puzzle, *kakuro.BT_MRV(), kakuro.nassigns)
# FC algorithm
print("\n> Solution using FC algorithm")
kakuro.display_solution(kakuro.puzzle, *kakuro.FC(), kakuro.nassigns)
# FC + MRV algorithm
print("\n> Solution using FC and MRV algorithm")
kakuro.display_solution(kakuro.puzzle, *kakuro.FC_MRV(), kakuro.nassigns)
# MAC algorithm
print("\n> Solution using MAC algorithm")
kakuro.display_solution(kakuro.puzzle, *kakuro.MAC(), kakuro.nassigns)
# print an empty line for better output
print()
| 40.809524 | 115 | 0.50564 | 8,843 | 0.859879 | 0 | 0 | 0 | 0 | 0 | 0 | 1,362 | 0.132439 |
2f6b470e4d68764c82c5e960377985365be6d841
| 49 |
py
|
Python
|
AD-HOC/2416.py
|
jeconiassantos/uriissues
|
f6c32f8632b9940a4886240ea5d22300922dc79a
|
[
"MIT"
] | null | null | null |
AD-HOC/2416.py
|
jeconiassantos/uriissues
|
f6c32f8632b9940a4886240ea5d22300922dc79a
|
[
"MIT"
] | null | null | null |
AD-HOC/2416.py
|
jeconiassantos/uriissues
|
f6c32f8632b9940a4886240ea5d22300922dc79a
|
[
"MIT"
] | null | null | null |
C, N = map(int, input().split(' '))
print(C % N)
| 16.333333 | 35 | 0.510204 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.061224 |
2f6b78883c06f33b085d614b2c51070339de771d
| 2,682 |
py
|
Python
|
plot.py
|
nsi88/ctg-plot
|
c623fb76d72429f0411d42ff1e380a7a46d8c04f
|
[
"MIT"
] | null | null | null |
plot.py
|
nsi88/ctg-plot
|
c623fb76d72429f0411d42ff1e380a7a46d8c04f
|
[
"MIT"
] | null | null | null |
plot.py
|
nsi88/ctg-plot
|
c623fb76d72429f0411d42ff1e380a7a46d8c04f
|
[
"MIT"
] | null | null | null |
import csv
import os
import sys
import matplotlib.pyplot as plt
from metrics.index import metrics
from plot_package.helpers import ascendence_label, descendence_label, with_sign
if len(sys.argv) != 2:
print(f"{__file__} sample_name.csv")
sys.exit(1)
sample_name = sys.argv[1]
# set size
plt.figure(figsize=(20.48, 10.24))
# draw heartbeats
seconds = []
heartbeats = []
with open(sample_name) as f:
read_csv = csv.reader(f, delimiter=",")
for row in read_csv:
seconds.append(int(row[0]))
heartbeats.append(int(row[1]))
plt.plot(seconds, heartbeats, color="blue", marker="D")
# calculate CTG metrics
ctg_metrics = metrics(heartbeats)
x_min = 0
x_max = seconds[-1]
basic_channel_bottom = ctg_metrics.avg * 0.875 # - 12.5%
basic_channel_top = ctg_metrics.avg * 1.125 # + 12.5%
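# e.g. an average of 140 bpm gives a basic channel spanning 122.5-157.5 bpm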
# draw ctg metrics
# average
# TODO: use a better way to draw a line (plot(y)). See _axes.py plot method docs
plt.plot(
[x_min, x_max],
[ctg_metrics.avg, ctg_metrics.avg],
label=f"Average: {round(ctg_metrics.avg, 2)}",
color="red",
)
# basic channel
plt.plot([x_min, x_max], [basic_channel_bottom, basic_channel_bottom], color="black")
plt.plot(
[x_min, x_max],
[basic_channel_top, basic_channel_top],
color="black",
label=f"Basic channel from {round(basic_channel_bottom, 2)} to {round(basic_channel_top, 2)}. "
f"Variance: {with_sign(ctg_metrics.variance)}",
)
# ascendences
for index, ascendence in enumerate(ctg_metrics.ascendences):
plt.plot(
# NOTE: We add + 1 to ascendence.end cause slice(, end) is exclusive
seconds[slice(ascendence.start, ascendence.end + 1)],
heartbeats[slice(ascendence.start, ascendence.end + 1)],
color="green",
marker="D",
label=ascendence_label(
heartbeats=heartbeats, ascendence=ascendence, index=index
),
)
# descendences
for index, descendence in enumerate(ctg_metrics.descendences):
plt.plot(
seconds[slice(descendence.start, descendence.end + 1)],
heartbeats[slice(descendence.start, descendence.end + 1)],
color="purple",
marker="D",
label=descendence_label(
heartbeats=heartbeats, descendence=descendence, index=index
),
)
# draw common plot elements
# naming the x axis
plt.xlabel("Second")
# naming the y axis
plt.ylabel("Heartbeat")
# giving a title to my graph
plt.title("CTG")
# show a legend on the plot
plt.legend()
# function to show the plot
plt.show()
# save the plot
# sample_basename, _sample_extname = os.path.splitext(sample_name)
# figure_name = f"{sample_basename}.png"
# plt.savefig(figure_name)
# print(f"Saved to {figure_name}")
| 28.531915 | 99 | 0.688292 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 890 | 0.331842 |
2f6b87929186c7b4d57d3ad6750b0986257cf867
| 662 |
py
|
Python
|
list_prime.py
|
zm6/Python-Practice
|
c2080e1104cd7cee4af8ebc3e3f4941fc7466586
|
[
"MIT"
] | null | null | null |
list_prime.py
|
zm6/Python-Practice
|
c2080e1104cd7cee4af8ebc3e3f4941fc7466586
|
[
"MIT"
] | null | null | null |
list_prime.py
|
zm6/Python-Practice
|
c2080e1104cd7cee4af8ebc3e3f4941fc7466586
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:  zm6
# Created: 2021-03-19
# Updated: 2021-03-19
# Purpose: print the prime numbers up to N
import time  # used to compare running time
def list_prime(n):
    num = 0
    for i in range(2, n + 1):
        is_prime = 1  # assume i is prime
        for j in range(2, i - 1):
            if i % j == 0:
                is_prime = 0  # found a divisor, so i is not prime
                break
        if is_prime == 1:
            print(i)
            num = num + 1
    return num
if __name__ == "__main__":
    n = int(input("please enter the number:"))  # read n
    start = time.time()  # start timing
    num = list_prime(n)
    print("Number of primes up to", n, ":", num)
    end = time.time()  # stop timing
    print(str(end - start))
| 16.55 | 54 | 0.493958 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 301 | 0.385897 |
2f6cee267527184d028d64eb983074f84ea9f058
| 2,246 |
py
|
Python
|
foyer/tests/test_forcefield.py
|
rmatsum836/foyer
|
c150d6f4c34e9ca7c5e4012e4406fb4ebab588cb
|
[
"MIT"
] | 1 |
2020-11-08T23:51:29.000Z
|
2020-11-08T23:51:29.000Z
|
foyer/tests/test_forcefield.py
|
rmatsum836/foyer
|
c150d6f4c34e9ca7c5e4012e4406fb4ebab588cb
|
[
"MIT"
] | null | null | null |
foyer/tests/test_forcefield.py
|
rmatsum836/foyer
|
c150d6f4c34e9ca7c5e4012e4406fb4ebab588cb
|
[
"MIT"
] | null | null | null |
import glob
import os
from pkg_resources import resource_filename
import mbuild as mb
import parmed as pmd
import pytest
from foyer import Forcefield
from foyer.tests.utils import get_fn
FF_DIR = resource_filename('foyer', 'forcefields')
FORCEFIELDS = glob.glob(os.path.join(FF_DIR, '*.xml'))
def test_load_files():
for ff_file in FORCEFIELDS:
ff1 = Forcefield(forcefield_files=ff_file)
assert len(ff1._atomTypes) > 0
ff2 = Forcefield(forcefield_files=ff_file)
assert len(ff1._atomTypes) == len(ff2._atomTypes)
def test_duplicate_type_definitions():
with pytest.raises(ValueError):
        Forcefield(name='oplsaa', forcefield_files=FORCEFIELDS)
def test_from_parmed():
mol2 = pmd.load_file(get_fn('ethane.mol2'), structure=True)
oplsaa = Forcefield(name='oplsaa')
ethane = oplsaa.apply(mol2)
assert sum((1 for at in ethane.atoms if at.type == 'opls_135')) == 2
assert sum((1 for at in ethane.atoms if at.type == 'opls_140')) == 6
assert len(ethane.bonds) == 7
assert all(x.type for x in ethane.bonds)
assert len(ethane.angles) == 12
assert all(x.type for x in ethane.angles)
assert len(ethane.rb_torsions) == 9
assert all(x.type for x in ethane.dihedrals)
mol2 = pmd.load_file(get_fn('ethane.mol2'), structure=True)
mol2.box_vectors = [[2, 0, 0], [0, 2, 0], [0, 0, 2]]
oplsaa = Forcefield(name='oplsaa')
ethane = oplsaa.apply(mol2)
assert ethane.box_vectors == mol2.box_vectors
def test_from_mbuild():
mol2 = mb.load(get_fn('ethane.mol2'))
oplsaa = Forcefield(name='oplsaa')
ethane = oplsaa.apply(mol2)
assert sum((1 for at in ethane.atoms if at.type == 'opls_135')) == 2
assert sum((1 for at in ethane.atoms if at.type == 'opls_140')) == 6
assert len(ethane.bonds) == 7
assert all(x.type for x in ethane.bonds)
assert len(ethane.angles) == 12
assert all(x.type for x in ethane.angles)
assert len(ethane.rb_torsions) == 9
assert all(x.type for x in ethane.dihedrals)
def test_write_refs():
mol2 = mb.load(get_fn('ethane.mol2'))
oplsaa = Forcefield(name='oplsaa')
ethane = oplsaa.apply(mol2, references_file='ethane.bib')
assert os.path.isfile('ethane.bib')
| 31.194444 | 72 | 0.684328 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 183 | 0.081478 |
2f6d26b42b45ea8ec8df168ee13e107fabe9add1
| 407 |
py
|
Python
|
Back/ns_portal/database/main_db/troles_model.py
|
anthonyHenryNS/NsPortal
|
dcb4b4e0a70c1c3431d5438d97e80f5d05c8e10e
|
[
"MIT"
] | 1 |
2019-01-22T15:16:43.000Z
|
2019-01-22T15:16:43.000Z
|
Back/ns_portal/database/main_db/troles_model.py
|
NaturalSolutions/NsPortal
|
bcd07fdf015948a82f4d0c3c9a02f513b2d99f5d
|
[
"MIT"
] | 16 |
2015-09-28T14:46:13.000Z
|
2020-04-20T10:34:25.000Z
|
Back/ns_portal/database/main_db/troles_model.py
|
anthonyHenryNS/NsPortal
|
dcb4b4e0a70c1c3431d5438d97e80f5d05c8e10e
|
[
"MIT"
] | 10 |
2015-05-06T08:05:09.000Z
|
2020-01-27T13:39:47.000Z
|
from ns_portal.database.meta import (
Main_Db_Base
)
from sqlalchemy import (
Column,
Integer,
String
)
class TRoles(Main_Db_Base):
__tablename__ = 'TRoles'
TRol_PK_ID = Column(
Integer,
primary_key=True
)
TRol_Label = Column(
String(250),
nullable=False
)
TRol_Definition = Column(
String(250),
nullable=True
)
| 15.653846 | 37 | 0.594595 | 284 | 0.697789 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.019656 |
2f6fd35b1512af4a2c444f8b5323ece8c562f37c
| 2,869 |
py
|
Python
|
src/authorization/validators.py
|
dmitrijbozhkov/cloudcourseproject
|
3e62a5fafef418c1c058587abc5615b03fc2325a
|
[
"Apache-2.0"
] | null | null | null |
src/authorization/validators.py
|
dmitrijbozhkov/cloudcourseproject
|
3e62a5fafef418c1c058587abc5615b03fc2325a
|
[
"Apache-2.0"
] | 7 |
2021-02-08T20:41:23.000Z
|
2022-03-12T00:21:37.000Z
|
src/authorization/validators.py
|
dmitrijbozhkov/cloudcourseproject
|
3e62a5fafef418c1c058587abc5615b03fc2325a
|
[
"Apache-2.0"
] | null | null | null |
""" Form validators for authorization """
import re
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import DataRequired, Length, ValidationError, Email, EqualTo
from cloudcourseproject.src.model import User
def password_validator(password):
""" Validate password should contain capital and small latin letters and numbers """
if re.search("[0-9]", password) is None:
raise ValidationError("Password should have numbers")
if re.search("[A-Z]", password) is None:
raise ValidationError("Password should contain capital latin letters")
if re.search("[a-z]", password) is None:
raise ValidationError("Password should contain lower latin letters")
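# For example, password_validator("Secret1") passes silently, while
# password_validator("secret") raises ValidationError ("Password should have numbers").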
class LoginForm(FlaskForm):
""" Validate login request """
email = StringField("email", validators=[
DataRequired("Email is required"),
Email(message="Email field should be a valid email address")
])
password = PasswordField("password", validators=[
DataRequired("Password is required"),
Length(message="Password should be between 6 and 20 characters!", min=6, max=20)
])
submit = SubmitField("Login")
def validate_password(self, field):
""" Perform password validation """
password_validator(field.data)
class CreateAccountForm(FlaskForm):
""" Validate creation of user account """
username = StringField("username", validators=[
DataRequired("Username is required"),
Length(message="Password should be between 6 and 20 characters!", min=4, max=20)
])
email = StringField("email", validators=[
DataRequired("Email is required"),
Email(message="Email field should be a valid email address")
])
password = PasswordField("password", validators=[
DataRequired("Password is required"),
Length(message="Password should be between 6 and 20 characters!", min=6, max=20)
])
confirm_password = PasswordField("confirm_password", validators=[
DataRequired("Password confirmation is required"),
EqualTo("password", message="Passwords aren't equal")
])
submit = SubmitField("Register")
def validate_password(self, field):
""" Perform password validation """
password_validator(field.data)
def validate_username(self, field):
""" Check if username already taken """
result = User.query.filter_by(username=field.data).first()
if result:
raise ValidationError(f"Username {field.data} already taken!")
def validate_email(self, field):
""" Check if username already taken """
result = User.query.filter_by(email=field.data).first()
if result:
raise ValidationError(f"User by email {field.data} already exists, maybe you have forgot your password?")
| 42.191176 | 117 | 0.684559 | 2,113 | 0.736494 | 0 | 0 | 0 | 0 | 0 | 0 | 1,097 | 0.382363 |
2f705c1639774ae7481bbdfb1680d2106c872e2a
| 1,418 |
py
|
Python
|
app/pipelines/load_data/load_marketing_data/__init__.py
|
mediaimprove/mara-example-project-1
|
d1cab4cf079e78a4c0f73edac73200fac4112f34
|
[
"MIT"
] | 22 |
2020-10-07T21:32:07.000Z
|
2022-03-21T19:21:36.000Z
|
app/pipelines/load_data/load_marketing_data/__init__.py
|
mediaimprove/mara-example-project-1
|
d1cab4cf079e78a4c0f73edac73200fac4112f34
|
[
"MIT"
] | 4 |
2020-07-16T15:22:46.000Z
|
2020-10-28T15:18:32.000Z
|
app/pipelines/load_data/load_marketing_data/__init__.py
|
mediaimprove/mara-example-project-1
|
d1cab4cf079e78a4c0f73edac73200fac4112f34
|
[
"MIT"
] | 4 |
2020-10-08T10:30:04.000Z
|
2022-03-19T09:21:51.000Z
|
import pathlib
from mara_pipelines.commands.sql import ExecuteSQL, Copy
from mara_pipelines.pipelines import Pipeline, Task
from mara_pipelines import config
pipeline = Pipeline(
id="load_marketing_data",
description="Jobs related with loading marketing leads data from the backend database",
max_number_of_parallel_tasks=5,
base_path=pathlib.Path(__file__).parent,
labels={"Schema": "m_data"})
pipeline.add_initial(
Task(id="initialize_schemas", description="Recreates the marketing data schema",
commands=[
ExecuteSQL(sql_file_name='../recreate_marketing_data_schema.sql',
file_dependencies=[
pathlib.Path(__file__).parent.parent / 'recreate_marketing_data_schema.sql'])]))
tables = [
'closed_deal',
'marketing_qualified_lead'
]
for table in tables:
pipeline.add(
Task(id=f"load_{table}",
description=f'Loads the {table}s from the backend database',
commands=[
ExecuteSQL(sql_file_name=f'{table}/create_{table}_table.sql'),
Copy(sql_statement=f"""
SELECT *
FROM marketing.{table}s;
""",
source_db_alias='olist',
target_db_alias='dwh',
target_table=f'm_data.{table}',
delimiter_char=';')]
)
)
| 32.227273 | 108 | 0.61213 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 487 | 0.343441 |
2f710ce3e46ffdc56061d382495ca8df6e25a15b
| 320 |
py
|
Python
|
apps/urls.py
|
cijianciqing/myWX_l_ningmo
|
df4c80554b0f3c58060352fc0d5fc6c649f805c8
|
[
"Apache-2.0"
] | null | null | null |
apps/urls.py
|
cijianciqing/myWX_l_ningmo
|
df4c80554b0f3c58060352fc0d5fc6c649f805c8
|
[
"Apache-2.0"
] | null | null | null |
apps/urls.py
|
cijianciqing/myWX_l_ningmo
|
df4c80554b0f3c58060352fc0d5fc6c649f805c8
|
[
"Apache-2.0"
] | null | null | null |
from django.urls import path
from .views import menu, image, weixinFile
urlpatterns = [
path('menu/list', menu.get_menu),
path('menu/user', menu.UserMenu.as_view()),
path('image', image.ImageView.as_view()),
path('saveWX', weixinFile.saveWX),
path('getRecentWX', weixinFile.getRecentWX),
]
| 26.666667 | 48 | 0.696875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 0.15625 |
2f7149167478c04bd0604548dfe0f8ebb31e11a2
| 1,178 |
py
|
Python
|
mayan/apps/django_gpg/links.py
|
garrans/mayan-edms
|
e95e90cc47447a1ae72629271652824aa9868572
|
[
"Apache-2.0"
] | null | null | null |
mayan/apps/django_gpg/links.py
|
garrans/mayan-edms
|
e95e90cc47447a1ae72629271652824aa9868572
|
[
"Apache-2.0"
] | null | null | null |
mayan/apps/django_gpg/links.py
|
garrans/mayan-edms
|
e95e90cc47447a1ae72629271652824aa9868572
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from navigation import Link
from .permissions import (
permission_key_delete, permission_key_receive, permission_key_view,
permission_keyserver_query
)
link_private_keys = Link(
icon='fa fa-key', permissions=(permission_key_view,),
text=_('Private keys'), view='django_gpg:key_private_list'
)
link_public_keys = Link(
icon='fa fa-key', permissions=(permission_key_view,),
text=_('Public keys'), view='django_gpg:key_public_list'
)
link_key_delete = Link(
permissions=(permission_key_delete,), tags='dangerous', text=_('Delete'),
view='django_gpg:key_delete', args=('object.fingerprint', 'object.type',)
)
link_key_query = Link(
permissions=(permission_keyserver_query,), text=_('Query keyservers'),
view='django_gpg:key_query'
)
link_key_receive = Link(
keep_query=True, permissions=(permission_key_receive,), text=_('Import'),
view='django_gpg:key_receive', args='object.key_id'
)
link_key_setup = Link(
icon='fa fa-key', permissions=(permission_key_view,),
text=_('Key management'), view='django_gpg:key_public_list'
)
| 31.837838 | 77 | 0.749576 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 323 | 0.274194 |
2f73b6858df8269c4f5f2480de3342f864156c6c
| 1,489 |
py
|
Python
|
rosalind/splc/splc.py
|
TecKnow/learning
|
71d1ddf9d580027ecc62a067581da378a9e85f6d
|
[
"BSD-3-Clause"
] | null | null | null |
rosalind/splc/splc.py
|
TecKnow/learning
|
71d1ddf9d580027ecc62a067581da378a9e85f6d
|
[
"BSD-3-Clause"
] | null | null | null |
rosalind/splc/splc.py
|
TecKnow/learning
|
71d1ddf9d580027ecc62a067581da378a9e85f6d
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Problem : RNA Splicing
URL : http://rosalind.info/problems/splc/
Author : David P. Perkins
"""
import fasta
def getCodingRegion(DNAString, introns):
#print("DNA String", DNAString, "introns", introns)
codingString = list()
workingString = DNAString
while workingString:
for curIntron in introns:
if workingString.startswith(curIntron):
#print("Working String", workingString, "starts with", curIntron)
workingString = workingString[len(curIntron):]
#print("New Working String", workingString)
break
else:
codingString.append(workingString[0])
workingString = workingString[1:]
#print("Coding string so far", codingString)
return ''.join(codingString)
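# Worked example: getCodingRegion('ATGGTCTACATAGCTGACAA', ['GTCTACATA'])
# strips the intron from the working string and returns 'ATGGCTGACAA'.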
def RNAtoProt(RNAString):
    # Build the codon -> amino-acid lookup table shipped with the problem.
    with open('codon_to_amino_acid_table.txt') as proFile:
        proTab = proFile.read().split()
    proTab = dict(zip(proTab[::2], proTab[1::2]))
    # Translate three bases at a time and truncate at the first Stop codon.
    codons = map(''.join, zip(*[iter(RNAString)]*3))
    res = ''.join(proTab[x] for x in codons)
    res, _, _ = res.partition("Stop")
    return res
if __name__ == "__main__":
import sys
    # fromList expects the raw input lines; FASTA records span multiple lines.
    FASTAs = fasta.FASTA.fromList(sys.stdin.readlines())
cr = getCodingRegion(FASTAs[0].value, [x.value for x in FASTAs[1:]])
rna = cr.replace('T','U')
prot = RNAtoProt(rna)
print(prot)
| 29.78 | 85 | 0.601746 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 368 | 0.247146 |
2f74e4a6873c5b5dffd070a88fe2c8e9d487f559
| 191 |
py
|
Python
|
common/helpers.py
|
denisov-vlad/mtuci-masters
|
6fa68adb156ba4b43b4995d365f450654df70fa3
|
[
"MIT"
] | null | null | null |
common/helpers.py
|
denisov-vlad/mtuci-masters
|
6fa68adb156ba4b43b4995d365f450654df70fa3
|
[
"MIT"
] | null | null | null |
common/helpers.py
|
denisov-vlad/mtuci-masters
|
6fa68adb156ba4b43b4995d365f450654df70fa3
|
[
"MIT"
] | null | null | null |
import os
def encode(s):
if isinstance(s, bytes):
return s.decode('utf-8', 'ignore')
else:
return str(s)
path_joiner = os.path.join
path_basename = os.path.basename
| 17.363636 | 42 | 0.633508 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.078534 |
2f75db3882d66a7931843a46ed92b1bea9dfaf2f
| 6,448 |
py
|
Python
|
tests/integration/test_interrupt_fields.py
|
jvanstraten/vhdmmio
|
f166b07074a9159311a01af88497df91c19e09d1
|
[
"Apache-2.0"
] | 4 |
2019-07-01T14:41:38.000Z
|
2021-11-28T12:54:49.000Z
|
tests/integration/test_interrupt_fields.py
|
jvanstraten/vhdmmio
|
f166b07074a9159311a01af88497df91c19e09d1
|
[
"Apache-2.0"
] | 4 |
2019-08-23T15:05:24.000Z
|
2020-12-16T10:02:20.000Z
|
tests/integration/test_interrupt_fields.py
|
jvanstraten/vhdmmio
|
f166b07074a9159311a01af88497df91c19e09d1
|
[
"Apache-2.0"
] | 1 |
2021-07-16T13:41:21.000Z
|
2021-07-16T13:41:21.000Z
|
"""Interrupt field tests."""
from copy import deepcopy
from unittest import TestCase
from ..testbench import RegisterFileTestbench
class TestInterruptFields(TestCase):
"""Interrupt field tests"""
def test_fields(self):
"""test interrupt fields"""
fields = []
types = {
typ: idx * 4 for idx, typ in enumerate([
'volatile', 'flag', 'pend', 'enable', 'unmask', 'status', 'raw'])}
for typ, address in types.items():
typ_name = 'interrupt-%s' % typ
if typ == 'volatile':
typ_name = 'volatile-interrupt-flag'
fields.append({
'address': address,
'bitrange': 0,
'repeat': 8,
'name': 'x_%s' % typ,
'behavior': typ_name,
'interrupt': 'x',
})
if typ not in ('volatile', 'pend'):
fields.append({
'address': address,
'bitrange': 8,
'repeat': 4,
'name': 'y_%s' % typ,
'behavior': typ_name,
'interrupt': 'y',
})
if typ == 'flag':
fields[-1]['bus-write'] = 'disabled'
rft = RegisterFileTestbench({
'metadata': {'name': 'test'},
'interrupts': [
{
'repeat': 8,
'name': 'x',
},
{
'repeat': 4,
'name': 'y',
},
],
'fields': fields})
self.assertEqual(rft.ports, (
'bus',
'i_x_request',
'i_y_request',
))
with rft as objs:
objs.bus.write(types['enable'], 0x555)
objs.bus.write(types['unmask'], 0x333)
self.assertEqual(objs.bus.read(types['enable']), 0x555)
self.assertEqual(objs.bus.read(types['unmask']), 0x333)
self.assertEqual(int(objs.bus.interrupt), 0)
objs.i_x_request.val = 0xFF
objs.i_y_request.val = 0xF
self.assertEqual(objs.bus.read(types['raw']), 0xFFF)
self.assertEqual(objs.bus.read(types['flag']), 0x555)
self.assertEqual(objs.bus.read(types['status']), 0x111)
self.assertEqual(objs.bus.read(types['volatile']), 0x055)
self.assertEqual(objs.bus.read(types['raw']), 0xFFF)
self.assertEqual(objs.bus.read(types['flag']), 0x555)
self.assertEqual(objs.bus.read(types['status']), 0x111)
objs.i_x_request.val = 0x00
objs.i_y_request.val = 0x0
self.assertEqual(objs.bus.read(types['raw']), 0x000)
self.assertEqual(objs.bus.read(types['flag']), 0x055)
self.assertEqual(objs.bus.read(types['status']), 0x011)
objs.bus.write(types['flag'], 0x00F)
self.assertEqual(objs.bus.read(types['flag']), 0x050)
self.assertEqual(objs.bus.read(types['status']), 0x010)
objs.bus.write(types['unmask'], 0xFFF)
self.assertEqual(objs.bus.read(types['status']), 0x050)
self.assertEqual(int(objs.bus.interrupt), 1)
self.assertEqual(objs.bus.read(types['volatile']), 0x050)
rft.testbench.clock(3)
self.assertEqual(int(objs.bus.interrupt), 0)
self.assertEqual(objs.bus.read(types['raw']), 0x000)
self.assertEqual(objs.bus.read(types['flag']), 0x000)
self.assertEqual(objs.bus.read(types['status']), 0x000)
objs.bus.write(types['enable'], 0x555)
objs.bus.write(types['unmask'], 0x333)
objs.bus.write(types['pend'], 0xF0F)
self.assertEqual(objs.bus.read(types['flag']), 0x00F)
self.assertEqual(objs.bus.read(types['status']), 0x003)
self.assertEqual(int(objs.bus.interrupt), 1)
for typ in ['volatile', 'flag', 'pend', 'enable', 'unmask', 'status', 'raw']:
objs.bus.read(types[typ])
if typ in ['volatile', 'status', 'raw']:
with self.assertRaisesRegex(ValueError, 'decode'):
objs.bus.write(types[typ], 0)
else:
objs.bus.write(types[typ], 0)
def test_errors(self):
"""test interrupt field config errors"""
base_cfg = {
'metadata': {'name': 'test'},
'fields': [
{
'address': 0,
'bitrange': 0,
'name': 'x',
'behavior': 'interrupt-flag',
'interrupt': 'x',
},
],
'interrupts': [
{
'name': 'x',
},
],
}
RegisterFileTestbench(base_cfg)
cfg = deepcopy(base_cfg)
cfg['fields'][0]['behavior'] = 'interrupt'
with self.assertRaisesRegex(
Exception, 'bus cannot access the field; specify a read or '
'write operation'):
RegisterFileTestbench(cfg)
cfg = deepcopy(base_cfg)
cfg['fields'][0]['bitrange'] = '3..0'
with self.assertRaisesRegex(
Exception, 'interrupt fields cannot be vectors, use '
'repetition instead'):
RegisterFileTestbench(cfg)
cfg = deepcopy(base_cfg)
cfg['fields'][0]['behavior'] = 'interrupt'
cfg['fields'][0]['mode'] = 'raw'
cfg['fields'][0]['bus-write'] = 'enabled'
with self.assertRaisesRegex(
Exception, 'raw interrupt fields cannot be written'):
RegisterFileTestbench(cfg)
cfg = deepcopy(base_cfg)
cfg['fields'][0]['behavior'] = 'interrupt'
cfg['fields'][0]['mode'] = 'masked'
cfg['fields'][0]['bus-write'] = 'enabled'
with self.assertRaisesRegex(
Exception, 'masked interrupt fields cannot be written'):
RegisterFileTestbench(cfg)
cfg = deepcopy(base_cfg)
cfg['fields'][0]['behavior'] = 'interrupt'
cfg['fields'][0]['mode'] = 'masked'
cfg['fields'][0]['bus-read'] = 'clear'
with self.assertRaisesRegex(
Exception, 'only flag interrupt fields support clear-on-read'):
RegisterFileTestbench(cfg)
| 38.380952 | 89 | 0.498759 | 6,314 | 0.979218 | 0 | 0 | 0 | 0 | 0 | 0 | 1,444 | 0.223945 |
2f762c138bc0fd2f04d2c1539f4eca93c9446723
| 1,745 |
py
|
Python
|
app.py
|
corsmith/openshift-webhook-webex-teams-translator
|
fc20d4cdf2ca0959d2875048e6c5e5a1477ccec5
|
[
"BSD-3-Clause"
] | null | null | null |
app.py
|
corsmith/openshift-webhook-webex-teams-translator
|
fc20d4cdf2ca0959d2875048e6c5e5a1477ccec5
|
[
"BSD-3-Clause"
] | null | null | null |
app.py
|
corsmith/openshift-webhook-webex-teams-translator
|
fc20d4cdf2ca0959d2875048e6c5e5a1477ccec5
|
[
"BSD-3-Clause"
] | null | null | null |
import tornado.ioloop
import tornado.web
import tornado.options
from tornado.httpclient import AsyncHTTPClient
from tornado.log import gen_log
'''
Alert Manager Documentation: https://prometheus.io/docs/alerting/configuration/
Sample alertmanager message:
{
"version": "4",
"groupKey": <string>, // key identifying the group of alerts (e.g. to deduplicate)
"status": "<resolved|firing>",
"receiver": <string>,
"groupLabels": <object>,
"commonLabels": <object>,
"commonAnnotations": <object>,
"externalURL": <string>, // backlink to the Alertmanager.
"alerts": [
{
"status": "<resolved|firing>",
"labels": <object>,
"annotations": <object>,
"startsAt": "<rfc3339>",
"endsAt": "<rfc3339>",
"generatorURL": <string> // identifies the entity that caused the alert
},
...
]
}
'''
# Currently unused helper demonstrating an outbound async HTTP request.
async def f():
http_client = AsyncHTTPClient()
try:
response = await http_client.fetch("http://www.google.com")
except Exception as e:
print("Error: %s" % e)
else:
print(response.body)
class HealthHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, world\n")
class MainHandler(tornado.web.RequestHandler):
def post(self, webhookkey):
gen_log.warning(f'webhookkey = { webhookkey }\nuri: { self.request.uri }\nquery: { self.request.query }\nheaders: { self.request.headers }\nbody: { self.request.body }')
self.write("Hello, %s\n" % webhookkey)
def make_app():
return tornado.web.Application([
(r"/v1/webhooks/incoming/([^/]+)", MainHandler),
(r"/", HealthHandler),
])
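# Smoke test once running (illustrative; the webhook key is a path segment):
#   curl -X POST http://localhost:8080/v1/webhooks/incoming/demo-key -d '{"status": "firing"}'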
if __name__ == "__main__":
tornado.options.parse_command_line()
app = make_app()
app.listen(8080)
tornado.ioloop.IOLoop.current().start()
| 27.698413 | 177 | 0.638395 | 407 | 0.233238 | 0 | 0 | 0 | 0 | 224 | 0.128367 | 966 | 0.553582 |
2f76d7407f4890886daf9577fd641a2214d9abc6
| 1,848 |
py
|
Python
|
backend/serializers.py
|
richardswei/werk
|
b08b1ffc2d3aaf61f2cfb07ecc259a35eea77e91
|
[
"MIT"
] | null | null | null |
backend/serializers.py
|
richardswei/werk
|
b08b1ffc2d3aaf61f2cfb07ecc259a35eea77e91
|
[
"MIT"
] | 8 |
2020-03-21T01:57:31.000Z
|
2021-09-22T18:46:07.000Z
|
backend/serializers.py
|
richardswei/werkit
|
b08b1ffc2d3aaf61f2cfb07ecc259a35eea77e91
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from backend.models import TodoItem, Note
from django.contrib.auth import authenticate, get_user_model
class TodoItemSerializer(serializers.ModelSerializer):
class Meta:
model = TodoItem
fields = ('id', 'created', 'updated', 'due', 'title', 'description', 'priority', 'user_id')
user_id = serializers.ReadOnlyField(source='owner.id')
class NoteSerializer(serializers.ModelSerializer):
class Meta:
model = Note
fields = ('id', 'created', 'updated', 'title', 'description', 'user_id')
user_id = serializers.ReadOnlyField(source='owner.id')
class UserSerializer(serializers.ModelSerializer):
# notes = serializers.PrimaryKeyRelatedField(many=True, queryset=Note.objects.all(), allow_null=True)
# todoitems = serializers.PrimaryKeyRelatedField(many=True, queryset=TodoItem.objects.all(), allow_null=True)
class Meta:
model = get_user_model()
fields = ('id', 'username', 'password', 'email', 'first_name', 'last_name')
        # DRF removed `write_only_fields`; mark the password write-only via extra_kwargs.
        extra_kwargs = {'password': {'write_only': True}}
read_only_fields = ['id']
def create(self, validated_data):
user = get_user_model().objects.create(
username=validated_data['username'],
email=validated_data['email'],
first_name=validated_data['first_name'],
last_name=validated_data['last_name']
)
user.set_password(validated_data['password'])
user.save()
return user
class LoginUserSerializer(serializers.Serializer):
username = serializers.CharField()
password = serializers.CharField()
def validate(self, data):
user = authenticate(**data)
if user and user.is_active:
return user
raise serializers.ValidationError("Unable to log in with provided credentials.")
| 33.6 | 113 | 0.679113 | 1,694 | 0.916667 | 0 | 0 | 0 | 0 | 0 | 0 | 510 | 0.275974 |
2f7707e7a77d86241b0db0b4c74b1a925d1c197b
| 695 |
py
|
Python
|
projects/demos/location2.py
|
readysetstem/readysetstem-api
|
01e1360f4a28a6783ee1e0fa1bc239dd999de6be
|
[
"Apache-2.0"
] | 1 |
2018-02-23T20:20:45.000Z
|
2018-02-23T20:20:45.000Z
|
projects/demos/location2.py
|
readysetstem/readysetstem-api
|
01e1360f4a28a6783ee1e0fa1bc239dd999de6be
|
[
"Apache-2.0"
] | 1 |
2016-10-25T18:00:15.000Z
|
2016-10-25T18:00:15.000Z
|
projects/demos/location2.py
|
readysetstem/readysetstem-api
|
01e1360f4a28a6783ee1e0fa1bc239dd999de6be
|
[
"Apache-2.0"
] | null | null | null |
from rstem.led_matrix import FrameBuffer
from rstem.mcpi import minecraft, control
import time
control.show()
mc = minecraft.Minecraft.create()
SCALE = 25
fb = FrameBuffer()
count = 0
FLASH_COUNT = 3
flash_lit = True
while True:
    pos = mc.player.getTilePos()
    # Scale world coordinates onto the LED matrix, clamping to the edges and
    # remembering whether the player is actually off-screen.
    x = round(pos.x/SCALE + (fb.width-1)/2)
    x_out_of_bounds = not 0 <= x < fb.width
    x = min(fb.width-1, max(0, x))
    z = round(pos.z/SCALE + (fb.height-1)/2)
    z_out_of_bounds = not 0 <= z < fb.height
    z = min(fb.height-1, max(0, z))
fb.erase()
count += 1
    if count > FLASH_COUNT:
        flash_lit = not flash_lit
        count = 0
    # Draw the dot; when the player is off-screen it blinks at the nearest edge.
    if not x_out_of_bounds and not z_out_of_bounds or flash_lit:
fb.point(z, x)
fb.show()
time.sleep(0.01)
| 19.305556 | 61 | 0.683453 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2f793c352ccb3c8ed1a615cf95be1f974da7e115
| 10,833 |
py
|
Python
|
distributed_train.py
|
ShivamShrirao/contrastive-unpaired-translation
|
e81611a5bd8b7aee6aedab10aadf9e22a0804a63
|
[
"BSD-3-Clause"
] | null | null | null |
distributed_train.py
|
ShivamShrirao/contrastive-unpaired-translation
|
e81611a5bd8b7aee6aedab10aadf9e22a0804a63
|
[
"BSD-3-Clause"
] | null | null | null |
distributed_train.py
|
ShivamShrirao/contrastive-unpaired-translation
|
e81611a5bd8b7aee6aedab10aadf9e22a0804a63
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from os.path import join as osp
import numpy as np
from tqdm import tqdm
import wandb
import torch
import torch.nn as nn
import torch.optim as optim
from torch.cuda import amp
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch import autograd
from torch.optim import lr_scheduler
# from torchinfo import summary
from options.train_options import TrainOptions
from utils import AverageMeter, reduce_loss, synchronize, cleanup, seed_everything, set_grads, log_imgs_wandb
from data import CreateDataLoader
from data.unaligned_dataset import UnAlignedDataset
from models.custom_unet import Unet, NLayerDiscriminator, PatchSampleF, GANLoss, PatchNCELoss, get_norm_layer, init_weights
class TrainModel:
def __init__(self, args):
self.device = torch.device('cuda', args.local_rank)
self.netG = Unet(args.input_nc, args.output_nc, 32, self_attn=False).to(self.device)
# init_weights(self.netG, args.init_type, args.init_gain)
norm_layer = get_norm_layer(args.normD)
self.netD = NLayerDiscriminator(args.output_nc, args.ndf, args.n_layers_D, norm_layer).to(self.device)
# init_weights(self.netD, args.init_type, args.init_gain)
with torch.no_grad():
feats = self.netG(torch.randn(8, args.input_nc, 256, 512, device=self.device), get_feat=True, encode_only=True)
self.netF = PatchSampleF(use_mlp=True, nc=args.netF_nc)
self.netF.create_mlp(feats)
self.netF = self.netF.to(self.device)
# init_weights(self.netF, args.init_type, args.init_gain)
# summary(self.netG, (1, args.input_nc, 256, 512))
# summary(self.netD, (1, args.output_nc, 256, 512))
# summary(self.netF, input_data=[feats])
dist.init_process_group(backend="nccl")
if args.sync_bn:
self.netG = nn.SyncBatchNorm.convert_sync_batchnorm(self.netG)
self.netD = nn.SyncBatchNorm.convert_sync_batchnorm(self.netD)
self.netF = nn.SyncBatchNorm.convert_sync_batchnorm(self.netF)
self.netG = DDP(self.netG, device_ids=[args.local_rank], output_device=args.local_rank,
broadcast_buffers=False)
self.netD = DDP(self.netD, device_ids=[args.local_rank], output_device=args.local_rank,
broadcast_buffers=False)
self.netF = DDP(self.netF, device_ids=[args.local_rank], output_device=args.local_rank,
broadcast_buffers=False)
self.criterion_gan = GANLoss()
self.criterionNCE = [PatchNCELoss(args).to(self.device) for _ in range(len(feats))]
self.loss_names = ['lossG', 'lossD', 'nce_loss_tot']
dataset = UnAlignedDataset(args.dataroot, (256, 512), args.phase)
self.dataloader = CreateDataLoader(dataset, args.batch_size, workers=args.workers)
# if args.local_rank == 0:
# val_dataset = UnAlignedDataset(args.dataroot, 1024, phase="test")
# val_dataset.img_names = val_dataset.img_names[:20]
# self.val_loader = CreateDataLoader(val_dataset, 2, workers=args.workers, shuffle=False, distributed=False)
self.optG = optim.Adam(self.netG.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))#, weight_decay=args.wd)
self.optD = optim.Adam(self.netD.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))#, weight_decay=args.wd)
self.optF = optim.Adam(self.netF.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))#, weight_decay=args.wd)
self.scaler = amp.GradScaler(enabled=not args.no_amp)
self.GF_params = list(self.netG.parameters()) + list(self.netF.parameters())
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + args.init_epoch - args.n_epochs) / float(args.n_epochs_decay + 1)
return lr_l
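        # lambda_rule holds the learning rate constant for the first
        # (n_epochs - init_epoch) epochs, then decays it linearly towards zero
        # over the following n_epochs_decay epochs.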
self.schedulers = [lr_scheduler.LambdaLR(opt, lr_lambda=lambda_rule) for opt in [self.optG, self.optD, self.optF]]
    def calculate_NCE_loss(self, args, feat_k, feat_q):
        # Sample the same patch locations from the key and query feature maps,
        # then average the PatchNCE loss over all feature levels.
        feat_k_pool, sample_ids = self.netF(feat_k, args.num_patches, None)
        feat_q_pool, _ = self.netF(feat_q, args.num_patches, sample_ids)
total_nce_loss = 0.0
for f_q, f_k, crit in zip(feat_q_pool, feat_k_pool, self.criterionNCE):
total_nce_loss += crit(f_q, f_k) * args.lambda_NCE
return total_nce_loss / len(feat_k)
def forward(self, args, real_A, real_B):
with amp.autocast(enabled=not args.no_amp):
real = torch.cat((real_A, real_B), dim=0)
pred, feats = self.netG(real, get_feat=True)
batch_size = real_A.size(0)
fake_B = pred[:batch_size]
idt_B = pred[batch_size:]
fake_out = self.netD(fake_B)#.detach())
real_out = self.netD(real_B)
lossD = (self.criterion_gan(fake_out, False)
+ self.criterion_gan(real_out, True)) * 0.5
# self.scaler.scale(lossD).backward()
        # Restrict the D-loss gradients to the discriminator parameters (so fake_B
        # need not be detached) and keep the graph alive for the generator update.
        set_grads(autograd.grad(self.scaler.scale(lossD), self.netD.parameters(), retain_graph=True), self.netD.parameters())
self.scaler.step(self.optD)
self.optD.zero_grad(set_to_none=True)
# fake_out = self.netD(fake_B)
lossG = self.criterion_gan(fake_out, True) * args.lambda_GAN
feat_q = self.netG(fake_B, get_feat=True, encode_only=True)
feat_k = [ft[:batch_size] for ft in feats]
nce_loss_A = self.calculate_NCE_loss(args, feat_k, feat_q)
feat_q = self.netG(idt_B, get_feat=True, encode_only=True)
feat_k = [ft[batch_size:] for ft in feats]
nce_loss_B = self.calculate_NCE_loss(args, feat_k, feat_q)
nce_loss_tot = (nce_loss_A + nce_loss_B) * 0.5
lossG = lossG + nce_loss_tot
set_grads(autograd.grad(self.scaler.scale(lossG), self.GF_params), self.GF_params)
# self.scaler.scale(lossG).backward()
self.scaler.step(self.optG)
self.optG.zero_grad(set_to_none=True)
self.scaler.step(self.optF)
self.optF.zero_grad(set_to_none=True)
self.scaler.update()
self.loss_avg['lossG'].update(reduce_loss(lossG.detach()), batch_size)
self.loss_avg['lossD'].update(reduce_loss(lossD.detach()), batch_size)
self.loss_avg['nce_loss_tot'].update(reduce_loss(nce_loss_tot.detach()), batch_size)
return fake_B.detach(), idt_B.detach()
def train_epoch(self, args, epoch):
self.loss_avg = {nm: AverageMeter() for nm in self.loss_names}
info = {}
with tqdm(self.dataloader, desc=f"Epoch {epoch:>2}", disable=args.local_rank != 0) as pbar:
for step, (real_A, real_B) in enumerate(pbar):
real_A = real_A.to(self.device, non_blocking=True)
real_B = real_B.to(self.device, non_blocking=True)
fake_B, idt_B = self.forward(args, real_A, real_B)
if args.local_rank == 0:
if not step % args.log_interval:
info = {nm: float(loss.avg) for nm, loss in self.loss_avg.items()}
pbar.set_postfix(info)
if args.use_wandb:
wandb.log(info)
if not step % args.img_log_interval:
log_imgs_wandb(real_A=real_A, fake_B=fake_B, real_B=real_B, idt_B=idt_B)
for schd in self.schedulers:
schd.step()
return info
def train_loop(self, args):
# self.validate(args)
for epoch in range(args.init_epoch, args.n_epochs):
self.netG.train()
self.netD.train()
self.netF.train()
self.dataloader.sampler.set_epoch(epoch)
info = self.train_epoch(args, epoch)
info['epoch'] = epoch
if args.local_rank == 0:
if args.use_wandb:
wandb.log({'epoch': epoch})
self.save_models(args, 'latest', info)
if not epoch % 1:
self.save_models(args, epoch, info)
# self.validate(args)
def save_models(self, args, epoch='latest', info={}):
if args.local_rank == 0:
os.makedirs(osp(args.checkpoints_dir, args.name), exist_ok=True)
torch.save(self.netG.state_dict(), osp(args.checkpoints_dir, args.name, f"{epoch}_netG.pth"))
torch.save(self.netD.state_dict(), osp(args.checkpoints_dir, args.name, f"{epoch}_netD.pth"))
torch.save(self.netF.state_dict(), osp(args.checkpoints_dir, args.name, f"{epoch}_netF.pth"))
# torch.save(self.optG.state_dict(), osp(args.checkpoints_dir, args.name, f"{epoch}_optG.pth"))
# torch.save(self.optD.state_dict(), osp(args.checkpoints_dir, args.name, f"{epoch}_optD.pth"))
# torch.save(self.optF.state_dict(), osp(args.checkpoints_dir, args.name, f"{epoch}_optF.pth"))
torch.save(info, osp(args.checkpoints_dir, args.name, f"{epoch}_info.pth"))
print("[+] Weights saved.")
def load_models(self, args, epoch='latest'):
synchronize()
map_location = {'cuda:0': f'cuda:{args.local_rank}'}
try:
self.netG.load_state_dict(torch.load(osp(args.checkpoints_dir, args.name, f"{epoch}_netG.pth"), map_location=map_location))
if args.phase == 'train':
self.netD.load_state_dict(torch.load(osp(args.checkpoints_dir, args.name, f"{epoch}_netD.pth"), map_location=map_location))
self.netF.load_state_dict(torch.load(osp(args.checkpoints_dir, args.name, f"{epoch}_netF.pth"), map_location=map_location))
# self.optG.load_state_dict(torch.load(osp(args.checkpoints_dir, args.name, f"{epoch}_optG.pth"), map_location=map_location))
# self.optD.load_state_dict(torch.load(osp(args.checkpoints_dir, args.name, f"{epoch}_optD.pth"), map_location=map_location))
# self.optF.load_state_dict(torch.load(osp(args.checkpoints_dir, args.name, f"{epoch}_optF.pth"), map_location=map_location))
if args.local_rank == 0:
print(f"[+] Weights loaded for {epoch} epoch.")
except FileNotFoundError as e:
if args.local_rank == 0:
print(f"[!] {e}, skipping weights loading.")
def main():
args = TrainOptions().parse()
torch.cuda.set_device(args.local_rank)
seed_everything(args.seed)
try:
tm = TrainModel(args)
# if args.resume:
tm.load_models(args)
tm.train_loop(args)
tm.save_models(args)
except KeyboardInterrupt:
print("[!] Keyboard Interrupt! Cleaning up and shutting down.")
finally:
cleanup()
if __name__ == '__main__':
main()
| 48.361607 | 141 | 0.63925 | 9,637 | 0.889597 | 0 | 0 | 0 | 0 | 0 | 0 | 1,975 | 0.182313 |
2f79b110e6695d3ccede296e521acb7c60c79294
| 889 |
py
|
Python
|
src/backend/common/models/notifications/tests/verification_test.py
|
bovlb/the-blue-alliance
|
29389649d96fe060688f218d463e642dcebfd6cc
|
[
"MIT"
] | null | null | null |
src/backend/common/models/notifications/tests/verification_test.py
|
bovlb/the-blue-alliance
|
29389649d96fe060688f218d463e642dcebfd6cc
|
[
"MIT"
] | null | null | null |
src/backend/common/models/notifications/tests/verification_test.py
|
bovlb/the-blue-alliance
|
29389649d96fe060688f218d463e642dcebfd6cc
|
[
"MIT"
] | null | null | null |
from backend.common.consts.notification_type import NotificationType
from backend.common.models.notifications.verification import (
VerificationNotification,
)
def test_str():
notification = VerificationNotification("https://thebluealliance.com/", "password")
assert "{'verification_key': " in str(notification)
def test_type():
assert VerificationNotification._type() == NotificationType.VERIFICATION
def test_data_payload():
notification = VerificationNotification("https://thebluealliance.com/", "password")
assert notification.data_payload is None
def test_webhook_message_data():
notification = VerificationNotification("https://thebluealliance.com/", "password")
assert notification.webhook_message_data is not None
verification_key = notification.webhook_message_data.get("verification_key", None)
assert verification_key is not None
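# A further check one could add (sketch only; it assumes the verification key
# is serialized as a string, which the original suite does not assert):
def test_webhook_verification_key_is_str():
    notification = VerificationNotification("https://thebluealliance.com/", "password")
    assert isinstance(notification.webhook_message_data["verification_key"], str)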
| 34.192308 | 87 | 0.786277 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 161 | 0.181102 |
2f7b986c2b053cb63e12ef06cb1f0c6623d1ab5a
| 4,043 |
py
|
Python
|
Generator.py
|
pawelmakarov/ORM
|
1a17599b31ce6d73b08c8fa424e0a4201abfb3d3
|
[
"MIT"
] | null | null | null |
Generator.py
|
pawelmakarov/ORM
|
1a17599b31ce6d73b08c8fa424e0a4201abfb3d3
|
[
"MIT"
] | null | null | null |
Generator.py
|
pawelmakarov/ORM
|
1a17599b31ce6d73b08c8fa424e0a4201abfb3d3
|
[
"MIT"
] | null | null | null |
class Generator(object):
def __init__(self):
self.tables = []
self.alters = []
self.triggers = []
def write_to_file(self, output_file):
with open(output_file, 'w') as sql_file:
sql_file.write('{0}{1}'.format('\n'.join(table for table in self.tables), '\n'))
sql_file.write('{0}{1}'.format('\n'.join(alter for alter in self.alters), '\n'))
sql_file.write('{0}{1}'.format('\n'.join(trigger for trigger in self.triggers), '\n'))
def read_from_file(self, input_file):
import yaml
with open(input_file, 'r') as stream:
return yaml.safe_load(stream)
def get_fields(self, table, structure):
fields = ("\'{0}_{1}\' {2}".format(table, column_name, column_type)
for column_name, column_type in structure['fields'].items())
fields = ', '.join(fields)
return fields
    def create_table(self, name, fields):
        # Note the comma after the field list and the spaces before the column
        # types -- without them the generated SQL is syntactically invalid.
        create_table = ('CREATE TABLE \'{0}\' (\n\t\'{0}_id\' SERIAL PRIMARY KEY,\n\t{1},\n\t\'{0}_created\''
                        ' INTEGER NOT NULL DEFAULT cast(extract(epoch from now()) AS INTEGER),\n\t\'{0}_updated\''
                        ' INTEGER NOT NULL DEFAULT 0\n\t);\n'
                        .format(name, fields))
        return create_table
    def alter_table(self, table, related_table):
        alter_table = ('ALTER TABLE \'{0}\' ADD \'{1}_id\' INTEGER NOT NULL,\n\t'
                       'ADD CONSTRAINT \'fk_{0}_{1}_id\' FOREIGN KEY (\'{1}_id\')'
                       ' REFERENCES \'{1}\' (\'{1}_id\');\n'
                       .format(table, related_table))
        return alter_table
    def join_table(self, table, related_table):
        # The composite primary key must reference the two declared columns.
        join_table = ('CREATE TABLE \'{0}__{1}\' (\n\t\'{0}_id\' INTEGER NOT NULL,\n\t\'{1}_id\''
                      ' INTEGER NOT NULL,\n\tPRIMARY KEY (\'{0}_id\', \'{1}_id\')\n);\n'
                      .format(table, related_table))
        return join_table
def get_function(self, table):
function = ('CREATE OR REPLACE FUNCTION update_{0}_timestamp()\nRETURNS TRIGGER AS '
'$$\nBEGIN\n\tNEW.{0}_updated = cast(extract(epoch from now()) as integer);\n\t'
'RETURN NEW;\nEND;\n$$ language \'plpgsql\';\n'
.format(table))
return function
    def get_trigger(self, table):
        trigger = ('CREATE TRIGGER \'tr_{0}_updated\' BEFORE UPDATE ON \'{0}\''
                   ' FOR EACH ROW EXECUTE PROCEDURE\n\t update_{0}_timestamp();\n'
                   .format(table))
        return trigger
def set_tables(self, statements):
self.tables.append(statements)
def set_alters(self, statements):
self.alters.append(statements)
def set_triggers(self, statements):
self.triggers.append(statements)
    def create_statements(self, input_file, output_file):
        data_map = self.read_from_file(input_file)
        for table, structure in data_map.items():
            table = table.lower()
            fields = self.get_fields(table, structure)
            # Emit the CREATE TABLE once per table, not once per relation.
            self.set_tables(self.create_table(table, fields))
            for related_table, relations_type in structure['relations'].items():
                # dict.values() is a view in Python 3 and must be materialised
                # before indexing.
                relations_status = list(data_map[related_table]['relations'].values())[0]
                related_table = related_table.lower()
                if relations_type == 'one' and relations_status == 'many':
                    self.set_alters(self.alter_table(table, related_table))
                if relations_type == relations_status:
                    self.set_tables(self.join_table(table, related_table))
                    join_table = '{0}__{1}'.format(table, related_table)
                    self.set_alters(self.alter_table(join_table, table))
                    self.set_alters(self.alter_table(join_table, related_table))
            self.set_triggers(self.get_function(table))
            self.set_triggers(self.get_trigger(table))
        self.write_to_file(output_file)
if __name__ == '__main__':
Generator().create_statements('many_to_many.yaml', 'schema.sql')
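# Example input (an assumption inferred from the keys this class reads -- the
# shipped many_to_many.yaml may differ):
#   Student:
#     fields:
#       name: VARCHAR(64) NOT NULL
#     relations:
#       Course: many
#   Course:
#     fields:
#       title: VARCHAR(64) NOT NULL
#     relations:
#       Student: many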
| 43.010638 | 108 | 0.594855 | 3,945 | 0.975761 | 0 | 0 | 0 | 0 | 0 | 0 | 991 | 0.245115 |
2f7d4b66c546d9cc934e15934759dad2ba2d7078
| 1,967 |
py
|
Python
|
src/zope/error/interfaces.py
|
zopefoundation/zope.error
|
8dc7b77a60388bdfb5b2a4606ed8b400db06d3ef
|
[
"ZPL-2.1"
] | null | null | null |
src/zope/error/interfaces.py
|
zopefoundation/zope.error
|
8dc7b77a60388bdfb5b2a4606ed8b400db06d3ef
|
[
"ZPL-2.1"
] | 9 |
2016-03-24T07:48:35.000Z
|
2018-10-19T13:28:04.000Z
|
src/zope/error/interfaces.py
|
zopefoundation/zope.error
|
8dc7b77a60388bdfb5b2a4606ed8b400db06d3ef
|
[
"ZPL-2.1"
] | 1 |
2015-04-03T08:42:41.000Z
|
2015-04-03T08:42:41.000Z
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Error Reporting Utility interfaces
"""
__docformat__ = 'restructuredtext'
from zope.interface import Interface
class IErrorReportingUtility(Interface):
"""Error Reporting Utility"""
def raising(info, request=None):
"""
Logs an exception.
:param info: The exception info, as determined by :func:`sys.exc_info`.
"""
class ILocalErrorReportingUtility(Interface):
"""Local Error Reporting Utility
This interface contains additional management functions.
"""
def getProperties():
"""Gets the properties as dictionary.
keep_entries, copy_to_logfile, ignored_exceptions
"""
def setProperties(keep_entries, copy_to_zlog=1, ignored_exceptions=(),
RESPONSE=None):
"""Sets the properties
keep_entries, copy_to_logfile, ignored_exceptions
:keyword tuple ignored_exceptions: A sequence of *str* unqualified
class names (such as ``'Unauthorized'``) that will be ignored.
The values here will be compared with the ``__name__`` of the first
member of the ``info`` passed to :meth:`raising`.
"""
def getLogEntries():
"""Returns the entries in the log, most recent first."""
def getLogEntryById(id):
"""Return LogEntry by ID"""
| 32.783333 | 79 | 0.627351 | 1,205 | 0.612608 | 0 | 0 | 0 | 0 | 0 | 0 | 1,519 | 0.772242 |
2f7fed0d5af3b96a037248f18b9e9cf73c47b30c
| 7,847 |
py
|
Python
|
python/sacconfig.py
|
Cadair/VivaTalk
|
d9bd5b0c0fca7756c07cbe1adb2f809cd00e9dae
|
[
"MIT"
] | null | null | null |
python/sacconfig.py
|
Cadair/VivaTalk
|
d9bd5b0c0fca7756c07cbe1adb2f809cd00e9dae
|
[
"MIT"
] | null | null | null |
python/sacconfig.py
|
Cadair/VivaTalk
|
d9bd5b0c0fca7756c07cbe1adb2f809cd00e9dae
|
[
"MIT"
] | null | null | null |
import os
import ConfigParser
mu0 = 1.25663706e-6
class SACConfig(object):
def __init__(self, cfg_file=os.path.dirname(__file__) + '/sac_config.cfg'):
self.cfg_file = cfg_file
self.cfg = ConfigParser.SafeConfigParser()
self.cfg.read(self.cfg_file)
def _get_value(self, section, option):
value = self.cfg.get(section, option)
return value
def _set_value(self, section, option, value):
self.cfg.set(section, option, str(value))
#==============================================================================
# SAC Configs
#==============================================================================
@property
def compiler(self):
self._compiler = self._get_value('SAC', 'compiler')
return self._compiler
@compiler.setter
def compiler(self, value):
self._set_value('SAC', 'compiler', value)
@property
def compiler_flags(self):
return self._get_value('SAC', 'compiler_flags')
@compiler_flags.setter
def compiler_flags(self, value):
self._set_value('SAC', 'compiler_flags', value)
@property
def vac_modules(self):
vm = self._get_value('SAC', 'vac_modules')
return [x.strip() for x in vm.split(',')]
@vac_modules.setter
def vac_modules(self, value):
self._set_value('SAC', 'vac_modules', value)
@property
def runtime(self):
self._runtime = self._get_value('SAC', 'runtime')
return float(self._runtime)
@runtime.setter
def runtime(self, value):
self._set_value('SAC', 'runtime', value)
@property
def mpi_config(self):
self._mpi_config = self._get_value('SAC', 'mpi_config')
return self._mpi_config
    @mpi_config.setter
    def mpi_config(self, value):
        # The original call omitted `value`, raising TypeError on every set.
        self._set_value('SAC', 'mpi_config', value)
@property
def varnames(self):
self._varnames = self._get_value('SAC', 'varnames').split(' ')
return self._varnames
    @varnames.setter
    def varnames(self, value):
        # isinstance takes (object, type) -- the original call had the
        # arguments reversed, which raised TypeError for every input.
        if isinstance(value, str):
            self._varnames = value.split(' ')
        elif isinstance(value, list):
            self._varnames = value
        else:
            raise TypeError("Unknown input")
        # Store a space-separated string so the getter's split() round-trips.
        self._set_value('SAC', 'varnames', ' '.join(self._varnames))
#==============================================================================
# Driver configs
#==============================================================================
@property
def driver(self):
return self._get_value('driver', 'driver')
@driver.setter
def driver(self, value):
self._set_value('driver', 'driver', value)
@property
def period(self):
self._period = float(self._get_value('driver', 'period'))
return self._period
@period.setter
def period(self, value):
self._period = float(value)
self._set_value('driver', 'period', str(self._period))
@property
def str_period(self):
period = 'p' + str(self.period).replace('.', '-')
return period
@property
def exp_fac(self):
self._exp_fac = float(self._get_value('driver', 'exp_fac'))
return self._exp_fac
@exp_fac.setter
def exp_fac(self, value):
self._exp_fac = float(value)
self._set_value('driver', 'exp_fac', self._exp_fac)
@property
def str_exp_fac(self):
exp_fac = 'B' + str(self.exp_fac).replace('.', '')
return exp_fac
@property
def amp(self):
self._amp = self._get_value('driver', 'amplitude')
return self._amp
@amp.setter
def amp(self, value):
self._set_value('driver', 'amplitude', value)
@property
def fort_amp(self):
self._fort_amp = self._get_value('driver', 'fort_amp')
return self._fort_amp
@fort_amp.setter
def fort_amp(self, value):
self._set_value('driver', 'fort_amp', value)
#==============================================================================
# Analysis Configs
#==============================================================================
@property
def tube_radii(self):
self._radii = self._get_value('analysis', 'tube_radii').split(',')
self._radii = [r.strip() for r in self._radii]
return self._radii
    @tube_radii.setter
    def tube_radii(self, value):
        if isinstance(value, str):
            self._radii = value.split(',')
        elif isinstance(value, list):
            self._radii = value
        else:
            raise TypeError("Unknown input")
        # Store a comma-separated string so the getter's split() round-trips.
        self._set_value('analysis', 'tube_radii', ', '.join(self._radii))
#==============================================================================
# data configs
#==============================================================================
@property
def out_dir(self):
self._out_dir = self._get_value('data', 'out_dir')
return self._out_dir
    @out_dir.setter
    def out_dir(self, value):
        # _set_value returns None; assigning and returning it was meaningless.
        self._set_value('data', 'out_dir', value)
@property
def data_dir(self):
data_dir = self._get_value('data', 'data_dir')
return os.path.join(data_dir, self.get_identifier())
@data_dir.setter
def data_dir(self, value):
self._set_value('data', 'data_dir', value)
@property
def gdf_dir(self):
gdf_dir = self._get_value('data', 'gdf_dir')
return os.path.join(gdf_dir, self.get_identifier())
@gdf_dir.setter
def gdf_dir(self, value):
self._set_value('data', 'gdf_dir', value)
@property
def fig_dir(self):
return self._get_value('data', 'fig_dir')
@fig_dir.setter
def fig_dir(self, value):
self._set_value('data', 'fig_dir', value)
#==============================================================================
# Utils
#==============================================================================
def get_identifier(self):
return "%s_%s_%s_%s" %(self.driver, self.str_period,
self.amp, self.str_exp_fac)
def save_cfg(self):
with open(self.cfg_file, 'wb') as configfile:
self.cfg.write(configfile)
def print_config(self, section='all'):
SAC = driver = analysis = data = False
if section == 'all':
SAC = True
driver = True
analysis = True
data = True
elif section == 'SAC':
SAC = True
elif section == 'driver':
driver = True
elif section == 'analysis':
analysis = True
elif section == 'data':
data = True
else:
raise ValueError("Invalid section id")
print "Current Config is:"
if SAC:
print "-"*79
print "SAC:"
print "-"*79
print "compiler:", self.compiler
print "compiler_flags:", self.compiler_flags
print "vacmodules:", self.vac_modules
print "runtime:", self.runtime
print "mpi config:", self.mpi_config
print "varnames:", self.varnames
if driver:
print "-"*79
print "Driver:"
print "-"*79
print "period:", self.period
print "exp_fac:", self.exp_fac
print "amp:", self.amp
print "fort_amp:", self.fort_amp
if analysis:
print "-"*79
print "analysis:"
print "-"*79
print "tube_radii:", self.tube_radii
if data:
print "-"*79
print "data:"
print "-"*79
print "out_dir:", self.out_dir
print "data_dir", self.data_dir
print "gdf_dir", self.gdf_dir
print "fig_dir", self.fig_dir
| 29.836502 | 79 | 0.521601 | 7,794 | 0.993246 | 0 | 0 | 4,388 | 0.559195 | 0 | 0 | 1,800 | 0.229387 |
2f80a6cd8804248e492bd75be4cdf855bd46b3e3
| 1,606 |
py
|
Python
|
location/models.py
|
swallville/driverBackEnd
|
3599e5a2e58304e08502b10a3856b77a05c7fd16
|
[
"MIT"
] | null | null | null |
location/models.py
|
swallville/driverBackEnd
|
3599e5a2e58304e08502b10a3856b77a05c7fd16
|
[
"MIT"
] | 3 |
2021-03-30T12:53:49.000Z
|
2021-09-22T18:44:52.000Z
|
location/models.py
|
swallville/driverBackEnd
|
3599e5a2e58304e08502b10a3856b77a05c7fd16
|
[
"MIT"
] | null | null | null |
from django.contrib.gis.db import models
from django.db.models import Q
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
# Create your models here.
class Location (models.Model):
name = models.CharField(
max_length=100,
verbose_name='Name of Location')
location = models.PointField(
verbose_name='Coordinates of Location'
)
address = models.CharField(
max_length=100,
verbose_name='Address of Location')
zip_code = models.CharField(
max_length=9,
verbose_name='Zip code of Location')
city = models.CharField(
max_length=100,
verbose_name='City of Location')
class Meta:
verbose_name = 'Location'
verbose_name_plural = 'Locations'
ordering = ['address']
permissions = (
('detail_location', 'Can detail %s' % verbose_name),
('list_location', 'Can list %s' % verbose_name),
)
constraints = [
models.UniqueConstraint(fields=['location'], name='unique_location'),
]
    def validate_unique(self, exclude=None):
        # Exclude this row itself so updates do not trip the check, and catch
        # even a single pre-existing duplicate when creating a new row.
        qs = Location.objects.filter(Q(location=self.location)).exclude(pk=self.pk)
        if qs.exists():
            raise ValidationError(
                _('Location must have different coordinates (%.14f, %.14f)') % self.location.coords[::-1]
            )
def save(self, *args, **kwargs):
self.validate_unique()
super(Location, self).save(*args, **kwargs)
def __str__(self):
return '%s - %s' % (self.city, self.address)
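# Usage sketch (assumptions: GeoDjango is configured and Point takes
# (lon, lat) in that order):
#   from django.contrib.gis.geos import Point
#   Location.objects.create(name='Office', location=Point(2.3522, 48.8566),
#                           address='1 Rue Example', zip_code='75001', city='Paris')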
| 30.301887 | 105 | 0.619552 | 1,398 | 0.870486 | 0 | 0 | 0 | 0 | 0 | 0 | 313 | 0.194894 |
2f80bcfa95cb3fbe5e797d073a38bc12a4dae7f4
| 842 |
py
|
Python
|
sitemap_urls_auditor/logger/main_logger.py
|
alena-kono/sitemap-urls-auditor
|
b9f1651c48fd8e4131eca8ee44122ffa54a4576e
|
[
"MIT"
] | null | null | null |
sitemap_urls_auditor/logger/main_logger.py
|
alena-kono/sitemap-urls-auditor
|
b9f1651c48fd8e4131eca8ee44122ffa54a4576e
|
[
"MIT"
] | null | null | null |
sitemap_urls_auditor/logger/main_logger.py
|
alena-kono/sitemap-urls-auditor
|
b9f1651c48fd8e4131eca8ee44122ffa54a4576e
|
[
"MIT"
] | null | null | null |
"""Module configures project's main logger."""
import logging
from loguru import logger
def disable_usp_logging() -> None:
"""Disable logging of ultimate-sitemap-parser (usp) library.
Usp package initializes default logging.Logger() each time it
imports something from its core submodules.
Therefore, this function disables usp loggers after it imports
one of the usp functions.
"""
from usp.tree import sitemap_tree_for_homepage # noqa: F401, WPS433
for name, each_logger in logging.root.manager.loggerDict.items():
if name.startswith('usp') and isinstance(each_logger, logging.Logger):
each_logger.disabled = True
def get_loguru_logger():
"""Get loguru Logger object.
Returns:
Loguru Logger object.
"""
return logger
main_logger = get_loguru_logger()
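if __name__ == '__main__':
    # Smoke-test sketch: silence the usp loggers, then log through loguru.
    disable_usp_logging()
    main_logger.info('usp loggers disabled')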
| 24.764706 | 78 | 0.709026 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 432 | 0.513064 |
2f8142cd627ecd115f6acdab00511ac3d94dfb10
| 14,213 |
py
|
Python
|
matroska_cache/dep/scopes.py
|
kolypto/py-matroska-cache
|
b40030f97d463aac8e3a6f4b0e0e9f081dfc92b1
|
[
"MIT"
] | null | null | null |
matroska_cache/dep/scopes.py
|
kolypto/py-matroska-cache
|
b40030f97d463aac8e3a6f4b0e0e9f081dfc92b1
|
[
"MIT"
] | null | null | null |
matroska_cache/dep/scopes.py
|
kolypto/py-matroska-cache
|
b40030f97d463aac8e3a6f4b0e0e9f081dfc92b1
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import warnings
from typing import Any, List, Callable, Tuple, Union, Collection, FrozenSet, Optional, Iterable, Set
from .base import DependencyBase, dataclass
from .tag import Tag
ExtractorFunc = Callable[[Any], Optional[dict]]
class Scopes:
""" Generate dependencies that describe lists of objects.
This tool is designed to solve the case where newly created, or freshly removed, items may enter the scope
of some listing, which may itself be filtered by some condition.
In general, if you cache a list of objects by `Id`:
cache.put(
'articles-list', [...],
dep.Id('article', 1),
dep.Id('article', 2),
...
)
you will not have this list of articles invalidated when new articles come into scope.
For instance, if your view caches the list of articles by `category`, intialize the `Scopes` object like this:
article_scopes = Scopes('article', production_mode=False)
@article_scopes.describes('category')
def article_category(article: Article):
return {'category': article.category}
This operation enables us to use `category` information as a dependency for the cached list:
# Articles filtered by category ...
articles = ssn.query(Article).filter_by(category='python').all()
cache.put(
f'articles-list:category=python', # make sure to put it here
articles,
...
# ... declare this category as their dependency
*article_scopes.condition(category='python')
)
Now, in another place, where articles are created, you can invalidate this dependency automatically
by just passing the new article to the article_scopes.invalidate_for() method:
def create_new_article(...):
...
            article_scopes.invalidate_for(article, cache)
Under the hood, it will go over every condition known through @article_scopes.describes()
and invalidate all related caches.
---
NOTE: Does it seem complicated to you?
    It is; but this complexity serves one goal: to make caching *declarative* and minimize hidden connections in your code.
For instance, you could have used Tag() to achieve the very same result.
Listing articles:
articles = ssn.query(Article).filter_by(category='python').all()
cache.put(
f'articles-list:category=python', # make sure to put it here
articles,
...
# ... declare this category as their dependency
dep.Tag(f'articles:category=python'),
)
Adding articles:
        cache.invalidate(dep.Tag(f'articles:category={article.category}'))
This code would work just fine; but then, for every caching behavior you would need *to remember* to add another line
to the place where articles are saved. Those connections would soon become numerous and lead to caching errors that
are hard to catch.
    The `Scopes()` approach, by contrast, is declarative:
you first declare *the intention* of caching by category, and `Scopes()` will check that everything is set up properly.
"""
def __init__(self, object_type: str, *, production_mode: bool):
""" Initialize scopes for a particular kind of object
Args:
object_type: Name for the objects you're watching. Got to be unique. Example: 'article'
production_mode: Whether the cache is currently operating on a production server.
                If there is an error in how you configured the `Scopes` object, it will be disabled.
In development (production_mode=False), an exception will be raised.
"""
self._object_type = object_type
self._extractor_fns: List[ExtractorInfo] = []
self._known_extractor_signatures: Set[Tuple[str]] = set()
# An invalidate-all dependency used to invalidate all caches in cases when scopes are not used properly.
# For instance, the user is attempting to cache the results that matched a filter
# .condition(category_id=10)
# but there was no extractor function that describes how `category_id` influences the cache.
self._invalidate_all = InvalidateAll(self._object_type)
self._production_mode = production_mode
def describes(self, *param_names, watch_modified: Optional[Iterable[str]] = None):
""" Decorator for a function that extracts data for a conditional dependency.
NOTE: let your function return `None` if you want a particular change to be ignored for some reason.
Whenever any object is saved, your application should call `invalidate_for()`,
and it will invalidate every cache that might see a new object enter the scope, or an old one leave it.
The arguments for the scope are described by the decorated function: if you want to cache the results of
a list filtered by `category=<something>`, you first need to define an extractor function:
@article_scopes.describes('category')
def article_category(article: Article, **info):
# Extract filter arguments from a new object
return {'category': article.category}
Only after such a condition is described, you can use it as a cache key:
cache.put(
f'articles-{category}',
articles,
...,
*article_scopes.condition(category=category),
expires=600,
)
Note that the values extracted by `article_category()` and provided to `condition()` have to match.
        If they don't, the cache will misbehave.
Args:
*param_names: The list of parameter names the extractor function is going to return.
These names are completely custom, but have to match those given to condition()
watch_modified: Only run this function when the following fields are modified.
                Default: equal to `param_names`.
Setting this field manually only makes sense when your parameter names are different from attribute names.
For example:
return {'filter-by-category': article.category}
"""
def decorator(fn: ExtractorFunc):
""" Register the decorated function and return """
self._extractor_fns.append(
ExtractorInfo(
param_names=frozenset(param_names),
watch_modified=frozenset(watch_modified) if watch_modified else frozenset(param_names),
func=fn
)
)
self._known_extractor_signatures.add(tuple(sorted(param_names)))
# Done
return fn
return decorator
def invalidate_for(self, item: Any, cache: 'MatroskaCache', modified: Collection[str] = None, **info):
""" Invalidate all caches that may see `item` in their listings.
Args:
item: The new/deleted item that may enter or leave the scope of some listing
cache: MatroskaCache to invalidate
modified: (optional) list of field names that have been modified. Useful to ignore non-relevant updates.
**info: Extra info that may be passed to your extractor functions
"""
cache.invalidate(*self.object_invalidates(item, modified, **info))
def condition(self, **conditions: Any) -> List[Union[ConditionalDependency, InvalidateAll]]:
""" Get dependencies for a conditional scope.
Use this method with MatroskaCache.put() to generate dependencies for your scope.
Args:
**conditions: The description of your filtering conditions, in the `name=value` form.
Returns:
List of scope dependencies to be used on your cache entry
"""
# Signature
filter_params_signature = tuple(sorted(conditions))
if filter_params_signature in self._known_extractor_signatures:
return [
ConditionalDependency(self._object_type, conditions),
# Got to declare this kill switch as a dependency; otherwise, it won't work.
self._invalidate_all,
]
elif self._production_mode:
warnings.warn(
f'Matroska cache: no extractor @describes for {filter_params_signature!r}. '
f'Caching disabled. '
)
return [self._invalidate_all]
else:
raise RuntimeError(
f'No extractor function is described for condition {filter_params_signature!r}. '
f'Please use @.describes() on a function with matching parameters. '
f'It will not fail in production, but caching will be disabled.'
)
def object_invalidates(self, item: Any, modified: Collection[str] = None, **info) -> List[Union[ConditionalDependency, InvalidateAll]]:
""" Get dependencies that will invalidate all caches that may see `item` in their listings.
This function takes the `item` and calls every extractor function decorated by `@scope.describes()`.
The resulting value will be used to find scopes that this object will come into, and invalidate them.
Args:
item: The newly created or freshly deleted item.
modified: (optional) list of field names that have been modified. Useful to ignore non-relevant updates.
If not provided, all extractor functions will be run to invalidate dependencies.
If provided, only those that are watching those attributes will be run.
**info: Additional arguments to pass to *all* the extractor functions.
Returns:
List of dependencies to be used with `cache.invalidate()`
"""
if modified:
modified = set(modified)
ret = []
for extractor_info in self._extractor_fns:
# if `modified` was provided, skip extractors that are not interested in those fields
if modified and not (extractor_info.watch_modified & modified):
continue
# Run the extractor function and get dependency parameters
try:
params = extractor_info.func(item, **info)
except Exception:
# In production mode, just invalidate all
if self._production_mode:
return [self._invalidate_all]
# In development mode, report the error
else:
raise
# If the function returned a None, skip it altogether
if params is None:
continue
# If it returned a correct set of fields (as @describes()ed), generate a dependency
elif set(params) == extractor_info.param_names:
ret.append(ConditionalDependency(self._object_type, params))
# In production mode, just invalidate all
elif self._production_mode:
return [self._invalidate_all]
# In development mode, report an error
else:
raise RuntimeError(
f'The described extractor {extractor_info.func} was supposed to return a dict of {extractor_info.param_names!r}, '
f'but it returned only {params!r}. Please fix. '
f'It will not fail in production, but caching will be disabled.'
)
return ret
@dataclass
class ConditionalDependency(DependencyBase):
""" Internal dependency used by Scope
A dependency object of this type is generated for the output of every extractor function.
This is how the whole thing operates:
When a new article is created, it is passed to the `invalidate_for()` function.
An extractor function, described like this:
@article_scopes.describes('category')
def article_category(article: Article, **info):
# Extract filter arguments from a new object
return {'category': article.category}
will generate a dependency:
ConditionalDependency(object_type='article', conditions={'category': 'sci-fi'})
# it is just a string:
'condition:article:&category=sci-fi&'
This string invalidates any cache entries that had been created like this:
cache.put(
...
*article_scopes.condition(category=category),
)
So, in essense, this whole Scopes is just an interface to match the two strings in a declarative fashion.
"""
object_type: str
condition: str
__slots__ = 'object_type', 'condition',
def __init__(self, object_type: str, conditions: dict):
self.object_type = object_type
self.condition = '&'.join(f'{key}={value}'
# items are sorted to make sure they always match in the same way!
for key, value in sorted(conditions.items()))
# Surround it with &s to enable wildcard matching
self.condition = '&' + self.condition + '&'
PREFIX = 'condition'
def key(self) -> str:
return f'{self.PREFIX}:{self.object_type}:{self.condition}'
@dataclass
class ExtractorInfo:
# Set of parameters that the extractor function promises to return
param_names: FrozenSet[str]
# Set of parameters that it watches the modifications on.
# Default: equal to param_names_set
watch_modified: FrozenSet[str]
# The extractor function itself
func: ExtractorFunc
__slots__ = 'param_names', 'watch_modified', 'func'
class InvalidateAll(Tag):
""" A custom tag, used in production, to invalidate all scopes in cases when Scopes is misconfigured """
# Use the same prefix. Not important; just looks nice
# There will be no clashes because all `ConditionalDependency` have "&" in their names
PREFIX = ConditionalDependency.PREFIX
def __init__(self, object_type: str):
super().__init__(f'{object_type}::InvalidateAll')
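# End-to-end sketch (the names `cache`, `Article` and the view code are
# assumptions; the Scopes API itself is as documented above):
#   article_scopes = Scopes('article', production_mode=False)
#   @article_scopes.describes('category')
#   def _by_category(article, **info):
#       return {'category': article.category}
#   # caching a filtered listing:
#   cache.put('articles-list:category=python', articles,
#             *article_scopes.condition(category='python'))
#   # on create/update/delete:
#   article_scopes.invalidate_for(article, cache, modified=['category'])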
| 42.810241 | 139 | 0.642651 | 13,909 | 0.978611 | 0 | 0 | 2,148 | 0.151129 | 0 | 0 | 10,122 | 0.712165 |
2f8296613ef32d75696e51924c20cb22faf6bba2
| 812 |
py
|
Python
|
dataporten/middleware.py
|
frafra/django-dataporten
|
4236017611e08d08bd810be0beae1b994cb5fc67
|
[
"MIT"
] | 4 |
2019-01-06T17:56:07.000Z
|
2021-03-21T19:16:35.000Z
|
dataporten/middleware.py
|
frafra/django-dataporten
|
4236017611e08d08bd810be0beae1b994cb5fc67
|
[
"MIT"
] | 9 |
2019-10-21T17:23:53.000Z
|
2021-06-10T21:06:25.000Z
|
dataporten/middleware.py
|
frafra/django-dataporten
|
4236017611e08d08bd810be0beae1b994cb5fc67
|
[
"MIT"
] | 2 |
2019-04-29T11:48:59.000Z
|
2020-01-06T09:54:55.000Z
|
import logging
import requests
import requests_cache
from django.conf import settings
from django.http import HttpResponse, HttpRequest
try:
from django.utils.deprecation import MiddlewareMixin
except ImportError:
MiddlewareMixin = object
from .api import usergroups
from .models import DataportenUser
# Cache requests for 15 minutes
if settings.DATAPORTEN_CACHE_REQUESTS:
requests_cache.install_cache(
settings.DATAPORTEN_CACHE_PATH + 'dataporten_cache',
backend='sqlite',
expire_after=900,
allowable_codes=(200,),
include_get_headers=True,
)
class DataportenGroupsMiddleware(MiddlewareMixin):
def process_request(self, request: HttpRequest):
if DataportenUser.valid_request(request):
request.user.__class__ = DataportenUser
| 26.193548 | 60 | 0.758621 | 205 | 0.252463 | 0 | 0 | 0 | 0 | 0 | 0 | 57 | 0.070197 |
2f853129c44d31a1158c0bd481a49cd736cdcaa4
| 7,326 |
py
|
Python
|
sm4.py
|
cclauss/Pythonista-sm
|
ef5c6527f36334a2b4dc3f0a92f957161aa3bdd3
|
[
"Apache-2.0"
] | 3 |
2021-08-23T02:49:09.000Z
|
2021-08-24T01:48:14.000Z
|
sm4.py
|
cclauss/Pythonista-sm
|
ef5c6527f36334a2b4dc3f0a92f957161aa3bdd3
|
[
"Apache-2.0"
] | null | null | null |
sm4.py
|
cclauss/Pythonista-sm
|
ef5c6527f36334a2b4dc3f0a92f957161aa3bdd3
|
[
"Apache-2.0"
] | 1 |
2021-08-23T03:02:39.000Z
|
2021-08-23T03:02:39.000Z
|
# -*-coding:utf-8-*-
import base64
import copy
from .func import xor, rotl, get_uint32_be, put_uint32_be, bytes_to_list, list_to_bytes, padding, un_padding
BOXES_TABLE = [
    0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7, 0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05,
    0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3, 0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99,
    0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a, 0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62,
    0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95, 0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6,
    0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba, 0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8,
    0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b, 0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35,
    0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2, 0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87,
    0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52, 0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e,
    0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5, 0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1,
    0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55, 0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3,
    0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60, 0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f,
    0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f, 0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51,
    0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f, 0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8,
    0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd, 0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0,
    0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e, 0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84,
    0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20, 0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48,
]
# System parameters (FK)
FK = [0xa3b1bac6, 0x56aa3350, 0x677d9197, 0xb27022dc]
# Fixed parameters (CK, round constants)
CK = [
0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269, 0x70777e85, 0x8c939aa1,
0xa8afb6bd, 0xc4cbd2d9, 0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249,
0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9, 0xc0c7ced5, 0xdce3eaf1,
0xf8ff060d, 0x141b2229, 0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299,
0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209, 0x10171e25, 0x2c333a41,
0x484f565d, 0x646b7279
]
ENCRYPT = 0
DECRYPT = 1
class Crypt(object):
def __init__(self, mode=ENCRYPT):
self.sk = [0] * 32
self.mode = mode
@classmethod
def bb(cls, ka):
b = [0, 0, 0, 0]
a = put_uint32_be(ka)
b[0] = BOXES_TABLE[a[0]]
b[1] = BOXES_TABLE[a[1]]
b[2] = BOXES_TABLE[a[2]]
b[3] = BOXES_TABLE[a[3]]
bb = get_uint32_be(b[0:4])
return bb
    # Compute one round key.
    # args: [in] ka: a 32-bit unsigned value;
    # return: sk[i], i in {0, 1, 2, ..., 31}.
@classmethod
def _round_key(cls, ka):
bb = cls.bb(ka)
rk = bb ^ (rotl(bb, 13)) ^ (rotl(bb, 23))
return rk
    # Compute one encryption/decryption round.
    # args: [in] x0..x3: input words;
    # args: [in] rk: the round key;
    # returns the transformed word.
@classmethod
def _f(cls, x0, x1, x2, x3, rk):
# "T algorithm" == "L algorithm" + "t algorithm".
        # args: [in] ka: a 32-bit unsigned value;
        # return: c, computed with the linear transform "L" and the
        # non-linear transform "t".
def _sm4_l_t(ka):
bb = cls.bb(ka)
c = bb ^ (rotl(bb, 2)) ^ (rotl(bb, 10)) ^ (rotl(bb, 18)) ^ (rotl(bb, 24))
return c
return x0 ^ _sm4_l_t(x1 ^ x2 ^ x3 ^ rk)
def set_key(self, key, mode):
key = bytes_to_list(key)
MK = [0, 0, 0, 0]
MK[0] = get_uint32_be(key[0:4])
MK[1] = get_uint32_be(key[4:8])
MK[2] = get_uint32_be(key[8:12])
MK[3] = get_uint32_be(key[12:16])
k = [0] * 36
k[0:4] = xor(MK[0:4], FK[0:4])
for i in range(32):
k[i + 4] = k[i] ^ (self._round_key(k[i + 1] ^ k[i + 2] ^ k[i + 3] ^ CK[i]))
self.sk[i] = k[i + 4]
self.mode = mode
if mode == DECRYPT:
for idx in range(16):
t = self.sk[idx]
self.sk[idx] = self.sk[31 - idx]
self.sk[31 - idx] = t
def one_round(self, sk, in_put):
out_put = []
ul_buf = [0] * 36
ul_buf[0] = get_uint32_be(in_put[0:4])
ul_buf[1] = get_uint32_be(in_put[4:8])
ul_buf[2] = get_uint32_be(in_put[8:12])
ul_buf[3] = get_uint32_be(in_put[12:16])
for idx in range(32):
ul_buf[idx + 4] = self._f(
ul_buf[idx], ul_buf[idx + 1], ul_buf[idx + 2], ul_buf[idx + 3], sk[idx]
)
out_put += put_uint32_be(ul_buf[35])
out_put += put_uint32_be(ul_buf[34])
out_put += put_uint32_be(ul_buf[33])
out_put += put_uint32_be(ul_buf[32])
return out_put
def crypt_ecb(self, input_data):
        # SM4-ECB block encryption/decryption
input_data = bytes_to_list(input_data)
if self.mode == ENCRYPT:
input_data = padding(input_data)
length = len(input_data)
i = 0
output_data = []
while length > 0:
output_data += self.one_round(self.sk, input_data[i:i + 16])
i += 16
length -= 16
if self.mode == DECRYPT:
return list_to_bytes(un_padding(output_data))
return list_to_bytes(output_data)
def crypt_cbc(self, iv, input_data):
        # SM4-CBC buffer encryption/decryption
i = 0
output_data = []
tmp_input = [0] * 16
iv = bytes_to_list(iv)
if self.mode == ENCRYPT:
input_data = padding(bytes_to_list(input_data))
length = len(input_data)
while length > 0:
tmp_input[0:16] = xor(input_data[i:i + 16], iv[0:16])
output_data += self.one_round(self.sk, tmp_input[0:16])
iv = copy.deepcopy(output_data[i:i + 16])
i += 16
length -= 16
return list_to_bytes(output_data)
else:
length = len(input_data)
while length > 0:
output_data += self.one_round(self.sk, input_data[i:i + 16])
output_data[i:i + 16] = xor(output_data[i:i + 16], iv[0:16])
iv = copy.deepcopy(input_data[i:i + 16])
i += 16
length -= 16
return list_to_bytes(un_padding(output_data))
SM4_KEY = b'ED0Z2TCK2JN9SGV2'
SM4_IV = b'GM6PR0EL5TT4YUT6'
# Public helper functions
def sm4_encrypt(value: str) -> str:
"""
    Encrypt data and return the base64-encoded ciphertext.
"""
sm = Crypt()
data = bytearray(value.encode('utf-8', 'ignore'))
sm.set_key(SM4_KEY, ENCRYPT)
digest = sm.crypt_cbc(SM4_IV, data)
digest = base64.b64encode(digest).decode('utf-8', 'ignore')
return digest
def sm4_decrypt(value: str) -> str:
"""
    Decrypt base64-encoded data and return the plaintext.
"""
sm = Crypt()
data = base64.b64decode(value)
sm.set_key(SM4_KEY, DECRYPT)
digest = sm.crypt_cbc(SM4_IV, data)
return digest.decode('utf-8', 'ignore')
# Self-test
def test():
key = b'KNN36H7F0MZB6RTW'
    iv = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'  # 16 zero bytes
crypt_sm4 = Crypt()
    # value = b'Jll9496././'  # bytes
    value = bytearray("test明文".encode('utf-8'))  # "test" plus the Chinese word for "plaintext"
    # CBC mode
    crypt_sm4.set_key(key, ENCRYPT)
    encrypt_value = crypt_sm4.crypt_cbc(iv, value)  # bytes
    encrypt_value = base64.b64encode(encrypt_value).decode('utf-8', 'ignore')
    print(encrypt_value.upper())  # upper-case the base64 output
encrypt_value = base64.b64decode(encrypt_value)
crypt_sm4.set_key(key, DECRYPT)
    decrypt_value = crypt_sm4.crypt_cbc(iv, encrypt_value)  # bytes
    print(decrypt_value.decode('utf-8', 'ignore'))
    # ECB mode
    # crypt_sm4.set_key(key, ENCRYPT)
    # encrypt_value = crypt_sm4.crypt_ecb(value)  # bytes
    # print(encrypt_value)
    #
    # crypt_sm4.set_key(key, DECRYPT)
    # decrypt_value = crypt_sm4.crypt_ecb(encrypt_value)  # bytes
# print(decrypt_value)
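# Round-trip sketch (helpers as defined above; note the relative import at the
# top means this module must be executed via its package):
#   ciphertext = sm4_encrypt('hello, world')
#   assert sm4_decrypt(ciphertext) == 'hello, world'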
| 15.822894 | 108 | 0.632678 | 3,553 | 0.465784 | 0 | 0 | 726 | 0.095176 | 0 | 0 | 1,345 | 0.176324 |
2f86b378e72ad44c8909918ac3d29f4b3f63ef71
| 617 |
py
|
Python
|
question_bank/unique-paths/unique-paths.py
|
yatengLG/leetcode-python
|
5d48aecb578c86d69835368fad3d9cc21961c226
|
[
"Apache-2.0"
] | 9 |
2020-08-12T10:01:00.000Z
|
2022-01-05T04:37:48.000Z
|
question_bank/unique-paths/unique-paths.py
|
yatengLG/leetcode-python
|
5d48aecb578c86d69835368fad3d9cc21961c226
|
[
"Apache-2.0"
] | 1 |
2021-02-16T10:19:31.000Z
|
2021-02-16T10:19:31.000Z
|
question_bank/unique-paths/unique-paths.py
|
yatengLG/leetcode-python
|
5d48aecb578c86d69835368fad3d9cc21961c226
|
[
"Apache-2.0"
] | 4 |
2020-08-12T10:13:31.000Z
|
2021-11-05T01:26:58.000Z
|
# -*- coding: utf-8 -*-
# @Author : LG
"""
执行用时:40 ms, 在所有 Python3 提交中击败了74.47% 的用户
内存消耗:13.8 MB, 在所有 Python3 提交中击败了7.95% 的用户
解题思路:
只能向右或向下前进。
则当前格的路径数等于左侧格的路径数+上侧格的路径数
dp[i][j] = dp[i-1][j] + dp[i][j-1]
例子:
1 1 1 1 1 1
1 2 3 4 5 6
1 3 6 10 15 21
1 4 10 20 35 56
"""
class Solution:
def uniquePaths(self, m: int, n: int) -> int:
dp = [[1 for _ in range(m)] for _ in range(n)]
for i in range(1, n):
for j in range(1, m):
dp[i][j] = dp[i-1][j] + dp[i][j-1]
return dp[-1][-1]
| 22.851852 | 54 | 0.458671 | 262 | 0.340702 | 0 | 0 | 0 | 0 | 0 | 0 | 502 | 0.652796 |
2f8851b9c216915fb1f4051cf734644949f0036e
| 1,207 |
py
|
Python
|
crusoe_observe/ansible/roles/mlData/files/build-ml.py
|
CSIRT-MU/CRUSOE
|
73e4ac0ced6c3ac46d24ac5c3feb01a1e88bd36b
|
[
"MIT"
] | 3 |
2021-11-09T09:55:17.000Z
|
2022-02-19T02:58:27.000Z
|
crusoe_observe/ansible/roles/mlData/files/build-ml.py
|
CSIRT-MU/CRUSOE
|
73e4ac0ced6c3ac46d24ac5c3feb01a1e88bd36b
|
[
"MIT"
] | null | null | null |
crusoe_observe/ansible/roles/mlData/files/build-ml.py
|
CSIRT-MU/CRUSOE
|
73e4ac0ced6c3ac46d24ac5c3feb01a1e88bd36b
|
[
"MIT"
] | null | null | null |
import sys
import structlog
from osrest import Tcpml
import services_component
def build_os(dataset_path, model_path, logger):
logger.info(f"Loading OS dataset from \"{dataset_path}\".")
dataset = Tcpml.load_dataset(dataset_path)
logger.info(f"Building OS model.")
model = Tcpml.build_model(dataset)
logger.info(f"Storing OS model to \"{model_path}\".")
Tcpml.save_model(model, model_path)
def build_si(dataset_path, model_path, logger):
paths = {
"model": model_path,
"dataset": dataset_path,
"nbar": f"{services_component.__path__[0]}/data/si_nbar.json"
}
si = services_component.services.ServiceIdentifier(paths, ["0.0.0.0/0"], logger)
def main():
ml_data_path = sys.argv[1]
ml_model_path = sys.argv[2]
logger = structlog.PrintLogger()
logger.info("Starting OS model build.")
build_os(f"{ml_data_path}os_dataset.csv", f"{ml_model_path}os_model.pkl", logger)
logger.info("Finishing OS model build.")
logger.info("Starting SI model build.")
build_si(f"{ml_data_path}si_dataset.csv", f"{ml_model_path}si_model.pkl", logger)
logger.info("Finishing SI model build.")
if __name__ == "__main__":
main()
| 30.948718 | 85 | 0.697597 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 431 | 0.357084 |
2f88bf2c5f6df294804820f97f5c583aad4fe844
| 129 |
py
|
Python
|
ted_lm/to_run/evaluate.py
|
mvdwerve/NLP-Classifier
|
5494a789c9b87476ec4253e00e3349462f08d3d2
|
[
"MIT"
] | null | null | null |
ted_lm/to_run/evaluate.py
|
mvdwerve/NLP-Classifier
|
5494a789c9b87476ec4253e00e3349462f08d3d2
|
[
"MIT"
] | null | null | null |
ted_lm/to_run/evaluate.py
|
mvdwerve/NLP-Classifier
|
5494a789c9b87476ec4253e00e3349462f08d3d2
|
[
"MIT"
] | null | null | null |
version https://git-lfs.github.com/spec/v1
oid sha256:b28da294230f24c172729139b1988b8008f6fb2c259b1c3425772b2c80cfb9dd
size 2688
| 32.25 | 75 | 0.883721 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2f8911a3ffc8a10cc46f6545eeb625b8d7a7c1f6
| 4,049 |
py
|
Python
|
converter.py
|
TheSpiritXIII/Qt-Creator-TmTheme
|
3eba37c3712da9964e775a750732b6fda7cb6536
|
[
"Apache-2.0"
] | 1 |
2022-01-02T19:55:18.000Z
|
2022-01-02T19:55:18.000Z
|
converter.py
|
TheSpiritXIII/Qt-Creator-TmTheme
|
3eba37c3712da9964e775a750732b6fda7cb6536
|
[
"Apache-2.0"
] | null | null | null |
converter.py
|
TheSpiritXIII/Qt-Creator-TmTheme
|
3eba37c3712da9964e775a750732b6fda7cb6536
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import sys
import xml.etree.ElementTree as ET
def parse_value(element):
if element.tag == "string":
return element.text
elif element.tag == "dict":
return parse_dict(element)
elif element.tag == "array":
return parse_array(element)
else:
exception = "Unknown tag `" + element.tag + "`"
raise Exception(exception)
def parse_array(root):
sequence = []
for element in root:
if element.tag == "key":
exception = "Arrays must not have a key. Found key `" + element.text + "`"
raise Exception(exception)
else:
sequence.append(parse_value(element))
return sequence
def parse_dict(root):
    lastKey = None
    sequence = {}
    for element in root:
        if element.tag == "key":
            if lastKey:
                exception = "Missing value for key `" + lastKey + "`"
                raise Exception(exception)
            lastKey = element.text
        else:
            # lastKey is None here, so the original message (which
            # concatenated it) would itself have raised a TypeError.
            if not lastKey:
                exception = "Found value without a preceding key (tag `" + element.tag + "`)"
                raise Exception(exception)
            sequence[lastKey] = parse_value(element)
            lastKey = None
    return sequence
def parse_file(filename):
    with open(filename, 'r') as xml_file:
        file_contents = xml_file.read()
    # Filter out all control characters.
    mpa = dict.fromkeys(range(32))
    file_contents = file_contents.translate(mpa)
    return parse_dict(ET.fromstring(file_contents)[0])
def write_style(file, name, foreground, background, italic):
file.write("\t<style name=\"" + name + "\" ")
if foreground:
file.write("foreground=\"" + foreground + "\" ")
if background:
file.write("background=\"" + background + "\" ")
if italic:
file.write("italic=\"true\" ")
file.write("/>\n")
def create_file(filename, data):
    # A with-statement guarantees the output file is flushed and closed.
    with open(filename, "w") as f:
        # f.write("<!-- Generated by Qt TmTheme Converter -->\n")
        # f.write("<!-- Original file by " + data["author"] + ". -->\n")
        f.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
        f.write("<style-scheme version=\"1.0\" name=\"" + data["name"] + "\">\n")
        if "gutterSettings" in data:
            gutter_settings = data["gutterSettings"]
            write_style(f, "LineNumber", gutter_settings["foreground"], None, False)
            write_style(f, "DisabledCode", gutter_settings["foreground"], None, False)
        key_map = {
            "comment": ["Comment"],
            "constant.numeric": ["Number"],
            "entity.name.function": ["Function"],
            "constant": ["Constant"],
            "string": ["String"],
            "keyword": ["Keyword", "Preprocessor"],
            "keyword.operator": ["Operator"],
            "variable": ["Field"],
            # "storage": ["PrimitiveType"],
            "storage.type": ["PrimitiveType"]
        }
        for setting in data["settings"]:
            if "scope" in setting:
                # print("Check: ", setting["scope"].split(","))
                for scope in setting["scope"].split(","):
                    scope = scope.strip()
                    if scope in key_map:
                        full_settings = setting["settings"]
                        background = None
                        foreground = None
                        italics = False
                        if "foreground" in full_settings:
                            foreground = full_settings["foreground"]
                        if "background" in full_settings:
                            background = full_settings["background"]
                        if "fontStyle" in full_settings:
                            italics = full_settings["fontStyle"] == "italic"
                        for key in key_map[scope]:
                            write_style(f, key, foreground, background, italics)
            elif "settings" in setting:
                full_settings = setting["settings"]
                write_style(f, "Text", full_settings["foreground"], full_settings["background"], False)
                write_style(f, "Type", full_settings["foreground"], full_settings["background"], False)
                write_style(f, "Enumeration", full_settings["foreground"], full_settings["background"], False)
                write_style(f, "Selection", None, full_settings["selection"], False)
                write_style(f, "CurrentLine", None, full_settings["lineHighlight"], False)
                write_style(f, "VisualWhitespace", full_settings["invisibles"], None, False)
            else:
                raise Exception("Unknown setting type")
        # f.write("\t\n")
        f.write("</style-scheme>\n")
def main():
    if len(sys.argv) != 3:
        print("Invalid number of arguments. Must be: `converter.py input output`")
        return
    create_file(sys.argv[2], parse_file(sys.argv[1]))
if __name__ == "__main__":
    main()
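# CLI usage sketch (file names are placeholders):
#   python converter.py Monokai.tmTheme monokai-style.xml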
| 29.992593 | 97 | 0.667078 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,340 | 0.330946 |
2f8acf77c5feae7c80644d1d8292864bc245ea00
| 1,768 |
py
|
Python
|
panamsquad/urls.py
|
the-mandarine/mypanamsquad
|
b34c1c6169a3b7496e171b9536472a1ede0bdc84
|
[
"Beerware"
] | null | null | null |
panamsquad/urls.py
|
the-mandarine/mypanamsquad
|
b34c1c6169a3b7496e171b9536472a1ede0bdc84
|
[
"Beerware"
] | null | null | null |
panamsquad/urls.py
|
the-mandarine/mypanamsquad
|
b34c1c6169a3b7496e171b9536472a1ede0bdc84
|
[
"Beerware"
] | null | null | null |
"""panamsquad URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth import views as auth_views
from core import views as core_views
from votes import urls as votes_urls
from opinions import urls as opinions_urls
from docs import urls as docs_urls
from infos import urls as infos_urls
from derby import urls as derby_urls
from events import urls as events_urls
admin.site.site_header = 'Panam Squad Administration'
urlpatterns = [
url(r'^$', core_views.home, name='home'),
url(r'^privacy/$', core_views.privacy, name='privacy'),
url(r'^accounts/', include('django.contrib.auth.urls')),
# url(r'^login/$', auth_views.LoginView, name='login'),
# url(r'^logout/$', auth_views.LogoutView, name='logout'),
url(r'^oauth/', include('social_django.urls', namespace='social')),
url(r'^admin/', admin.site.urls),
url(r'^votes/', include(votes_urls)),
url(r'^opinions/', include(opinions_urls)),
url(r'^docs/', include(docs_urls)),
url(r'^profile/', include(infos_urls)),
url(r'^derby/', include(derby_urls)),
url(r'^events/', include(events_urls)),
]
| 40.181818 | 79 | 0.70871 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 975 | 0.551471 |
2f8c9b6939590e2c0b5c1ffc1236673c73cfa389
| 1,015 |
py
|
Python
|
setup.py
|
yamahigashi/sphinx-git-lowdown
|
3df8ba2bb44c987f9510d45cd31198cfc5249f14
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
yamahigashi/sphinx-git-lowdown
|
3df8ba2bb44c987f9510d45cd31198cfc5249f14
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
yamahigashi/sphinx-git-lowdown
|
3df8ba2bb44c987f9510d45cd31198cfc5249f14
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from setuptools import setup
setup(
name='sphinx-git-lowdown',
version='0.0.1',
url='https://github.com/yamahigashi/sphinx-git-lowdown',
# download_url='http://pypi.python.org/pypi/sphinx-git-lowdown',
license='Apache',
author='yamahigashi',
author_email='[email protected]',
description='Sphinx extension to wrap git changelog',
long_description="",
zip_safe=True,
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Documentation',
'Topic :: Utilities',
],
platforms='any',
include_package_data=True,
install_requires=['Sphinx>=1.1', 'GitPython', 'lowdown'],
# namespace_packages=['sphinx_git_lowdown'],
packages=['sphinx_git_lowdown'],
)
| 30.757576 | 68 | 0.634483 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 624 | 0.614778 |
2f8e310bf9e77d36d1ba6cf080e2e966d1ebdb66
| 63 |
py
|
Python
|
gira_homeserver_api/devices/value_device.py
|
leoyn/gira-homeserver-api
|
7d642413a56078f694518d9189b4b7cc9776482d
|
[
"MIT"
] | 5 |
2020-03-17T12:45:50.000Z
|
2022-03-07T10:55:50.000Z
|
gira_homeserver_api/devices/value_device.py
|
leoyn/gira-homeserver-api
|
7d642413a56078f694518d9189b4b7cc9776482d
|
[
"MIT"
] | 3 |
2020-04-17T09:53:45.000Z
|
2021-01-25T22:14:14.000Z
|
gira_homeserver_api/devices/value_device.py
|
leoyn/gira-homeserver-api
|
7d642413a56078f694518d9189b4b7cc9776482d
|
[
"MIT"
] | 1 |
2020-04-17T06:51:50.000Z
|
2020-04-17T06:51:50.000Z
|
from .device import Device
class ValueDevice(Device):
pass
| 15.75 | 26 | 0.761905 | 35 | 0.555556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2f8eec6be049e9fe4a729f243ebe752e635be903
| 1,756 |
py
|
Python
|
rfim2d/tests/test_param_dict.py
|
lxh3/rfim2d
|
5283d0df492ad20ecef30b17803437ca9155f8b3
|
[
"MIT"
] | null | null | null |
rfim2d/tests/test_param_dict.py
|
lxh3/rfim2d
|
5283d0df492ad20ecef30b17803437ca9155f8b3
|
[
"MIT"
] | null | null | null |
rfim2d/tests/test_param_dict.py
|
lxh3/rfim2d
|
5283d0df492ad20ecef30b17803437ca9155f8b3
|
[
"MIT"
] | null | null | null |
from rfim2d import param_dict
key_dict = {
'A': ['Sigma', 'a', 'b'],
'dMdh': ['hMax', 'eta', 'a', 'b', 'c'],
'joint': ['rScale', 'rc', 'sScale', 'etaScale', 'df',
'lambdaH', 'B', 'C', 'F'],
'Sigma': ['rScale', 'rc', 'sScale', 'df', 'B', 'C'],
'eta': ['rScale', 'rc', 'etaScale', 'lambdaH', 'B', 'F']
}
powerlaw_key_dict = {
'joint': ['rScale', 'rc', 'sScale', 'etaScale', 'sigma', 'betaDelta'],
'Sigma': ['rScale', 'rc', 'sScale', 'sigma'],
'eta': ['rScale', 'rc', 'etaScale', 'betaDelta']
}
def test_split_dict():
adict = {'one': 1, 'two': 2}
keys, values = param_dict.split_dict(adict)
assert keys == ['one', 'two']
assert values == [1, 2]
assert param_dict.split_dict('test') == -1
def test_joint_dict():
keys = ['one', 'two']
values = [1, 2]
values_bad = [1, 2, 3]
assert isinstance(param_dict.join_dict(keys, values), dict)
assert param_dict.join_dict(keys, values_bad) == -1
def test_get_keys():
keys1 = param_dict.get_keys('A')
assert param_dict.get_keys('A', func_type='power law') == -1
keys3 = param_dict.get_keys('Sigma')
keys4 = param_dict.get_keys('Sigma', func_type='power law')
print(str(keys1)+str(keys3)+str(keys4))
def test_separate_params():
keys = param_dict.get_keys('joint')
values = [1. for i in range(len(keys))]
params = param_dict.join_dict(keys,values)
pS, pe = param_dict.separate_params(params)
return pS, pe
def test_generate_and_split_dict():
params = [1.0, 1.0]
keys = ['A', 'B', 'C']
fixed_dict = dict([('C', 0.)])
new_dict = param_dict.generate_dict_with_fixed_params(params, keys, fixed_dict)
vals = param_dict.split_dict_with_fixed_params(new_dict, fixed_dict)
| 30.807018 | 83 | 0.600228 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 404 | 0.230068 |
2f8f9cde054908beafa4eaad551bd52319f17a78
| 80 |
py
|
Python
|
main.py
|
Zerex1/Test
|
eb1030a90961c18d7aac5f3ca63e3a540756349b
|
[
"MIT"
] | null | null | null |
main.py
|
Zerex1/Test
|
eb1030a90961c18d7aac5f3ca63e3a540756349b
|
[
"MIT"
] | 7 |
2022-02-19T16:54:51.000Z
|
2022-02-20T21:52:54.000Z
|
main.py
|
Scarloran/Error
|
07a1d70fb69b0c8ae45f500c92ad04eb59ed26f5
|
[
"MIT"
] | null | null | null |
print('hi all')
print('hii')
print('hello world')
print('hi')
print('hello')
| 8.888889 | 20 | 0.625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.4625 |
2f8fc48275f4387b3e97bc8ce0893dd23b2af531
| 2,250 |
py
|
Python
|
core.py
|
sure-fire/derbypi
|
681e266d40fa238effe5de54bd4bbff963db028e
|
[
"MIT"
] | 1 |
2016-10-31T17:34:40.000Z
|
2016-10-31T17:34:40.000Z
|
core.py
|
sure-fire/derbypi
|
681e266d40fa238effe5de54bd4bbff963db028e
|
[
"MIT"
] | null | null | null |
core.py
|
sure-fire/derbypi
|
681e266d40fa238effe5de54bd4bbff963db028e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2015 Aaron Soto
# Released under the MIT license
# Incorporates libraries from AdaFruit, also released under the MIT license
# TODO functions:
# display_progress(percentage,[title]) - display a progress bar (0-1,1-100) / check for float
# display_error(message,[timeout],[title]) - change backlight color, display error
import glob # module_scan(): to find files
import imp # module_run(): to load and run plugin modules
import traceback # module_run(): to gather a traceback during exceptions
import lcd # module_run(): to print fatal errors to the screen
import time # log(): to print the epoch time in log messages
import os # main(): to check UID for root
import sys # main(): to exit with status code
import subprocess # shutdown(): to call the 'halt' command
def module_scan(path):
  # Identify all modules under `path`, sorted by time modified (newest first)
  # NOTE TO FUTURE SELF: Use "touch -m" to re-order the menu
  modules = sorted(glob.glob(os.path.join(path, "modules/*/main.py")), key=os.path.getmtime)
modules.reverse()
moduleList = []
for module in modules:
log("FOUND MODULE: " + module)
moduleList.append(module.split('/')[1])
return moduleList
def module_run(path):
# Accepts path as "modules/[name]/main.py", "modules/[name]/", or "[name]"
# where [name] is the name of the module to run.
# Imports [name] and runs the init() function within 'modules/[name]/main.py'
if path.find('/') > 0:
name = path.split('/')[1]
else:
name = path
path = "./modules/" + name + "/main.py"
try:
log("LOADING MODULE: " + name + " (" + path + ")")
module = imp.load_source(name, path)
module.init()
except:
log("ERROR: SOMETHING HAPPENED IN THE " + name + " MODULE!")
trace = traceback.format_exc()
log(trace)
err = sys.exc_info()[0]
err = str(err).split("'")[1].split('.')[1]
lcd.showError(err,redraw=False)
sys.exit(-1)
def log(text):
print str(time.time()) + ": " + text
f = open('/var/log/wiper', 'a+')
f.write(str(time.time()) + ": " + text + "\r\n")
f.flush()
f.close()
def error(text):
  log("ERROR: " + text)
if __name__=="__main__":
if "wipe" in module_scan("."):
module_run("modules/wipe2/main.py")
| 33.088235 | 107 | 0.651556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,264 | 0.561778 |
2f90a5c7e193988dc43d8564c22a87b2b8ba9258
| 753 |
py
|
Python
|
populator/exercise_splitter.py
|
Calvibert/workout-generator
|
0c905a2132be4e0f440d8ecbfaba71592c0fe9e2
|
[
"MIT"
] | null | null | null |
populator/exercise_splitter.py
|
Calvibert/workout-generator
|
0c905a2132be4e0f440d8ecbfaba71592c0fe9e2
|
[
"MIT"
] | null | null | null |
populator/exercise_splitter.py
|
Calvibert/workout-generator
|
0c905a2132be4e0f440d8ecbfaba71592c0fe9e2
|
[
"MIT"
] | null | null | null |
# Upper-lower splitter for the exercise list
import sys
import exercise_populator_config as conf
print('Enter the file name: ')
filename = sys.stdin.readline().rstrip('\n')
upper = conf.CONST_MUSCLES['upper']
lower = conf.CONST_MUSCLES['lower']
uex = []
lex = []
with open(filename, 'r') as f:
    for ex in f:
        # Lines look like "<name>, <muscle group>"; everything after the
        # comma-and-space is the group.
        i = ex.find(',')
        t = ex[i+2:].rstrip()
        if t in upper:
            uex.append(ex.rstrip())
            continue
        lex.append(ex.rstrip())
upper_filename = 'upper.txt'
lower_filename = 'lower.txt'
# Write each split to its own file directly rather than redirecting sys.stdout.
with open(upper_filename, 'w+') as f:
    for i in uex:
        f.write(i + '\n')
with open(lower_filename, 'w+') as f:
    for i in lex:
        f.write(i + '\n')
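# Input format sketch (an assumption inferred from the ", " split above; the
# group names come from exercise_populator_config.CONST_MUSCLES):
#   Bench Press, chest
#   Squat, quads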
| 16.733333 | 44 | 0.648074 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 117 | 0.155378 |
2f9197c39f4c2b4b9b35a18f55ab839142699e80
| 4,893 |
py
|
Python
|
fbpmp/pcf/mpc/emp.py
|
benliugithub/fbpcs
|
7af984264428058645847135026d474d7e28144e
|
[
"MIT"
] | null | null | null |
fbpmp/pcf/mpc/emp.py
|
benliugithub/fbpcs
|
7af984264428058645847135026d474d7e28144e
|
[
"MIT"
] | null | null | null |
fbpmp/pcf/mpc/emp.py
|
benliugithub/fbpcs
|
7af984264428058645847135026d474d7e28144e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import asyncio
import logging
import os
import pathlib
import shutil
from typing import Dict, List
from fbpmp.pcf import call_process
from fbpmp.pcf.errors import MPCRuntimeError, MPCStartupError
from fbpmp.pcf.games import (
ConversionLift,
ConverterLift,
SecretShareConversionLift,
SecretShareConverterLift,
)
from fbpmp.pcf.mpc.base import ServerClientMPCFramework
from fbpmp.pcf.structs import Game, Metric, Status
EMP_GAME_DIR = pathlib.Path(os.environ.get("EMP_GAME_DIR", os.getcwd()))
MAX_ROWS_PER_PARTITION = 1000000 # 1 million
class EmpMPCFramework(ServerClientMPCFramework):
"""
Implementation of EMP SH2PC MPC Framework
https://github.com/emp-toolkit/emp-sh2pc
"""
SUPPORTED_GAMES: List[Game] = [
ConversionLift,
ConverterLift,
SecretShareConversionLift,
SecretShareConverterLift,
]
async def prepare_input(self) -> Status:
# We purposefully do not want to use the base class's prepare_input
# method since it will sort the input which breaks the secret_share
# game logic (since IDs won't appear to match).
return Status.OK
async def run_mpc(self) -> Dict[str, Dict[Metric, int]]:
"""
Run the MPC game as the given player.
"""
logger = logging.getLogger(
f"EmpMPCFramework <Game:{self.game.name}> <{self.player.role!s}>"
)
game_path = EMP_GAME_DIR / self.game.base_game
game_path_absolute = game_path.absolute()
self._check_executable(game_path_absolute)
if len(self.other_players) != 0:
# pre_setup should have validated this, but we put another check
# here just to reinforce the invariant.
if len(self.other_players) != 1:
raise ValueError(
f"Must be run with exactly one other player, not {len(self.other_players)}"
)
other_player = self.other_players[0]
ip_address = other_player.ip_address
port = other_player.port
else:
ip_address = self.player.ip_address
port = self.player.port
cmd = (
f"{game_path_absolute} --role={self.player.id}"
f" --data_directory={self.input_file.parent.absolute()}"
f" --input_filename={self.input_file.name}"
f" --server_ip={ip_address}"
f" --port={port}"
f" --output_filename={self.output_file}"
)
if self.output_s3_path:
cmd = cmd + f" --output_s3_path={self.output_s3_path}"
cmd = cmd.split(" ") + self.game.extra_args
self.base_logger.debug(f"running command: {cmd}")
try:
operating_dir = pathlib.Path(os.getcwd())
result = await asyncio.wait_for(
call_process.run_command(cmd, operating_dir, logger=logger),
timeout=self.run_timeout,
)
except Exception as e:
# TODO: Should log e and raise an MPCRuntimeError instead
raise e
if result.returncode != 0:
raise MPCRuntimeError(result.returncode)
        # At this point, assuming everything went correctly, we should have a
        # file with one result per line.
result_filepath = self.input_file.parent / self.output_file
all_results: Dict[str, Dict[Metric, int]] = {}
with open(result_filepath) as f:
for line in f.readlines():
if len(line) == 0:
# For some reason, we sometimes read an empty line from the
# output of the EMP MPC program in the result file.
continue
parts = line.strip().split(",")
feature_group = parts[0]
contents = [int(field) for field in parts[1:]]
all_results[feature_group] = {
metric: value
for metric, value in zip(self.game.output_metrics, contents)
}
return all_results
def _check_executable(self, absolute_path: pathlib.Path) -> None:
self.base_logger.debug(f"Checking {absolute_path} is executable.")
if shutil.which(absolute_path) is None:
raise MPCStartupError(f"Executable {absolute_path} not found.")
def _check_file_exists(self, absolute_path: pathlib.Path) -> None:
self.base_logger.debug(f"Checking {absolute_path} exists.")
if not os.path.isfile(absolute_path):
raise MPCStartupError(f"File {absolute_path} not found.")
@staticmethod
def get_max_rows_per_partition() -> int:
return MAX_ROWS_PER_PARTITION
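# Illustrative sketch of the per-line result format parsed by run_mpc() above
# (the feature group and values are made up):
#
#   test_group,1234,56
#
# which parses to {"test_group": {metric_0: 1234, metric_1: 56}}, with the
# metrics taken in order from self.game.output_metrics.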
| 35.977941 | 95 | 0.625179 | 4,132 | 0.844472 | 0 | 0 | 96 | 0.01962 | 3,176 | 0.649091 | 1,529 | 0.312487 |
2f939a72fbb64e7dc423500b36e371b897a8fc9b
| 2,168 |
py
|
Python
|
01_Plots/plot_time_differences.py
|
awareseven/Reproducibility-and-Replicability-of-Web-Measurement-Studies
|
38953c70a9ab03e1d29e4f9c6da13ffcaaeac84b
|
[
"Apache-2.0"
] | 3 |
2022-01-27T07:36:24.000Z
|
2022-02-22T09:32:53.000Z
|
01_Plots/plot_time_differences.py
|
awareseven/Reproducibility-and-Replicability-of-Web-Measurement-Studies
|
38953c70a9ab03e1d29e4f9c6da13ffcaaeac84b
|
[
"Apache-2.0"
] | null | null | null |
01_Plots/plot_time_differences.py
|
awareseven/Reproducibility-and-Replicability-of-Web-Measurement-Studies
|
38953c70a9ab03e1d29e4f9c6da13ffcaaeac84b
|
[
"Apache-2.0"
] | 1 |
2022-02-02T08:21:39.000Z
|
2022-02-02T08:21:39.000Z
|
import matplotlib.font_manager as font_manager
import matplotlib.pyplot as plt
import pandas as pd
import os
# Read the data
path = os.path.join(os.getcwd(), "results")
df = pd.read_csv(os.path.join(path, "tracker_AND_cookies.csv"))
x = df["day"]
y1 = df["total_tracker"]
y2 = df["tracker_distinct"]
y3 = df["is_session"]
# Figure, font, and legend styling
fig, ax = plt.subplots(1, figsize=(7, 4))
legend_properties = {'weight': 'bold', 'size': 9}
font = font_manager.FontProperties(family='sans-serif',
weight='bold',
style='normal',
size=14)
plt.legend(loc='best', frameon=False, prop=font)
plt.xticks(weight='bold', fontname='sans-serif', size=14)
plt.yticks(weight='bold', fontname='sans-serif', size=14)
plt.xlabel("Measurement point", weight='bold', fontname='sans-serif', size=14)
# Add first y-axis (Number of tracking requests)
ax.plot(x, y1, color="#999999", label="Number of tracking requests", marker='o', linestyle='dashed')
ax.set_ylabel('Number of tracking requests')
ax.legend(loc=2, prop=legend_properties)
plt.ylabel("Number of tracking requests", weight='bold', fontname='sans-serif', size=14)
# Add second y-axis
ax2 = ax.twinx() # instantiate a second axes that shares the same x-axis
ax2.plot(x, y2, color="#555555", label="Number of distinct trackers", marker='x', linestyle='solid')
ax2.set_ylabel('Number of distinct trackers')
ax2.set_ylim(3500, 4200)
ax2.legend(loc=1, prop=legend_properties)
plt.ylabel("Number of distinct trackers", weight='bold', fontname='sans-serif', size=14)
plt.yticks(weight='bold', fontname='sans-serif')
# Save the plot to disk
plt.grid(False)
#plt.show()
plt.savefig(os.path.join(path, "04_long_term_tracker_cookies.pdf"), dpi=600,
            transparent=False, bbox_inches='tight', format="pdf")
# Simple min / max calculations
max_value = y1.max()
min_value = y1.min()
max_day = y1.index[df['total_tracker'] == max_value].tolist()
min_day = y1.index[df['total_tracker'] == min_value].tolist()
print("Max at: ", max_day, "max value: ", max_value)
print("Min at: ", min_day, "min value: ", min_value)
print("std:", y1.std())
| 37.37931 | 100 | 0.683579 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 823 | 0.379613 |
2f9b8862aa5b57db0e4c23d664291957a9fbe6a4
| 379 |
py
|
Python
|
task_function.py
|
feihong/asyncio-tasks-talk
|
a4ef4e7246906d89aab81db69b7cba0c76258288
|
[
"CC-BY-4.0"
] | 1 |
2016-09-10T02:47:26.000Z
|
2016-09-10T02:47:26.000Z
|
task_function.py
|
feihong/asyncio-tasks-talk
|
a4ef4e7246906d89aab81db69b7cba0c76258288
|
[
"CC-BY-4.0"
] | null | null | null |
task_function.py
|
feihong/asyncio-tasks-talk
|
a4ef4e7246906d89aab81db69b7cba0c76258288
|
[
"CC-BY-4.0"
] | null | null | null |
import asyncio
async def long_task(writer):
total = 100
for i in range(1, total+1):
writer.write(type='progress', value=i, total=total)
print(i)
await asyncio.sleep(0.05)
class Writer:
def write(self, **kwargs):
print(kwargs)
coroutine = long_task(Writer())
asyncio.ensure_future(coroutine)
asyncio.get_event_loop().run_forever()
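# Equivalent sketch on Python 3.7+ that exits once the task finishes,
# instead of running the loop forever:
#
#   asyncio.run(long_task(Writer()))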
| 18.95 | 59 | 0.664908 | 66 | 0.174142 | 0 | 0 | 0 | 0 | 187 | 0.493404 | 10 | 0.026385 |
2f9feffcaa4a8285a2abe800ba2837e256eb6e2b
| 2,636 |
py
|
Python
|
nebula_utils/nebula_utils/persist_compute/utils.py
|
threathunterX/python_lib
|
e2d4052de04c82cb7bccd08042f28db824cab442
|
[
"Apache-2.0"
] | 2 |
2019-03-17T04:03:08.000Z
|
2019-05-01T09:42:23.000Z
|
nebula_utils/nebula_utils/persist_compute/utils.py
|
threathunterX/python_lib
|
e2d4052de04c82cb7bccd08042f28db824cab442
|
[
"Apache-2.0"
] | null | null | null |
nebula_utils/nebula_utils/persist_compute/utils.py
|
threathunterX/python_lib
|
e2d4052de04c82cb7bccd08042f28db824cab442
|
[
"Apache-2.0"
] | 4 |
2019-06-24T05:47:24.000Z
|
2020-09-29T05:00:31.000Z
|
# -*- coding: utf-8 -*-
Group_Key_To_Dimension = dict(
c_ip = 'ip',
uid = 'user',
page = 'page',
did = 'did',
# c_ipc = 'ipc',
)
Avail_Dimensions = tuple(Group_Key_To_Dimension.values())
# dimension : variable_name (the variable used to fetch the click count)
Click_Variable_Names = dict(
ip='ip__visit__dynamic_count__1h__slot',
did='did__visit__dynamic_count__1h__slot',
user='user__visit__dynamic_count__1h__slot',
page='page__visit__dynamic_count__1h__slot'
)
IP_Stat_Type = 2
IPC_Stat_Type = 3
DID_Stat_Type = 4
UID_Stat_Type = 5
PAGE_Stat_Type = 6
Dimension_Stat_Prefix = dict(
ip = IP_Stat_Type,
ipc = IPC_Stat_Type,
did = DID_Stat_Type,
user = UID_Stat_Type,
page = PAGE_Stat_Type,
)
Category = ['VISITOR', 'ACCOUNT', 'ORDER',
'TRANSACTION', 'MARKETING', 'OTHER']
Scene_Variable_Names = dict(
VISITOR='total__visit__visitor_incident_count__1h__slot',
ACCOUNT='total__visit__account_incident_count__1h__slot',
ORDER='total__visit__order_incident_count__1h__slot',
TRANSACTION='total__visit__transaction_incident_count__1h__slot',
MARKETING='total__visit__marketing_incident_count__1h__slot',
OTHER='total__visit__other_incident_count__1h__slot'
)
def get_dimension(group_key_name):
"""
    Return the dimension key used in the statistics Stat_Dict for the given groupby key.
"""
return Group_Key_To_Dimension.get(group_key_name, None)
def dict_merge(src_dict, dst_dict):
"""
    Accumulate the values of dst_dict into src_dict, key by key.
    Behaviour for values of different types:
>>> s = dict(a=1,b='2')
>>> d = {'b': 3, 'c': 4}
>>> dict_merge(s,d)
>>> t = {'a': 1, 'b': 5, 'c': 4}
>>> s == t
True
>>> s = dict(a=set([1,2]), )
>>> d = dict(a=set([2, 3]),)
>>> dict_merge(s,d)
>>> t = {'a':set([1,2,3])}
>>> s == t
True
>>> s = dict(a={'a':1, 'b':2})
>>> d = dict(a={'a':1, 'b':2})
>>> dict_merge(s, d)
>>> t = dict(a={'a':2, 'b':4})
>>> s == t
True
"""
for k,v in dst_dict.iteritems():
        if k not in src_dict:
src_dict[k] = v
else:
if isinstance(v, (basestring, int, float)):
src_dict[k] = int(v) + int(src_dict[k])
elif isinstance(v, set):
assert type(v) == type(src_dict[k]), 'key %s,dst_dict value: %s type: %s, src_dict value: %s type:%s' % (k, v, type(v), src_dict[k], type(src_dict[k]))
src_dict[k].update(v)
elif isinstance(v, dict):
assert type(v) == type(src_dict[k]), 'key %s,dst_dict value: %s type: %s, src_dict value: %s type:%s' % (k, v, type(v), src_dict[k], type(src_dict[k]))
dict_merge(src_dict[k], v)
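# Usage sketch for this module's lookup tables and helpers (return values
# follow the definitions above):
#
#   get_dimension('c_ip')        # -> 'ip'
#   Dimension_Stat_Prefix['ip']  # -> 2 (IP_Stat_Type)
#   Click_Variable_Names['ip']   # -> 'ip__visit__dynamic_count__1h__slot'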
| 28.967033 | 167 | 0.60091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,351 | 0.496326 |
85ca2de785e536315d8103867a6f6f3babb8d84b
| 346 |
py
|
Python
|
a301/scripts/check_nearir.py
|
Pearl-Ayem/ATSC_Notebook_Data
|
c075d166c235ac4e68a4b77750e02b2a5e77abd0
|
[
"MIT"
] | null | null | null |
a301/scripts/check_nearir.py
|
Pearl-Ayem/ATSC_Notebook_Data
|
c075d166c235ac4e68a4b77750e02b2a5e77abd0
|
[
"MIT"
] | null | null | null |
a301/scripts/check_nearir.py
|
Pearl-Ayem/ATSC_Notebook_Data
|
c075d166c235ac4e68a4b77750e02b2a5e77abd0
|
[
"MIT"
] | null | null | null |
from pyhdf.SD import SD, SDC
from pathlib import Path
import numpy as np
import a301
m5_file = a301.data_dir / Path('myd05_l2_10_7.hdf')
the_file = SD(str(m5_file), SDC.READ)
wv_nearir_data = the_file.select('Water_Vapor_Near_Infrared').get()
the_file.end()  # close the HDF file
positive = wv_nearir_data > 0.
print(f'found {np.sum(positive.flat)} positive pixels')
| 24.714286 | 67 | 0.768786 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 94 | 0.271676 |
85cb5db8536dff080788a2b44e8c7498ab0bd3f3
| 2,649 |
py
|
Python
|
course_grader/dao/message.py
|
uw-it-aca/gradepage
|
7059d715cc112ad0ecb0e5012f716e525ee7b3bc
|
[
"Apache-2.0"
] | 1 |
2017-01-29T09:52:06.000Z
|
2017-01-29T09:52:06.000Z
|
course_grader/dao/message.py
|
uw-it-aca/gradepage
|
7059d715cc112ad0ecb0e5012f716e525ee7b3bc
|
[
"Apache-2.0"
] | 287 |
2017-03-09T00:17:20.000Z
|
2022-01-08T00:36:34.000Z
|
course_grader/dao/message.py
|
uw-it-aca/gradepage
|
7059d715cc112ad0ecb0e5012f716e525ee7b3bc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from course_grader.dao import current_datetime, display_datetime
from course_grader.dao.term import (
next_gradable_term, previous_gradable_term, submission_deadline_warning,
is_grading_period_open)
from persistent_message.models import Message
def get_open_grading_messages(term, params={}):
tags = ["is_open"]
rel_grade_submission_deadline = ""
if submission_deadline_warning(term):
tags.append("just_before_deadline")
delta = term.grade_submission_deadline - current_datetime()
seconds_remaining = (delta.days * 24 * 3600) + delta.seconds
if seconds_remaining < (17 * 3600):
rel_grade_submission_deadline = "5:00 PM today"
elif seconds_remaining < (41 * 3600):
rel_grade_submission_deadline = "5:00 PM tomorrow"
params.update({
"year": term.year,
"quarter": term.get_quarter_display(),
"grade_submission_deadline": term.grade_submission_deadline,
"rel_grade_submission_deadline": rel_grade_submission_deadline,
})
return _get_persistent_messages(tags, params)
def get_closed_grading_messages(params={}):
prev_term = previous_gradable_term()
next_term = next_gradable_term()
if next_term.quarter == next_term.SUMMER:
next_open_date = next_term.aterm_grading_period_open
else:
next_open_date = next_term.grading_period_open
params.update({
"prev_year": prev_term.year,
"prev_quarter": prev_term.get_quarter_display(),
"prev_window_close_date": display_datetime(
prev_term.grade_submission_deadline),
"next_year": next_term.year,
"next_quarter": next_term.get_quarter_display(),
"next_window_open_date": display_datetime(next_open_date),
"grade_submission_deadline": prev_term.grade_submission_deadline,
})
if (next_term.first_day_quarter < current_datetime().date()):
tags = ["is_closed"]
else:
tags = ["just_after_deadline"]
return _get_persistent_messages(tags, params)
def get_messages_for_term(term, params={}):
if is_grading_period_open(term):
return get_open_grading_messages(term, params)
else:
return get_closed_grading_messages(params)
def _get_persistent_messages(tags, params):
ret = {"messages": []}
for message in Message.objects.active_messages(tags=tags):
if "message_level" not in ret:
ret["message_level"] = message.get_level_display().lower()
ret["messages"].append(message.render(params))
return ret
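# Shape of the dict returned by the helpers above (a sketch; the message
# text and level are illustrative):
#
#   {
#       "message_level": "info",
#       "messages": ["<p>Grades for Spring 2021 are due ...</p>"],
#   }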
| 35.797297 | 76 | 0.710834 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 430 | 0.162325 |
85cb84708ec1159fcbafba9f83ab692e7fdf9668
| 4,541 |
py
|
Python
|
swn/file.py
|
wkitlasten/surface-water-network
|
fd36ad5ee3fbd7a1107f0c4c376c4af1295b5b1b
|
[
"BSD-3-Clause"
] | 18 |
2019-12-04T14:59:47.000Z
|
2021-12-21T12:34:28.000Z
|
swn/file.py
|
jonathanqv/surface-water-network
|
362217c897345042464564440be08b34f6f0915d
|
[
"BSD-3-Clause"
] | 17 |
2020-04-15T04:49:49.000Z
|
2022-03-04T05:22:17.000Z
|
swn/file.py
|
jonathanqv/surface-water-network
|
362217c897345042464564440be08b34f6f0915d
|
[
"BSD-3-Clause"
] | 6 |
2020-05-07T23:56:12.000Z
|
2022-01-08T16:56:32.000Z
|
"""File reading/writing helpers."""
__all__ = ["topnet2ts", "gdf_to_shapefile"]
import geopandas
import pandas as pd
from swn.logger import get_logger, logging
def topnet2ts(nc_path, varname, mult=None, log_level=logging.INFO):
"""Read TopNet data from a netCDF file into a pandas.DataFrame timeseries.
User may need to multiply DataFrame to convert units.
Parameters
----------
nc_path : str
File path to netCDF file
varname : str
Variable name in netCDF file to read
mult : float, optional
Multiplier applied to dataset, which preserves dtype. For example,
to convert from "meters3 second-1" to "meters3 day-1", use 86400.
    log_level : int, optional
        Level used by logging module; default is 20 (logging.INFO)
Returns
-------
pandas.DataFrame
Where columns is rchid and index is DatetimeIndex.
"""
try:
from netCDF4 import Dataset
except ImportError:
raise ImportError('function requires netCDF4')
try:
from cftime import num2pydate as n2d
except ImportError:
from cftime import num2date as n2d
logger = get_logger("topnet2ts", log_level)
logger.info("reading file: %s", nc_path)
with Dataset(nc_path, "r") as nc:
nc.set_auto_mask(False)
var = nc.variables[varname]
logger.info("variable %s:\n%s", varname, var)
# Evaluate dimensions
dim_has_time = False
dim_has_nrch = False
dim_ignore = []
varslice = [Ellipsis] # take first dimensions
for name, size in zip(var.dimensions, var.shape):
if name == "time":
dim_has_time = True
elif name == "nrch":
dim_has_nrch = True
elif size == 1:
dim_ignore.append(name)
varslice.append(0)
if not dim_has_time:
logger.error("no 'time' dimension found")
if not dim_has_nrch:
logger.error("no 'nrch' dimension found")
if dim_ignore:
logger.info("ignoring size 1 dimensions: %s", dim_ignore)
dat = var[tuple(varslice)]
if len(dat.shape) != 2:
logger.error("expected 2 dimensions, found shape %s", dat.shape)
if dim_has_time and var.dimensions.index("time") == 1:
dat = dat.T
if mult is not None and mult != 1.0:
dat *= mult
df = pd.DataFrame(dat)
df.columns = nc.variables["rchid"]
time_v = nc.variables["time"]
df.index = pd.DatetimeIndex(n2d(time_v[:], time_v.units))
logger.info("data successfully read")
return df
def gdf_to_shapefile(gdf, shp_fname, **kwargs):
"""Write any GeoDataFrame to a shapefile.
This is a workaround to the to_file method, which cannot save
GeoDataFrame objects with other data types, such as set.
Parameters
----------
gdf : geopandas.GeoDataFrame
GeoDataFrame to export
shp_fname : str
File path for output shapefile
kwargs : mapping
Keyword arguments passed to to_file and to fiona.open
Returns
-------
None
"""
if not isinstance(gdf, geopandas.GeoDataFrame):
raise ValueError("expected gdf to be a GeoDataFrame")
gdf = gdf.copy()
geom_name = gdf.geometry.name
    for col, dtype in gdf.dtypes.items():
if col == geom_name:
continue
if dtype == object:
is_none = gdf[col].map(lambda x: x is None)
gdf[col] = gdf[col].astype(str)
gdf.loc[is_none, col] = ""
elif dtype == bool:
gdf[col] = gdf[col].astype(int)
# potential names that need to be shortened to <= 10 characters for DBF
colname10 = {
"to_segnum": "to_seg",
"from_segnums": "from_seg",
"num_to_outlet": "num_to_out",
"dist_to_outlet": "dst_to_out",
"stream_order": "strm_order",
"upstream_length": "upstr_len",
"upstream_area": "upstr_area",
"inflow_segnums": "inflow_seg",
"zcoord_count": "zcoord_num",
"zcoord_first": "zcoordfrst",
"zcoord_last": "zcoordlast",
"strtop_incopt": "stpincopt",
"prev_ibound": "previbound",
"prev_idomain": "prevdomain",
}
for k, v in list(colname10.items()):
assert len(v) <= 10, v
if k == v or k not in gdf.columns:
del colname10[k]
gdf.rename(columns=colname10).reset_index(drop=False)\
.to_file(str(shp_fname), **kwargs)
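# Usage sketch (the netCDF path, variable name, and shapefile path are
# hypothetical):
#
#   import swn.file
#   flow = swn.file.topnet2ts('streamq_daily.nc', 'mod_flow', mult=86400.0)
#   swn.file.gdf_to_shapefile(segments_gdf, 'segments.shp')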
| 32.435714 | 78 | 0.602731 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,946 | 0.42854 |
85cc4f7ba3e6215d40e3cc9668b7b4fc514ab919
| 5,752 |
py
|
Python
|
assignment4/src/clean_documents.py
|
jschmidtnj/cs584
|
d1d4d485d1fac8743cdbbc2996792db249dcf389
|
[
"MIT"
] | null | null | null |
assignment4/src/clean_documents.py
|
jschmidtnj/cs584
|
d1d4d485d1fac8743cdbbc2996792db249dcf389
|
[
"MIT"
] | null | null | null |
assignment4/src/clean_documents.py
|
jschmidtnj/cs584
|
d1d4d485d1fac8743cdbbc2996792db249dcf389
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
data clean for books (clean_documents.py)
note - this is the same as in assignment 1 for the most part
"""
import re
from ast import literal_eval
from os.path import basename, splitext, exists
from typing import Optional, List
from utils import get_glob, file_path_relative
from variables import part_1_data_folder, clean_data_folder, class_key, label_key, paragraph_key
from loguru import logger
from books import BookType, start_end_map, class_map
import pandas as pd
from typing import Tuple
import yaml
title_split: str = 'title: '
author_split: str = 'author: '
start_book: str = 'start of this project gutenberg ebook'
the_end: str = 'the end'
end_book: str = 'end of this project gutenberg ebook'
chapter: str = 'Chapter '
adventure: str = 'ADVENTURE '
multi_quote_identifier: str = '"'
min_line_len: int = 6 # line discarded if less than this number of characters
default_file_name: str = f'{clean_data_folder}/documents.csv'
classes_file_name: str = f'{clean_data_folder}/doc_classes.txt'
whitespace_regex = re.compile(r"\s+")
def normalize_sentence(sentence: str) -> str:
"""
remove punctuation, return list of words
"""
sentence = whitespace_regex.sub(' ', sentence).strip()
return sentence
def clean(clean_data_basename: Optional[str] = default_file_name) -> Tuple[pd.DataFrame, List[BookType]]:
"""
data cleaning
"""
class_count: int = 0
label_list: List[BookType] = []
get_from_disk = clean_data_basename is not None
if not get_from_disk:
clean_data_basename = default_file_name
clean_data_path = file_path_relative(clean_data_basename)
classes_path = file_path_relative(classes_file_name)
if get_from_disk and exists(clean_data_path) and exists(classes_path):
logger.info(f'reading data from {clean_data_path}')
data = pd.read_csv(clean_data_path, converters={
paragraph_key: literal_eval})
label_list_enum: Optional[List[BookType]] = None
with open(classes_path) as classes_file:
label_list = yaml.load(classes_file, Loader=yaml.FullLoader)
label_list_enum = [BookType(elem) for elem in label_list]
return data, label_list_enum
data: pd.DataFrame = pd.DataFrame()
# preprocess data and construct examples
found_files: bool = False
for file_path in get_glob(f'{part_1_data_folder}/*.txt'):
found_files = True
file_name: str = basename(splitext(file_path)[0])
logger.info(f'processing {file_name}')
title: Optional[str] = None
book_key: Optional[BookType] = None
book_started: bool = False
paragraphs: List[List[str]] = []
num_newline_count: int = 0
line_number: int = 0
with open(file_path, 'r') as current_file:
while True:
line = current_file.readline()
line_number += 1
line_trim: Optional[str] = None
if line:
line_trim = line.strip()
if not book_started and \
((line_trim is not None and line_trim.startswith(start_book))
or (book_key is not None and line_number >= start_end_map[book_key].start)):
book_started = True
if line_trim is None or line_trim.startswith(end_book) \
or line_trim == the_end or \
(book_key is not None and line_number >= start_end_map[book_key].end):
# done with reading the file
break
if not book_started:
if title is None and line_trim.startswith(title_split):
title = line_trim.split(title_split)[1]
logger.info(f'title: {title}')
if book_key is None and line_trim.startswith(author_split):
author: str = line_trim.split(author_split)[1]
logger.info(f'author: {author}')
book_key = BookType(author.split(' ')[-1])
else:
if len(line_trim) < min_line_len or \
                            line.startswith(chapter) or line.startswith(adventure):
num_newline_count += 1
else:
                        multi_line_quotes = len(paragraphs) > 0 \
                            and line_trim.startswith(multi_quote_identifier) \
                            and paragraphs[-1][0].startswith(multi_quote_identifier)
if len(paragraphs) == 0 or \
(num_newline_count > 0 and not multi_line_quotes):
paragraphs.append([])
num_newline_count = 0
paragraphs[-1].append(line_trim)
if not found_files:
raise RuntimeError('no files found')
if book_key is None:
raise RuntimeError('no book key found')
class_name = class_map[book_key]
logger.info(
f'number of paragraphs in class "{class_name}": {len(paragraphs)}')
paragraphs = [[normalize_sentence(sentence) for sentence in paragraph] for paragraph in paragraphs]
data = pd.concat([data, pd.DataFrame({
paragraph_key: paragraphs,
label_key: [class_name] * len(paragraphs),
class_key: class_count
})], ignore_index=True)
label_list.append(book_key)
class_count += 1
data.to_csv(clean_data_path, index=False)
with open(classes_path, 'w') as classes_file:
label_list_str = [elem.name for elem in label_list]
yaml.dump(label_list_str, classes_file)
return data, label_list
if __name__ == '__main__':
clean()
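# Sketch of the DataFrame returned by clean(): one row per paragraph, with
# columns named by paragraph_key, label_key and class_key from variables
# (the row below is illustrative):
#
#   paragraph                          label   class
#   ['It was a dark night.', '...']    Doyle   0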
| 39.129252 | 107 | 0.619784 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 801 | 0.139256 |
85ccd6d8d9bc17b243d312e04343cd6c75bdd27f
| 6,041 |
py
|
Python
|
miniproject/api/organization/views.py
|
dandy7373/HR_web
|
65dd80159c7e3113961d55ef126b7df75c7bda13
|
[
"MIT"
] | null | null | null |
miniproject/api/organization/views.py
|
dandy7373/HR_web
|
65dd80159c7e3113961d55ef126b7df75c7bda13
|
[
"MIT"
] | null | null | null |
miniproject/api/organization/views.py
|
dandy7373/HR_web
|
65dd80159c7e3113961d55ef126b7df75c7bda13
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from rest_framework.generics import RetrieveAPIView,CreateAPIView
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.status import HTTP_200_OK, HTTP_400_BAD_REQUEST,HTTP_201_CREATED
from rest_framework.views import APIView
from .models import UserOrganization
from api.individual.models import Userprofile
from .serializers import UserOrganizationRegistrationSerializer,OrganizationLoginSerializer, OrganizationSerializer
from bson import ObjectId
class OrganizationLoginView(APIView):
    permission_classes=[AllowAny]
def get_object(self):
print(self.request.data)
return UserOrganization.objects.all()
def post(self,request):
success={"success":"True"}
failure={"success":"False"}
try :
obj=UserOrganization.objects.get(email=str(request.data['email']))
if obj.password==str(request.data['password']):
return Response(success,status=HTTP_200_OK)
else:
return Response(failure,status=HTTP_400_BAD_REQUEST)
except:
return Response(failure,status=HTTP_400_BAD_REQUEST)
class OrganizationRegistrationView(APIView):
permission_classes=[AllowAny]
def post(self,request,*args,**kwargs):
        if 'email' not in request.data or 'password' not in request.data or 'name' not in request.data or 'name_org' not in request.data:
return Response({"success":False},status=HTTP_400_BAD_REQUEST)
print(str(request.data['email']))
dic={}
for i in request.data:
dic.update({i:str(request.data[i])})
user=UserOrganization.objects.create(**dic)
response = {
"success": "True",
"status code": HTTP_200_OK,
"message": "User registered successfully",
}
return Response(response, status=HTTP_200_OK)
class ApproveLeaveView(APIView):
permission_classes=[AllowAny]
def post(self,request,*args,**kwargs):
user=UserOrganization.objects.get(email=request.data['to_email'])
individual=Userprofile.objects.get(email=request.data['from_email'])
leaves=list(user.Leaves_to_be_approved)
leave=leaves[int(request.data['index'])]['completed']
leaves.pop(int(request.data['index']))
user.Leaves_to_be_approved=leaves
user.save()
lis=list(individual.leave)
print(request.data)
print(lis)
index=-1
for i in lis:
if i['from_date']==request.data['from_date'] and i['to_date']==request.data['to_date']:
index=lis.index(i)
break
if index==-1:
return Response({"success":"False"},HTTP_400_BAD_REQUEST)
lis[index]['completed']="True"
print(lis)
individual.leave=lis
individual.save()
return Response({"success":"True"},HTTP_200_OK)
class AssignWorkView(APIView):
permission_classes=[AllowAny]
def post(self,request,*args,**kwargs):
user=UserOrganization.objects.get(email=request.data['email'])
print(user.employees)
print(user.employees is None)
if user.employees is None:
lis=[]
return Response('No employees',HTTP_400_BAD_REQUEST)
else:
lis=list(user.employees)
for i in range(len(lis)):
try:
ind=Userprofile.objects.get(_id=ObjectId(lis[i]['_id']))
print(ind)
if ind.workassigned is None:
work=[]
else:
work=list(ind.workassigned)
print(True)
dic={}
for i in request.data:
dic[i]=request.data[i][0]
print(dic)
work.append(dic)
ind.workassigned=work
print('yes')
ind.save()
print('all')
except:
return Response({"success":"False"},HTTP_400_BAD_REQUEST)
if user.work_assigned is None:
assigned=[]
else:
assigned=list(user.work_assigned)
assigned.append(request.data)
print(assigned)
user.work_assigned=assigned
user.save()
print(user.work_assigned)
return Response({"success":"True"},HTTP_200_OK)
class AddEmployeeView(APIView):
permission_classes=[AllowAny,]
def post(self,request,*args,**kwargs):
print(request.data)
dic={}
from_email=request.data['from_email']
for i in request.data:
if i!='from_email':
dic[i]=request.data[i]
dic['created_by']=from_email
print(dic)
try:
ind=Userprofile.objects.create(**dic)
user=UserOrganization.objects.get(email=from_email)
            if user.employees is None:
                lis=[]
            else:
                lis=list(user.employees)
            lis.append(ind._id)
            user.employees=lis
            user.save()
return Response({'success':'True'},HTTP_200_OK)
except:
return Response({'success':'False'},HTTP_400_BAD_REQUEST)
class GetLeaves(APIView):
def get(self,request,*args,**kwargs):
try:
print(request.GET)
print(kwargs)
user=UserOrganization.objects.get(email=request.GET.get('email'))
print(user)
return Response({'leaves':user.Leaves_to_be_approved,'success':"True"},HTTP_200_OK)
except:
return Response({'success':False},HTTP_400_BAD_REQUEST)
class GetWorks(APIView):
def get(self,request,**kwargs):
print(request.GET.get('email'))
try:
user=UserOrganization.objects.get(email=request.GET.get('email'))
return Response({'works':user.work_assigned,'success':"True"},HTTP_200_OK)
except:
return Response({'success':False},HTTP_400_BAD_REQUEST)
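# Hypothetical URL wiring for the views above (the route names are
# illustrative, not taken from the project):
#
#   from django.urls import path
#   from .views import (OrganizationLoginView, OrganizationRegistrationView,
#                       AddEmployeeView, GetLeaves, GetWorks)
#
#   urlpatterns = [
#       path('login/', OrganizationLoginView.as_view()),
#       path('register/', OrganizationRegistrationView.as_view()),
#       path('employee/add/', AddEmployeeView.as_view()),
#       path('leaves/', GetLeaves.as_view()),
#       path('works/', GetWorks.as_view()),
#   ]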
| 37.290123 | 140 | 0.608343 | 5,485 | 0.907962 | 0 | 0 | 0 | 0 | 0 | 0 | 518 | 0.085747 |
85ccf00c2aab76068a1c4fc3ab1b4c929b9cff1a
| 9,378 |
py
|
Python
|
nutils/cli.py
|
JochenHinz/nutils
|
ac18dd6825b107e2e4c186ebb1598dbf0fff0f77
|
[
"MIT"
] | null | null | null |
nutils/cli.py
|
JochenHinz/nutils
|
ac18dd6825b107e2e4c186ebb1598dbf0fff0f77
|
[
"MIT"
] | null | null | null |
nutils/cli.py
|
JochenHinz/nutils
|
ac18dd6825b107e2e4c186ebb1598dbf0fff0f77
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2014 Evalf
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
The cli (command line interface) module provides the `cli.run` function that
can be used set up properties, initiate an output environment, and execute a
python function based arguments specified on the command line.
"""
from . import util, config, long_version, warnings, matrix, cache
import sys, inspect, os, io, time, pdb, signal, subprocess, contextlib, traceback, pathlib, html, treelog as log, stickybar
def _version():
try:
githash = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'], universal_newlines=True, stderr=subprocess.DEVNULL, cwd=os.path.dirname(__file__)).strip()
if subprocess.check_output(['git', 'status', '--untracked-files=no', '--porcelain'], stderr=subprocess.DEVNULL, cwd=os.path.dirname(__file__)):
githash += '+'
except:
return long_version
else:
return '{} (git:{})'.format(long_version, githash)
def _mkbox(*lines):
width = max(len(line) for line in lines)
ul, ur, ll, lr, hh, vv = '┌┐└┘─│' if config.richoutput else '++++-|'
return '\n'.join([ul + hh * (width+2) + ur]
+ [vv + (' '+line).ljust(width+2) + vv for line in lines]
+ [ll + hh * (width+2) + lr])
def _sigint_handler(mysignal, frame):
_handler = signal.signal(mysignal, signal.SIG_IGN) # temporarily disable handler
try:
while True:
answer = input('interrupted. quit, continue or start debugger? [q/c/d]')
if answer == 'q':
raise KeyboardInterrupt
if answer == 'c' or answer == 'd':
break
if answer == 'd': # after break, to minimize code after set_trace
print(_mkbox(
'TRACING ACTIVATED. Use the Python debugger',
'to step through the code at source line',
'level, list source code, set breakpoints,',
'and evaluate arbitrary Python code in the',
'context of any stack frame. Type "h" for',
'an overview of commands to get going, or',
'"c" to continue uninterrupted execution.'))
pdb.set_trace()
finally:
signal.signal(mysignal, _handler)
def _hms(dt):
seconds = int(dt)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
return hours, minutes, seconds
def run(func, *, skip=1, loaduserconfig=True):
'''parse command line arguments and call function'''
configs = []
if loaduserconfig:
home = os.path.expanduser('~')
configs.append(dict(richoutput=sys.stdout.isatty()))
configs.extend(path for path in (os.path.join(home, '.config', 'nutils', 'config'), os.path.join(home, '.nutilsrc')) if os.path.isfile(path))
params = inspect.signature(func).parameters.values()
if '-h' in sys.argv[skip:] or '--help' in sys.argv[skip:]:
print('usage: {} (...)'.format(' '.join(sys.argv[:skip])))
print()
for param in params:
cls = param.default.__class__
print(' --{:<20}'.format(param.name + '=' + cls.__name__.upper() if cls != bool else '(no)' + param.name), end=' ')
if param.annotation != param.empty:
print(param.annotation, end=' ')
print('[{}]'.format(param.default))
sys.exit(1)
kwargs = {param.name: param.default for param in params}
cli_config = {}
for arg in sys.argv[skip:]:
name, sep, value = arg.lstrip('-').partition('=')
if not sep:
value = not name.startswith('no')
if not value:
name = name[2:]
if name in kwargs:
default = kwargs[name]
args = kwargs
else:
try:
default = getattr(config, name)
except AttributeError:
print('invalid argument {!r}'.format(arg))
sys.exit(2)
args = cli_config
try:
if isinstance(default, bool) and not isinstance(value, bool):
        raise Exception('boolean value should be specified as --{0}/--no{0}'.format(name))
args[name] = default.__class__(value)
except Exception as e:
print('invalid argument for {!r}: {}'.format(name, e))
sys.exit(2)
with config(*configs, **cli_config):
status = call(func, kwargs, scriptname=os.path.basename(sys.argv[0]), funcname=None if skip==1 else func.__name__)
sys.exit(status)
def choose(*functions, loaduserconfig=True):
'''parse command line arguments and call one of multiple functions'''
assert functions, 'no functions specified'
funcnames = [func.__name__ for func in functions]
if len(sys.argv) == 1 or sys.argv[1] in ('-h', '--help'):
print('usage: {} [{}] (...)'.format(sys.argv[0], '|'.join(funcnames)))
sys.exit(1)
try:
ifunc = funcnames.index(sys.argv[1])
except ValueError:
print('invalid argument {!r}; choose from {}'.format(sys.argv[1], ', '.join(funcnames)))
sys.exit(2)
run(functions[ifunc], skip=2, loaduserconfig=loaduserconfig)
def call(func, kwargs, scriptname, funcname=None):
'''set up compute environment and call function'''
outdir = config.outdir or os.path.join(os.path.expanduser(config.outrootdir), scriptname)
with contextlib.ExitStack() as stack:
stack.enter_context(cache.enable(os.path.join(outdir, config.cachedir)) if config.cache else cache.disable())
stack.enter_context(matrix.backend(config.matrix))
stack.enter_context(log.set(log.FilterLog(log.RichOutputLog() if config.richoutput else log.StdoutLog(), minlevel=5-config.verbose)))
if config.htmloutput:
htmllog = stack.enter_context(log.HtmlLog(outdir, title=scriptname, htmltitle='<a href="http://www.nutils.org">{}</a> {}'.format(SVGLOGO, html.escape(scriptname)), favicon=FAVICON))
uri = (config.outrooturi.rstrip('/') + '/' + scriptname if config.outrooturi else pathlib.Path(outdir).resolve().as_uri()) + '/' + htmllog.filename
if config.richoutput:
t0 = time.perf_counter()
bar = lambda running: '{0} [{1}] {2[0]}:{2[1]:02d}:{2[2]:02d}'.format(uri, 'RUNNING' if running else 'STOPPED', _hms(time.perf_counter()-t0))
stack.enter_context(stickybar.activate(bar, update=1))
else:
log.info('opened log at', uri)
htmllog.write('<ul style="list-style-position: inside; padding-left: 0px; margin-top: 0px;">{}</ul>'.format(''.join(
'<li>{}={} <span style="color: gray;">{}</span></li>'.format(param.name, kwargs.get(param.name, param.default), param.annotation)
for param in inspect.signature(func).parameters.values())), level=1, escape=False)
stack.enter_context(log.add(htmllog))
stack.enter_context(warnings.via(log.warning))
stack.callback(signal.signal, signal.SIGINT, signal.signal(signal.SIGINT, _sigint_handler))
log.info('nutils v{}'.format(_version()))
log.info('start', time.ctime())
try:
func(**kwargs)
except (KeyboardInterrupt, SystemExit, pdb.bdb.BdbQuit):
log.error('killed by user')
return 1
except:
log.error(traceback.format_exc())
if config.pdb:
print(_mkbox(
'YOUR PROGRAM HAS DIED. The Python debugger',
'allows you to examine its post-mortem state',
'to figure out why this happened. Type "h"',
'for an overview of commands to get going.'))
pdb.post_mortem()
return 2
else:
log.info('finish', time.ctime())
return 0
SVGLOGO = '''\
<svg style="vertical-align: middle;" width="32" height="32" xmlns="http://www.w3.org/2000/svg">
<path d="M7.5 19 v-6 a6 6 0 0 1 12 0 v6 M25.5 13 v6 a6 6 0 0 1 -12 0 v-6" fill="none" stroke-width="3" stroke-linecap="round"/>
</svg>'''
FAVICON = 'data:image/png;base64,' \
'iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAQAAAD9CzEMAAACAElEQVRYw+2YS04bQRCGP2wJ' \
'gbAimS07WABXGMLzAgiBcgICFwDEEiGiDCScggWPHVseC1AIZ8AIJBA2hg1kF5DiycLYqppp' \
'M91j2KCp3rSq//7/VldPdfVAajHW0nAkywDjeHSTBx645IRdfvPvLWTbWeSewNDuWKC9Wfov' \
'3BjJa+2aqWa2bInKq/QBARV8MknoM2zHktfaVhKJ79b0AQEr7nsfpthjml466KCPr+xHNmrS' \
'7eTo0J4xFMEMUwiFu81eYFFNPSJvROU5Vrh5W/qsOvdnDegBOjkXyDJZO4Fhta7RV7FDCvvZ' \
'TmBdhTbODgT6R9zJr9qA8G2LfiurlCji0yq8O6LvKT4zHlQEeoXfr3t94e1TUSAWDzyJKTnh' \
'L9W9t8KbE+i/iieCr6XroEEKb9qfee8LJxVIBVKBjyRQqnuKavxZpTiZ1Ez4Typ9KoGN+sCG' \
'Evgj+l2ib8ZLxCOhi8KnaLgoTkVino7Fzwr0L7st/Cmm7MeiDwV6zU5gUF3wYw6Fg2dbztyJ' \
'SQWHcsb6fC6odR3T2YBeF2RzLiXltZpaYCSCGVWrD7hyKSlhKvJiOGCGfnLk6GdGhbZaFE+4' \
'fo7fnMr65STf+5Y1/Way9PPOT6uqTYbCHW5X7nsftjbmKRvJy8yZT05Lgnh4jOPR8/JAv+CE' \
'XU6ppH81Etp/wL7MKaEwo4sAAAAASUVORK5CYII='
# vim:sw=2:sts=2:et
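# Usage sketch for cli.run (function name and parameters are illustrative;
# each annotation string becomes the help text printed for --help):
#
#   from nutils import cli
#
#   def main(nelems: 'number of elements' = 10, degree: 'spline degree' = 1):
#       ...
#
#   if __name__ == '__main__':
#       cli.run(main)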
| 44.028169 | 187 | 0.683301 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,904 | 0.415761 |
85ceb804c95eaa5e6ed011f7728feba8c174befd
| 6,336 |
py
|
Python
|
experiments/alpha_analysis.py
|
oeg-upm/tada-entity
|
6e538129229bed49bf1aa960fcd97a8468eca765
|
[
"Apache-2.0"
] | 3 |
2019-06-11T10:19:25.000Z
|
2022-02-28T22:58:29.000Z
|
experiments/alpha_analysis.py
|
oeg-upm/tada-entity
|
6e538129229bed49bf1aa960fcd97a8468eca765
|
[
"Apache-2.0"
] | 7 |
2019-02-04T08:57:54.000Z
|
2021-11-01T12:42:03.000Z
|
experiments/alpha_analysis.py
|
oeg-upm/tada-entity
|
6e538129229bed49bf1aa960fcd97a8468eca765
|
[
"Apache-2.0"
] | null | null | null |
"""
This script analyses optimal alphas for each class and draws them in a box and whisker plot
"""
import pandas as pd
import argparse
import seaborn as sns
import matplotlib.pyplot as plt
import itertools
def shorten_uri(class_uri, base="http://dbpedia.org/ontology/", pref="dbo:"):
return class_uri.replace(base, pref)
def get_classes(fpath, dataset):
d = dict()
f = open(fpath)
for line in f.readlines():
sline = line.strip()
if sline == "":
continue
if dataset == "wcv2":
fname, _, class_uri = sline.split(',')
elif dataset == "wcv1":
fname, _, class_uri, _ = sline.split(',')
fname = fname.split(".")[0]
else:
raise Exception("Unknown dataset")
fname = fname.replace('"', '')
fname += ".csv"
# #DEBUG
# print("%s> fname: %s" % (__name__, fname))
class_uri = class_uri.replace('"', "")
d[fname] = class_uri
return d
def analyse_alpha_for_all(falpha, classes, draw_fname, midalpha):
"""
    :param falpha: path to the alpha results file
    :param classes: a dict of fnames and their classes
    :param draw_fname: filename prefix for the saved plots
    :param midalpha: whether to plot mid-range alphas instead of from/to ranges
:return:
"""
df_all = pd.read_csv(falpha)
for fsid in range(1, 6):
df = df_all[df_all.fsid == fsid]
al_per_cls = aggregate_alpha_per_class(df, classes)
analyse_alpha(al_per_cls, draw_fname+"_fsid%d" % (fsid), midalpha)
# analyse_alpha(al_per_cls, "wcv2_alpha_%s_original_fsid%d" % (fattr,fsid))
# analyse_alpha(al_per_cls, "wcv2_alpha_fsid%d" % fsid)
# break
def analyse_alpha(alpha_per_class, draw_fname, midalpha):
rows = []
if midalpha:
attrs = ['mid_alpha']
else:
attrs = ['from_alpha', 'to_alpha']
# attrs = ['from_alpha', 'to_alpha', 'mid_alpha']
# attrs = ['mid_alpha']
for c in alpha_per_class:
for a_attr in attrs:
for a in alpha_per_class[c][a_attr]:
if a < 0:
continue
r = [shorten_uri(c), a, a_attr]
rows.append(r)
print(r)
# print(rows)
data = pd.DataFrame(rows, columns=["Class", "Alpha", "Attr"])
# ax = sns.boxplot(x="Class", y="Alpha",
# hue="Attr",
# data=data, linewidth=1.0,
# # palette="colorblind",
# palette="Spectral",
# # palette="pastel",
# dodge=True,
# # palette="ch:start=.2,rot=-.3",
# orient="v",
# flierprops=dict(markerfacecolor='0.50', markersize=2), whiskerprops={'linestyle': '-'})
ax = sns.boxplot(x="Alpha", y="Class",
hue="Attr",
data=data, linewidth=1.0,
# palette="colorblind",
palette="Spectral",
# palette="pastel",
dodge=True,
# palette="ch:start=.2,rot=-.3",
orient="h",
flierprops=dict(markerfacecolor='0.50', markersize=2))
ax.legend(bbox_to_anchor=(1.0, -0.1), borderaxespad=0)
if midalpha:
# to remove legend
ax.legend_.remove()
ax.set_xlim(0, 0.7)
# ax.set_ylim(0, 0.7)
# Horizontal
ticks = ax.get_yticks()
new_ticks = [t for t in ticks]
texts = ax.get_yticklabels()
print(ax.get_yticklabels())
labels = [t.get_text() for t in texts]
ax.set_yticks(new_ticks)
ax.set_yticklabels(labels, fontsize=8)
print(ax.get_yticklabels())
# Vertical
# ticks = ax.get_xticks()
# new_ticks = [t-1 for t in ticks]
# texts = ax.get_xticklabels()
# print(ax.get_xticklabels())
# labels = [t.get_text() for t in texts]
# ax.set_xticks(new_ticks)
# ax.set_xticklabels(labels)
# print(ax.get_xticklabels())
# for i, box in enumerate(ax.artists):
# box.set_edgecolor('black')
# To change bar colors
# plt.setp(ax.artists, edgecolor='k', facecolor='w')
# To make whiskers black
plt.setp(ax.lines, color='k')
# [t.set_rotation(70) for t in ax.get_xticklabels()]
#plt.show()
# ax.figure.savefig('docs/%s.svg' % draw_fname)
ax.figure.savefig('docs/%s.svg' % draw_fname, bbox_inches="tight")
ax.figure.clf()
def aggregate_alpha_per_class(df, classes):
"""
    :param df: DataFrame of alpha results (fname,colid,fsid,from_alpha,to_alpha)
    :param classes: a dict of fnames and their classes
:return:
"""
"""fname,colid,fsid,from_alpha,to_alpha"""
d = dict()
for idx, row in df.iterrows():
# print("fname: <%s>" % row['fname'])
# DEBUG
print("classes: ")
print(classes)
c = classes[row['fname']]
if c not in d:
d[c] = {'from_alpha': [], 'to_alpha': [], 'mid_alpha': []}
d[c]['from_alpha'].append(row['from_alpha'])
d[c]['to_alpha'].append(row['to_alpha'])
d[c]['mid_alpha'].append((row['from_alpha'] + row['to_alpha'])/2)
return d
def workflow(falpha, fmeta, draw_fpath, midalpha, dataset):
classes = get_classes(fmeta, dataset)
analyse_alpha_for_all(falpha, classes, draw_fpath, midalpha)
def main():
"""
Parse the arguments
:return:
"""
parser = argparse.ArgumentParser(description='Alpha Analysis')
# parser.add_argument('--debug', action="store_true", default=False, help="Whether to enable debug messages.")
parser.add_argument('falpha', help="The path to the alpha results file.")
parser.add_argument('fmeta', help="The path to the meta file which contain the classes.")
parser.add_argument('dataset', choices=["wcv1", "wcv2", "st19-r1", "st19-r2", "st19-r3", "st19-r4"],
help="The name of the dataset as the meta file differ for each")
parser.add_argument('--draw', default="test.svg", help="The filename prefix to draw (without the extension)")
parser.add_argument('--midalpha', action="store_true", default=False,
help="Whether to report the mid ranges of the optimal alpha or just the ranges")
parser.print_usage()
parser.print_help()
args = parser.parse_args()
workflow(args.falpha, args.fmeta, args.draw, args.midalpha, args.dataset)
if __name__ == "__main__":
main()
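# Example invocation (the file names are hypothetical):
#
#   python alpha_analysis.py alpha_ranges.csv meta.csv wcv2 \
#       --draw wcv2_alpha --midalpha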
| 34.622951 | 114 | 0.574337 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,719 | 0.429135 |
85cf779b9a1cc2e9b35950583be014be08b8ba73
| 1,009 |
py
|
Python
|
p039m/combination_sum.py
|
l33tdaima/l33tdaima
|
0a7a9573dc6b79e22dcb54357493ebaaf5e0aa90
|
[
"MIT"
] | 1 |
2020-02-20T12:04:46.000Z
|
2020-02-20T12:04:46.000Z
|
p039m/combination_sum.py
|
l33tdaima/l33tdaima
|
0a7a9573dc6b79e22dcb54357493ebaaf5e0aa90
|
[
"MIT"
] | null | null | null |
p039m/combination_sum.py
|
l33tdaima/l33tdaima
|
0a7a9573dc6b79e22dcb54357493ebaaf5e0aa90
|
[
"MIT"
] | null | null | null |
from typing import List
class Solution:
def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
candidates.sort()
ans = []
def helper(path: List[int], target: int, start: int) -> None:
if target < 0:
return
if target == 0:
ans.append(list(path))
return
for i in range(start, len(candidates)):
path.append(candidates[i])
helper(path, target - candidates[i], i)
path.pop()
helper([], target, 0)
return ans
# TESTS
tests = [
([2, 3, 6, 7], 7, [[2, 2, 3], [7]]),
([2, 3, 5], 8, [[2, 2, 2, 2], [2, 3, 3], [3, 5]]),
([2], 1, []),
([1], 1, [[1]]),
([1], 2, [[1, 1]]),
]
for candidates, target, expected in tests:
sol = Solution()
actual = sol.combinationSum(candidates, target)
print("Combinations in", candidates, "sum to", target, "->", actual)
assert actual == expected
| 27.27027 | 84 | 0.489594 | 582 | 0.576809 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.035679 |
85cff0c0609514a2aa77da41be4f85d685342405
| 12,197 |
py
|
Python
|
sdk/python/pulumi_proxmox/ct/container.py
|
meyskens/pulumi-proxmox
|
bf48570690350be68fa554e1cec376212eb449ab
|
[
"ECL-2.0",
"Apache-2.0"
] | 16 |
2021-01-11T11:26:19.000Z
|
2022-01-23T02:32:34.000Z
|
sdk/python/pulumi_proxmox/ct/container.py
|
meyskens/pulumi-proxmox
|
bf48570690350be68fa554e1cec376212eb449ab
|
[
"ECL-2.0",
"Apache-2.0"
] | 2 |
2021-01-29T08:15:46.000Z
|
2021-10-17T16:33:19.000Z
|
sdk/python/pulumi_proxmox/ct/container.py
|
meyskens/pulumi-proxmox
|
bf48570690350be68fa554e1cec376212eb449ab
|
[
"ECL-2.0",
"Apache-2.0"
] | 4 |
2021-04-06T00:36:05.000Z
|
2021-12-16T14:25:07.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['Container']
class Container(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
clone: Optional[pulumi.Input[pulumi.InputType['ContainerCloneArgs']]] = None,
console: Optional[pulumi.Input[pulumi.InputType['ContainerConsoleArgs']]] = None,
cpu: Optional[pulumi.Input[pulumi.InputType['ContainerCpuArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
disk: Optional[pulumi.Input[pulumi.InputType['ContainerDiskArgs']]] = None,
initialization: Optional[pulumi.Input[pulumi.InputType['ContainerInitializationArgs']]] = None,
memory: Optional[pulumi.Input[pulumi.InputType['ContainerMemoryArgs']]] = None,
network_interfaces: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ContainerNetworkInterfaceArgs']]]]] = None,
node_name: Optional[pulumi.Input[str]] = None,
operating_system: Optional[pulumi.Input[pulumi.InputType['ContainerOperatingSystemArgs']]] = None,
pool_id: Optional[pulumi.Input[str]] = None,
started: Optional[pulumi.Input[bool]] = None,
template: Optional[pulumi.Input[bool]] = None,
vm_id: Optional[pulumi.Input[int]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Create a Container resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['ContainerCloneArgs']] clone: The cloning configuration
:param pulumi.Input[pulumi.InputType['ContainerConsoleArgs']] console: The console configuration
:param pulumi.Input[pulumi.InputType['ContainerCpuArgs']] cpu: The CPU allocation
:param pulumi.Input[str] description: The description
:param pulumi.Input[pulumi.InputType['ContainerDiskArgs']] disk: The disks
:param pulumi.Input[pulumi.InputType['ContainerInitializationArgs']] initialization: The initialization configuration
:param pulumi.Input[pulumi.InputType['ContainerMemoryArgs']] memory: The memory allocation
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ContainerNetworkInterfaceArgs']]]] network_interfaces: The network interfaces
:param pulumi.Input[str] node_name: The node name
:param pulumi.Input[pulumi.InputType['ContainerOperatingSystemArgs']] operating_system: The operating system configuration
:param pulumi.Input[str] pool_id: The ID of the pool to assign the container to
:param pulumi.Input[bool] started: Whether to start the container
:param pulumi.Input[bool] template: Whether to create a template
:param pulumi.Input[int] vm_id: The VM identifier
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['clone'] = clone
__props__['console'] = console
__props__['cpu'] = cpu
__props__['description'] = description
__props__['disk'] = disk
__props__['initialization'] = initialization
__props__['memory'] = memory
__props__['network_interfaces'] = network_interfaces
if node_name is None:
raise TypeError("Missing required property 'node_name'")
__props__['node_name'] = node_name
__props__['operating_system'] = operating_system
__props__['pool_id'] = pool_id
__props__['started'] = started
__props__['template'] = template
__props__['vm_id'] = vm_id
super(Container, __self__).__init__(
'proxmox:CT/container:Container',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
clone: Optional[pulumi.Input[pulumi.InputType['ContainerCloneArgs']]] = None,
console: Optional[pulumi.Input[pulumi.InputType['ContainerConsoleArgs']]] = None,
cpu: Optional[pulumi.Input[pulumi.InputType['ContainerCpuArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
disk: Optional[pulumi.Input[pulumi.InputType['ContainerDiskArgs']]] = None,
initialization: Optional[pulumi.Input[pulumi.InputType['ContainerInitializationArgs']]] = None,
memory: Optional[pulumi.Input[pulumi.InputType['ContainerMemoryArgs']]] = None,
network_interfaces: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ContainerNetworkInterfaceArgs']]]]] = None,
node_name: Optional[pulumi.Input[str]] = None,
operating_system: Optional[pulumi.Input[pulumi.InputType['ContainerOperatingSystemArgs']]] = None,
pool_id: Optional[pulumi.Input[str]] = None,
started: Optional[pulumi.Input[bool]] = None,
template: Optional[pulumi.Input[bool]] = None,
vm_id: Optional[pulumi.Input[int]] = None) -> 'Container':
"""
Get an existing Container resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['ContainerCloneArgs']] clone: The cloning configuration
:param pulumi.Input[pulumi.InputType['ContainerConsoleArgs']] console: The console configuration
:param pulumi.Input[pulumi.InputType['ContainerCpuArgs']] cpu: The CPU allocation
:param pulumi.Input[str] description: The description
:param pulumi.Input[pulumi.InputType['ContainerDiskArgs']] disk: The disks
:param pulumi.Input[pulumi.InputType['ContainerInitializationArgs']] initialization: The initialization configuration
:param pulumi.Input[pulumi.InputType['ContainerMemoryArgs']] memory: The memory allocation
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ContainerNetworkInterfaceArgs']]]] network_interfaces: The network interfaces
:param pulumi.Input[str] node_name: The node name
:param pulumi.Input[pulumi.InputType['ContainerOperatingSystemArgs']] operating_system: The operating system configuration
:param pulumi.Input[str] pool_id: The ID of the pool to assign the container to
:param pulumi.Input[bool] started: Whether to start the container
:param pulumi.Input[bool] template: Whether to create a template
:param pulumi.Input[int] vm_id: The VM identifier
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["clone"] = clone
__props__["console"] = console
__props__["cpu"] = cpu
__props__["description"] = description
__props__["disk"] = disk
__props__["initialization"] = initialization
__props__["memory"] = memory
__props__["network_interfaces"] = network_interfaces
__props__["node_name"] = node_name
__props__["operating_system"] = operating_system
__props__["pool_id"] = pool_id
__props__["started"] = started
__props__["template"] = template
__props__["vm_id"] = vm_id
return Container(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def clone(self) -> pulumi.Output[Optional['outputs.ContainerClone']]:
"""
The cloning configuration
"""
return pulumi.get(self, "clone")
@property
@pulumi.getter
def console(self) -> pulumi.Output[Optional['outputs.ContainerConsole']]:
"""
The console configuration
"""
return pulumi.get(self, "console")
@property
@pulumi.getter
def cpu(self) -> pulumi.Output[Optional['outputs.ContainerCpu']]:
"""
The CPU allocation
"""
return pulumi.get(self, "cpu")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
The description
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def disk(self) -> pulumi.Output[Optional['outputs.ContainerDisk']]:
"""
The disks
"""
return pulumi.get(self, "disk")
@property
@pulumi.getter
def initialization(self) -> pulumi.Output[Optional['outputs.ContainerInitialization']]:
"""
The initialization configuration
"""
return pulumi.get(self, "initialization")
@property
@pulumi.getter
def memory(self) -> pulumi.Output[Optional['outputs.ContainerMemory']]:
"""
The memory allocation
"""
return pulumi.get(self, "memory")
@property
@pulumi.getter(name="networkInterfaces")
def network_interfaces(self) -> pulumi.Output[Optional[Sequence['outputs.ContainerNetworkInterface']]]:
"""
The network interfaces
"""
return pulumi.get(self, "network_interfaces")
@property
@pulumi.getter(name="nodeName")
def node_name(self) -> pulumi.Output[str]:
"""
The node name
"""
return pulumi.get(self, "node_name")
@property
@pulumi.getter(name="operatingSystem")
def operating_system(self) -> pulumi.Output[Optional['outputs.ContainerOperatingSystem']]:
"""
The operating system configuration
"""
return pulumi.get(self, "operating_system")
@property
@pulumi.getter(name="poolId")
def pool_id(self) -> pulumi.Output[Optional[str]]:
"""
The ID of the pool to assign the container to
"""
return pulumi.get(self, "pool_id")
@property
@pulumi.getter
def started(self) -> pulumi.Output[Optional[bool]]:
"""
Whether to start the container
"""
return pulumi.get(self, "started")
@property
@pulumi.getter
def template(self) -> pulumi.Output[Optional[bool]]:
"""
Whether to create a template
"""
return pulumi.get(self, "template")
@property
@pulumi.getter(name="vmId")
def vm_id(self) -> pulumi.Output[Optional[int]]:
"""
The VM identifier
"""
return pulumi.get(self, "vm_id")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
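# Usage sketch (the resource name, node name and VM id are assumptions for
# illustration, not provider defaults; the import path follows this module's
# layout):
#
#   import pulumi_proxmox as proxmox
#
#   ct = proxmox.ct.Container("example",
#       node_name="pve",
#       vm_id=200)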
| 44.677656 | 145 | 0.649832 | 11,792 | 0.966795 | 0 | 0 | 6,677 | 0.54743 | 0 | 0 | 5,520 | 0.45257 |
85d0ab0de0a1394f2406e3a5b3e99179d2c59390
| 486 |
py
|
Python
|
tests/test/stateful/conftest.py
|
ActorForth/brownie
|
ef0d5af3bb48edcd11abf985626fc99dbc577c7d
|
[
"MIT"
] | 1,595 |
2020-06-01T19:41:53.000Z
|
2022-03-31T16:09:54.000Z
|
tests/test/stateful/conftest.py
|
ActorForth/brownie
|
ef0d5af3bb48edcd11abf985626fc99dbc577c7d
|
[
"MIT"
] | 532 |
2020-05-30T12:06:17.000Z
|
2022-03-31T22:33:41.000Z
|
tests/test/stateful/conftest.py
|
ActorForth/brownie
|
ef0d5af3bb48edcd11abf985626fc99dbc577c7d
|
[
"MIT"
] | 303 |
2020-06-17T00:38:34.000Z
|
2022-03-31T10:59:48.000Z
|
#!/usr/bin/python3
import pytest
from hypothesis import settings
# derandomizing prevents flaky test outcomes
# we are testing hypothesis itself, not testing with hypothesis
settings.register_profile("derandomize", derandomize=True)
@pytest.fixture
def SMTestBase(devnetwork):
settings.load_profile("derandomize")
class _Base:
def rule_one(self):
pass
def rule_two(self):
pass
yield _Base
settings.load_profile("default")
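# Added hedged usage sketch (comment only; the consuming test modules are not
# shown here). A stateful test could use the fixture above roughly like:
#
#     from brownie.test import state_machine
#
#     def test_runs_rules(SMTestBase, devnetwork):
#         state_machine(SMTestBase)
#
# brownie.test.state_machine is the runner this fixture is shaped for; treat
# the exact call signature as an assumption and check brownie's docs.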
| 19.44 | 63 | 0.709877 | 103 | 0.211934 | 232 | 0.477366 | 248 | 0.510288 | 0 | 0 | 160 | 0.329218 |
85d1bb79ecc810612d2ce67b9924416144e6d28f
| 7,706 |
py
|
Python
|
singleimagemodel.py
|
severinaklingler/kaggle-ocular-disease
|
a6641f6005d1a7f2399b4de9e804ab3ac7f20dd2
|
[
"Apache-2.0"
] | null | null | null |
singleimagemodel.py
|
severinaklingler/kaggle-ocular-disease
|
a6641f6005d1a7f2399b4de9e804ab3ac7f20dd2
|
[
"Apache-2.0"
] | null | null | null |
singleimagemodel.py
|
severinaklingler/kaggle-ocular-disease
|
a6641f6005d1a7f2399b4de9e804ab3ac7f20dd2
|
[
"Apache-2.0"
] | null | null | null |
from logging import getLevelName
import numpy as np
import os
import tensorflow as tf
import pathlib
import pandas as pd
import re
import matplotlib.pyplot as plt
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten , Conv1D
from tensorflow.keras.layers import concatenate
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D,MaxPooling1D
from tensorflow.keras.utils import plot_model
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
import datetime
import argparse
# Global config (TODO)
random_seed = 77
data_path = "./input/ocular-disease-recognition-odir5k/preprocessed_images/"
data_path_tensor = tf.constant(data_path)
data_dir = pathlib.Path(data_path)
AUTOTUNE = tf.data.AUTOTUNE
batch_size = 32
img_height = 224
img_width = 224
class_count = 8
image_channels = 3
num_threads = 4
label_dict = {}
# tf.config.run_functions_eagerly(True)
def load_sample_ids(df, val_size):
sample_ids = df['ID'].to_list()
dataset = tf.data.Dataset.from_tensor_slices(sample_ids)
dataset = dataset.unique()
dataset = dataset.shuffle(len(sample_ids))
val_ds = dataset.take(val_size)
test_ds = dataset.skip(val_size).take(val_size)
train_ds = dataset.skip(2*val_size)
return train_ds, val_ds, test_ds
def decode_one_hot(x):
return next(i for i,v in enumerate(x) if v==1)
def build_label_dictionary(df):
keys = []
values = []
for index, row in df.iterrows():
filename = row['filename']
target = eval(row["target"])
image_target = decode_one_hot(target)
keys.append(filename)
values.append(image_target)
keys_tensor = tf.constant(keys)
vals_tensor = tf.constant(values)
table = tf.lookup.StaticHashTable(tf.lookup.KeyValueTensorInitializer(keys_tensor, vals_tensor), default_value=-1)
return table
def _file_exists(file_path):
return tf.io.gfile.exists(data_path + bytes.decode(file_path.numpy()))
def file_exists(file_path):
[exists] = tf.py_function(_file_exists, [file_path], [tf.bool])
exists.set_shape([])
return exists
def filenames_from_id(id):
right_path = tf.strings.as_string(id) + tf.constant("_right.jpg")
left_path = tf.strings.as_string(id) + tf.constant("_left.jpg")
return tf.data.Dataset.from_tensor_slices([left_path, right_path])
def decode_img(img):
img = tf.io.decode_jpeg(img, channels=image_channels)
return tf.image.resize(img, [img_height, img_width])
def process_filename(filename):
img = tf.io.read_file(tf.strings.join([data_path_tensor, filename], ''))
img = decode_img(img)
return img, label_dict.lookup(filename)
def print_dataset_stats(names, datasets, n=-1):
for name, dataset in zip(names, datasets):
if n>0:
dataset = dataset.take(n)
d = list(dataset.as_numpy_iterator())
top5 = d[:5]
print(f"{name} size: {len(d)} . First elements : {top5}")
def label_not_missing(data, label):
return tf.math.not_equal(label,-1)
def prepare_data(ds):
filenames = ds.flat_map(filenames_from_id)
existing_files = filenames.filter(file_exists)
existing_files_and_labels = existing_files.map(process_filename, num_parallel_calls=num_threads)
existing_files_and_existing_labels = existing_files_and_labels.filter(label_not_missing)
data_and_labels = existing_files_and_existing_labels.map(lambda x,y : (x, tf.one_hot(y,class_count)), num_parallel_calls=num_threads)
return data_and_labels
def configure_for_performance(ds):
ds = ds.batch(batch_size)
ds = ds.prefetch(buffer_size=1)
return ds
def show_batch(ds):
images_batch, label_batch = next(iter(ds))
plt.figure(figsize=(10, 10))
for i in range(8):
ax = plt.subplot(2, 4, i + 1)
label = label_batch[i]
print("Image shape: ", images_batch[i].numpy().shape)
print("label: ", label)
plt.imshow(images_batch[i].numpy().astype("uint8"))
plt.title(decode_one_hot(label))
plt.show()
def create_model():
inp1 = Input(shape=(img_height,img_width,image_channels), name="img")
new_input = Input(shape=(img_height,img_width, image_channels), name="New Input")
conv1 = Conv2D(3, kernel_size=3, padding ='same', activation='relu', name="conleft1")(inp1)
i1 = tf.keras.applications.ResNet50(include_top=False,weights="imagenet",input_tensor=new_input,input_shape=None, pooling='avg')(conv1)
class1 = Dense(1024, activation='relu')(i1)
class1 = Dense(256, activation='relu')(class1)
class1 = Dense(64, activation='relu')(class1)
output = Dense(class_count, activation='sigmoid')(class1)
model = Model(inputs=[inp1], outputs=output)
return model
def train_model(model, training_data, validation_data, number_of_epochs):
METRICS = [
'accuracy',
tf.keras.metrics.Precision(),
tf.keras.metrics.Recall(),
]
model.compile(
optimizer='Adam',
loss='binary_crossentropy',
metrics=METRICS
)
tf.keras.utils.plot_model(
model,
to_file="model.png",
show_shapes=True,
show_dtype=False,
show_layer_names=True,
rankdir="TB",
expand_nested=False,
dpi=300,
layer_range=None,
)
log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=10)
model.fit(
training_data,
validation_data=validation_data,
epochs=number_of_epochs,
callbacks=[tensorboard_callback])
return model
def test(model, test_data):
yhat = model.predict(test_data)
yhat = yhat.round()
y_test = np.concatenate([y for x, y in test_data], axis=0)
report = classification_report(y_test, yhat,target_names=['N','D','G','C','A','H','M','O'],output_dict=True)
df = pd.DataFrame(report).transpose()
print(df)
def load_datasets():
global label_dict
df = pd.read_csv('./input/ocular-disease-recognition-odir5k/full_df.csv')
label_dict = build_label_dictionary(df)
train, val, test = load_sample_ids(df, 500)
training_data = configure_for_performance(prepare_data(train))
validation_data = configure_for_performance(prepare_data(val))
test_data = configure_for_performance(prepare_data(test))
return training_data, validation_data, test_data
def main():
parser = argparse.ArgumentParser(description='Optional app description')
parser.add_argument('--show', action='store_true', help='Visualize a training batch')
parser.add_argument('--train', action='store_true', help='Train model')
parser.add_argument('--test', action='store_true', help='Test model')
parser.add_argument('--dump', action='store_true', help='Dump data from first examples')
parser.add_argument('--name', type=str, help='Name of the model', default="tmpModel")
parser.add_argument('--epochs', type=int, help='Number of epochs to train', default=40)
args = parser.parse_args()
training_data, validation_data, test_data = load_datasets()
if args.show:
show_batch(training_data)
if args.dump:
print_dataset_stats(["training_data"],[training_data],5)
if args.train:
trained_model = train_model(create_model(), training_data, validation_data, args.epochs)
trained_model.save('models/' + args.name)
if args.test:
model = tf.keras.models.load_model('models/' + args.name)
test(model, test_data)
if __name__ == '__main__':
main()
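# Added usage note (hedged): the paths are hard-coded above, so these example
# invocations assume the ODIR-5K data sits under
# ./input/ocular-disease-recognition-odir5k/ ; the model name is arbitrary.
#   python singleimagemodel.py --show
#   python singleimagemodel.py --train --name odir_resnet --epochs 40
#   python singleimagemodel.py --test --name odir_resnet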
| 32.514768 | 139 | 0.706982 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 798 | 0.103556 |
85d25582e5cd6979f09e8f1ee727114f02ab78b7
| 1,377 |
py
|
Python
|
graph/models.py
|
insung151/piro
|
aecbf8ce27e6e47856e2afd4a6e9e406bffa7a40
|
[
"MIT"
] | null | null | null |
graph/models.py
|
insung151/piro
|
aecbf8ce27e6e47856e2afd4a6e9e406bffa7a40
|
[
"MIT"
] | null | null | null |
graph/models.py
|
insung151/piro
|
aecbf8ce27e6e47856e2afd4a6e9e406bffa7a40
|
[
"MIT"
] | null | null | null |
import datetime
from django.db import models
from django.utils import timezone
# from .backends import update_data
class Member(models.Model):
github_username = models.CharField(max_length=100, unique=True)
    def save(self, *args, **kwargs):
        # note: the model declares no slug field, so this attribute is never persisted
        self.slug = self.github_username
        super(Member, self).save(*args, **kwargs)
def get_latest_contribs(self):
return list(map(
lambda x: x.level,
self.contrib_set.filter(date__gte=datetime.date.today()-datetime.timedelta(7))
))
def process(self):
if not self.logs.filter(created_at__gt=timezone.now() - datetime.timedelta(hours=3)).exists():
CrawlLog.objects.create(member=self).process()
def __str__(self):
return str(self.github_username)
class Contrib(models.Model):
member = models.ForeignKey(Member, on_delete=models.CASCADE)
date = models.DateField()
level = models.PositiveIntegerField()
def __str__(self):
return str(self.member.github_username) + " / " + str(self.date) \
+ " / " + str(self.level)
class CrawlLog(models.Model):
member = models.ForeignKey(Member, related_name="logs")
created_at = models.DateTimeField(auto_now_add=True)
def process(self):
# update_data(self.member)
pass
def __str__(self):
return str(self.created_at)
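# Added hedged usage sketch (comment only; assumes migrations for these models
# are applied and "octocat" is just an illustrative username):
#
#     m = Member.objects.create(github_username="octocat")
#     m.process()                 # creates a CrawlLog at most once per 3 hours
#     m.get_latest_contribs()     # contribution levels for the past 7 days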
| 28.102041 | 102 | 0.664488 | 1,252 | 0.909223 | 0 | 0 | 0 | 0 | 0 | 0 | 77 | 0.055919 |
85d2e42b2d5769672b5d6fd5964f344d0f20bc08
| 546 |
py
|
Python
|
gore/models/project.py
|
akx/gentry
|
f4205f5a14054231d064657347862a15ecf4c0e0
|
[
"MIT"
] | 4 |
2017-07-26T13:23:06.000Z
|
2019-02-21T14:55:34.000Z
|
gore/models/project.py
|
akx/gentry
|
f4205f5a14054231d064657347862a15ecf4c0e0
|
[
"MIT"
] | 26 |
2017-08-02T08:52:06.000Z
|
2022-03-04T15:13:26.000Z
|
gore/models/project.py
|
akx/gentry
|
f4205f5a14054231d064657347862a15ecf4c0e0
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.utils import timezone
from gentry.utils import make_absolute_uri
class Project(models.Model):
slug = models.SlugField()
name = models.CharField(max_length=128)
date_added = models.DateTimeField(default=timezone.now, editable=False)
def __str__(self):
return self.name
@property
def dsn(self):
key = self.key_set.first()
if not key:
return None
return make_absolute_uri('/%s' % self.id).replace('://', f'://{key.key}:{key.secret}@')
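    # Added hedged note: the property above splices the key pair into the
    # project URI, yielding the usual Sentry-style DSN shape, e.g.
    # (illustrative values) "https://KEY:SECRET@gentry.example.com/3".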
| 26 | 95 | 0.661172 | 436 | 0.798535 | 0 | 0 | 203 | 0.371795 | 0 | 0 | 39 | 0.071429 |
85d2ee56a1605c4085ef6834b7da596c8770a900
| 17,167 |
py
|
Python
|
features/steps/prs_steps.py
|
spidezad/python-pptx
|
eab3f55b84b54906876d5486172d5d0c457d55f8
|
[
"BSD-2-Clause"
] | 1 |
2021-05-17T06:33:32.000Z
|
2021-05-17T06:33:32.000Z
|
features/steps/prs_steps.py
|
spidezad/python-pptx
|
eab3f55b84b54906876d5486172d5d0c457d55f8
|
[
"BSD-2-Clause"
] | null | null | null |
features/steps/prs_steps.py
|
spidezad/python-pptx
|
eab3f55b84b54906876d5486172d5d0c457d55f8
|
[
"BSD-2-Clause"
] | null | null | null |
import os
from datetime import datetime, timedelta
from behave import given, when, then
from hamcrest import (
assert_that, equal_to, has_item, is_, is_not, greater_than, less_than
)
from StringIO import StringIO
from pptx import packaging
from pptx import Presentation
from pptx.constants import MSO_AUTO_SHAPE_TYPE as MAST, MSO, PP
from pptx.util import Inches
def absjoin(*paths):
return os.path.abspath(os.path.join(*paths))
thisdir = os.path.split(__file__)[0]
scratch_dir = absjoin(thisdir, '../_scratch')
test_file_dir = absjoin(thisdir, '../../test/test_files')
basic_pptx_path = absjoin(test_file_dir, 'test.pptx')
no_core_props_pptx_path = absjoin(test_file_dir, 'no-core-props.pptx')
saved_pptx_path = absjoin(scratch_dir, 'test_out.pptx')
test_image_path = absjoin(test_file_dir, 'python-powered.png')
test_text = "python-pptx was here!"
# logging.debug("saved_pptx_path is ==> '%s'\n", saved_pptx_path)
# given ===================================================
@given('a clean working directory')
def step_given_clean_working_dir(context):
if os.path.isfile(saved_pptx_path):
os.remove(saved_pptx_path)
@given('an initialized pptx environment')
def step_given_initialized_pptx_env(context):
pass
@given('I have a reference to a blank slide')
def step_given_ref_to_blank_slide(context):
context.prs = Presentation()
slidelayout = context.prs.slidelayouts[6]
context.sld = context.prs.slides.add_slide(slidelayout)
@given('I have a reference to a bullet body placeholder')
def step_given_ref_to_bullet_body_placeholder(context):
context.prs = Presentation()
slidelayout = context.prs.slidelayouts[1]
context.sld = context.prs.slides.add_slide(slidelayout)
context.body = context.sld.shapes.placeholders[1]
@given('I have a reference to a chevron shape')
def step_given_ref_to_chevron_shape(context):
context.prs = Presentation()
blank_slidelayout = context.prs.slidelayouts[6]
shapes = context.prs.slides.add_slide(blank_slidelayout).shapes
x = y = cx = cy = 914400
context.chevron_shape = shapes.add_shape(MAST.CHEVRON, x, y, cx, cy)
@given('I have a reference to a paragraph')
def step_given_ref_to_paragraph(context):
context.prs = Presentation()
blank_slidelayout = context.prs.slidelayouts[6]
slide = context.prs.slides.add_slide(blank_slidelayout)
length = Inches(2.00)
textbox = slide.shapes.add_textbox(length, length, length, length)
context.p = textbox.textframe.paragraphs[0]
@given('I have a reference to a slide')
def step_given_ref_to_slide(context):
context.prs = Presentation()
slidelayout = context.prs.slidelayouts[0]
context.sld = context.prs.slides.add_slide(slidelayout)
@given('I have a reference to a table')
def step_given_ref_to_table(context):
context.prs = Presentation()
slidelayout = context.prs.slidelayouts[6]
sld = context.prs.slides.add_slide(slidelayout)
shapes = sld.shapes
x, y = (Inches(1.00), Inches(2.00))
cx, cy = (Inches(3.00), Inches(1.00))
context.tbl = shapes.add_table(2, 2, x, y, cx, cy)
@given('I have a reference to a table cell')
def step_given_ref_to_table_cell(context):
context.prs = Presentation()
slidelayout = context.prs.slidelayouts[6]
sld = context.prs.slides.add_slide(slidelayout)
length = 1000
tbl = sld.shapes.add_table(2, 2, length, length, length, length)
context.cell = tbl.cell(0, 0)
@given('I have a reference to the core properties of a presentation')
def step_given_ref_to_core_doc_props(context):
context.prs = Presentation()
context.core_properties = context.prs.core_properties
@given('I have an empty presentation open')
def step_given_empty_prs(context):
context.prs = Presentation()
# when ====================================================
@when('I add a new slide')
def step_when_add_slide(context):
slidelayout = context.prs.slidemasters[0].slidelayouts[0]
context.prs.slides.add_slide(slidelayout)
@when("I add a picture stream to the slide's shape collection")
def step_when_add_picture_stream(context):
shapes = context.sld.shapes
x, y = (Inches(1.25), Inches(1.25))
with open(test_image_path) as f:
stream = StringIO(f.read())
shapes.add_picture(stream, x, y)
@when("I add a picture to the slide's shape collection")
def step_when_add_picture(context):
shapes = context.sld.shapes
x, y = (Inches(1.25), Inches(1.25))
shapes.add_picture(test_image_path, x, y)
@when("I add a table to the slide's shape collection")
def step_when_add_table(context):
shapes = context.sld.shapes
x, y = (Inches(1.00), Inches(2.00))
cx, cy = (Inches(3.00), Inches(1.00))
shapes.add_table(2, 2, x, y, cx, cy)
@when("I add a text box to the slide's shape collection")
def step_when_add_text_box(context):
shapes = context.sld.shapes
x, y = (Inches(1.00), Inches(2.00))
cx, cy = (Inches(3.00), Inches(1.00))
sp = shapes.add_textbox(x, y, cx, cy)
sp.text = test_text
@when("I add an auto shape to the slide's shape collection")
def step_when_add_auto_shape(context):
shapes = context.sld.shapes
x, y = (Inches(1.00), Inches(2.00))
cx, cy = (Inches(3.00), Inches(4.00))
sp = shapes.add_shape(MAST.ROUNDED_RECTANGLE, x, y, cx, cy)
sp.text = test_text
@when('I construct a Presentation instance with no path argument')
def step_when_construct_default_prs(context):
context.prs = Presentation()
@when('I indent the first paragraph')
def step_when_indent_first_paragraph(context):
p = context.body.textframe.paragraphs[0]
p.level = 1
@when('I open a basic PowerPoint presentation')
def step_when_open_basic_pptx(context):
context.prs = Presentation(basic_pptx_path)
@when('I open a presentation contained in a stream')
def step_when_open_presentation_stream(context):
with open(basic_pptx_path) as f:
stream = StringIO(f.read())
context.prs = Presentation(stream)
stream.close()
@when('I open a presentation having no core properties part')
def step_when_open_presentation_with_no_core_props_part(context):
context.prs = Presentation(no_core_props_pptx_path)
@when('I save that stream to a file')
def step_when_save_stream_to_a_file(context):
if os.path.isfile(saved_pptx_path):
os.remove(saved_pptx_path)
context.stream.seek(0)
with open(saved_pptx_path, 'wb') as f:
f.write(context.stream.read())
@when('I save the presentation')
def step_when_save_presentation(context):
if os.path.isfile(saved_pptx_path):
os.remove(saved_pptx_path)
context.prs.save(saved_pptx_path)
@when('I save the presentation to a stream')
def step_when_save_presentation_to_stream(context):
context.stream = StringIO()
context.prs.save(context.stream)
@when("I set the cell margins")
def step_when_set_cell_margins(context):
context.cell.margin_top = 1000
context.cell.margin_right = 2000
context.cell.margin_bottom = 3000
context.cell.margin_left = 4000
@when("I set the cell vertical anchor to middle")
def step_when_set_cell_vertical_anchor_to_middle(context):
context.cell.vertical_anchor = MSO.ANCHOR_MIDDLE
@when("I set the core properties to valid values")
def step_when_set_core_doc_props_to_valid_values(context):
context.propvals = (
('author', 'Creator'),
('category', 'Category'),
('comments', 'Description'),
('content_status', 'Content Status'),
('created', datetime(2013, 6, 15, 12, 34, 56)),
('identifier', 'Identifier'),
('keywords', 'key; word; keyword'),
('language', 'Language'),
('last_modified_by', 'Last Modified By'),
('last_printed', datetime(2013, 6, 15, 12, 34, 56)),
('modified', datetime(2013, 6, 15, 12, 34, 56)),
('revision', 9),
('subject', 'Subject'),
('title', 'Title'),
('version', 'Version'),
)
for name, value in context.propvals:
setattr(context.prs.core_properties, name, value)
@when("I set the first_col property to True")
def step_when_set_first_col_property_to_true(context):
context.tbl.first_col = True
@when("I set the first_row property to True")
def step_when_set_first_row_property_to_true(context):
context.tbl.first_row = True
@when("I set the first adjustment value to 0.15")
def step_when_set_first_adjustment_value(context):
context.chevron_shape.adjustments[0] = 0.15
@when("I set the horz_banding property to True")
def step_when_set_horz_banding_property_to_true(context):
context.tbl.horz_banding = True
@when("I set the last_col property to True")
def step_when_set_last_col_property_to_true(context):
context.tbl.last_col = True
@when("I set the last_row property to True")
def step_when_set_last_row_property_to_true(context):
context.tbl.last_row = True
@when("I set the paragraph alignment to centered")
def step_when_set_paragraph_alignment_to_centered(context):
context.p.alignment = PP.ALIGN_CENTER
@when("I set the text of the first cell")
def step_when_set_text_of_first_cell(context):
context.tbl.cell(0, 0).text = 'test text'
@when("I set the title text of the slide")
def step_when_set_slide_title_text(context):
context.sld.shapes.title.text = test_text
@when("I set the vert_banding property to True")
def step_when_set_vert_banding_property_to_true(context):
context.tbl.vert_banding = True
@when("I set the width of the table's columns")
def step_when_set_table_column_widths(context):
context.tbl.columns[0].width = Inches(1.50)
context.tbl.columns[1].width = Inches(3.00)
# then ====================================================
@then('a core properties part with default values is added')
def step_then_a_core_props_part_with_def_vals_is_added(context):
core_props = context.prs.core_properties
assert_that(core_props.title, is_('PowerPoint Presentation'))
assert_that(core_props.last_modified_by, is_('python-pptx'))
assert_that(core_props.revision, is_(1))
# core_props.modified only stores time with seconds resolution, so
# comparison needs to be a little loose (within two seconds)
modified_timedelta = datetime.utcnow() - core_props.modified
max_expected_timedelta = timedelta(seconds=2)
assert_that(modified_timedelta, less_than(max_expected_timedelta))
@then('I receive a presentation based on the default template')
def step_then_receive_prs_based_on_def_tmpl(context):
prs = context.prs
assert_that(prs, is_not(None))
slidemasters = prs.slidemasters
assert_that(slidemasters, is_not(None))
assert_that(len(slidemasters), is_(1))
slidelayouts = slidemasters[0].slidelayouts
assert_that(slidelayouts, is_not(None))
assert_that(len(slidelayouts), is_(11))
@then('I see the pptx file in the working directory')
def step_then_see_pptx_file_in_working_dir(context):
assert_that(os.path.isfile(saved_pptx_path))
minimum = 30000
actual = os.path.getsize(saved_pptx_path)
assert_that(actual, is_(greater_than(minimum)))
@then('the auto shape appears in the slide')
def step_then_auto_shape_appears_in_slide(context):
prs = Presentation(saved_pptx_path)
sp = prs.slides[0].shapes[0]
sp_text = sp.textframe.paragraphs[0].runs[0].text
assert_that(sp.shape_type, is_(equal_to(MSO.AUTO_SHAPE)))
assert_that(sp.auto_shape_type, is_(equal_to(MAST.ROUNDED_RECTANGLE)))
assert_that(sp_text, is_(equal_to(test_text)))
@then('the cell contents are inset by the margins')
def step_then_cell_contents_are_inset_by_the_margins(context):
prs = Presentation(saved_pptx_path)
table = prs.slides[0].shapes[0]
cell = table.cell(0, 0)
assert_that(cell.margin_top, is_(equal_to(1000)))
assert_that(cell.margin_right, is_(equal_to(2000)))
assert_that(cell.margin_bottom, is_(equal_to(3000)))
assert_that(cell.margin_left, is_(equal_to(4000)))
@then('the cell contents are vertically centered')
def step_then_cell_contents_are_vertically_centered(context):
prs = Presentation(saved_pptx_path)
table = prs.slides[0].shapes[0]
cell = table.cell(0, 0)
assert_that(cell.vertical_anchor, is_(equal_to(MSO.ANCHOR_MIDDLE)))
@then('the chevron shape appears with a less acute arrow head')
def step_then_chevron_shape_appears_with_less_acute_arrow_head(context):
chevron = Presentation(saved_pptx_path).slides[0].shapes[0]
assert_that(chevron.adjustments[0], is_(equal_to(0.15)))
@then('the columns of the table have alternating shading')
def step_then_columns_of_table_have_alternating_shading(context):
tbl = Presentation(saved_pptx_path).slides[0].shapes[0]
assert_that(tbl.vert_banding, is_(True))
@then('the core properties of the presentation have the values I set')
def step_then_core_props_have_values_previously_set(context):
core_props = Presentation(saved_pptx_path).core_properties
for name, value in context.propvals:
reason = "for core property '%s'" % name
assert_that(getattr(core_props, name), is_(value), reason)
@then('the first column of the table has special formatting')
def step_then_first_column_of_table_has_special_formatting(context):
tbl = Presentation(saved_pptx_path).slides[0].shapes[0]
assert_that(tbl.first_col, is_(True))
@then('the first row of the table has special formatting')
def step_then_first_row_of_table_has_special_formatting(context):
tbl = Presentation(saved_pptx_path).slides[0].shapes[0]
assert_that(tbl.first_row, is_(True))
@then('the image is saved in the pptx file')
def step_then_img_saved_in_pptx_file(context):
pkgng_pkg = packaging.Package().open(saved_pptx_path)
partnames = [part.partname for part in pkgng_pkg.parts
if part.partname.startswith('/ppt/media/')]
assert_that(partnames, has_item('/ppt/media/image1.png'))
@then('the last column of the table has special formatting')
def step_then_last_column_of_table_has_special_formatting(context):
tbl = Presentation(saved_pptx_path).slides[0].shapes[0]
assert_that(tbl.last_col, is_(True))
@then('the last row of the table has special formatting')
def step_then_last_row_of_table_has_special_formatting(context):
tbl = Presentation(saved_pptx_path).slides[0].shapes[0]
assert_that(tbl.last_row, is_(True))
@then('the paragraph is indented to the second level')
def step_then_paragraph_indented_to_second_level(context):
prs = Presentation(saved_pptx_path)
sld = prs.slides[0]
body = sld.shapes.placeholders[1]
p = body.textframe.paragraphs[0]
assert_that(p.level, is_(equal_to(1)))
@then('the picture appears in the slide')
def step_then_picture_appears_in_slide(context):
prs = Presentation(saved_pptx_path)
sld = prs.slides[0]
shapes = sld.shapes
classnames = [sp.__class__.__name__ for sp in shapes]
assert_that(classnames, has_item('_Picture'))
@then('the pptx file contains a single slide')
def step_then_pptx_file_contains_single_slide(context):
prs = Presentation(saved_pptx_path)
assert_that(len(prs.slides), is_(equal_to(1)))
@then('the paragraph is aligned centered')
def step_then_paragraph_is_aligned_centered(context):
prs = Presentation(saved_pptx_path)
p = prs.slides[0].shapes[0].textframe.paragraphs[0]
assert_that(p.alignment, is_(equal_to(PP.ALIGN_CENTER)))
@then('the rows of the table have alternating shading')
def step_then_rows_of_table_have_alternating_shading(context):
tbl = Presentation(saved_pptx_path).slides[0].shapes[0]
assert_that(tbl.horz_banding, is_(True))
@then('the table appears in the slide')
def step_then_table_appears_in_slide(context):
prs = Presentation(saved_pptx_path)
sld = prs.slides[0]
shapes = sld.shapes
classnames = [sp.__class__.__name__ for sp in shapes]
assert_that(classnames, has_item('_Table'))
@then('the table appears with the new column widths')
def step_then_table_appears_with_new_col_widths(context):
prs = Presentation(saved_pptx_path)
sld = prs.slides[0]
tbl = sld.shapes[0]
assert_that(tbl.columns[0].width, is_(equal_to(Inches(1.50))))
assert_that(tbl.columns[1].width, is_(equal_to(Inches(3.00))))
@then('the text appears in the first cell of the table')
def step_then_text_appears_in_first_cell_of_table(context):
prs = Presentation(saved_pptx_path)
sld = prs.slides[0]
tbl = sld.shapes[0]
text = tbl.cell(0, 0).textframe.paragraphs[0].runs[0].text
assert_that(text, is_(equal_to('test text')))
@then('the text box appears in the slide')
def step_then_text_box_appears_in_slide(context):
prs = Presentation(saved_pptx_path)
textbox = prs.slides[0].shapes[0]
textbox_text = textbox.textframe.paragraphs[0].runs[0].text
assert_that(textbox_text, is_(equal_to(test_text)))
@then('the text appears in the title placeholder')
def step_then_text_appears_in_title_placeholder(context):
prs = Presentation(saved_pptx_path)
title_shape = prs.slides[0].shapes.title
title_text = title_shape.textframe.paragraphs[0].runs[0].text
assert_that(title_text, is_(equal_to(test_text)))
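# Added hedged aside (comment only; the repo's .feature files are not shown
# here). A Gherkin scenario these step definitions would satisfy might read:
#
#     Scenario: Add a text box to a slide
#       Given I have a reference to a blank slide
#       When I add a text box to the slide's shape collection
#       And I save the presentation
#       Then the text box appears in the slide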
| 33.926877 | 74 | 0.732044 | 0 | 0 | 0 | 0 | 15,861 | 0.923924 | 0 | 0 | 3,575 | 0.208248 |
85d564c27e33a35fe1a491c27c616e6e1a78f815
| 2,604 |
py
|
Python
|
DailyProgrammer/DP20160818B.py
|
DayGitH/Python-Challenges
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
[
"MIT"
] | 2 |
2020-12-23T18:59:22.000Z
|
2021-04-14T13:16:09.000Z
|
DailyProgrammer/DP20160818B.py
|
DayGitH/Python-Challenges
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
[
"MIT"
] | null | null | null |
DailyProgrammer/DP20160818B.py
|
DayGitH/Python-Challenges
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
[
"MIT"
] | null | null | null |
"""
[2016-08-18] Challenge #279 [Intermediate] Text Reflow
https://www.reddit.com/r/dailyprogrammer/comments/4ybbcz/20160818_challenge_279_intermediate_text_reflow/
#Description:
Text reflow means to break up lines of text so that they fit within a certain width. It is useful in e.g. mobile
browsers. When you zoom in on a web page the lines will become too long to fit the width of the screen, unless the text
is broken up into shorter lines.
#Input:
You will be given a text with a maximum line width of 80 characters.
#Output:
Produce the same text with a maximum line width of 40 characters
#Challenge Input:
In the beginning God created the heavens and the earth. Now the earth was
formless and empty, darkness was over the surface of the deep, and the Spirit of
God was hovering over the waters.
And God said, "Let there be light," and there was light. God saw that the light
was good, and he separated the light from the darkness. God called the light
"day," and the darkness he called "night." And there was evening, and there was
morning - the first day.
#Challenge Output:
In the beginning God created the heavens
and the earth. Now the earth was
formless and empty, darkness was over
the surface of the deep, and the Spirit
of God was hovering over the waters.
And God said, "Let there be light," and
there was light. God saw that the light
was good, and he separated the light
from the darkness. God called the light
"day," and the darkness he called
"night." And there was evening, and
there was morning - the first day.
#Bonus:
Let's get rid of the jagged right margin of the text and make the output prettier. Output the text with full
justification; Adjusting the word spacing so that the text is flush against both the left and the right margin.
#Bonus Output:
In the beginning God created the heavens
and the earth. Now the earth was
formless and empty, darkness was over
the surface of the deep, and the Spirit
of God was hovering over the waters.
And God said, "Let there be light," and
there was light. God saw that the light
was good, and he separated the light
from the darkness. God called the light
"day," and the darkness he called
"night." And there was evening, and
there was morning - the first day.
#Finally
This challenge is posted by /u/slampropp
Also have a good challenge idea?
Consider submitting it to /r/dailyprogrammer_ideas
"""
def main():
pass
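# Added hedged sketch (one possible solution, not the challenge author's):
# greedy word-wrapping of the challenge input to a 40-column width using the
# standard library.
import textwrap

def reflow(text, width=40):
    # collapse the original line breaks, then re-wrap greedily
    return textwrap.fill(" ".join(text.split()), width=width)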
if __name__ == "__main__":
main()
| 40.061538 | 119 | 0.720814 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,550 | 0.979263 |
85d71db4dff31a27689c64809381f6863f31ac08
| 3,177 |
py
|
Python
|
PomodoroTimer/Python/main2.py
|
zcribe/SmallProjectsCollection
|
fbd6bc9884468eba7519728e295b36b24043af27
|
[
"MIT"
] | null | null | null |
PomodoroTimer/Python/main2.py
|
zcribe/SmallProjectsCollection
|
fbd6bc9884468eba7519728e295b36b24043af27
|
[
"MIT"
] | null | null | null |
PomodoroTimer/Python/main2.py
|
zcribe/SmallProjectsCollection
|
fbd6bc9884468eba7519728e295b36b24043af27
|
[
"MIT"
] | null | null | null |
from time import time, sleep
from math import floor
import argparse
import csv
import datetime
# Constants
TIME_WORK = 25
TIME_REST = 5
TIME_REST_LONG = 30
ONE_MINUTE = 60
SESSIONS_WORK_MAX = 4
LOOP_LIMIT = 9999
# Console
parser = argparse.ArgumentParser(description='===== Pomodoro timer CLI =====')
parser.add_argument('-wt', '-worktime', type=int, help=f'Minutes of work in a work sessions (default {TIME_WORK})',
default=TIME_WORK, nargs='?')
parser.add_argument('-rt', '-resttime', type=int, help=f'Minutes of rest in a rest sessions (default {TIME_REST})',
default=TIME_REST, nargs='?')
parser.add_argument('-rtl', '-resttimelong', type=int,
help=f'Minutes of rest in a long rest sessions (default {TIME_REST_LONG})',
default=TIME_REST_LONG, nargs='?')
parser.add_argument('-mws', '-maxworksessions', type=int,
help=f'Number of work sessions cycles before long rest session (default {SESSIONS_WORK_MAX})',
default=SESSIONS_WORK_MAX, nargs='?')
parser.add_argument('-ll', '-looplimit', type=int,
help=f'Maximum number of total sessions (default 9999)', default=LOOP_LIMIT, nargs='?')
parser.add_argument('-log', '-logsessions', action='store_true',
                    help='Should sessions be logged (default False)')
arguments = vars(parser.parse_args())
time_work = arguments['wt']
time_rest = arguments['rt']
time_rest_long = arguments['rtl']
sessions_work_max = arguments['mws']
loop_lim = arguments['ll']
log_sessions = arguments['log']
# Core
def run():
target_minutes = time_work
work_sessions = 0
started = False
for _ in range(0, loop_lim):
        if target_minutes == time_work and work_sessions >= sessions_work_max and started:
            target_minutes = time_rest_long
            work_sessions = 0  # reset the cycle; otherwise every work session after the first long rest triggers another long rest
elif target_minutes == time_work and started:
target_minutes = time_rest
work_sessions += 1
elif not started:
started = True
else:
target_minutes = time_work
timer(target_minutes)
        if log_sessions:
            write_log(target_minutes)
def timer(target_minutes: int) -> int:
time_target = create_target_time(target_minutes, time())
while time() < time_target:
tick(time_target)
sleep(1)
return target_minutes
def write_log(minutes: int, testing=False):
    with open('session_log.csv', 'a', newline='') as csvfile:  # append; 'w' overwrote the log on every session
log_writer = csv.writer(csvfile, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)
today = datetime.datetime.now(datetime.timezone.utc)
log_writer.writerow([today, minutes])
def tick(time_target: float, broadcast=True):
if broadcast:
print(create_message(time_target))
def create_message(time_target: float) -> str:
    time_left = time_target - time()
    minutes = floor(time_left / ONE_MINUTE)
    seconds = round(time_left - minutes * ONE_MINUTE)
    message = f"{minutes}:{seconds:02d}"
    return message
def create_target_time(target_minutes: int, current_time: float) -> float:
return current_time + target_minutes * ONE_MINUTE
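# Added note (not in the original): create_target_time is pure, so it can be
# sanity-checked without waiting on the wall clock:
#   create_target_time(25, 0.0) == 1500.0   # 25 minutes expressed in seconds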
if __name__ == "__main__":
run()
| 33.09375 | 115 | 0.664463 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 632 | 0.19893 |
85d8b4fdcc862d0733f6de5e3fdf2b8f4d3ba6b3
| 2,454 |
py
|
Python
|
PinVidderer/PinVidderer.py
|
Gestas/PinVidderer
|
c980906cd77bf9a8cb66be022676e57e9a54702e
|
[
"MIT"
] | null | null | null |
PinVidderer/PinVidderer.py
|
Gestas/PinVidderer
|
c980906cd77bf9a8cb66be022676e57e9a54702e
|
[
"MIT"
] | null | null | null |
PinVidderer/PinVidderer.py
|
Gestas/PinVidderer
|
c980906cd77bf9a8cb66be022676e57e9a54702e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import signal
import click
from .client import Client
from .utils import Utils
utils = Utils()
signal.signal(signal.SIGINT, utils.signal_handler)
class Config(object):
def __init__(self):
self.loglevel = None
pass_config = click.make_pass_decorator(Config, ensure=True)
@click.group()
@click.option(
"--loglevel",
default=None,
type=click.Choice(["ERROR", "WARNING", "INFO", "DEBUG", "CRITICAL"]),
help="Sets the logging level, overriding the value in config.ini. If a loglevel is set here messages will be sent "
"to the console as well as the log file.",
)
@pass_config
def cli(config, loglevel):
"""A Bookmarked Video Downloader."""
# signal.signal(signal.SIGINT, utils.signal_handler)
config.loglevel = loglevel
@cli.command(help="Setup PinVidderer.")
@pass_config
def setup(config):
"""Setup PinVidderer."""
config.client = Client(loglevel=config.loglevel, is_setup=True)
@cli.command(help="Start watching Pinboard.")
@pass_config
def start(config):
"""Start watching Pinboard."""
config.client = Client(loglevel=config.loglevel)
client = config.client
client.start()
@cli.command(help="Run once for a single URL.")
@pass_config
@click.argument("url", nargs=1)
def runonce(config, url):
"""Downloads a single video from <URL>."""
config.client = Client(loglevel=config.loglevel)
client = config.client
client.runonce(url)
@cli.command(help="Get the current status and recent history.")
@pass_config
def status(config):
"""Get the current status and recent history."""
config.client = Client(loglevel=config.loglevel)
client = config.client
client.status()
@cli.command(help="Get the history.")
@click.option("-h", "--human", is_flag=True)
@click.option("-f", "--failed-only", is_flag=True)
@pass_config
def get_history(config, human, failed_only):
"""View the history."""
config.client = Client(loglevel=config.loglevel)
client = config.client
client.get_history(human, failed_only)
@cli.command(help="Delete an event from the history.")
@click.option("--all", "all_", is_flag=True)
@click.option("-u", "--url")
@pass_config
def remove_from_history(config, url, all_):
"""Remove the event for <URL> from the history."""
config.client = Client(loglevel=config.loglevel)
client = config.client
client.remove_from_history(url, all_)
if __name__ == "__main__":
cli()
| 25.831579 | 119 | 0.698859 | 74 | 0.030155 | 0 | 0 | 2,083 | 0.848818 | 0 | 0 | 770 | 0.313773 |
85db5b6d5b5a64186bb3b9c04d0a279e4a5f0c0a
| 998 |
py
|
Python
|
hw1/1.6/encrpyt_equals_decrypt.py
|
rocke97/crypto
|
89c4e595adf74558e12ceb1762025fd2f0275fec
|
[
"MIT"
] | null | null | null |
hw1/1.6/encrpyt_equals_decrypt.py
|
rocke97/crypto
|
89c4e595adf74558e12ceb1762025fd2f0275fec
|
[
"MIT"
] | null | null | null |
hw1/1.6/encrpyt_equals_decrypt.py
|
rocke97/crypto
|
89c4e595adf74558e12ceb1762025fd2f0275fec
|
[
"MIT"
] | null | null | null |
from itertools import count
from string import ascii_lowercase
plain_text = 'july'
results_file = open('results.txt', 'w')
letters_to_numbers = dict(zip(ascii_lowercase, count(0)))
numbers_to_letters = dict(zip(count(0), ascii_lowercase))
plain_text_numbers = [letters_to_numbers[letter] for letter in plain_text]
for i in range(0, 26):
#encrypt the plain text by shifting by some number
cipher_numbers = [(num + i)%26 for num in plain_text_numbers]
#try to decrypt the plain text by shifting forward by the same number (encrypt function = decrypt function)
decrypted_cipher_numbers = [(num + i)%26 for num in cipher_numbers]
attempted_plain_text = [numbers_to_letters[num] for num in decrypted_cipher_numbers]
    if ''.join(attempted_plain_text) == plain_text:  # if re-encrypting recovers the plain text, this key works
        print('At shift = ' + str(i) + ':')
        print('Plain text: ' + plain_text)
        print('Attempted Plain Text Decrypt: ' + ''.join(attempted_plain_text))
        results_file.write('shift = ' + str(i) + '\n')  # actually use the results file opened above
results_file.close()
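# Added hedged aside: a shift cipher is its own inverse exactly when applying
# it twice is the identity, i.e. 2*k = 0 (mod 26). Over the 26 possible keys
# that leaves k = 0 (trivial) and k = 13 (ROT13), matching the loop above.
self_inverse_keys = [k for k in range(26) if (2 * k) % 26 == 0]
print('Self-inverse shift keys:', self_inverse_keys)  # expected: [0, 13]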
| 52.526316 | 111 | 0.728457 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 287 | 0.287575 |
85db89656ff34bccb3df57eb36eff9c756872dce
| 2,663 |
py
|
Python
|
generator.py
|
mann1/DD_SIM_Template
|
84c7787b6b3c52f08e7031114894c98416c02fcf
|
[
"MIT"
] | null | null | null |
generator.py
|
mann1/DD_SIM_Template
|
84c7787b6b3c52f08e7031114894c98416c02fcf
|
[
"MIT"
] | null | null | null |
generator.py
|
mann1/DD_SIM_Template
|
84c7787b6b3c52f08e7031114894c98416c02fcf
|
[
"MIT"
] | null | null | null |
import os, pickle
import numpy as np
import tensorflow as tf
def read_pickle(file_name):
    objects = None  # returned as-is for an empty file instead of raising NameError
    with open(file_name, "rb") as openfile:
        while True:
            try:
                # keep loading; the last object pickled into the file wins
                objects = pickle.load(openfile)
            except EOFError:
                break
    return objects
class Generator(tf.keras.utils.Sequence):
def __init__(self, DATASET_PATH, BATCH_SIZE=32):
""" Initialize Generator object.
Args
DATASET_PATH : Path to folder containing individual folders named by their class names
BATCH_SIZE : The size of the batches to generate.
"""
self.batch_size = BATCH_SIZE
self.load_data(DATASET_PATH)
self.create_data_batches()
def load_data(self, DATASET_PATH):
cwd = os.getcwd()
DATA_PATH = os.path.join(cwd, DATASET_PATH)
if DATASET_PATH == 'datasets/train':
data_file = os.path.join(DATA_PATH, "train_data.pickle")
target_file = os.path.join(DATA_PATH, "train_target.pickle")
elif DATASET_PATH == 'datasets/val':
data_file = os.path.join(DATA_PATH, "val_data.pickle")
target_file = os.path.join(DATA_PATH, "val_target.pickle")
self.data = read_pickle(data_file)
self.target = read_pickle(target_file)
assert len(self.data) == len(self.target)
def create_data_batches(self):
# Divide data and target into groups of BATCH_SIZE
self.data_batchs = [[self.data[x % len(self.data)] for x in range(i, i + self.batch_size)]
for i in range(0, len(self.data), self.batch_size)]
self.target_batchs = [[self.target[x % len(self.target)] for x in range(i, i + self.batch_size)]
for i in range(0, len(self.target), self.batch_size)]
def __len__(self):
"""
Number of batches for each Epoch.
"""
return len(self.data_batchs)
def __getitem__(self, index):
"""
Keras sequence method for generating batches.
"""
if index >= len(self.data_batchs):
index = index % len(self.data_batchs)
data_batch = self.data_batchs[index]
target_batch = self.target_batchs[index]
return np.array(data_batch), np.array(target_batch)
if __name__ == "__main__":
train_generator = Generator('datasets/train')
val_generator = Generator('datasets/val')
print(len(train_generator))
print(len(val_generator))
data_batch, target_batch = train_generator.__getitem__(0)
print(data_batch.shape)
print(target_batch.shape)
| 32.876543 | 108 | 0.613969 | 2,059 | 0.773188 | 0 | 0 | 0 | 0 | 0 | 0 | 567 | 0.212918 |
85db99fa2aa9b948ffca4017b69512e862fe9571
| 5,096 |
py
|
Python
|
src/mlb/schedule/schedule_view.py
|
benbrandt22/MagTagMLB
|
1ec347743bc7df9339fb8e3de0f86ea037b7694f
|
[
"MIT"
] | null | null | null |
src/mlb/schedule/schedule_view.py
|
benbrandt22/MagTagMLB
|
1ec347743bc7df9339fb8e3de0f86ea037b7694f
|
[
"MIT"
] | null | null | null |
src/mlb/schedule/schedule_view.py
|
benbrandt22/MagTagMLB
|
1ec347743bc7df9339fb8e3de0f86ea037b7694f
|
[
"MIT"
] | null | null | null |
from mlb.models.game_detail import GameDetail
import time
import board
import displayio
from adafruit_display_text import label
from adafruit_display_shapes.roundrect import RoundRect
import fonts.fonts as FONTS
from mlb.schedule.schedule_view_model import ScheduleViewModel
from time_utils import day_of_week, month_name_short, relative_day, utc_to_local, month_name, hour_12, ampm
class ScheduleView:
# (Display 296 x 128)
def __init__(self, model: ScheduleViewModel):
self.model = model
def render(self):
display = board.DISPLAY
# wait until we can draw
time.sleep(display.time_to_refresh)
# main group to hold everything
main_group = displayio.Group()
# white background. Scaled to save RAM
bg_bitmap = displayio.Bitmap(display.width // 8, display.height // 8, 1)
bg_palette = displayio.Palette(1)
bg_palette[0] = 0xFFFFFF
bg_sprite = displayio.TileGrid(bg_bitmap, x=0, y=0, pixel_shader=bg_palette)
bg_group = displayio.Group(scale=8)
bg_group.append(bg_sprite)
main_group.append(bg_group)
game1_group = self._single_game_group(self.model.game1)
game1_group.x = 0
game1_group.y = 0
game2_group = self._single_game_group(self.model.game2)
game2_group.x = 99
game2_group.y = 0
game3_group = self._single_game_group(self.model.game3)
game3_group.x = 198
game3_group.y = 0
main_group.append(game1_group)
main_group.append(game2_group)
main_group.append(game3_group)
# show the main group and refresh.
display.show(main_group)
display.refresh()
def _single_game_group(self, game: GameDetail):
game_group = displayio.Group()
if game is None:
return game_group
roundrect = RoundRect(5, 5, 88, 118, 10, fill=0xFFFFFF, outline=0x555555, stroke=3)
game_group.append(roundrect)
gametime_local = utc_to_local(game.dateTimeUtc)
day_text = ( relative_day(gametime_local) or day_of_week(gametime_local) )
date_text = f'{month_name(gametime_local)} {gametime_local.day}'
time_text = f'{hour_12(gametime_local)}:{gametime_local.minute:02d} {ampm(gametime_local)}'
day_label = label.Label(FONTS.OpenSans_12, text=day_text, color=0x000000)
day_label.anchor_point = (0.5, 0)
day_label.anchored_position = (49, 11)
game_group.append(day_label)
date_label = label.Label(FONTS.OpenSans_12, text=date_text, color=0x000000)
date_label.anchor_point = (0.5, 0)
date_label.anchored_position = (49, 25)
game_group.append(date_label)
time_label = label.Label(FONTS.OpenSans_12, text=time_text, color=0x000000)
time_label.anchor_point = (0.5, 0)
time_label.anchored_position = (49, 39)
game_group.append(time_label)
#Teams
if game.isPreview: #(no score to show)
away_team = label.Label(FONTS.OpenSans_Bold_18, text=f"{game.away.teamAbbreviation}", color=0x000000)
away_team.anchor_point = (0.5, 0)
away_team.anchored_position = (49, 58)
game_group.append(away_team)
at_label = label.Label(FONTS.OpenSans_12, text='@', color=0x000000)
at_label.anchor_point = (0.5, 0)
at_label.anchored_position = (49, 75)
game_group.append(at_label)
home_team = label.Label(FONTS.OpenSans_Bold_18, text=f"{game.home.teamAbbreviation}", color=0x000000)
home_team.anchor_point = (0.5, 0)
home_team.anchored_position = (49, 90)
game_group.append(home_team)
else:
team_y = 58
for team in [ game.away, game.home ]:
team_abbrev = label.Label(FONTS.OpenSans_Bold_18, text=f"{team.teamAbbreviation}", color=0x000000)
team_abbrev.anchor_point = (0, 0)
team_abbrev.anchored_position = (15, team_y)
game_group.append(team_abbrev)
score = label.Label(FONTS.OpenSans_Bold_18, text=f"{team.runs}", color=0x000000)
score.anchor_point = (1, 0)
score.anchored_position = (84, team_y)
game_group.append(score)
team_y = team_y + 20
if game.isLive or game.isFinal:
# show status text at the bottom
status_text = game.detailedState if game.isStatusExceptional else game.abstractGameState
if game.isLive and not game.isStatusExceptional:
status_text = f'{game.inningHalf} {game.currentInningOrdinal}'
if game.isFinal and game.isExtraInnings:
status_text = f'{game.abstractGameState} / {game.inningCount}'
status_label = label.Label(FONTS.OpenSans_12, text=status_text, color=0x000000)
status_label.anchor_point = (0.5, 0)
status_label.anchored_position = (49, 105)
game_group.append(status_label)
return game_group
| 38.315789 | 114 | 0.649333 | 4,712 | 0.924647 | 0 | 0 | 0 | 0 | 0 | 0 | 537 | 0.105377 |
85dc9bd56f92f2f2d3c556c7f4e9a56721c6e747
| 1,307 |
py
|
Python
|
bootcamp/chapter-1/strings.py
|
pushkar2112/Python-practice
|
75f88eaa2b4f3c47570b1a11e0e221436551ce89
|
[
"Apache-2.0"
] | 1 |
2021-11-23T08:36:43.000Z
|
2021-11-23T08:36:43.000Z
|
bootcamp/chapter-1/strings.py
|
pushkar2112/Python-practice
|
75f88eaa2b4f3c47570b1a11e0e221436551ce89
|
[
"Apache-2.0"
] | 1 |
2021-07-18T12:39:40.000Z
|
2021-09-08T09:48:16.000Z
|
bootcamp/chapter-1/strings.py
|
pushkar2112/Python-practice
|
75f88eaa2b4f3c47570b1a11e0e221436551ce89
|
[
"Apache-2.0"
] | null | null | null |
# Strings are used in Python to record text information, such as names.
# Strings in Python are actually a sequence, which basically means Python keeps track
# of every element in the string as a sequence.
# For example, Python understands the string 'hello' to be a sequence of letters in a specific order.
# This means we will be able to use indexing to grab particular letters
# (like the first letter, or the last letter).
s1 = 'hello'
s2 = "This is a sample string"
print('This is another sample string')
print('it"s an example to show how to use a quote in a quote')
# len function on string
print(len(s2))
# String slicing
# string indexes starts from 0 when going left to right
# -1 from right
# [start:stop:step]
print(s2[5::])
# Reversing a string(shortcut)
print(s2[::-1])
# STRING PROPERTIES
# It's important to note that strings have an important property known as immutability.
# This means that once a string is created, the elements within it can not be changed or replaced.
# String Concatenation
print(s1 + s2)
# REPETITION
print(s1 * 5)
# Built-in Methods
cap = 'HELLO PEOPLE'
print(cap.lower())
cap = cap.lower()
print(cap.upper())
print(cap.split())
print(cap.split('h'))
#String formatting
print('this is sample formatting and {}'.format(s2))
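# Added aside (hedged; assumes Python 3.6+): f-strings are the newer
# alternative to .format() for the same interpolation shown above
print(f'this is sample formatting and {s2}')
print(f'{s1.upper()} has {len(s1)} letters')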
| 25.627451 | 102 | 0.719204 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,050 | 0.803366 |
85dcdeab8f386b3045fce501c4a13cd8e441b56a
| 1,138 |
py
|
Python
|
FindFT_NyquistFreq.py
|
PrabhjotKaurGosal/AudioAnalysisScripts
|
a752f62b2634022c1c2737b21998da218cef4dff
|
[
"MIT"
] | null | null | null |
FindFT_NyquistFreq.py
|
PrabhjotKaurGosal/AudioAnalysisScripts
|
a752f62b2634022c1c2737b21998da218cef4dff
|
[
"MIT"
] | null | null | null |
FindFT_NyquistFreq.py
|
PrabhjotKaurGosal/AudioAnalysisScripts
|
a752f62b2634022c1c2737b21998da218cef4dff
|
[
"MIT"
] | null | null | null |
# This code finds the Fourier Tranform of a signal and the Nyquist frequency
import matplotlib.pyplot as plt
import numpy as np
import librosa as lr
from scipy import signal
if __name__ == "__main__":
# Read the audio files
ch1, sfreq = lr.load("ch1.wav", sr=44100)
ch2, sfreq = lr.load("ch2.wav", sr=44100)
ch3, sfreq = lr.load("ch3.wav", sr=44100)
    # Find the spectrogram of the signal
f,t, Sxx = signal.spectrogram(ch1, fs=sfreq)
plt.pcolormesh(t, f, Sxx, shading='gouraud')
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [sec]')
plt.show()
ind_max = np.unravel_index(np.argmax(Sxx, axis=None), Sxx.shape)
ind_min = np.unravel_index(np.argmin(Sxx, axis=None), Sxx.shape)
row_max = ind_max[0]
col_max = ind_max[1]
row_min = ind_min[0]
col_min = ind_min[1]
Bandwidth = Sxx[row_max][col_max] - Sxx[row_min][col_min]
    fsample = 2*Bandwidth  # the Nyquist rate: sample at more than twice the bandwidth
print("The sampling frequency of the signal must be greater than: ", fsample)
| 33.470588 | 81 | 0.681019 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 309 | 0.271529 |
85dddc830d151d3b583e5d23116cb924afd1cfe8
| 2,106 |
py
|
Python
|
src/platform_controller/scripts/controlTiltMotors.py
|
ahmohamed1/activeStereoVisionPlatform
|
6c928ca242e4de68c7b15a8748bff1d9f7fa1382
|
[
"MIT"
] | null | null | null |
src/platform_controller/scripts/controlTiltMotors.py
|
ahmohamed1/activeStereoVisionPlatform
|
6c928ca242e4de68c7b15a8748bff1d9f7fa1382
|
[
"MIT"
] | null | null | null |
src/platform_controller/scripts/controlTiltMotors.py
|
ahmohamed1/activeStereoVisionPlatform
|
6c928ca242e4de68c7b15a8748bff1d9f7fa1382
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import rospy
import actionlib
from control_msgs.msg import *
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
PI = 3.14159265359
class TiltMotorController:
def __init__(self):
self.leftMotor_publisher = rospy.Publisher('/left_motor_tilt/command', Float64, queue_size = 2)
self.rightMotor_publisher = rospy.Publisher('/right_motor_tilt/command', Float64, queue_size = 2)
self.leftMotorState_publisher = rospy.Publisher('/left/tilt/angle', Float64, queue_size = 2)
self.rightMotorState_publisher = rospy.Publisher('/right/tilt/angle', Float64, queue_size = 2)
# Alternative command topics
self.right_motor_subscriper = rospy.Subscriber('/right/tilt/move', Float64, self.right_motor_callback)
self.left_motor_subscriper = rospy.Subscriber("/left/tilt/move", Float64, self.left_motor_callback)
self.joint_command = rospy.Subscriber('/joint_states', JointState, self.jointCommandCb)
def rad2Deg(self, val):
return val * 180 / PI
def deg2Rad(self, val):
return val * PI / 180
def jointCommandCb(self, msg):
        leftMotorState = Float64()   # instantiate the message; binding the class itself was a bug
        rightMotorState = Float64()
left = msg.position[0]
right = msg.position[1]
# print (left, right)
leftMotorState.data = self.rad2Deg(left)
rightMotorState.data = self.rad2Deg(right-10)
self.leftMotorState_publisher.publish(leftMotorState)
self.rightMotorState_publisher.publish(rightMotorState)
def right_motor_callback(self, msg):
        rad = Float64()
        rad.data = self.deg2Rad(msg.data)  # was 'rad.date', a typo that never set the message payload
self.rightMotor_publisher.publish(rad)
def left_motor_callback(self, msg):
        rad = Float64()
        rad.data = self.deg2Rad(msg.data)
print(rad)
self.leftMotor_publisher.publish(rad)
def controlLoop(self):
"""
Runs the control loop
"""
        rate = rospy.Rate(15)  # 15hz
while not rospy.is_shutdown():
rate.sleep()
def start(self):
"""
Starts the control loop and runs spin
"""
self.controlLoop()
def main():
rospy.init_node('TiltMotorController')
tiltMotorController = TiltMotorController()
tiltMotorController.start()
if __name__=='__main__':
main()
exit()
| 23.931818 | 104 | 0.746439 | 1,754 | 0.832858 | 0 | 0 | 0 | 0 | 0 | 0 | 331 | 0.15717 |
85dde154e71416994a5fa1e8b1afe91eea13927c
| 14,888 |
py
|
Python
|
py/Parser.py
|
Sqazine/ComputeDuck
|
d307d88a24601d433aa7507ea90000207a34e1f0
|
[
"Apache-2.0"
] | 2 |
2021-12-05T12:38:26.000Z
|
2022-03-09T02:24:44.000Z
|
py/Parser.py
|
Sqazine/ComputeDuck
|
d307d88a24601d433aa7507ea90000207a34e1f0
|
[
"Apache-2.0"
] | null | null | null |
py/Parser.py
|
Sqazine/ComputeDuck
|
d307d88a24601d433aa7507ea90000207a34e1f0
|
[
"Apache-2.0"
] | null | null | null |
from enum import IntEnum
from typing import Any
from Ast import Stmt
from Ast import Expr
from Token import Token, TokenType
from Utils import Assert
from Ast import AstType, ArrayExpr, BoolExpr, ExprStmt, FunctionCallExpr, FunctionStmt, GroupExpr, IdentifierExpr, IfStmt, IndexExpr, InfixExpr, NilExpr, NumExpr, PrefixExpr, ReturnStmt, ScopeStmt, StrExpr, StructCallExpr, StructStmt, VarStmt, WhileStmt, RefExpr,LambdaExpr
class Precedence(IntEnum):
LOWEST = 0, # ,
ASSIGN = 1, # =
OR = 2, # or
AND = 3, # and
EQUAL = 4, # == !=
COMPARE = 5, # < <= > >=
ADD_PLUS = 6, # + -
MUL_DIV = 7, # * /
PREFIX = 8, # !
INFIX = 9, # [] () .
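    # Added worked example (comment only): with these levels, parsing
    # "a = 1 + 2 * 3" binds MUL_DIV (7) before ADD_PLUS (6) before ASSIGN (1),
    # so ParseExpr yields a = (1 + (2 * 3)).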
class Parser:
__curPos: int = 0
__tokens: list[Token] = []
__prefixFunctions: dict[TokenType, Any] = {}
__infixFunctions: dict[TokenType, Any] = {}
__precedence: dict[TokenType, Any] = {}
def __init__(self) -> None:
self.__curPos: int = 0
self.__tokens: list[Token] = []
self.__prefixFunctions: dict[TokenType, Any] = {}
self.__infixFunctions: dict[TokenType, Any] = {}
self.__precedence: dict[TokenType, Any] = {}
self.__prefixFunctions = {
TokenType.IDENTIFIER: self.ParseIdentifierExpr,
TokenType.NUMBER: self.ParseNumExpr,
TokenType.STRING: self.ParseStrExpr,
TokenType.NIL: self.ParseNilExpr,
TokenType.TRUE: self.ParseTrueExpr,
TokenType.FALSE: self.ParseFalseExpr,
TokenType.MINUS: self.ParsePrefixExpr,
TokenType.NOT: self.ParsePrefixExpr,
TokenType.LPAREN: self.ParseGroupExpr,
TokenType.LBRACKET: self.ParseArrayExpr,
TokenType.REF:self.ParseRefExpr,
TokenType.LAMBDA:self.ParseLambdaExpr,
}
self.__infixFunctions = {
TokenType.EQUAL: self.ParseInfixExpr,
TokenType.EQUAL_EQUAL: self.ParseInfixExpr,
TokenType.BANG_EQUAL: self.ParseInfixExpr,
TokenType.LESS: self.ParseInfixExpr,
TokenType.LESS_EQUAL: self.ParseInfixExpr,
TokenType.GREATER: self.ParseInfixExpr,
TokenType.GREATER_EQUAL: self.ParseInfixExpr,
TokenType.PLUS: self.ParseInfixExpr,
TokenType.MINUS: self.ParseInfixExpr,
TokenType.ASTERISK: self.ParseInfixExpr,
TokenType.SLASH: self.ParseInfixExpr,
TokenType.LPAREN: self.ParseFunctionCallExpr,
TokenType.LBRACKET: self.ParseIndexExpr,
TokenType.AND: self.ParseInfixExpr,
TokenType.OR: self.ParseInfixExpr,
TokenType.DOT: self.ParseStructCallExpr,
}
self.__precedence = {
TokenType.EQUAL: Precedence.ASSIGN,
TokenType.EQUAL_EQUAL: Precedence.EQUAL,
TokenType.BANG_EQUAL: Precedence.EQUAL,
TokenType.LESS: Precedence.COMPARE,
TokenType.LESS_EQUAL: Precedence.COMPARE,
TokenType.GREATER: Precedence.COMPARE,
TokenType.GREATER_EQUAL: Precedence.COMPARE,
TokenType.PLUS: Precedence.ADD_PLUS,
TokenType.MINUS: Precedence.ADD_PLUS,
TokenType.ASTERISK: Precedence.MUL_DIV,
TokenType.SLASH: Precedence.MUL_DIV,
TokenType.LBRACKET: Precedence.INFIX,
TokenType.LPAREN: Precedence.INFIX,
TokenType.AND: Precedence.AND,
TokenType.OR: Precedence.OR,
TokenType.DOT: Precedence.INFIX
}
def Parse(self, tokens: list[Token]) -> list[Stmt]:
self.__curPos = 0
self.__tokens = tokens
stmts: list[Stmt] = []
while (not self.IsMatchCurToken(TokenType.END)):
stmts.append(self.ParseStmt())
return stmts
def IsAtEnd(self) -> bool:
return self.__curPos >= len(self.__tokens)
def Consume(self, type, errMsg) -> Token:
if self.IsMatchCurToken(type):
return self.GetCurTokenAndStepOnce()
Assert("[line "+str(self.GetCurToken().line)+"]:"+errMsg)
return Token(TokenType.END, "", 0)
def GetCurToken(self) -> Token:
if not self.IsAtEnd():
return self.__tokens[self.__curPos]
return self.__tokens[-1]
def GetCurTokenAndStepOnce(self) -> Token:
if not self.IsAtEnd():
result = self.__tokens[self.__curPos]
self.__curPos += 1
return result
return self.__tokens[-1]
def GetCurTokenPrecedence(self) -> Token:
if self.__precedence.get(self.GetCurToken().type)==None:
return Precedence.LOWEST
return self.__precedence.get(self.GetCurToken().type)
    def GetNextToken(self) -> Token:
        if self.__curPos + 1 < len(self.__tokens):  # list.count is a method, not the length
            return self.__tokens[self.__curPos + 1]
        return self.__tokens[-1]
    def GetNextTokenAndStepOnce(self) -> Token:
        if self.__curPos + 1 < len(self.__tokens):
            self.__curPos += 1
            return self.__tokens[self.__curPos]
        return self.__tokens[-1]
    def GetNextTokenPrecedence(self) -> Token:
        # dict.get takes its default positionally; 'default=' is not a valid keyword
        return self.__precedence.get(self.GetNextToken().type, Precedence.LOWEST)
def IsMatchCurToken(self, type) -> bool:
return self.GetCurToken().type == type
def IsMatchCurTokenAndStepOnce(self, type) -> bool:
if self.IsMatchCurToken(type):
self.__curPos += 1
return True
return False
def IsMatchNextToken(self, type) -> bool:
return self.GetNextToken().type == type
def IsMatchNextTokenAndStepOnce(self, type) -> bool:
if self.IsMatchNextToken(type):
self.__curPos += 1
return True
return False
def ParseStmt(self) -> Stmt:
if self.IsMatchCurToken(TokenType.VAR):
return self.ParseVarStmt()
elif self.IsMatchCurToken(TokenType.RETURN):
return self.ParseReturnStmt()
elif self.IsMatchCurToken(TokenType.IF):
return self.ParseIfStmt()
elif self.IsMatchCurToken(TokenType.LBRACE):
return self.ParseScopeStmt()
elif self.IsMatchCurToken(TokenType.WHILE):
return self.ParseWhileStmt()
elif self.IsMatchCurToken(TokenType.FUNCTION):
return self.ParseFunctionStmt()
elif self.IsMatchCurToken(TokenType.STRUCT):
return self.ParseStructStmt()
else:
return self.ParseExprStmt()
def ParseExprStmt(self) -> Stmt:
exprStmt = ExprStmt(self.ParseExpr())
self.Consume(TokenType.SEMICOLON, "Expect ';' after expr stmt.")
return exprStmt
def ParseVarStmt(self) -> Stmt:
        self.Consume(TokenType.VAR, "Expect 'var' keyword")
        name = self.ParseIdentifierExpr()
value = NilExpr()
if self.IsMatchCurTokenAndStepOnce(TokenType.EQUAL):
value = self.ParseExpr()
self.Consume(TokenType.SEMICOLON, "Expect ';' after var stmt")
return VarStmt(name, value)
def ParseReturnStmt(self) -> Stmt:
        self.Consume(TokenType.RETURN, "Expect 'return' keyword")
expr = None
if not self.IsMatchCurToken(TokenType.SEMICOLON):
expr = self.ParseExpr()
self.Consume(TokenType.SEMICOLON, "Expect ';' after return stmt.")
return ReturnStmt(expr)
def ParseIfStmt(self) -> Stmt:
        self.Consume(TokenType.IF, "Expect 'if' keyword.")
self.Consume(TokenType.LPAREN, "Expect '(' after 'if'.")
condition = self.ParseExpr()
self.Consume(TokenType.RPAREN, "Expect ')' after if condition")
thenBranch = self.ParseStmt()
elseBranch = None
if self.IsMatchCurTokenAndStepOnce(TokenType.ELSE):
elseBranch = self.ParseStmt()
return IfStmt(condition, thenBranch, elseBranch)
def ParseScopeStmt(self) -> Stmt:
self.Consume(TokenType.LBRACE, "Expect '{'.")
scopeStmt = ScopeStmt([])
while (not self.IsMatchCurToken(TokenType.RBRACE)):
scopeStmt.stmts.append(self.ParseStmt())
self.Consume(TokenType.RBRACE, "Expect '}'.")
return scopeStmt
def ParseWhileStmt(self) -> Stmt:
self.Consume(TokenType.WHILE, "Expect 'while' keyword.")
self.Consume(TokenType.LPAREN, "Expect '(' after 'while'.")
condition = self.ParseExpr()
self.Consume(TokenType.RPAREN,
"Expect ')' after while stmt's condition")
body = self.ParseStmt()
return WhileStmt(condition, body)
def ParseFunctionStmt(self) -> Stmt:
self.Consume(TokenType.FUNCTION, "Expect 'fn' keyword")
funcStmt = FunctionStmt("", [], None)
funcStmt.name = self.ParseIdentifierExpr().Stringify()
self.Consume(TokenType.LPAREN, "Expect '(' after function name")
if (not self.IsMatchCurToken(TokenType.RPAREN)):
idenExpr = self.ParseIdentifierExpr()
funcStmt.parameters.append(idenExpr)
while self.IsMatchCurTokenAndStepOnce(TokenType.COMMA):
idenExpr = self.ParseIdentifierExpr()
funcStmt.parameters.append(idenExpr)
self.Consume(TokenType.RPAREN, "Expect ')' after function expr's '('")
funcStmt.body = self.ParseScopeStmt()
return funcStmt
def ParseStructStmt(self) -> Stmt:
        self.Consume(TokenType.STRUCT, "Expect 'struct' keyword")
structStmt = StructStmt("", [])
structStmt.name = self.ParseIdentifierExpr().Stringify()
self.Consume(TokenType.LBRACE, "Expect '{' after struct name")
while not self.IsMatchCurToken(TokenType.RBRACE):
structStmt.members.append(self.ParseVarStmt())
self.Consume(TokenType.RBRACE, "Expect '}' after struct's '{'")
return structStmt
def ParseExpr(self, precedence=Precedence.LOWEST) -> Expr:
        prefixFn = self.__prefixFunctions.get(self.GetCurToken().type)
        if prefixFn is None:
            print("no prefix definition for: " +
                  self.GetCurTokenAndStepOnce().literal)
            return NilExpr()
leftExpr = prefixFn()
while (not self.IsMatchCurToken(TokenType.SEMICOLON) and precedence < self.GetCurTokenPrecedence()):
            if self.__infixFunctions.get(self.GetCurToken().type) is None:
return leftExpr
infixFn = self.__infixFunctions[self.GetCurToken().type]
leftExpr = infixFn(leftExpr)
return leftExpr
def ParseIdentifierExpr(self) -> Expr:
        literal = self.Consume(TokenType.IDENTIFIER, "Expect identifier, got '" + self.GetCurToken().literal + "'.").literal
return IdentifierExpr(literal)
def ParseNumExpr(self) -> Expr:
numLiteral = self.Consume(
TokenType.NUMBER, "Expect a number literal.").literal
return NumExpr(float(numLiteral))
def ParseStrExpr(self) -> Expr:
        return StrExpr(self.Consume(TokenType.STRING, "Expect a string literal.").literal)
def ParseNilExpr(self) -> Expr:
self.Consume(TokenType.NIL, "Expect 'nil' keyword")
return NilExpr()
def ParseTrueExpr(self) -> Expr:
self.Consume(TokenType.TRUE, "Expect 'true' keyword")
return BoolExpr(True)
def ParseFalseExpr(self) -> Expr:
self.Consume(TokenType.FALSE, "Expect 'false' keyword")
return BoolExpr(False)
def ParseGroupExpr(self) -> Expr:
self.Consume(TokenType.LPAREN, "Expect '('.")
groupExpr = GroupExpr(self.ParseExpr())
self.Consume(TokenType.RPAREN, "Expect ')'.")
return groupExpr
def ParseArrayExpr(self) -> Expr:
self.Consume(TokenType.LBRACKET, "Expect '['.")
arrayExpr = ArrayExpr([])
if (not self.IsMatchCurToken(TokenType.RBRACKET)):
arrayExpr.elements.append(self.ParseExpr())
while self.IsMatchCurTokenAndStepOnce(TokenType.COMMA):
arrayExpr.elements.append(self.ParseExpr())
self.Consume(TokenType.RBRACKET, "Expect ']'.")
return arrayExpr
def ParsePrefixExpr(self) -> Expr:
prefixExpr = PrefixExpr("", None)
prefixExpr.op = self.GetCurTokenAndStepOnce().literal
prefixExpr.right = self.ParseExpr(Precedence.PREFIX)
return prefixExpr
def ParseInfixExpr(self, prefixExpr: Expr) -> Expr:
infixExpr = InfixExpr(None, "", None)
infixExpr.left = prefixExpr
opPrece = self.GetCurTokenPrecedence()
infixExpr.op = self.GetCurTokenAndStepOnce().literal
infixExpr.right = self.ParseExpr(opPrece)
return infixExpr
def ParseIndexExpr(self, prefixExpr: Expr) -> Expr:
self.Consume(TokenType.LBRACKET, "Expect '['.")
indexExpr = IndexExpr(None, None)
indexExpr.ds = prefixExpr
indexExpr.index = self.ParseExpr(Precedence.INFIX)
self.Consume(TokenType.RBRACKET, "Expect ']'.")
return indexExpr
def ParseRefExpr(self)->Expr:
self.Consume(TokenType.REF,"Expect 'ref' keyword.")
refExpr=self.ParseExpr(Precedence.LOWEST)
if refExpr.Type() != AstType.IDENTIFIER:
Assert("Invalid reference type, only variable can be referenced.")
return RefExpr(refExpr)
def ParseLambdaExpr(self)->Expr:
self.Consume(TokenType.LAMBDA,"Expect 'lambda' keyword.")
self.Consume(TokenType.LPAREN,"Expect '(' after keyword 'lambda'.")
parameters: list[IdentifierExpr] = []
body: ScopeStmt = None
if (not self.IsMatchCurToken(TokenType.RPAREN)):
idenExpr = self.ParseIdentifierExpr()
parameters.append(idenExpr)
while self.IsMatchCurTokenAndStepOnce(TokenType.COMMA):
idenExpr = self.ParseIdentifierExpr()
parameters.append(idenExpr)
self.Consume(TokenType.RPAREN, "Expect ')' after lambda expr's '('.")
body = self.ParseScopeStmt()
return LambdaExpr(parameters,body)
def ParseFunctionCallExpr(self, prefixExpr: Expr) -> Expr:
funcCallExpr = FunctionCallExpr("", [])
funcCallExpr.name = prefixExpr
self.Consume(TokenType.LPAREN, "Expect '('.")
if not self.IsMatchCurToken(TokenType.RPAREN):
funcCallExpr.arguments.append(self.ParseExpr())
while self.IsMatchCurTokenAndStepOnce(TokenType.COMMA):
funcCallExpr.arguments.append(self.ParseExpr())
self.Consume(TokenType.RPAREN, "Expect ')'.")
return funcCallExpr
def ParseStructCallExpr(self, prefixExpr: Expr) -> Expr:
self.Consume(TokenType.DOT, "Expect '.'.")
structCallExpr = StructCallExpr(None, None)
structCallExpr.callee = prefixExpr
structCallExpr.callMember = self.ParseExpr(Precedence.INFIX)
return structCallExpr
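# Hedged usage sketch, not part of the original file. It assumes the enclosing
# class is named Parser and takes no constructor arguments, and reuses the
# Token(type, literal, line) signature seen above. The Pratt loop in ParseExpr
# binds '*' tighter than '+' (Precedence.MUL_DIV > Precedence.ADD_PLUS), so
# the tokens for "1 + 2 * 3;" should parse as 1 + (2 * 3).
if __name__ == "__main__":
    demo_tokens = [
        Token(TokenType.NUMBER, "1", 1),
        Token(TokenType.PLUS, "+", 1),
        Token(TokenType.NUMBER, "2", 1),
        Token(TokenType.ASTERISK, "*", 1),
        Token(TokenType.NUMBER, "3", 1),
        Token(TokenType.SEMICOLON, ";", 1),
        Token(TokenType.END, "", 1),
    ]
    demo_stmts = Parser().Parse(demo_tokens)
    print(demo_stmts[0])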
| 39.076115 | 273 | 0.637493 | 14,433 | 0.969438 | 0 | 0 | 0 | 0 | 0 | 0 | 1,059 | 0.071131 |
85e03a75a96c393560650c8bb391a58fe00c64f1
| 302 |
py
|
Python
|
code/color.py
|
Archkitten/sleep
|
dd81d8fe379d8e37c58b101d78fe258588d6c1bc
|
[
"MIT"
] | null | null | null |
code/color.py
|
Archkitten/sleep
|
dd81d8fe379d8e37c58b101d78fe258588d6c1bc
|
[
"MIT"
] | null | null | null |
code/color.py
|
Archkitten/sleep
|
dd81d8fe379d8e37c58b101d78fe258588d6c1bc
|
[
"MIT"
] | null | null | null |
# COLORS
black = "\033[30m"
red = "\033[31m"
green = "\033[32m"
yellow = "\033[33m"
blue = "\033[34m"
magenta = "\033[35m"
cyan = "\033[36m"
white = "\033[37m"
nc = "\033[0m"  # ANSI reset ("no color")
# COLOR TESTING
def test():
print(red + "test")
print(blue + "test2")
print(green + "test3" + "\n" + cyan + "test4" + white)
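# Hedged addition, not in the original file: guard the demo so importing this
# module just for the color constants does not print the test lines.
if __name__ == "__main__":
    test()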
| 17.764706 | 56 | 0.566225 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 138 | 0.456954 |
85e1dc6359b959fbe3bde169c1c1df0d7df72888
| 253 |
py
|
Python
|
database/urls.py
|
shrishtickling/train_coding
|
ba2918ce13379940f359e2ae253987691a00f3a9
|
[
"Apache-2.0"
] | null | null | null |
database/urls.py
|
shrishtickling/train_coding
|
ba2918ce13379940f359e2ae253987691a00f3a9
|
[
"Apache-2.0"
] | null | null | null |
database/urls.py
|
shrishtickling/train_coding
|
ba2918ce13379940f359e2ae253987691a00f3a9
|
[
"Apache-2.0"
] | null | null | null |
from django.urls import path
from . import views
app_name = 'database'
urlpatterns = [
path('update/', views.update),
path('update2/', views.update2),
path('update3/', views.update3),
path('upload-user/', views.create_user_dataset)
]
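# Hedged note, not in the original file: these routes are meant to be mounted
# from the project URLconf, e.g. path('database/', include('database.urls')).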
| 19.461538 | 51 | 0.675889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 0.209486 |
85e2f348622632176aeb4fc8c874b128321e99b9
| 135 |
py
|
Python
|
CodeWars/7 Kyu/Unlucky Days.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
CodeWars/7 Kyu/Unlucky Days.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
CodeWars/7 Kyu/Unlucky Days.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
from datetime import datetime
def unlucky_days(year):
return sum(datetime(year, month, 13).weekday() == 4 for month in range(1,13))
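# Hedged example, not in the original: 2015 had a Friday the 13th in February,
# March and November, so unlucky_days(2015) should return 3.
if __name__ == "__main__":
    print(unlucky_days(2015))  # expected: 3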
| 33.75 | 80 | 0.733333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
85e2fec7419e462992cdef82f856f348913b6d84
| 714 |
py
|
Python
|
backend/errors.py
|
cryptSky/hlsa_task7
|
40365033e24ec147640f828cccc69f3711eedfc0
|
[
"MIT"
] | 1 |
2021-05-20T06:04:13.000Z
|
2021-05-20T06:04:13.000Z
|
backend/errors.py
|
cryptSky/hlsa_task7
|
40365033e24ec147640f828cccc69f3711eedfc0
|
[
"MIT"
] | null | null | null |
backend/errors.py
|
cryptSky/hlsa_task7
|
40365033e24ec147640f828cccc69f3711eedfc0
|
[
"MIT"
] | null | null | null |
from werkzeug.exceptions import HTTPException
class InternalServerError(HTTPException):
pass
class SchemaValidationError(HTTPException):
pass
class UserNotFoundError(HTTPException):
pass
class EmailAlreadyExistError(HTTPException):
pass
errors = {
"InternalServerError": {
"message": "Oops something wrong",
"status": 500
},
"SchemaValidationError": {
"message": "Required fields missing",
"status": 400
},
"UserNotFoundError": {
"message": "User not found in database",
"status": 400
},
"EmailAlreadyExistError": {
"message": "User with specified email already exists in database",
"status": 400
},
}
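# Hedged usage sketch, not part of the original module. flask-restful's Api
# accepts an ``errors`` mapping keyed by exception class name, which matches
# the shape of the dict above (assumes flask and flask_restful are installed).
if __name__ == "__main__":
    from flask import Flask
    from flask_restful import Api
    app = Flask(__name__)
    api = Api(app, errors=errors)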
| 21.636364 | 74 | 0.648459 | 197 | 0.27591 | 0 | 0 | 0 | 0 | 0 | 0 | 284 | 0.397759 |
85e31f8319151021136e63792aab66a8fe4825ad
| 421 |
py
|
Python
|
scripts/read_radar.py
|
jdiasn/raincoat
|
b0249c88f1a5ca22a720285e87be4b06b67705b5
|
[
"MIT"
] | 1 |
2020-04-22T05:41:08.000Z
|
2020-04-22T05:41:08.000Z
|
scripts/read_radar.py
|
jdiasn/raincoat
|
b0249c88f1a5ca22a720285e87be4b06b67705b5
|
[
"MIT"
] | null | null | null |
scripts/read_radar.py
|
jdiasn/raincoat
|
b0249c88f1a5ca22a720285e87be4b06b67705b5
|
[
"MIT"
] | 4 |
2019-01-01T11:33:14.000Z
|
2021-01-04T20:34:43.000Z
|
from raincoat.radarFunctions import getVarTimeRange, getRadarVar
import pandas as pd
data = getRadarVar('../samplefiles/radar/181202_000000_P09_ZEN_compact.nc',
'2001.01.01. 00:00:00',
'Ze')
start = pd.to_datetime('2018-12-02 00:00:00', format='%Y-%m-%d %H:%M:%S')
stop = pd.to_datetime('2018-12-02 01:00:00',format='%Y-%m-%d %H:%M:%S')
data = getVarTimeRange(data,1,2000, start, stop)
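# Hedged sanity check, not in the original script: printing the returned
# object is a cheap way to confirm the time-window restriction took effect.
print(data)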
| 35.083333 | 75 | 0.655582 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 161 | 0.382423 |
85e397373b9dc700b3ec2e1bd8bc94f48fdddec5
| 1,527 |
py
|
Python
|
gecko/geckolib/driver/protocol/reminders.py
|
mmillmor/home_assistant-components
|
625f97413bd6516a2358220a80819b85cc8072c6
|
[
"Apache-2.0"
] | null | null | null |
gecko/geckolib/driver/protocol/reminders.py
|
mmillmor/home_assistant-components
|
625f97413bd6516a2358220a80819b85cc8072c6
|
[
"Apache-2.0"
] | null | null | null |
gecko/geckolib/driver/protocol/reminders.py
|
mmillmor/home_assistant-components
|
625f97413bd6516a2358220a80819b85cc8072c6
|
[
"Apache-2.0"
] | 1 |
2022-03-07T20:04:05.000Z
|
2022-03-07T20:04:05.000Z
|
""" Gecko REQRM/RMREQ handlers """
import logging
import struct
from .packet import GeckoPacketProtocolHandler
REQRM_VERB = b"REQRM"
RMREQ_VERB = b"RMREQ"
_LOGGER = logging.getLogger(__name__)
class GeckoRemindersProtocolHandler(GeckoPacketProtocolHandler):
@staticmethod
def request(seq, **kwargs):
return GeckoRemindersProtocolHandler(
content=b"".join([REQRM_VERB, struct.pack(">B", seq)]), **kwargs
)
@staticmethod
def response(**kwargs):
return GeckoRemindersProtocolHandler(
content=b"".join(
[
RMREQ_VERB,
b"\x01\x01\x00\x01\x02\x1f\x00\x01\x03\x29"
b"\x00\x01\x04\xa9\x02\x01\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
]
),
**kwargs,
)
def __init__(self, **kwargs):
super().__init__(**kwargs)
def can_handle(self, received_bytes: bytes, sender: tuple) -> bool:
return received_bytes.startswith(REQRM_VERB) or received_bytes.startswith(
RMREQ_VERB
)
def handle(self, socket, received_bytes: bytes, sender: tuple):
remainder = received_bytes[5:]
if received_bytes.startswith(REQRM_VERB):
self._sequence = struct.unpack(">B", remainder[0:1])[0]
return # Stay in the handler list
# Otherwise must be RMREQ
self._should_remove_handler = True
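# Hedged usage sketch, not part of the original module: build a REQRM request
# for sequence number 1 and show that the handler recognises RMREQ payloads
# (assumes GeckoPacketProtocolHandler can be constructed from content alone).
if __name__ == "__main__":
    req = GeckoRemindersProtocolHandler.request(1)
    print(req.can_handle(b"RMREQ\x01", ("127.0.0.1", 10022)))  # True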
| 30.54 | 87 | 0.606418 | 1,327 | 0.869024 | 0 | 0 | 643 | 0.421087 | 0 | 0 | 284 | 0.185986 |
85e52bfde40a74e45c8231c717edf8c32b7d97fa
| 376 |
py
|
Python
|
components/studio/apps/migrations/0005_auto_20210209_1244.py
|
aitmlouk/stackn
|
c8029394a15b03796a4864938f9db251b65c7354
|
[
"Apache-2.0"
] | 25 |
2020-05-08T22:24:54.000Z
|
2022-03-11T18:16:58.000Z
|
components/studio/apps/migrations/0005_auto_20210209_1244.py
|
aitmlouk/stackn
|
c8029394a15b03796a4864938f9db251b65c7354
|
[
"Apache-2.0"
] | 75 |
2020-05-08T22:15:59.000Z
|
2021-11-22T10:00:04.000Z
|
components/studio/apps/migrations/0005_auto_20210209_1244.py
|
aitmlouk/stackn
|
c8029394a15b03796a4864938f9db251b65c7354
|
[
"Apache-2.0"
] | 12 |
2020-11-04T13:09:46.000Z
|
2022-03-14T16:22:40.000Z
|
# Generated by Django 2.2.13 on 2021-02-09 12:44
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('apps', '0004_auto_20210209_1243'),
]
operations = [
migrations.RenameField(
model_name='appinstance',
old_name='lab_session_owner',
new_name='owner',
),
]
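# Hedged note, not in the original file: apply with `python manage.py migrate apps`.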
| 19.789474 | 48 | 0.598404 | 290 | 0.771277 | 0 | 0 | 0 | 0 | 0 | 0 | 118 | 0.31383 |
85e79f4d2b450460c3e188d3ec311565e5eee0d2
| 30,714 |
py
|
Python
|
SoundServer.py
|
yoyoberenguer/SoundServer
|
3a824a8f519f205d5f4c277d314cb92732a157b1
|
[
"MIT"
] | null | null | null |
SoundServer.py
|
yoyoberenguer/SoundServer
|
3a824a8f519f205d5f4c277d314cb92732a157b1
|
[
"MIT"
] | null | null | null |
SoundServer.py
|
yoyoberenguer/SoundServer
|
3a824a8f519f205d5f4c277d314cb92732a157b1
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
__version__ = "1.0.1"
try:
import pygame
from pygame import mixer
except ImportError:
raise ImportError("\n<pygame> library is missing on your system."
"\nTry: \n C:\\pip install pygame on a window command prompt.")
from time import time
class SoundObject:
def __init__(self, sound_, priority_: int, name_: str,
channel_: int, obj_id_: int, position_: int, loop_: int = False):
"""
CREATE A SOUND OBJECT CONTAINING CERTAIN ATTRIBUTES (SEE THE
COMPLETE LIST BELOW)
:param sound_ : Sound object; Sound object to play
:param priority_: integer; Define the sound priority (Sound with highest priority have to be stopped with
specific methods)
:param name_ : string; Sound given name (if the object has no name -> str(id(sound_))
:param channel_ : integer; Channel to use (channel where the sound is being played by the mixer)
:param obj_id_ : python int (C long long int); Sound unique ID
:param position_: integer | None ; Sound position for panning sound in stereo.
position must be within range [0...Max display width]
:param loop_ : int; -1 for looping the sound
"""
self.sound = sound_ # sound object to play
self.length = sound_.get_length() # return the length of this sound in seconds
        self.priority = priority_ if 0 <= priority_ <= 2 else 0  # sound priority - lowest to highest (0 - 2)
self.time = time() # timestamp
self.name = name_ # sound name for identification
self.active_channel = channel_ # channel used
self.obj_id = obj_id_ # unique sound id number
self.id = id(self) # class id
# NOTE : new attribute 27/11/2020
# sound position for panning sound on stereo
self.pos = position_ # Sound position for panning method
self.loop = loop_
class SoundControl(object):
def __init__(self, screen_size_, channels_: int = 8):
"""
:param screen_size_: pygame.Rect; Size of the active display
:param channels_ : integer; number of channels to reserved for the sound controller
:return : None
"""
if not isinstance(screen_size_, pygame.Rect):
raise ValueError("\n screen_size_ argument must be a pygame.Rect type, got %s " % type(screen_size_))
if not isinstance(channels_, int):
raise ValueError("\n channels_ argument must be a integer type, got %s " % type(channels_))
assert channels_ >= 1, "\nArgument channel_num_ must be >=1"
if pygame.mixer.get_init() is None:
raise ValueError("\nMixer has not been initialized."
"\nUse pygame.mixer.init() before starting the Sound controller")
self.channel_num = channels_ # channel to init
self.start = mixer.get_num_channels() # get the total number of playback channels
self.end = self.channel_num + self.start # last channel
mixer.set_num_channels(self.end) # sets the number of available channels for the mixer.
mixer.set_reserved(self.end) # reserve channels from being automatically used
self.channels = [mixer.Channel(j + self.start)
for j in range(self.channel_num)] # create a channel object for controlling playback
self.snd_obj = [None] * self.channel_num # list of un-initialised objects
self.channel = self.start # pointer to the bottom of the stack
self.all = list(range(self.start, self.end)) # create a list with all channel number
self.screen_size = screen_size_ # size of the display (used for stereo mode)
def update(self):
"""
THIS METHOD HAS TO BE CALLED FROM THE MAIN LOOP OF YOUR PROGRAM
DETECT SOUNDS THAT HAVE STOPPED TO PLAY ON THE MIXER AND SET THE CHANNEL VALUE TO NONE
"""
i = 0
snd_obj = self.snd_obj
for c in self.channels:
if c:
# Returns True if the mixer is busy mixing any channels.
# If the mixer is idle then this return False.
if not c.get_busy():
snd_obj[i] = None
i += 1
# SINGLE SOUND
def update_sound_panning(self, new_x_: int, volume_: float, name_=None, id_=None) -> None:
"""
PANNING IS THE DISTRIBUTION OF A SOUND SIGNAL INTO A NEW STEREO OR MULTI-CHANNEL SOUND FIELD
CHANGE PANNING FOR ALL SOUNDS BEING PLAYED ON THE MIXER.
ADJUST THE PANNING OF A GIVEN SOUND (FOUND THE SOUND OBJECT WITH AN EXPLICIT NAME OR ID).
AT LEAST ONE SEARCH METHOD MUST BE DEFINED.
:param new_x_ : integer; new sound position in the display. Value must be in range [0, Max width]
:param volume_ : float; Sound volume (adjust all sound being played by the mixer)
value must be in range [0 ... 1.0]
        :param name_   : string; Given sound name (name given at the time of the SoundObject construction)
:param id_ : int | None; Default None. ID number such as object_id_ = id(sound_).
:return : None
"""
assert 0 <= new_x_ <= self.screen_size.w, \
"\nArgument new_x_ value must be in range (0, %s) got %s" % (self.screen_size.w, new_x_)
# SET THE VOLUME IN CASE OF AN INPUT ERROR
        if not 0.0 <= volume_ <= 1.0:
            volume_ = 1.0
if name_ is None and id_ is None:
raise ValueError("\nInvalid function call, at least one argument must be set!")
# search by name take precedence (if name value is not undefined)
if name_ is not None:
id_ = None
# Calculate the sound panning, left & right volume values
left, right = self.stereo_panning(new_x_, self.screen_size.w)
left *= volume_
right *= volume_
channels = self.channels # Fetch all the channels from the sound controller
for obj in self.snd_obj: # Iterate all the SoundObject
if obj:
if hasattr(obj, "pos") and obj.pos is not None:
# search by name
if name_ is not None:
if hasattr(obj, 'name') and hasattr(obj, 'active_channel'):
if obj.name == name_:
c = obj.active_channel # Channel playing the sound
obj.pos = new_x_ # update the sound position
try:
                                    channel = channels[c - self.start]  # active_channel is absolute; map to reserved list
if hasattr(channel, 'set_volume'):
channel.set_volume(left, right) # set the panning for the channel
else:
raise AttributeError("\nObject is missing attribute set_volume")
except IndexError as e:
raise IndexError("\n %s " % e)
else:
continue
else:
raise IndexError(
"\nSoundObject is missing attribute(s), "
"obj must be a SoundObject type got %s " % type(obj))
# search by id
elif id_ is not None:
if hasattr(obj, 'obj_id') and hasattr(obj, 'active_channel'):
if obj.obj_id == id_:
c = obj.active_channel # Channel playing the sound
obj.pos = new_x_ # update the sound position
try:
                                    channel = channels[c - self.start]  # active_channel is absolute; map to reserved list
if hasattr(channel, 'set_volume'):
channel.set_volume(left, right) # set the panning for the channel
else:
raise AttributeError("\nObject is missing attribute set_volume")
except IndexError as e:
raise IndexError("\n %s " % e)
else:
continue
else:
                    print('\nFunction call error, at least one search method must'
                          ' be set (search by name or search by id)')
return
# ALL SOUNDS
def update_sounds_panning(self, new_x_: int, volume_: float) -> None:
"""
PANNING IS THE DISTRIBUTION OF A SOUND SIGNAL INTO A NEW STEREO OR MULTI-CHANNEL SOUND FIELD
CHANGE PANNING FOR ALL SOUNDS BEING PLAYED ON THE MIXER.
THIS METHOD ITERATE OVER ALL SOUNDS BEING PLAYED BY THE MIXER AND ADJUST THE PANNING ACCORDING
TO THE NEW POSITION new_x_ AND GIVEN VOLUME_
:param new_x_ : integer; new sound position in the display. Value must be in range [0, Max width]
:param volume_ : float; Sound volume (adjust all sound being played by the mixer)
value must be in range [0 ... 1.0]
:return : None
"""
assert 0 <= new_x_ <= self.screen_size.w, \
"\nArgument new_x_ value must be in range (0, %s) got %s" % (self.screen_size.w, new_x_)
# SET THE VOLUME IN CASE OF AN INPUT ERROR
        if not 0.0 <= volume_ <= 1.0:
            volume_ = 1.0
# Calculate the sound panning, left & right volume values
left, right = self.stereo_panning(new_x_, self.screen_size.w)
left *= volume_
right *= volume_
channels = self.channels # Fetch all the channels from the sound controller
for obj in self.snd_obj: # Iterate all the SoundObject
if obj:
if hasattr(obj, "pos") and obj.pos is not None:
if hasattr(obj, 'active_channel'):
c = obj.active_channel # Channel playing the sound
obj.pos = new_x_ # update the sound position
try:
                            c = channels[c - self.start]  # active_channel is absolute; map to reserved list
if hasattr(c, "set_volume"):
c.set_volume(left, right) # set the panning for the channel
else:
                                raise AttributeError('\nObject is missing attribute set_volume')
except IndexError as e:
raise IndexError("\n %s " % e)
else:
raise AttributeError(
"\nSoundObject is missing attribute(s), "
"obj must be a SoundObject type got %s " % type(obj))
def update_volume(self, volume_: float = 1.0) -> None:
"""
UPDATE ALL SOUND OBJECT VOLUME TO A SPECIFIC VALUE.
THIS HAS IMMEDIATE EFFECT AND DO NOT FADE THE SOUND
AFFECT ALL SOUNDS WITH OR WITHOUT PANNING EFFECT.
PANNING SOUND EFFECT WILL BE CONSERVED AFTER ADJUSTING THE VOLUME
:param volume_: float; volume value, default is 1.0
:return : None
"""
# SET THE VOLUME IN CASE OF AN INPUT ERROR
        if not 0.0 <= volume_ <= 1.0:
            volume_ = 1.0
objs = self.snd_obj
i = 0
# SET THE VOLUME FOR ALL SOUNDS
for channel in self.channels:
try:
single_obj = objs[i]
except IndexError as e:
raise IndexError("\n %s " % e)
if single_obj is not None:
# WITH PANNING
if hasattr(single_obj, "pos") and single_obj.pos is not None:
if hasattr(channel, "set_volume"):
# Calculate the sound panning, left & right volume values
left, right = self.stereo_panning(single_obj.pos, self.screen_size.w)
left *= volume_
right *= volume_
channel.set_volume(left, right)
# WITHOUT PANNING
else:
if single_obj is not None:
if hasattr(single_obj.sound, "set_volume"):
single_obj.sound.set_volume(volume_)
i += 1
def pause_sound(self, name_: str = None, id_=None) -> None:
"""
PAUSE A SINGLE SOUND FROM THE MIXER (AT LEAST ONE SEARCH ELEMENT HAS TO BE PROVIDED NAME OR ID)
        :param name_   : string | None; Given sound name (name given at the time of the SoundObject construction)
:param id_ : int | None; Default None. ID number such as object_id_ = id(sound_).
:return : None
"""
if name_ is None and id_ is None:
raise ValueError("\nInvalid function call, at least one argument must be set!")
# search by name take precedence (if name value is not undefined)
if name_ is not None:
id_ = None
objs = self.snd_obj
i = 0
# SET THE VOLUME FOR ALL SOUNDS
for channel in self.channels:
if hasattr(channel, "pause"):
try:
single_obj = objs[i]
except IndexError as e:
raise IndexError("\n %s " % e)
if single_obj is not None:
# search by name
if name_ is not None:
if single_obj.name == name_:
channel.pause()
# search by id_
elif id_ is not None:
if single_obj.obj_id == id_:
channel.pause()
i += 1
def pause_sounds(self) -> None:
"""
PAUSE ALL SOUND OBJECTS (THIS HAS IMMEDIATE EFFECT)
:return : None
"""
objs = self.snd_obj
i = 0
# SET THE VOLUME FOR ALL SOUNDS
for channel in self.channels:
try:
single_obj = objs[i]
except IndexError as e:
raise IndexError("\n %s " % e)
if single_obj is not None:
if hasattr(channel, "pause"):
channel.pause()
i += 1
def unpause_sounds(self) -> None:
"""
UNPAUSE ALL SOUND OBJECTS (THIS HAS IMMEDIATE EFFECT)
:return : None
"""
objs = self.snd_obj
i = 0
for channel in self.channels:
try:
single_obj = objs[i]
except IndexError as e:
raise IndexError("\n %s " % e)
if single_obj is not None:
if hasattr(channel, "unpause"):
channel.unpause()
i += 1
def unpause_sound(self, name_: str = None, id_=None) -> None:
"""
UNPAUSE A SINGLE SOUND FROM THE MIXER (AT LEAST ONE SEARCH ELEMENT HAS TO BE PROVIDED NAME OR ID)
        :param name_   : string | None; Given sound name (name given at the time of the SoundObject construction)
:param id_ : int | None; Default None. ID number such as object_id_ = id(sound_).
:return : None
"""
if name_ is None and id_ is None:
raise ValueError("\nInvalid function call, at least one argument must be set!")
# search by name take precedence (if name value is not undefined)
if name_ is not None:
id_ = None
objs = self.snd_obj
i = 0
for channel in self.channels:
try:
single_obj = objs[i]
except IndexError as e:
raise IndexError("\n %s " % e)
if single_obj is not None:
# search by name
if name_ is not None:
if single_obj.name == name_:
channel.unpause()
# search by id_
elif id_ is not None:
if single_obj.obj_id == id_:
channel.unpause()
i += 1
def show_free_channels(self) -> list:
"""
RETURN A LIST OF FREE CHANNELS (NUMERICAL VALUES).
:return: list; RETURN A LIST
"""
free_channels = []
i = 0
free_channels_append = free_channels.append
start = self.start
for c in self.channels:
if not c.get_busy():
free_channels_append(i + start)
i += 1
print("Free channels : %s " % free_channels)
return free_channels
def show_sounds_playing(self):
"""
DISPLAY ALL SOUNDS OBJECTS
"""
j = 0
for object_ in self.snd_obj:
if object_:
timeleft = round(object_.length - (time() - object_.time), 2)
# if timeleft < 0, most likely to be a sound with attribute loop enabled
if timeleft < 0.0:
timeleft = 0.0
print('Name %s priority %s channel %s length(s) %s time left(s) %s' %
(object_.name, object_.priority, object_.active_channel, round(object_.length, 2),
timeleft))
j += 1
def get_identical_sounds(self, sound_: pygame.mixer.Sound) -> list:
"""
RETURN A LIST OF CHANNEL(S) PLAYING IDENTICAL SOUND OBJECT(s)
SEARCH BY IDENTICAL PYGAME.SOUND OBJECT
:param sound_ : Mixer object; Object to compare to
:return : python list; List containing channels number playing similar sound object,
if no match is found, return an empty list
"""
assert isinstance(sound_, pygame.mixer.Sound), \
"\nPositional argument sound_ must be a pygame.mixer.Sound type, got %s " % type(sound_)
duplicate = []
duplicate_append = duplicate.append
for obj in self.snd_obj:
if obj:
if obj.sound == sound_:
duplicate_append(obj.active_channel)
return duplicate
def get_identical_id(self, id_: int) -> list:
"""
RETURN A LIST CONTAINING ANY IDENTICAL SOUND BEING MIXED.
USE THE UNIQUE ID FOR REFERENCING OBJECTS
:param id_: python integer; unique id number that reference a sound object
:return : list; Return a list of channels containing identical sound object
"""
assert isinstance(id_, int), \
"\nPositional argument id_ must be an int type, got %s " % type(id_)
duplicate = []
duplicate_append = duplicate.append
for obj in self.snd_obj:
if obj:
if obj.obj_id == id_:
duplicate_append(obj)
return duplicate
def stop(self, stop_list_: list):
"""
STOP ALL SOUND BEING PLAYED ON THE GIVEN LIST OF CHANNELS.
ONLY SOUND WITH PRIORITY LEVEL 0 CAN BE STOPPED.
:param stop_list_: python list; list of channels
:return : None
"""
assert isinstance(stop_list_, list), \
"\nPositional argument stop_list must be a python list type, got %s " % type(stop_list_)
start = self.start
snd_obj = self.snd_obj
channels = self.channels
for c in stop_list_:
l = c - start
if snd_obj[l]:
if snd_obj[l].priority == 0:
channels[l].set_volume(0.0, 0.0)
channels[l].stop()
self.update()
def stop_all_except(self, exception_: list):
"""
STOP ALL SOUND OBJECT EXCEPT SOUNDS FROM A GIVEN LIST OF ID(SOUND)
IT WILL STOP SOUND PLAYING ON ALL CHANNELS REGARDLESS
OF THEIR PRIORITY.
:param exception_: Can be a single pygame.Sound id value or a list containing
all pygame.Sound object id numbers.
"""
assert isinstance(exception_, list),\
"\nPositional argument exception_ must be a python list type, got %s " % type(exception_)
start = self.start
snd_obj = self.snd_obj
channels = self.channels
for c in self.all:
l = c - start
snd_object = snd_obj[l]
if snd_object:
if snd_object.obj_id not in exception_:
channels[l].set_volume(0.0)
channels[l].stop()
self.update()
def stop_all(self):
"""
STOP ALL SOUNDS NO EXCEPTIONS.
:return: None
"""
start = self.start
snd_obj = self.snd_obj
channels = self.channels
for c in self.all:
l = c - start
snd_object = snd_obj[l]
if snd_object:
channels[l].set_volume(0.0)
channels[l].stop()
self.update()
def stop_name(self, name_: str = ""):
"""
STOP A PYGAME.SOUND OBJECT IF PLAYING ON ANY OF THE CHANNELS.
:param name_: string; Sound name to stop
:return : None
"""
assert isinstance(name_, str),\
"\nPositional argument name_ must be a python string type, got %s " % type(name_)
channels = self.channels
start = self.start
for sound in self.snd_obj:
if sound and sound.name == name_:
try:
channels[sound.active_channel - start].set_volume(0.0)
channels[sound.active_channel - start].stop()
except IndexError:
# IGNORE ERROR
...
self.update()
def stop_object(self, object_id: int):
"""
STOP A GIVEN SOUND USING THE PYGAME.SOUND OBJECT ID NUMBER.
:param object_id: integer; Object unique identifier such as id(sound)
:return : None
"""
assert isinstance(object_id, int), \
"\nPositional argument object_id must be a python string type, got %s " % type(object_id)
channels = self.channels
start = self.start
for sound in self.snd_obj:
if sound and sound.obj_id == object_id:
try:
channels[sound.active_channel - start].set_volume(0.0)
channels[sound.active_channel - start].stop()
except IndexError:
# IGNORE ERROR
...
self.update()
def return_time_left(self, object_id) -> float:
"""
        RETURN THE TIME LEFT IN SECONDS (RETURN -1 IF THE SOUND IS SEAMLESSLY LOOPED ON THE CHANNEL,
        AND NONE WHEN THE SOUND IS NOT FOUND)
:param object_id: python integer; unique object id
:return : float | None; Return a float representing the time left in seconds.
"""
j = 0
snd_obj = self.snd_obj
for obj in snd_obj:
if obj:
if obj.obj_id == object_id:
timeleft = round(snd_obj[j].length - (time() - snd_obj[j].time), 2)
# if timeleft < 0, most likely to be a sound with attribute loop enabled
if timeleft < 0.0:
if obj.loop:
return -1.0
else:
timeleft = 0.0
return timeleft
j += 1
return None
def get_reserved_channels(self):
""" RETURN THE NUMBER OF RESERVED CHANNELS """
return self.channel_num
def get_reserved_start(self):
""" RETURN THE FIRST RESERVED CHANNEL NUMBER """
return self.start
def get_reserved_end(self):
""" RETURN THE LAST RESERVED CHANNEL NUMBER """
return self.end
def get_channels(self):
"""
RETURN A LIST OF ALL RESERVED PYGAME MIXER CHANNELS.
"""
return self.channels
def get_sound(self, channel_):
"""
RETURN THE SOUND BEING PLAYED ON A SPECIFIC CHANNEL (PYGAME.MIXER.CHANNEL)
:param channel_: integer; channel_ is an integer representing the channel number.
"""
try:
sound = self.channels[channel_]
except IndexError:
            raise IndexError('\nChannel number out of range')
else:
return sound
def get_sound_object(self, channel_):
"""
RETURN A SPECIFIC SOUND OBJECT
RETURN NONE IN CASE OF AN INDEX ERROR
"""
try:
s = self.snd_obj[channel_]
except IndexError:
return None
else:
return s
def get_all_sound_object(self):
""" RETURN ALL SOUND OBJECTS """
return self.snd_obj
def play(self, sound_, loop_=0, priority_=0, volume_=1.0,
fade_in_ms=100, fade_out_ms=100, panning_=False, name_=None,
x_=None, object_id_=None):
"""
PLAY A SOUND OBJECT ON THE GIVEN CHANNEL
RETURN NONE IF ALL CHANNELS ARE BUSY OR IF AN EXCEPTION IS RAISED
:param sound_ : pygame mixer sound
:param loop_ : loop the sound indefinitely -1 (default = 0)
:param priority_ : Set the sound priority (low : 0, med : 1, high : 2)
:param volume_ : Set the sound volume 0.0 to 1.0 (100% full volume)
:param fade_in_ms : Fade in sound effect in ms
:param fade_out_ms : float; Fade out sound effect in ms
:param panning_ : boolean for using panning method (stereo mode)
:param name_ : String representing the sound name (if no name default is -> str(id(sound_)))
:param x_ : Sound position for stereo mode,
:param object_id_ : unique sound id
"""
l = 0
channels = self.channels
channel = self.channel
start = self.start
end = self.end
screen_width = self.screen_size.w
left = 0
right = 0
try:
if not sound_:
raise AttributeError('\nIncorrect call argument, sound_ cannot be None')
if panning_:
                # panning mode is enabled but the sound position may be
                # missing or out of range; fall back to the screen centre
                if x_ is None or not (0 <= x_ <= screen_width):
                    x_ = screen_width >> 1
            # Regardless of the x_ value, if panning mode is disabled the
            # variable x_ is set to None
else:
x_ = None
# set a name by default id(sound_)
if name_ is None:
name_ = str(id(sound_))
# set object id default value
if object_id_ is None:
object_id_ = id(sound_)
l = channel - start
# TODO OVERFLOW CHANNELS[l]
# CHECK IF CURRENT CHANNEL IS BUSY
if channels[l].get_busy() == 0:
# PLAY A SOUND IN STEREO MODE
if panning_:
left, right = self.stereo_panning(x_, self.screen_size.w)
channels[l].set_volume(left * volume_, right * volume_)
else:
channels[l].set_volume(volume_)
channels[l].fadeout(fade_out_ms)
channels[l].play(sound_, loops=loop_, maxtime=0, fade_ms=fade_in_ms)
                # store the absolute mixer channel number so stop()/stop_name() can map it back
                self.snd_obj[l] = SoundObject(sound_, priority_, name_, channel, object_id_, position_=x_, loop_=loop_)
# PREPARE THE MIXER FOR THE NEXT CHANNEL
self.channel += 1
if self.channel > end - 1:
self.channel = start
                # RETURN THE CHANNEL NUMBER PLAYING THE SOUND OBJECT
                return channel
# ALL CHANNELS ARE BUSY
else:
self.stop(self.get_identical_sounds(sound_))
# VERY IMPORTANT, GO TO NEXT CHANNEL.
self.channel += 1
if self.channel > end - 1:
self.channel = start
return None
except IndexError as e:
print('\n[-] SoundControl error : %s ' % e)
print(self.channel, l)
return None
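    # Hedged usage sketch (comments only; not part of the original class):
    #   ctrl = SoundControl(pygame.Rect(0, 0, 800, 600))
    #   ch = ctrl.play(sound, loop_=0, volume_=0.8, panning_=True, x_=400)
    #   ctrl.update()  # call once per frame to release finished channels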
def display_size_update(self, rect_):
"""
UPDATE THE SCREEN SIZE AFTER CHANGING MODE
THIS FUNCTION IS MAINLY USED FOR THE PANNING MODE (STEREO)
:param rect_: pygame.Rect; display dimension
:return: None
"""
self.screen_size = rect_
def stereo_panning(self, x_, screen_width):
"""
STEREO MODE
:param screen_width: display width
:param x_ : integer; x value of sprite position on screen
:return: tuple of float;
"""
right_volume = 0.0
left_volume = 0.0
# MUTE THE SOUND IF OUTSIDE THE BOUNDARIES
        if not 0 <= x_ <= screen_width:
            return right_volume, left_volume
right_volume = float(x_) / screen_width
left_volume = 1.0 - right_volume
return left_volume, right_volume
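# Hedged worked example, not part of the original file: on an 800 px wide
# display a sound at x = 200 gets right = 200/800 = 0.25 and left = 0.75,
# i.e. it is weighted towards the left speaker. Running this needs an audio
# device for pygame.mixer.init().
if __name__ == "__main__":
    pygame.mixer.init()
    ctrl = SoundControl(pygame.Rect(0, 0, 800, 600), channels_=8)
    print(ctrl.stereo_panning(200, 800))  # (0.75, 0.25)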
| 39.226054 | 119 | 0.513088 | 30,397 | 0.989679 | 0 | 0 | 0 | 0 | 0 | 0 | 12,643 | 0.411636 |
85e90c8a65010ce9ecba5749d22457498fa4d999
| 2,931 |
py
|
Python
|
tests/extmethods/run.py
|
dariobig/pyangbind
|
db0808f719bb963dac85606fddd65a1930a84aef
|
[
"Apache-2.0"
] | 1 |
2020-04-01T05:45:41.000Z
|
2020-04-01T05:45:41.000Z
|
tests/extmethods/run.py
|
dariobig/pyangbind
|
db0808f719bb963dac85606fddd65a1930a84aef
|
[
"Apache-2.0"
] | null | null | null |
tests/extmethods/run.py
|
dariobig/pyangbind
|
db0808f719bb963dac85606fddd65a1930a84aef
|
[
"Apache-2.0"
] | 3 |
2016-11-01T23:51:35.000Z
|
2018-05-23T10:09:08.000Z
|
#!/usr/bin/env python
import os
import sys
import getopt
TESTNAME = "extmethods"
class extmethodcls(object):
def commit(self, *args, **kwargs):
return "COMMIT_CALLED"
def presave(self, *args, **kwargs):
return "PRESAVE_CALLED"
def postsave(self, *args, **kwargs):
return "POSTSAVE_CALLED"
def oam_check(self, *args, **kwargs):
return "OAM_CHECK_CALLED"
def echo(self, *args, **kwargs):
return {'args': args, 'kwargs': kwargs}
# generate bindings in this folder
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "k", ["keepfiles"])
except getopt.GetoptError as e:
sys.exit(127)
k = False
for o, a in opts:
if o in ["-k", "--keepfiles"]:
k = True
pythonpath = os.environ.get("PATH_TO_PYBIND_TEST_PYTHON") if \
os.environ.get('PATH_TO_PYBIND_TEST_PYTHON') is not None \
else sys.executable
pyangpath = os.environ.get('PYANGPATH') if \
os.environ.get('PYANGPATH') is not None else False
pyangbindpath = os.environ.get('PYANGBINDPATH') if \
os.environ.get('PYANGBINDPATH') is not None else False
assert pyangpath is not False, "could not find path to pyang"
assert pyangbindpath is not False, "could not resolve pyangbind directory"
this_dir = os.path.dirname(os.path.realpath(__file__))
cmd = "%s " % pythonpath
cmd += "%s --plugindir %s/pyangbind/plugin" % (pyangpath, pyangbindpath)
cmd += " -f pybind -o %s/bindings.py" % this_dir
cmd += " -p %s" % this_dir
cmd += " --use-extmethods"
cmd += " %s/%s.yang" % (this_dir, TESTNAME)
os.system(cmd)
extdict = {
'/item/one': extmethodcls()
}
from bindings import extmethods
x = extmethods(extmethods=extdict)
results = [
("commit", True, "COMMIT_CALLED"),
("presave", True, "PRESAVE_CALLED"),
("postsave", True, "POSTSAVE_CALLED"),
("oam_check", True, "OAM_CHECK_CALLED"),
("doesnotexist", False, "")
]
for chk in results:
method = getattr(x.item.one, "_" + chk[0], None)
assert (method is not None) == chk[1], \
"Method %s retrieved incorrectly, method was: %s" % method
if method is not None:
result = method()
assert result == chk[2], "Incorrect return from %s -> %s != %s" \
% (chk[0], result, chk[2])
expected_return = {'args': ('one',), 'kwargs': {'caller': ['item', 'one'],
'two': 2, 'path_helper': False}}
assert x.item.one._echo('one', two=2) == expected_return, \
"args+kwargs not echoed correctly"
try:
x.item.two = False
assert False, \
"incorrectly set an attribute that did not exist in extmethods"
except AttributeError:
pass
if not k:
os.system("/bin/rm %s/bindings.py" % this_dir)
os.system("/bin/rm %s/bindings.pyc" % this_dir)
if __name__ == '__main__':
main()
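# Hedged note, not in the original: run as `python run.py [-k|--keepfiles]`;
# -k keeps the generated bindings.py instead of deleting it afterwards.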
| 29.019802 | 76 | 0.604572 | 378 | 0.128966 | 0 | 0 | 0 | 0 | 0 | 0 | 904 | 0.308427 |
85eb93c822a019fc750d57de9e82b6de5c0352f3
| 790 |
py
|
Python
|
scripts/solved/031_TRAN.py
|
akikuno/rosalind
|
7015dc63e493d870e5789e99f2ee523a9b1f5ab9
|
[
"MIT"
] | null | null | null |
scripts/solved/031_TRAN.py
|
akikuno/rosalind
|
7015dc63e493d870e5789e99f2ee523a9b1f5ab9
|
[
"MIT"
] | null | null | null |
scripts/solved/031_TRAN.py
|
akikuno/rosalind
|
7015dc63e493d870e5789e99f2ee523a9b1f5ab9
|
[
"MIT"
] | null | null | null |
# https://rosalind.info/problems/tran/
import re
file = "data/tran.txt"
def read_fasta(file: str):
"""
Args
file: path of fasta file
"""
with open(file) as f:
fa = f.read().splitlines()
prev = True
header = []
seq = []
for f in fa:
if ">" in f:
header.append(f[1:])
prev = True
elif prev:
seq.append(f)
prev = False
else:
seq[-1] += f
return header, seq
_, seq = read_fasta(file)
seq1, seq2 = seq
transition = 0
transversion = 0
for s1, s2 in zip(seq1, seq2):
if s1 == s2:
continue
s = s1 + s2
if re.match(r"(AG)|(GA)|(CT)|(TC)", s):
transition += 1
else:
transversion += 1
print(transition / transversion)
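# Hedged note, not in the original: for the Rosalind sample dataset the
# expected transition/transversion ratio is 1.21428571429.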
| 16.458333 | 43 | 0.501266 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 127 | 0.160759 |