Dataset schema (one record per source file; 39 columns):

| column | dtype | observed range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | 1 to 10 items |
| max_stars_count | int64 | 1 to 191k (nullable) |
| max_stars_repo_stars_event_min_datetime | string | length 24 (nullable) |
| max_stars_repo_stars_event_max_datetime | string | length 24 (nullable) |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | 1 to 10 items |
| max_issues_count | int64 | 1 to 67k (nullable) |
| max_issues_repo_issues_event_min_datetime | string | length 24 (nullable) |
| max_issues_repo_issues_event_max_datetime | string | length 24 (nullable) |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | 1 to 10 items |
| max_forks_count | int64 | 1 to 105k (nullable) |
| max_forks_repo_forks_event_min_datetime | string | length 24 (nullable) |
| max_forks_repo_forks_event_max_datetime | string | length 24 (nullable) |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |

Each record below is shown as one pipe-delimited metadata row (columns hexsha through max_forks_repo_forks_event_max_datetime), followed by the file content, followed by one pipe-delimited statistics row (columns avg_line_length through score_documentation).
46b4aae481a7dcad8401c1fdb98aae95f3b590c6 | 2,207 | py | Python | api/patients/urls.py | Wellheor1/l2 | d980210921c545c68fe9d5522bb693d567995024 | ["MIT"] | 10 | 2018-03-14T06:17:06.000Z | 2022-03-10T05:33:34.000Z | api/patients/urls.py | Wellheor1/l2 | d980210921c545c68fe9d5522bb693d567995024 | ["MIT"] | 512 | 2018-09-10T07:37:34.000Z | 2022-03-30T02:23:43.000Z | api/patients/urls.py | D00dleman/l2 | 0870144537ee340cd8db053a608d731e186f02fb | ["MIT"] | 24 | 2018-07-31T05:52:12.000Z | 2022-02-08T00:39:41.000Z |
from django.urls import path
from . import views
urlpatterns = [
path('search-card', views.patients_search_card),
path('search-individual', views.patients_search_individual),
path('search-l2-card', views.patients_search_l2_card),
path('create-l2-individual-from-card', views.create_l2_individual_from_card),
path('card/<int:card_id>', views.patients_get_card_data),
path('card/save', views.patients_card_save),
path('card/archive', views.patients_card_archive),
path('card/unarchive', views.patients_card_unarchive),
path('individuals/search', views.individual_search),
path('individuals/sex', views.get_sex_by_param),
path('individuals/edit-doc', views.edit_doc),
path('individuals/edit-agent', views.edit_agent),
path('individuals/update-cdu', views.update_cdu),
path('individuals/update-wia', views.update_wia),
path('individuals/sync-rmis', views.sync_rmis),
path('individuals/sync-tfoms', views.sync_tfoms),
path('individuals/load-anamnesis', views.load_anamnesis),
path('individuals/load-dreg', views.load_dreg),
path('individuals/load-screening', views.load_screening),
path('individuals/load-vaccine', views.load_vaccine),
path('individuals/load-ambulatory-data', views.load_ambulatory_data),
path('individuals/load-benefit', views.load_benefit),
path('individuals/load-dreg-detail', views.load_dreg_detail),
path('individuals/load-vaccine-detail', views.load_vaccine_detail),
path('individuals/load-ambulatorydata-detail', views.load_ambulatory_data_detail),
path('individuals/load-ambulatory-history', views.load_ambulatory_history),
path('individuals/load-benefit-detail', views.load_benefit_detail),
path('individuals/save-dreg', views.save_dreg),
path('individuals/save-plan-dreg', views.update_dispensary_reg_plans),
path('individuals/save-vaccine', views.save_vaccine),
path('individuals/save-ambulatory-data', views.save_ambulatory_data),
path('individuals/save-benefit', views.save_benefit),
path('individuals/save-anamnesis', views.save_anamnesis),
path('is-card', views.is_l2_card),
path('save-screening-plan', views.update_screening_reg_plan),
]
| 53.829268 | 86 | 0.752152 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 852 | 0.386044 |
46b5e33d3c7311128739c73f9d648a67b6c52c18 | 1,139 | py | Python | resolwe_bio/kb/migrations/0002_alter_field_max_length.py | JureZmrzlikar/resolwe-bio | 54cde9b293abebad2db0564c9fefa33d6d2fe835 | ["Apache-2.0"] | null | null | null | resolwe_bio/kb/migrations/0002_alter_field_max_length.py | JureZmrzlikar/resolwe-bio | 54cde9b293abebad2db0564c9fefa33d6d2fe835 | ["Apache-2.0"] | null | null | null | resolwe_bio/kb/migrations/0002_alter_field_max_length.py | JureZmrzlikar/resolwe-bio | 54cde9b293abebad2db0564c9fefa33d6d2fe835 | ["Apache-2.0"] | 1 | 2021-09-03T08:50:54.000Z | 2021-09-03T08:50:54.000Z |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-11-15 07:06
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('resolwe_bio_kb', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='feature',
name='aliases',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=256), blank=True, default=[], size=None),
),
migrations.AlterField(
model_name='feature',
name='name',
field=models.CharField(max_length=1024),
),
migrations.AlterField(
model_name='feature',
name='sub_type',
field=models.CharField(choices=[(b'protein-coding', b'Protein-coding'), (b'pseudo', b'Pseudo'), (b'rRNA', b'rRNA'), (b'ncRNA', b'ncRNA'), (b'snRNA', b'snRNA'), (b'snoRNA', b'snoRNA'), (b'tRNA', b'tRNA'), (b'asRNA', b'asRNA'), (b'other', b'Other'), (b'unknown', b'Unknown')], max_length=20),
),
]
| 35.59375 | 302 | 0.604039 | 943 | 0.827919 | 0 | 0 | 0 | 0 | 0 | 0 | 335 | 0.294118 |
46b6263389154f262f0911cbbda3dfc8ad613ae7 | 3,014 | py | Python | setup.py | conan-hdk/xlwings | 44395c4d18b46f76249279b7d0965e640291499c | ["BSD-3-Clause"] | null | null | null | setup.py | conan-hdk/xlwings | 44395c4d18b46f76249279b7d0965e640291499c | ["BSD-3-Clause"] | null | null | null | setup.py | conan-hdk/xlwings | 44395c4d18b46f76249279b7d0965e640291499c | ["BSD-3-Clause"] | null | null | null |
import os
import sys
import re
import glob
from setuptools import setup, find_packages
# long_description: Take from README file
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f:
readme = f.read()
# Version Number
with open(os.path.join(os.path.dirname(__file__), 'xlwings', '__init__.py')) as f:
version = re.compile(r".*__version__ = '(.*?)'", re.S).match(f.read()).group(1)
# Dependencies
if sys.platform.startswith('win'):
if sys.version_info[:2] >= (3, 7):
pywin32 = 'pywin32 >= 224'
else:
pywin32 = 'pywin32'
install_requires = [pywin32]
# This places dlls next to python.exe for standard setup and in the parent folder for virtualenv
data_files = [('', glob.glob('xlwings*.dll'))]
elif sys.platform.startswith('darwin'):
install_requires = ['psutil >= 2.0.0', 'appscript >= 1.0.1']
data_files = [(os.path.expanduser("~") + '/Library/Application Scripts/com.microsoft.Excel', [f'xlwings/xlwings-{version}.applescript'])]
else:
if os.environ.get('READTHEDOCS', None) == 'True' or os.environ.get('INSTALL_ON_LINUX') == '1':
data_files = []
install_requires = []
else:
raise OSError("xlwings requires an installation of Excel and therefore only works on Windows and macOS. To enable the installation on Linux nevertheless, do: export INSTALL_ON_LINUX=1; pip install xlwings")
extras_require = {
'pro': ['cryptography', 'Jinja2', 'pdfrw'],
'all': ['cryptography', 'Jinja2', 'pandas', 'matplotlib', 'plotly', 'flask', 'requests', 'pdfrw']
}
setup(
name='xlwings',
version=version,
url='https://www.xlwings.org',
license='BSD 3-clause',
author='Zoomer Analytics LLC',
author_email='[email protected]',
description='Make Excel fly: Interact with Excel from Python and vice versa.',
long_description=readme,
data_files=data_files,
packages=find_packages(exclude=('tests', 'tests.*',)),
package_data={'xlwings': ['xlwings.bas', 'Dictionary.cls', '*.xlsm', '*.xlam', '*.applescript', 'addin/xlwings.xlam', 'addin/xlwings_unprotected.xlam']},
keywords=['xls', 'excel', 'spreadsheet', 'workbook', 'vba', 'macro'],
install_requires=install_requires,
extras_require=extras_require,
entry_points={'console_scripts': ['xlwings=xlwings.cli:main'],},
classifiers=[
'Development Status :: 4 - Beta',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Topic :: Office/Business :: Financial :: Spreadsheet',
'License :: OSI Approved :: BSD License'],
platforms=['Windows', 'Mac OS X'],
python_requires='>=3.6',
)
| 42.450704 | 214 | 0.651626 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,650 | 0.547445 |
46b6eaa6075021f6bee39458eda6a940c6b7c8b2 | 2,044 | py | Python | secedgar/tests/test_cli.py | abbadata/sec-edgar | f801d2137a988c928449bf64b44a85c01e80fd3a | ["Apache-2.0"] | null | null | null | secedgar/tests/test_cli.py | abbadata/sec-edgar | f801d2137a988c928449bf64b44a85c01e80fd3a | ["Apache-2.0"] | null | null | null | secedgar/tests/test_cli.py | abbadata/sec-edgar | f801d2137a988c928449bf64b44a85c01e80fd3a | ["Apache-2.0"] | null | null | null |
import pytest
from click.testing import CliRunner
from secedgar.cli import daily, filing
from secedgar.utils.exceptions import FilingTypeError
def run_cli_command(cli, user_input, directory, catch_exceptions=False):
runner = CliRunner()
user_input = user_input + " --directory {}".format(directory)
return runner.invoke(cli, user_input, catch_exceptions=catch_exceptions)
def check_bad_inputs(cli, user_input, expected_exception, directory):
# SystemExit does not raise exception by runner
if expected_exception is SystemExit:
result = run_cli_command(cli, user_input, directory)
assert result.exit_code != 0
else:
with pytest.raises(expected_exception):
run_cli_command(cli, user_input, directory)
class TestCLIFiling:
@pytest.mark.parametrize(
"user_input,expected_exception",
[
("-l aapl msft Facebook", SystemExit), # missing filing type
("-l aapl -t null", FilingTypeError), # unrecognized filing type
("-l aapl -t FILING_10Q -n abc", SystemExit), # count is not int
("-l aapl -t FILING_10Q -n 0", ValueError) # no filings available if 0 picked
]
)
def test_filing_bad_inputs(self, user_input, expected_exception, tmp_data_directory):
check_bad_inputs(filing, user_input, expected_exception, tmp_data_directory)
@pytest.mark.parametrize(
"user_input",
[
"-l aapl msft fb FILING_10Q",
"-l aapl msft fb FILING_10Q -n 10",
"-l aapl msft fb FILING_10Q -n 1"
]
)
def test_multiple_companies_input(self, user_input, tmp_data_directory):
pass
class TestCLIDaily:
@pytest.mark.parametrize(
"user_input,expected_exception",
[
("", SystemExit),
("-d 2020", ValueError)
]
)
def test_daily_bad_inputs(self, user_input, expected_exception, tmp_data_directory):
check_bad_inputs(daily, user_input, expected_exception, tmp_data_directory)
| 34.066667 | 90 | 0.672211 | 1,275 | 0.623777 | 0 | 0 | 1,218 | 0.59589 | 0 | 0 | 441 | 0.215753 |
46b74e2cb30b2d76500271ee27ada8ec4c26cdc1 | 2,013 | py | Python | hydro.py | garethcmurphy/hydrosolve | ef150a6adcab1e835b4b907c5fed2dd58cd4ba08 | ["MIT"] | null | null | null | hydro.py | garethcmurphy/hydrosolve | ef150a6adcab1e835b4b907c5fed2dd58cd4ba08 | ["MIT"] | null | null | null | hydro.py | garethcmurphy/hydrosolve | ef150a6adcab1e835b4b907c5fed2dd58cd4ba08 | ["MIT"] | null | null | null |
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
nstep=200
nx=400
nv=3
u=np.zeros((nx,nv))
prim=np.zeros((nx,nv))
gam=5./3.
dx=1./nx
dt=1e-3
time=0
x=np.linspace(0,1,num=nx)
def ptou(pri):
u=np.zeros((nx,nv))
rho=pri[:,0]
v=pri[:,1]
prs=pri[:,2]
mom=rho*v
u[:,0]=rho
u[:,1]=mom
u[:,2]=0.5*mom*v+prs/(gam-1)
return(u)
def utop(u):
pri=np.zeros((nx,nv))
rho=u[:,0]
mom=u[:,1]
ene=u[:,2]
vel=mom/(rho+1e-6)
pri[:,0]=rho
pri[:,1]=vel
pri[:,2]=(ene-0.5*mom*vel)*(gam-1)
return(pri)
def getmaxv(pri):
rho=pri[:,0]
vel=pri[:,1]
prs=pri[:,2]
cs=np.sqrt(gam*prs/rho)
return(max(abs(vel)+cs))
def getflux(u):
f=np.zeros((nx,nv))
pri=utop(u)
rho=pri[:,0]
v=pri[:,1]
prs=pri[:,2]
mom=u[:,1]
ene=u[:,2]
f[:,0]=mom
f[:,1]=mom*v+prs
f[:,2]=(ene+prs)*v
return(f)
prim[:,0]=1.
prim[:,1]=0.
prim[:,2]=1.
for i in range(int(nx/2),nx):
prim[i,0]=0.1
prim[i,1]=0.
prim[i,2]=0.125
print (prim[:,2])
u = ptou(prim)
uold = u
p = pold = prim  # keep the initial primitive state; `p` is needed for the first CFL estimate in the time loop
fig = plt.figure()
gs = gridspec.GridSpec(nv,1)
ax1 = fig.add_subplot(gs[0,0])
ax2 = fig.add_subplot(gs[1,0])
ax3 = fig.add_subplot(gs[2,0])
ax1.plot(x, prim[:, 0], label='density')   # 'pres' is not a valid matplotlib format string; label the curves instead
ax2.plot(x, prim[:, 1], label='velocity')
ax3.plot(x, prim[:, 2], label='pressure')
fig.show()
for nstep in range(0,nstep):
print (time)
um=np.roll(u, 1,axis=0)
up=np.roll(u,-1,axis=0)
um[0,:] =um[1,:]
up[nx-1,:]=up[nx-2,:]
fm=getflux(um)
fp=getflux(up)
cfl=0.49
dtdx=1./getmaxv(p)
dt=dtdx*dx
time=time+dt
un=0.5*(um+up) - cfl*dtdx* (fp-fm)
u=un
p=utop(u)
plt.close(fig)
fig = plt.figure()
gs = gridspec.GridSpec(nv,1)
ax1 = fig.add_subplot(gs[0,0])
ax2 = fig.add_subplot(gs[1,0])
ax3 = fig.add_subplot(gs[2,0])
ax1.plot(p[:,0])
ax2.plot(p[:,1])
ax3.plot(p[:,2])
fig.show()
| 18.638889 | 42 | 0.516642 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 0.008942 |
d3b125d2c7eabb30628cc33826a64ef3ed9c92f4 | 22,179 | py | Python | tests/test_pluralize.py | weixu365/pluralizer-py | 793b2a8ff1320f701e70810038e0902c610aa5b0 | ["MIT"] | 4 | 2020-05-10T12:02:57.000Z | 2022-02-02T11:20:02.000Z | tests/test_pluralize.py | weixu365/pluralizer-py | 793b2a8ff1320f701e70810038e0902c610aa5b0 | ["MIT"] | 30 | 2020-05-10T10:07:00.000Z | 2022-03-26T18:22:43.000Z | tests/test_pluralize.py | weixu365/pluralizer-py | 793b2a8ff1320f701e70810038e0902c610aa5b0 | ["MIT"] | null | null | null |
import unittest
from pluralizer import Pluralizer
import re
# Standard singular/plural matches.
#
# @type {Array}
BASIC_TESTS = [
# Uncountables.
['firmware', 'firmware'],
['fish', 'fish'],
['media', 'media'],
['moose', 'moose'],
['police', 'police'],
['sheep', 'sheep'],
['series', 'series'],
['agenda', 'agenda'],
['news', 'news'],
['reindeer', 'reindeer'],
['starfish', 'starfish'],
['smallpox', 'smallpox'],
['tennis', 'tennis'],
['chickenpox', 'chickenpox'],
['shambles', 'shambles'],
['garbage', 'garbage'],
['you', 'you'],
['wildlife', 'wildlife'],
['Staff', 'Staff'],
['STAFF', 'STAFF'],
['turquois', 'turquois'],
['carnivorous', 'carnivorous'],
['only', 'only'],
['aircraft', 'aircraft'],
# Latin.
['veniam', 'veniam'],
# Pluralization.
['this', 'these'],
['that', 'those'],
['is', 'are'],
['man', 'men'],
['superman', 'supermen'],
['ox', 'oxen'],
['bus', 'buses'],
['airbus', 'airbuses'],
['railbus', 'railbuses'],
['wife', 'wives'],
['guest', 'guests'],
['thing', 'things'],
['mess', 'messes'],
['guess', 'guesses'],
['person', 'people'],
['meteor', 'meteors'],
['chateau', 'chateaus'],
['lap', 'laps'],
['cough', 'coughs'],
['death', 'deaths'],
['coach', 'coaches'],
['boy', 'boys'],
['toy', 'toys'],
['guy', 'guys'],
['girl', 'girls'],
['chair', 'chairs'],
['toe', 'toes'],
['tiptoe', 'tiptoes'],
['tomato', 'tomatoes'],
['potato', 'potatoes'],
['tornado', 'tornadoes'],
['torpedo', 'torpedoes'],
['hero', 'heroes'],
['superhero', 'superheroes'],
['volcano', 'volcanoes'],
['canto', 'cantos'],
['hetero', 'heteros'],
['photo', 'photos'],
['portico', 'porticos'],
['quarto', 'quartos'],
['kimono', 'kimonos'],
['albino', 'albinos'],
['cherry', 'cherries'],
['piano', 'pianos'],
['pro', 'pros'],
['combo', 'combos'],
['turbo', 'turbos'],
['bar', 'bars'],
['crowbar', 'crowbars'],
['van', 'vans'],
['tobacco', 'tobaccos'],
['afficionado', 'afficionados'],
['monkey', 'monkeys'],
['neutrino', 'neutrinos'],
['rhino', 'rhinos'],
['steno', 'stenos'],
['latino', 'latinos'],
['casino', 'casinos'],
['avocado', 'avocados'],
['commando', 'commandos'],
['tuxedo', 'tuxedos'],
['speedo', 'speedos'],
['dingo', 'dingoes'],
['echo', 'echoes'],
['nacho', 'nachos'],
['motto', 'mottos'],
['psycho', 'psychos'],
['poncho', 'ponchos'],
['pass', 'passes'],
['ghetto', 'ghettos'],
['mango', 'mangos'],
['lady', 'ladies'],
['bath', 'baths'],
['professional', 'professionals'],
['dwarf', 'dwarves'], # Proper spelling is "dwarfs".
['encyclopedia', 'encyclopedias'],
['louse', 'lice'],
['roof', 'roofs'],
['woman', 'women'],
['formula', 'formulas'],
['polyhedron', 'polyhedra'],
['index', 'indices'], # Maybe "indexes".
['matrix', 'matrices'],
['vertex', 'vertices'],
['axe', 'axes'], # Could also be plural of "ax".
['pickaxe', 'pickaxes'],
['crisis', 'crises'],
['criterion', 'criteria'],
['phenomenon', 'phenomena'],
['addendum', 'addenda'],
['datum', 'data'],
['forum', 'forums'],
['millennium', 'millennia'],
['alumnus', 'alumni'],
['medium', 'mediums'],
['census', 'censuses'],
['genus', 'genera'],
['dogma', 'dogmata'],
['life', 'lives'],
['hive', 'hives'],
['kiss', 'kisses'],
['dish', 'dishes'],
['human', 'humans'],
['knife', 'knives'],
['phase', 'phases'],
['judge', 'judges'],
['class', 'classes'],
['witch', 'witches'],
['church', 'churches'],
['massage', 'massages'],
['prospectus', 'prospectuses'],
['syllabus', 'syllabi'],
['viscus', 'viscera'],
['cactus', 'cacti'],
['hippopotamus', 'hippopotamuses'],
['octopus', 'octopuses'],
['platypus', 'platypuses'],
['kangaroo', 'kangaroos'],
['atlas', 'atlases'],
['stigma', 'stigmata'],
['schema', 'schemata'],
['phenomenon', 'phenomena'],
['diagnosis', 'diagnoses'],
['mongoose', 'mongooses'],
['mouse', 'mice'],
['liturgist', 'liturgists'],
['box', 'boxes'],
['gas', 'gases'],
['self', 'selves'],
['chief', 'chiefs'],
['quiz', 'quizzes'],
['child', 'children'],
['shelf', 'shelves'],
['fizz', 'fizzes'],
['tooth', 'teeth'],
['thief', 'thieves'],
['day', 'days'],
['loaf', 'loaves'],
['fix', 'fixes'],
['spy', 'spies'],
['vertebra', 'vertebrae'],
['clock', 'clocks'],
['lap', 'laps'],
['cuff', 'cuffs'],
['leaf', 'leaves'],
['calf', 'calves'],
['moth', 'moths'],
['mouth', 'mouths'],
['house', 'houses'],
['proof', 'proofs'],
['hoof', 'hooves'],
['elf', 'elves'],
['turf', 'turfs'],
['craft', 'crafts'],
['die', 'dice'],
['penny', 'pennies'],
['campus', 'campuses'],
['virus', 'viri'],
['iris', 'irises'],
['bureau', 'bureaus'],
['kiwi', 'kiwis'],
['wiki', 'wikis'],
['igloo', 'igloos'],
['ninja', 'ninjas'],
['pizza', 'pizzas'],
['kayak', 'kayaks'],
['canoe', 'canoes'],
['tiding', 'tidings'],
['pea', 'peas'],
['drive', 'drives'],
['nose', 'noses'],
['movie', 'movies'],
['status', 'statuses'],
['alias', 'aliases'],
['memorandum', 'memorandums'],
['language', 'languages'],
['plural', 'plurals'],
['word', 'words'],
['multiple', 'multiples'],
['reward', 'rewards'],
['sandwich', 'sandwiches'],
['subway', 'subways'],
['direction', 'directions'],
['land', 'lands'],
['row', 'rows'],
['grow', 'grows'],
['flow', 'flows'],
['rose', 'roses'],
['raise', 'raises'],
['friend', 'friends'],
['follower', 'followers'],
['male', 'males'],
['nail', 'nails'],
['sex', 'sexes'],
['tape', 'tapes'],
['ruler', 'rulers'],
['king', 'kings'],
['queen', 'queens'],
['zero', 'zeros'],
['quest', 'quests'],
['goose', 'geese'],
['foot', 'feet'],
['ex', 'exes'],
['reflex', 'reflexes'],
['heat', 'heats'],
['train', 'trains'],
['test', 'tests'],
['pie', 'pies'],
['fly', 'flies'],
['eye', 'eyes'],
['lie', 'lies'],
['node', 'nodes'],
['trade', 'trades'],
['chinese', 'chinese'],
['please', 'pleases'],
['japanese', 'japanese'],
['regex', 'regexes'],
['license', 'licenses'],
['zebra', 'zebras'],
['general', 'generals'],
['corps', 'corps'],
['pliers', 'pliers'],
['flyer', 'flyers'],
['scissors', 'scissors'],
['fireman', 'firemen'],
['chirp', 'chirps'],
['harp', 'harps'],
['corpse', 'corpses'],
['dye', 'dyes'],
['move', 'moves'],
['zombie', 'zombies'],
['variety', 'varieties'],
['talkie', 'talkies'],
['walkie-talkie', 'walkie-talkies'],
['groupie', 'groupies'],
['goonie', 'goonies'],
['lassie', 'lassies'],
['genie', 'genies'],
['foodie', 'foodies'],
['faerie', 'faeries'],
['collie', 'collies'],
['obloquy', 'obloquies'],
['looey', 'looies'],
['osprey', 'ospreys'],
['cover', 'covers'],
['tie', 'ties'],
['groove', 'grooves'],
['bee', 'bees'],
['ave', 'aves'],
['wave', 'waves'],
['wolf', 'wolves'],
['airwave', 'airwaves'],
['archive', 'archives'],
['arch', 'arches'],
['dive', 'dives'],
['aftershave', 'aftershaves'],
['cave', 'caves'],
['grave', 'graves'],
['gift', 'gifts'],
['nerve', 'nerves'],
['nerd', 'nerds'],
['carve', 'carves'],
['rave', 'raves'],
['scarf', 'scarves'],
['sale', 'sales'],
['sail', 'sails'],
['swerve', 'swerves'],
['love', 'loves'],
['dove', 'doves'],
['glove', 'gloves'],
['wharf', 'wharves'],
['valve', 'valves'],
['werewolf', 'werewolves'],
['view', 'views'],
['emu', 'emus'],
['menu', 'menus'],
['wax', 'waxes'],
['fax', 'faxes'],
['nut', 'nuts'],
['crust', 'crusts'],
['lemma', 'lemmata'],
['anathema', 'anathemata'],
['analysis', 'analyses'],
['locus', 'loci'],
['uterus', 'uteri'],
['curriculum', 'curricula'],
['quorum', 'quora'],
['genius', 'geniuses'],
['flower', 'flowers'],
['crash', 'crashes'],
['soul', 'souls'],
['career', 'careers'],
['planet', 'planets'],
['son', 'sons'],
['sun', 'suns'],
['drink', 'drinks'],
['diploma', 'diplomas'],
['dilemma', 'dilemmas'],
['grandma', 'grandmas'],
['no', 'nos'],
['yes', 'yeses'],
['employ', 'employs'],
['employee', 'employees'],
['history', 'histories'],
['story', 'stories'],
['purchase', 'purchases'],
['order', 'orders'],
['key', 'keys'],
['bomb', 'bombs'],
['city', 'cities'],
['sanity', 'sanities'],
['ability', 'abilities'],
['activity', 'activities'],
['cutie', 'cuties'],
['validation', 'validations'],
['floaty', 'floaties'],
['nicety', 'niceties'],
['goalie', 'goalies'],
['crawly', 'crawlies'],
['duty', 'duties'],
['scrutiny', 'scrutinies'],
['deputy', 'deputies'],
['beauty', 'beauties'],
['bank', 'banks'],
['family', 'families'],
['tally', 'tallies'],
['ally', 'allies'],
['alley', 'alleys'],
['valley', 'valleys'],
['medley', 'medleys'],
['melody', 'melodies'],
['trolly', 'trollies'],
['thunk', 'thunks'],
['koala', 'koalas'],
['special', 'specials'],
['book', 'books'],
['knob', 'knobs'],
['crab', 'crabs'],
['plough', 'ploughs'],
['high', 'highs'],
['low', 'lows'],
['hiccup', 'hiccups'],
['bonus', 'bonuses'],
['circus', 'circuses'],
['abacus', 'abacuses'],
['phobia', 'phobias'],
['case', 'cases'],
['lace', 'laces'],
['trace', 'traces'],
['mage', 'mages'],
['lotus', 'lotuses'],
['motorbus', 'motorbuses'],
['cutlas', 'cutlases'],
['tequila', 'tequilas'],
['liar', 'liars'],
['delta', 'deltas'],
['visa', 'visas'],
['flea', 'fleas'],
['favela', 'favelas'],
['cobra', 'cobras'],
['finish', 'finishes'],
['gorilla', 'gorillas'],
['mass', 'masses'],
['face', 'faces'],
['rabbit', 'rabbits'],
['adventure', 'adventures'],
['breeze', 'breezes'],
['brew', 'brews'],
['canopy', 'canopies'],
['copy', 'copies'],
['spy', 'spies'],
['cave', 'caves'],
['charge', 'charges'],
['cinema', 'cinemas'],
['coffee', 'coffees'],
['favourite', 'favourites'],
['themself', 'themselves'],
['country', 'countries'],
['issue', 'issues'],
['authority', 'authorities'],
['force', 'forces'],
['objective', 'objectives'],
['present', 'presents'],
['industry', 'industries'],
['believe', 'believes'],
['century', 'centuries'],
['category', 'categories'],
['eve', 'eves'],
['fee', 'fees'],
['gene', 'genes'],
['try', 'tries'],
['currency', 'currencies'],
['pose', 'poses'],
['cheese', 'cheeses'],
['clue', 'clues'],
['cheer', 'cheers'],
['litre', 'litres'],
['money', 'monies'],
['attorney', 'attorneys'],
['balcony', 'balconies'],
['cockney', 'cockneys'],
['donkey', 'donkeys'],
['honey', 'honeys'],
['smiley', 'smilies'],
['survey', 'surveys'],
['whiskey', 'whiskeys'],
['whisky', 'whiskies'],
['volley', 'volleys'],
['tongue', 'tongues'],
['suit', 'suits'],
['suite', 'suites'],
['cruise', 'cruises'],
['eave', 'eaves'],
['consultancy', 'consultancies'],
['pouch', 'pouches'],
['wallaby', 'wallabies'],
['abyss', 'abysses'],
['weekly', 'weeklies'],
['whistle', 'whistles'],
['utilise', 'utilises'],
['utilize', 'utilizes'],
['mercy', 'mercies'],
['mercenary', 'mercenaries'],
['take', 'takes'],
['flush', 'flushes'],
['gate', 'gates'],
['evolve', 'evolves'],
['slave', 'slaves'],
['native', 'natives'],
['revolve', 'revolves'],
['twelve', 'twelves'],
['sleeve', 'sleeves'],
['subjective', 'subjectives'],
['stream', 'streams'],
['beam', 'beams'],
['foam', 'foams'],
['callus', 'calluses'],
['use', 'uses'],
['beau', 'beaus'],
['gateau', 'gateaus'],
['fetus', 'fetuses'],
['luau', 'luaus'],
['pilau', 'pilaus'],
['shoe', 'shoes'],
['sandshoe', 'sandshoes'],
['zeus', 'zeuses'],
['nucleus', 'nuclei'],
['sky', 'skies'],
['beach', 'beaches'],
['brush', 'brushes'],
['hoax', 'hoaxes'],
['scratch', 'scratches'],
['nanny', 'nannies'],
['negro', 'negroes'],
['taco', 'tacos'],
['cafe', 'cafes'],
['cave', 'caves'],
['giraffe', 'giraffes'],
['goodwife', 'goodwives'],
['housewife', 'housewives'],
['safe', 'safes'],
['save', 'saves'],
['pocketknife', 'pocketknives'],
['tartufe', 'tartufes'],
['tartuffe', 'tartuffes'],
['truffle', 'truffles'],
['jefe', 'jefes'],
['agrafe', 'agrafes'],
['agraffe', 'agraffes'],
['bouffe', 'bouffes'],
['carafe', 'carafes'],
['chafe', 'chafes'],
['pouffe', 'pouffes'],
['pouf', 'poufs'],
['piaffe', 'piaffes'],
['gaffe', 'gaffes'],
['executive', 'executives'],
['cove', 'coves'],
['dove', 'doves'],
['fave', 'faves'],
['positive', 'positives'],
['solve', 'solves'],
['trove', 'troves'],
['treasure', 'treasures'],
['suave', 'suaves'],
['bluff', 'bluffs'],
['half', 'halves'],
['knockoff', 'knockoffs'],
['handkerchief', 'handkerchiefs'],
['reed', 'reeds'],
['reef', 'reefs'],
['yourself', 'yourselves'],
['sunroof', 'sunroofs'],
['plateau', 'plateaus'],
['radius', 'radii'],
['stratum', 'strata'],
['stratus', 'strati'],
['focus', 'foci'],
['fungus', 'fungi'],
['appendix', 'appendices'],
['seraph', 'seraphim'],
['cherub', 'cherubim'],
['memo', 'memos'],
['cello', 'cellos'],
['automaton', 'automata'],
['button', 'buttons'],
['crayon', 'crayons'],
['captive', 'captives'],
['abrasive', 'abrasives'],
['archive', 'archives'],
['additive', 'additives'],
['hive', 'hives'],
['beehive', 'beehives'],
['olive', 'olives'],
['black olive', 'black olives'],
['chive', 'chives'],
['adjective', 'adjectives'],
['cattle drive', 'cattle drives'],
['explosive', 'explosives'],
['executive', 'executives'],
['negative', 'negatives'],
['fugitive', 'fugitives'],
['progressive', 'progressives'],
['laxative', 'laxatives'],
['incentive', 'incentives'],
['genesis', 'geneses'],
['surprise', 'surprises'],
['enterprise', 'enterprises'],
['relative', 'relatives'],
['positive', 'positives'],
['perspective', 'perspectives'],
['superlative', 'superlatives'],
['afterlife', 'afterlives'],
['native', 'natives'],
['detective', 'detectives'],
['collective', 'collectives'],
['lowlife', 'lowlives'],
['low-life', 'low-lives'],
['strife', 'strifes'],
['pony', 'ponies'],
['phony', 'phonies'],
['felony', 'felonies'],
['colony', 'colonies'],
['symphony', 'symphonies'],
['semicolony', 'semicolonies'],
['radiotelephony', 'radiotelephonies'],
['company', 'companies'],
['ceremony', 'ceremonies'],
['carnivore', 'carnivores'],
['emphasis', 'emphases'],
['abuse', 'abuses'],
['ass', 'asses'],
['mile', 'miles'],
['consensus', 'consensuses'],
['coatdress', 'coatdresses'],
['courthouse', 'courthouses'],
['playhouse', 'playhouses'],
['crispness', 'crispnesses'],
['racehorse', 'racehorses'],
['greatness', 'greatnesses'],
['demon', 'demons'],
['lemon', 'lemons'],
['pokemon', 'pokemon'],
['pokémon', 'pokémon'],
['christmas', 'christmases'],
['zymase', 'zymases'],
['accomplice', 'accomplices'],
['amice', 'amices'],
['titmouse', 'titmice'],
['slice', 'slices'],
['base', 'bases'],
['database', 'databases'],
['rise', 'rises'],
['uprise', 'uprises'],
['size', 'sizes'],
['prize', 'prizes'],
['booby', 'boobies'],
['hobby', 'hobbies'],
['baby', 'babies'],
['cookie', 'cookies'],
['budgie', 'budgies'],
['calorie', 'calories'],
['brownie', 'brownies'],
['lolly', 'lollies'],
['hippie', 'hippies'],
['smoothie', 'smoothies'],
['techie', 'techies'],
['specie', 'species'],
['quickie', 'quickies'],
['pixie', 'pixies'],
['rotisserie', 'rotisseries'],
['porkpie', 'porkpies'],
['newbie', 'newbies'],
['veggie', 'veggies'],
['bourgeoisie', 'bourgeoisies'],
['party', 'parties'],
['apology', 'apologies'],
['ancestry', 'ancestries'],
['anomaly', 'anomalies'],
['anniversary', 'anniversaries'],
['battery', 'batteries'],
['nappy', 'nappies'],
['hanky', 'hankies'],
['junkie', 'junkies'],
['hogtie', 'hogties'],
['footsie', 'footsies'],
['curry', 'curries'],
['fantasy', 'fantasies'],
['housefly', 'houseflies'],
['falsy', 'falsies'],
['doggy', 'doggies'],
['carny', 'carnies'],
['cabby', 'cabbies'],
['charlie', 'charlies'],
['bookie', 'bookies'],
['auntie', 'aunties'],
# Prototype inheritance.
['constructor', 'constructors'],
# Non-standard case.
['randomWord', 'randomWords'],
['camelCase', 'camelCases'],
['PascalCase', 'PascalCases'],
['Alumnus', 'Alumni'],
['CHICKEN', 'CHICKENS'],
['日本語', '日本語'],
['한국', '한국'],
['中文', '中文'],
['اللغة العربية', 'اللغة العربية'],
['四 chicken', '四 chickens'],
['Order2', 'Order2s'],
['Work Order2', 'Work Order2s'],
['SoundFX2', 'SoundFX2s'],
['oDonald', 'oDonalds']
]
#
# Odd plural to singular tests.
#
# @type {Array}
#
SINGULAR_TESTS = [
['dingo', 'dingos'],
['mango', 'mangoes'],
['echo', 'echos'],
['ghetto', 'ghettoes'],
['nucleus', 'nucleuses'],
['bureau', 'bureaux'],
['seraph', 'seraphs']
]
#
# Odd singular to plural tests.
#
# @type {Array}
#
PLURAL_TESTS = [
['plateaux', 'plateaux'],
['axis', 'axes'],
['basis', 'bases'],
['automatum', 'automata'],
['thou', 'you'],
['axiS', 'axes'],
['passerby', 'passersby']
]
class TestPluralize(unittest.TestCase):
def test_methods_plural(self):
pluralizer = Pluralizer()
for test in [*BASIC_TESTS, *PLURAL_TESTS]:
self.assertEqual(pluralizer.plural(test[0]), test[1])
def test_methods_is_plural(self):
pluralizer = Pluralizer()
for test in [*BASIC_TESTS, *PLURAL_TESTS]:
self.assertTrue(pluralizer.isPlural(test[1]), f"isPlural('{test[1]}')")
def test_methods_singular(self):
pluralizer = Pluralizer()
for test in [*BASIC_TESTS, *SINGULAR_TESTS]:
self.assertEqual(pluralizer.singular(test[1]), test[0])
def test_methods_is_singular(self):
pluralizer = Pluralizer()
for test in [*BASIC_TESTS, *SINGULAR_TESTS]:
self.assertTrue(pluralizer.isSingular(test[0]))
def test_automatically_convert_plural(self):
pluralizer = Pluralizer()
for test in [*BASIC_TESTS, *PLURAL_TESTS]:
self.assertEqual(pluralizer.pluralize(test[1], 5), test[1])
self.assertEqual(pluralizer.pluralize(test[0], 5), test[1])
def test_automatically_convert_singular(self):
pluralizer = Pluralizer()
for test in [*BASIC_TESTS, *SINGULAR_TESTS]:
self.assertEqual(pluralizer.pluralize(test[0], 1), test[0])
self.assertEqual(pluralizer.pluralize(test[1], 1), test[0])
def test_prepend_count_plural_words(self):
pluralizer = Pluralizer()
self.assertEqual(pluralizer.pluralize('test', 5, True), '5 tests')
def test_prepend_count_singular_words(self):
pluralizer = Pluralizer()
self.assertEqual(pluralizer.pluralize('test', 1, True), '1 test')
def test_add_new_uncountable_rules(self):
pluralizer = Pluralizer()
self.assertEqual(pluralizer.pluralize('paper'), 'papers')
pluralizer.addUncountableRule('paper')
self.assertEqual(pluralizer.pluralize('paper'), 'paper')
def test_add_new_irregular_words(self):
pluralizer = Pluralizer()
self.assertEqual(pluralizer.pluralize('irregular'), 'irregulars')
pluralizer.addIrregularRule('irregular', 'regular')
self.assertEqual(pluralizer.pluralize('irregular'), 'regular')
def test_return_false_for_irregular_words(self):
pluralizer = Pluralizer()
self.assertTrue(pluralizer.isPlural('irregulars'))
pluralizer.addIrregularRule('irregulars', 'regular')
self.assertFalse(pluralizer.isPlural('irregulars'))
def test_add_new_plural_matching_rules(self):
pluralizer = Pluralizer()
self.assertEqual(pluralizer.plural('regex'), 'regexes')
pluralizer.addPluralRule(re.compile(r'(?i)gex$'), 'gexii')
self.assertEqual(pluralizer.plural('regex'), 'regexii')
def test_add_new_singular_matching_rules(self):
pluralizer = Pluralizer()
self.assertEqual(pluralizer.singular('singles'), 'single')
pluralizer.addSingularRule(re.compile('singles$'), 'singular')
self.assertEqual(pluralizer.singular('singles'), 'singular')
def test_allow_new_plural_matching_rules_to_be_strings(self):
pluralizer = Pluralizer()
self.assertEqual(pluralizer.plural('person'), 'people')
pluralizer.addPluralRule('person', 'peeps')
self.assertEqual(pluralizer.plural('person'), 'peeps')
def test_allow_new_singular_matching_rules_to_be_strings(self):
pluralizer = Pluralizer()
self.assertEqual(pluralizer.singular('mornings'), 'morning')
pluralizer.addSingularRule('mornings', 'suck')
self.assertEqual(pluralizer.singular('mornings'), 'suck')
if __name__ == '__main__':
unittest.main()
| 28.039191 | 83 | 0.515803 | 3,649 | 0.164096 | 0 | 0 | 0 | 0 | 0 | 0 | 12,082 | 0.543329 |
d3b146cefcbdfbb497115b74257a2891722524b5 | 1,988 | py | Python | promgen/util.py | sundy-li/promgen | e532bde46b542dd66f46e3dd654bc1ad31deeec7 | ["MIT"] | null | null | null | promgen/util.py | sundy-li/promgen | e532bde46b542dd66f46e3dd654bc1ad31deeec7 | ["MIT"] | 8 | 2021-04-08T21:59:34.000Z | 2022-02-10T10:42:43.000Z | promgen/util.py | Andreich2010/promgen | dae2b720f30b0c002aa50a74c4c4fc8dfbcbb2b7 | ["MIT", "Apache-2.0", "BSD-3-Clause"] | null | null | null |
# Copyright (c) 2017 LINE Corporation
# These sources are released under the terms of the MIT license: see LICENSE
import requests.sessions
from django.db.models import F
from promgen.version import __version__
from django.conf import settings
# Wrappers around request api to ensure we always attach our user agent
# https://github.com/requests/requests/blob/master/requests/api.py
def post(url, data=None, json=None, **kwargs):
with requests.sessions.Session() as session:
session.headers['User-Agent'] = 'promgen/{}'.format(__version__)
return session.post(url, data=data, json=json, **kwargs)
def get(url, params=None, **kwargs):
with requests.sessions.Session() as session:
session.headers['User-Agent'] = 'promgen/{}'.format(__version__)
return session.get(url, params=params, **kwargs)
def delete(url, **kwargs):
with requests.sessions.Session() as session:
session.headers['User-Agent'] = 'promgen/{}'.format(__version__)
return session.delete(url, **kwargs)
def setting(key, default=None, domain=None):
"""
Settings helper based on saltstack's query
Allows a simple way to query settings from YAML
using the style `path:to:key` to represent
path:
to:
key: value
"""
rtn = settings.PROMGEN
if domain:
rtn = rtn[domain]
for index in key.split(":"):
try:
rtn = rtn[index]
except KeyError:
return default
return rtn
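# Illustrative usage of setting() (the config values here are hypothetical, not
# taken from this repository): with settings.PROMGEN containing
# {"alertmanager": {"url": "http://localhost:9093"}},
#   setting("alertmanager:url")                  -> "http://localhost:9093"
#   setting("alertmanager:missing", default=1)   -> 1
#   setting("url", domain="alertmanager")        -> "http://localhost:9093"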
class HelpFor:
# Wrap a model's lower level api so that we can easily
# grab help_text for a specific field
# help_text = HelpFor(DjangoModel)
    # help_text.field_name
def __init__(self, model):
self.model = model
def __getattr__(self, name):
return self.model._meta.get_field(name).help_text
def inc_for_pk(model, pk, **kwargs):
# key=F('key') + value
model.objects.filter(pk=pk).update(**{key: F(key) + kwargs[key] for key in kwargs})
| 28.811594 | 87 | 0.667505 | 331 | 0.166499 | 0 | 0 | 0 | 0 | 0 | 0 | 696 | 0.350101 |
d3b1ab341873d6a614ea74d34b804a1a2793bea2 | 5,478 | py | Python | integration_tests/test_suites/k8s-integration-test-suite/test_utils.py | ericct/dagster | dd2c9f05751e1bae212a30dbc54381167a14f6c5 | ["Apache-2.0"] | null | null | null | integration_tests/test_suites/k8s-integration-test-suite/test_utils.py | ericct/dagster | dd2c9f05751e1bae212a30dbc54381167a14f6c5 | ["Apache-2.0"] | null | null | null | integration_tests/test_suites/k8s-integration-test-suite/test_utils.py | ericct/dagster | dd2c9f05751e1bae212a30dbc54381167a14f6c5 | ["Apache-2.0"] | null | null | null |
import time
import kubernetes
import pytest
from dagster_k8s.client import DagsterK8sError, WaitForPodState
from dagster_k8s.utils import retrieve_pod_logs, wait_for_job_success, wait_for_pod
from dagster_k8s_test_infra.helm import get_helm_test_namespace
def construct_pod_spec(name, cmd):
return kubernetes.client.V1PodSpec(
restart_policy="Never",
containers=[
kubernetes.client.V1Container(name=name, image="busybox", args=["/bin/sh", "-c", cmd])
],
)
def construct_pod_manifest(name, cmd):
return kubernetes.client.V1Pod(
metadata=kubernetes.client.V1ObjectMeta(name=name), spec=construct_pod_spec(name, cmd),
)
def construct_job_manifest(name, cmd):
return kubernetes.client.V1Job(
api_version="batch/v1",
kind="Job",
metadata=kubernetes.client.V1ObjectMeta(name=name),
spec=kubernetes.client.V1JobSpec(
template=kubernetes.client.V1PodTemplateSpec(spec=construct_pod_spec(name, cmd)),
),
)
def test_wait_for_pod(cluster_provider): # pylint: disable=unused-argument
api = kubernetes.client.CoreV1Api()
with get_helm_test_namespace() as namespace:
# Without this sleep, we get the following error on kind:
# HTTP response body:
# {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"No API
# token found for service account \"default\", retry after the token is automatically
# created and added to the service
# account","reason":"ServerTimeout","details":{"name":"create
# pod","kind":"serviceaccounts","retryAfterSeconds":1},"code":500}
time.sleep(5)
try:
api.create_namespaced_pod(
body=construct_pod_manifest("sayhi1", 'echo "hello world"'), namespace=namespace
)
wait_for_pod("sayhi1", namespace=namespace)
assert retrieve_pod_logs("sayhi1", namespace=namespace) == "hello world\n"
api.create_namespaced_pod(
body=construct_pod_manifest("sayhi2", 'echo "hello world"'), namespace=namespace
)
wait_for_pod("sayhi2", namespace=namespace, wait_for_state=WaitForPodState.Terminated)
with pytest.raises(
DagsterK8sError, match="Timed out while waiting for pod to become ready"
):
api.create_namespaced_pod(
body=construct_pod_manifest("sayhi3", 'sleep 5; echo "hello world"'),
namespace=namespace,
)
wait_for_pod("sayhi3", namespace=namespace, wait_timeout=1)
with pytest.raises(DagsterK8sError) as exc_info:
api.create_namespaced_pod(
body=construct_pod_manifest("fail", 'echo "whoops!"; exit 1'),
namespace=namespace,
)
wait_for_pod("fail", namespace=namespace, wait_for_state=WaitForPodState.Terminated)
# not doing total match because integration test. unit tests test full log message
assert "Pod did not exit successfully." in str(exc_info.value)
finally:
for pod_name in ["sayhi1", "sayhi2", "sayhi3", "fail"]:
try:
api.delete_namespaced_pod(pod_name, namespace=namespace)
except kubernetes.client.rest.ApiException:
pass
def test_wait_for_job(cluster_provider): # pylint: disable=unused-argument
with get_helm_test_namespace() as namespace:
# Without this sleep, we get the following error on kind:
# HTTP response body:
# {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"No API
# token found for service account \"default\", retry after the token is automatically
# created and added to the service
# account","reason":"ServerTimeout","details":{"name":"create
# pod","kind":"serviceaccounts","retryAfterSeconds":1},"code":500}
time.sleep(5)
try:
api = kubernetes.client.BatchV1Api()
api.create_namespaced_job(
body=construct_job_manifest("sayhi1", 'echo "hello world"'), namespace=namespace
)
wait_for_job_success("sayhi1", namespace=namespace)
with pytest.raises(
DagsterK8sError, match="Timed out while waiting for job sayhi2 to complete"
):
api.create_namespaced_job(
body=construct_job_manifest("sayhi2", 'sleep 5; echo "hello world"'),
namespace=namespace,
)
wait_for_job_success("sayhi2", namespace=namespace, wait_timeout=1)
with pytest.raises(
DagsterK8sError, match="Encountered failed job pods for job fail with status:",
):
api.create_namespaced_job(
body=construct_job_manifest("fail", 'echo "whoops!"; exit 1'),
namespace=namespace,
)
wait_for_job_success("fail", namespace=namespace)
finally:
for job in ["sayhi1", "sayhi2", "fail"]:
try:
api.delete_namespaced_job(
job, namespace=namespace, propagation_policy="Foreground"
)
except kubernetes.client.rest.ApiException:
pass
| 41.18797 | 100 | 0.61245 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,559 | 0.284593 |
d3b2063cf7dc483f806ac22531b14a9333116ffb | 1,092 | py | Python | radioepg/migrations/0001_initial.py | mervij/radiodns | 01543cf1e4de8de335af0301616e089c35fc67f8 | ["Apache-2.0"] | null | null | null | radioepg/migrations/0001_initial.py | mervij/radiodns | 01543cf1e4de8de335af0301616e089c35fc67f8 | ["Apache-2.0"] | 8 | 2021-05-17T10:54:28.000Z | 2021-06-08T12:02:37.000Z | radioepg/migrations/0001_initial.py | mervij/radiodns | 01543cf1e4de8de335af0301616e089c35fc67f8 | ["Apache-2.0"] | null | null | null |
# Generated by Django 3.1.6 on 2021-02-15 08:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Service',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('short_name', models.CharField(max_length=8)),
('medium_name', models.CharField(max_length=16)),
],
),
migrations.CreateModel(
name='Bearer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bearer_id', models.TextField()),
('cost', models.IntegerField()),
('mimeValue', models.CharField(max_length=255)),
('service', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='radioepg.service')),
],
),
]
| 32.117647 | 115 | 0.571429 | 966 | 0.884615 | 0 | 0 | 0 | 0 | 0 | 0 | 160 | 0.14652 |
d3b26049eb155f0068830e3349db9d53a2b93029 | 2,088 | py | Python | uitester/ui/case_manager/tag_names_line_edit.py | IfengAutomation/uitester | 6f9c78c86965b05efea875d38dbd9587386977fa | ["Apache-2.0"] | 4 | 2016-07-12T09:01:52.000Z | 2016-12-07T03:11:02.000Z | uitester/ui/case_manager/tag_names_line_edit.py | IfengAutomation/uitester | 6f9c78c86965b05efea875d38dbd9587386977fa | ["Apache-2.0"] | null | null | null | uitester/ui/case_manager/tag_names_line_edit.py | IfengAutomation/uitester | 6f9c78c86965b05efea875d38dbd9587386977fa | ["Apache-2.0"] | 3 | 2016-11-29T02:13:17.000Z | 2019-10-16T06:25:20.000Z |
from PyQt5.QtCore import Qt, QStringListModel
from PyQt5.QtWidgets import QLineEdit, QCompleter
class TagNamesLineEdit(QLineEdit):
def __init__(self, parent=None):
super(QLineEdit, self).__init__(parent)
self.cmp = None
self.is_completer = True
def setCompleter(self, completer):
self.cmp = completer
self.cmp.setWidget(self)
self.cmp.setCompletionMode(QCompleter.PopupCompletion)
self.cmp.setCaseSensitivity(Qt.CaseInsensitive)
self.textChanged.connect(self.tag_names_changed)
self.cmp.activated.connect(self.insert_completion)
def tag_names_changed(self):
if self.is_completer:
text = self.text()
tag_names = text.split(';')
last_tag_name = tag_names[len(tag_names) - 1]
self.cmp.update(last_tag_name)
self.cmp.popup().setCurrentIndex(self.cmp.completionModel().index(0, 0))
cr = self.cursorRect()
cr.setWidth(self.cmp.popup().sizeHintForColumn(0)
+ self.cmp.popup().verticalScrollBar().sizeHint().width())
self.cmp.complete(cr)
else:
pass
def completer(self):
return self.cmp
def insert_completion(self, string):
text = self.text()
tag_names = text.split(';')
last_tag_name = tag_names[len(tag_names) - 1]
new_text = text[0:len(text) - len(last_tag_name)] + string + ';'
self.is_completer = False
self.clear()
self.setText(new_text)
self.is_completer = True
class TagCompleter(QCompleter):
def __init__(self, string_list, parent=None):
super(TagCompleter, self).__init__(parent)
self.string_list = string_list
self.setModel(QStringListModel())
def update(self, completion_text):
filtered = []
for string in self.string_list:
if completion_text in string:
filtered.append(string)
self.model().setStringList(filtered)
self.popup().setCurrentIndex(self.model().index(0, 0))
| 34.229508 | 84 | 0.631226 | 1,985 | 0.95067 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.00431 |
d3b26adf9f1c111614b51c252d8d80c26d192abc | 337 | py | Python | utils/__init__.py | millermuttu/torch_soft | 70a692650b6eb8c70000e0f8dc2b22cbb9f94741 | ["MIT"] | null | null | null | utils/__init__.py | millermuttu/torch_soft | 70a692650b6eb8c70000e0f8dc2b22cbb9f94741 | ["MIT"] | null | null | null | utils/__init__.py | millermuttu/torch_soft | 70a692650b6eb8c70000e0f8dc2b22cbb9f94741 | ["MIT"] | null | null | null |
# # importing all the modules at once
# from .config import *
# from .normalization import *
# from .others import *
# from .img_reg import *
# from .transformation import *
# from .visualization import *
# importing the modules in a selective way
import utils.config
import utils.normalization
import utils.misc
import utils.lr_finder
| 24.071429 | 42 | 0.762611 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 240 | 0.712166 |
d3b2a1b997cbe83aa232f13b17539c3d7b815053 | 434 | py | Python | tasks.py | epu-ntua/QualiChain-mediator | 1d0f848d60861665d95ad0359914add361551763 | ["MIT"] | 2 | 2020-03-09T11:10:15.000Z | 2020-03-11T06:11:58.000Z | tasks.py | epu-ntua/QualiChain-mediator | 1d0f848d60861665d95ad0359914add361551763 | ["MIT"] | 2 | 2021-03-31T19:43:58.000Z | 2021-12-13T20:34:57.000Z | tasks.py | epu-ntua/QualiChain-mediator | 1d0f848d60861665d95ad0359914add361551763 | ["MIT"] | 2 | 2020-03-12T11:14:20.000Z | 2020-07-07T06:17:45.000Z |
from celery import Celery
from clients.dobie_client import send_data_to_dobie
app = Celery('qualichain_mediator')
app.config_from_object('settings', namespace='CELERY_')
@app.task()
def consume_messages_async(message):
"""
    This task is used to receive job posting text and feed the DOBIE component
"""
extracted_skills = send_data_to_dobie(message)
print(extracted_skills, flush=True)
return extracted_skills
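# Illustrative call site (hypothetical, not part of this file): other services
# would normally enqueue this Celery task asynchronously rather than call it directly, e.g.
#   consume_messages_async.delay(job_posting_text)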
| 25.529412 | 75 | 0.767281 | 0 | 0 | 0 | 0 | 259 | 0.596774 | 0 | 0 | 127 | 0.292627 |
d3b3426ac37ef57bd78d3b9aa39a2ef7e95619d6 | 1,174 | py | Python | ingest/ambit_geo.py | brianhouse/okavango | 4006940ddead3f31eea701efb9b9dcdc7b19402e | ["MIT"] | 2 | 2015-01-25T06:20:03.000Z | 2015-02-15T23:54:41.000Z | ingest/ambit_geo.py | brianhouse/okavango_15 | 4006940ddead3f31eea701efb9b9dcdc7b19402e | ["MIT"] | null | null | null | ingest/ambit_geo.py | brianhouse/okavango_15 | 4006940ddead3f31eea701efb9b9dcdc7b19402e | ["MIT"] | 3 | 2017-11-14T21:18:23.000Z | 2021-06-20T21:08:31.000Z |
import json, math
from ingest import ingest_json_body
from housepy import config, log, strings, util
def parse(request):
log.info("ambit_geo.parse")
sample = ingest_json_body(request)
if sample is None:
return sample, "Could not parse"
data = {}
for key, value in sample.items():
if key == "UTC":
dt = util.parse_date(sample['UTC']) # these are marked UTC in the data
t = util.timestamp(dt)
data['t_utc'] = t
continue
if key == "Longitude":
data['longitude'] = math.degrees(float(sample['Longitude']))
continue
if key == "Latitude":
data['latitude'] = math.degrees(float(sample['Latitude']))
continue
if key == "GPSAltitude":
data['altitude'] = float(sample['GPSAltitude'])
continue
if type(value) != str:
continue
data[key] = strings.as_numeric(value)
try:
log.debug("%s %s %s" % (data['longitude'], data['latitude'], data['altitude']))
except:
log.error("MISSING GEO")
    return data
| 32.611111 | 87 | 0.537479 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 238 | 0.202726 |
d3b45e5164e572fbde2110d62cb448013353f1cd | 1,593 | py | Python | gandyndns.py | nim65s/scripts | 2c61bd77bfca6ae6437654e43ad2bc95d611360a | ["BSD-2-Clause"] | 1 | 2020-12-17T09:41:42.000Z | 2020-12-17T09:41:42.000Z | gandyndns.py | nim65s/scripts | 2c61bd77bfca6ae6437654e43ad2bc95d611360a | ["BSD-2-Clause"] | null | null | null | gandyndns.py | nim65s/scripts | 2c61bd77bfca6ae6437654e43ad2bc95d611360a | ["BSD-2-Clause"] | null | null | null |
#!/usr/bin/env python
'''update gandi DNS domain entry, with LiveDNS v5
Cf. https://doc.livedns.gandi.net/#work-with-domains
'''
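# Example invocation (hypothetical domain, record name and IP, not from the original script):
#   ./gandyndns.py --verbose example.org www --ip 203.0.113.7
# When --ip is omitted, the current public address is fetched from ifconfig.me below.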
import argparse
import ipaddress
import json
import os
from subprocess import check_output
import requests
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('domain')
parser.add_argument('name')
parser.add_argument('--ip', help="defaults to ifconfig.me's return")
parser.add_argument('--api_key', help="defaults to GANDI_API_KEY env var, or the return of 'pass api/gandi'")
args = parser.parse_args()
if args.ip is None:
args.ip = requests.get('http://ifconfig.me', headers={'User-Agent': 'curl/7.61.1'}).content.decode().strip()
ip = ipaddress.ip_address(args.ip)
if args.api_key is None:
args.api_key = os.environ.get('GANDI_API_KEY', check_output(['pass', 'api/gandi'], text=True).strip())
key = {'X-Api-Key': args.api_key}
r = requests.get(f'https://dns.api.gandi.net/api/v5/domains/{args.domain}/records/{args.name}', headers=key)
r.raise_for_status()
if r.json()[0]['rrset_values'][0] == args.ip:
if args.verbose:
print('ok')
else:
type_ = 'AAAA' if isinstance(ip, ipaddress.IPv6Address) else 'A'
url = f'https://dns.api.gandi.net/api/v5/domains/{args.domain}/records/{args.name}/{type_}'
data = {'rrset_values': [args.ip]}
headers = {'Content-Type': 'application/json', **key}
r = requests.put(url, data=json.dumps(data), headers=headers)
if args.verbose:
print(r.json())
else:
r.raise_for_status()
| 32.510204 | 112 | 0.696171 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 612 | 0.384181 |
d3b4e36f678d36e360884bedfd448ece0a34ced3 | 1,895 | py | Python | leetcode.com/python/314_Binary_Tree_Vertical_Order_Traversal.py | mamane19/coding-interview-gym | 20ae1a048eddbc9a32c819cf61258e2b57572f05 | ["MIT"] | 713 | 2019-11-19T16:11:25.000Z | 2022-03-31T02:27:52.000Z | leetcode.com/python/314_Binary_Tree_Vertical_Order_Traversal.py | arunsank/coding-interview-gym | 8131e3a82795707e144fe55d765b6c15bdb97306 | ["MIT"] | 7 | 2020-01-16T17:07:18.000Z | 2021-11-15T18:24:39.000Z | leetcode.com/python/314_Binary_Tree_Vertical_Order_Traversal.py | arunsank/coding-interview-gym | 8131e3a82795707e144fe55d765b6c15bdb97306 | ["MIT"] | 393 | 2019-11-18T17:55:45.000Z | 2022-03-28T20:26:32.000Z |
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from collections import deque
from collections import defaultdict
class Solution(object):
def verticalOrder(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
if not root:
return []
queue = deque([(root, 0)])
verticalNodeMap = defaultdict(list)
while queue:
node, horrizotalDistace = queue.popleft()
if node:
verticalNodeMap[horrizotalDistace].append(node.val)
queue.append((node.left, horrizotalDistace - 1))
queue.append((node.right, horrizotalDistace + 1))
minHorrizotalDistace, maxHorrizotalDistace = min(verticalNodeMap.keys()), max(verticalNodeMap.keys())
result = []
for key in range(minHorrizotalDistace, maxHorrizotalDistace + 1):
result.append(verticalNodeMap[key])
return result
# My solution during mock, getting TLE, don't know why
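# Note: the TLE most likely comes from the loop below re-enqueuing
# `root.left`/`root.right` (and appending `root.val`) on every iteration
# instead of using `currentNode`, so the BFS never advances past the root.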
from collections import defaultdict
from collections import deque
class Solution(object):
def verticalOrder(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
if not root:
return []
orderMap = defaultdict(list)
queue = deque([(root, 0)])
while queue:
currentNode, vLine = queue.popleft()
if currentNode:
orderMap[vLine].append(root.val)
queue.append((root.left, vLine - 1))
queue.append((root.right, vLine + 1))
result = []
for i in range(min(orderMap.keys()), max(orderMap.keys()) + 1):
result.append(orderMap[i])
        return result
| 31.583333 | 109 | 0.582058 | 1,503 | 0.79314 | 0 | 0 | 0 | 0 | 0 | 0 | 400 | 0.211082 |
d3b4eac02574fc5ff2fd374b340d31cb4dba25c1 | 3,750 | py | Python | src/sentry/models/pluginhealth.py | ayesha-omarali/sentry | 96f81a1805227c26234e6317771bc0dcb5c176ad | ["BSD-3-Clause"] | null | null | null | src/sentry/models/pluginhealth.py | ayesha-omarali/sentry | 96f81a1805227c26234e6317771bc0dcb5c176ad | ["BSD-3-Clause"] | null | null | null | src/sentry/models/pluginhealth.py | ayesha-omarali/sentry | 96f81a1805227c26234e6317771bc0dcb5c176ad | ["BSD-3-Clause"] | null | null | null |
from __future__ import absolute_import
from sentry.db.models import (
ArrayField, BoundedPositiveIntegerField, Model, FlexibleForeignKey, sane_repr
)
from django.db import models
from jsonfield import JSONField
from django.utils import timezone
from sentry.constants import ObjectStatus
from django.utils.translation import ugettext_lazy as _
class PluginFeatures(object):
issue_basic = 'issue_basic'
issue_sync = 'issue_sync'
repository = 'repository'
class PluginHealth(Model):
__core__ = True
name = models.CharField(max_length=128, db_index=True)
features_list = ArrayField(of=models.TextField)
date_added = models.DateTimeField(default=timezone.now)
link = models.URLField(null=True, blank=True)
author = models.CharField(max_length=64)
metadata = JSONField()
status = BoundedPositiveIntegerField(
default=0,
choices=(
(ObjectStatus.VISIBLE,
_('Active')), (ObjectStatus.PENDING_DELETION, _('Pending Deletion')),
(ObjectStatus.DELETION_IN_PROGRESS, _('Deletion in Progress')),
),
db_index=True
)
class Meta:
app_label = 'sentry'
db_table = 'sentry_pluginhealth'
__repr__ = sane_repr('name')
def run_tests(self):
plugin_test = PluginHealthTest.objects.create(
plugin_id=self.id,
)
plugin_test.test_data = plugin_test.run_tests(self)
plugin_test.save()
return plugin_test
class PluginHealthTest(Model):
__core__ = True
date_added = models.DateTimeField(default=timezone.now)
plugin = FlexibleForeignKey('sentry.PluginHealth')
test_data = JSONField()
class Meta:
app_label = 'sentry'
db_table = 'sentry_pluginhealthtest'
unique_together = (('plugin', 'date_added'))
__repr__ = sane_repr('plugin', 'date_added')
def run_tests(self, plugin_health):
return {
'configure_test': self.configure_test(plugin_health),
'create_issue_test': self.create_issue_test(plugin_health),
'link_issue_test': self.link_issue_test(plugin_health),
'sync_assignment_test': self.sync_assignment_test(plugin_health),
'sync_comment_test': self.sync_comment_test(plugin_health),
'sync_status_test': self.sync_status_test(plugin_health),
'repository_test': self.repository_test(plugin_health),
}
def configure_test(self, plugin_health):
test_results = None
return test_results
def create_issue_test(self, plugin_health):
if PluginFeatures.issue_basic not in plugin_health.features_list:
return None
test_results = None
return test_results
def link_issue_test(self, plugin_health):
if PluginFeatures.issue_basic not in plugin_health.features_list:
return None
test_results = None
return test_results
def sync_assignment_test(self, plugin_health):
if PluginFeatures.issue_sync not in plugin_health.features_list:
return None
test_results = None
return test_results
def sync_comment_test(self, plugin_health):
if PluginFeatures.issue_sync not in plugin_health.features_list:
return None
test_results = None
return test_results
def sync_status_test(self, plugin_health):
if PluginFeatures.issue_sync not in plugin_health.features_list:
return None
test_results = None
return test_results
def repository_test(self, plugin_health):
if PluginFeatures.repository not in plugin_health.features_list:
return None
test_results = None
return test_results
| 32.327586 | 82 | 0.686133 | 3,393 | 0.9048 | 0 | 0 | 0 | 0 | 0 | 0 | 342 | 0.0912 |
d3b76c1c0fc989bb41ad8f58fabce2395587d211 | 1,615 | py | Python | src/masonite/oauth/drivers/FacebookDriver.py | girardinsamuel/masonite-socialite | 04110601b299d8505ec453b7743124cb88047d9d | ["MIT"] | 1 | 2021-05-07T16:37:03.000Z | 2021-05-07T16:37:03.000Z | src/masonite/oauth/drivers/FacebookDriver.py | girardinsamuel/masonite-socialite | 04110601b299d8505ec453b7743124cb88047d9d | ["MIT"] | 11 | 2021-05-17T06:45:48.000Z | 2021-10-03T15:16:23.000Z | src/masonite/oauth/drivers/FacebookDriver.py | girardinsamuel/masonite-socialite | 04110601b299d8505ec453b7743124cb88047d9d | ["MIT"] | null | null | null |
from .BaseDriver import BaseDriver
from ..OAuthUser import OAuthUser
class FacebookDriver(BaseDriver):
def get_default_scopes(self):
return ["email"]
def get_auth_url(self):
return "https://www.facebook.com/dialog/oauth"
def get_token_url(self):
return "https://graph.facebook.com/oauth/access_token"
def get_user_url(self):
return "https://graph.facebook.com/me?"
def get_request_options(self, token):
return {
"headers": {"Authorization": f"Bearer {token}", "Accept": "application/json"},
"query": {"prettyPrint": "false"},
}
def user(self):
user_data, token = super().user()
user = (
OAuthUser()
.set_token(token)
.build(
{
"id": user_data["sub"],
"nickname": user_data["nickname"],
"name": user_data["name"],
"email": user_data["email"],
"avatar": user_data["picture"],
}
)
)
return user
def user_from_token(self, token):
user_data = super().user_from_token(token)
user = (
OAuthUser()
.set_token(token)
.build(
{
"id": user_data["sub"],
"nickname": user_data["nickname"],
"name": user_data["name"],
"email": user_data["email"],
"avatar": user_data["picture"],
}
)
)
return user
| 28.333333 | 90 | 0.479257 | 1,543 | 0.955418 | 0 | 0 | 0 | 0 | 0 | 0 | 363 | 0.224768 |
d3b8efd54656a0b32ac0c5b886fd7d4ce09f8a83 | 1,266 | py | Python | python/convert_to_readwise.py | t27/highlights-convert | a6c6696ece4fabbbb56e420cb23c0466710e1345 | [
"MIT"
]
| null | null | null | python/convert_to_readwise.py | t27/highlights-convert | a6c6696ece4fabbbb56e420cb23c0466710e1345 | [
"MIT"
]
| null | null | null | python/convert_to_readwise.py | t27/highlights-convert | a6c6696ece4fabbbb56e420cb23c0466710e1345 | [
"MIT"
]
| 1 | 2021-06-29T20:40:06.000Z | 2021-06-29T20:40:06.000Z | import pandas as pd
import json
import glob
columns = ["Highlight","Title","Author","URL","Note","Location"]
# for sample of the input json look at any json in the root of the `results` folder
def convert_to_readwise_df(json_files):
"""Convert the internal json format to a readwise compatible dataframe
Args:
json_files (List[str]): list of json files
Returns:
pd.DataFrame: dataframe with columns as required by readwise
"""
df_data = []
for file in json_files:
with open(file) as f:
data = json.load(f)
title = data['volume']['title']
author = ", ".join(data['volume']['authors'])
for entry in data['highlights']:
highlight = entry['content']
location = entry['location']
notes = ""
if "notes" in entry:
                for note in entry["notes"]:
                    notes = notes + "\n" + note
df_data.append([highlight,title,author,"",notes,location])
df = pd.DataFrame(df_data,columns = columns)
return df
if __name__ == "__main__":
json_files = glob.glob("../results/*.json")
df = convert_to_readwise_df(json_files)
df.to_csv("tarang_readwise.csv",index=False)
| 32.461538 | 83 | 0.590837 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 485 | 0.383096 |
d3ba25fae7aacb5e43b639c41eadbd3d14fb7a48 | 303 | py | Python | ms_deisotope/qc/__init__.py | mstim/ms_deisotope | 29f4f466e92e66b65a2d21eca714aa627caa21db | [
"Apache-2.0"
]
| 18 | 2017-09-01T12:26:12.000Z | 2022-02-23T02:31:29.000Z | ms_deisotope/qc/__init__.py | mstim/ms_deisotope | 29f4f466e92e66b65a2d21eca714aa627caa21db | [
"Apache-2.0"
]
| 19 | 2017-03-12T20:40:36.000Z | 2022-03-31T22:50:47.000Z | ms_deisotope/qc/__init__.py | mstim/ms_deisotope | 29f4f466e92e66b65a2d21eca714aa627caa21db | [
"Apache-2.0"
]
| 14 | 2016-05-06T02:25:30.000Z | 2022-03-31T14:40:06.000Z | """A collection of methods for determining whether a given spectrum is
of high quality (likely to produce a high quality interpretation)
"""
from .heuristic import xrea
from .isolation import CoIsolation, PrecursorPurityEstimator
__all__ = [
"xrea",
"CoIsolation", "PrecursorPurityEstimator"
]
| 27.545455 | 70 | 0.772277 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 185 | 0.610561 |
d3bbc84b4a938b83b84adeff2d313509849c11f6 | 3,855 | py | Python | rpi_animations/message.py | Anski-D/rpi_animations_old | b019a301ba777d76e3cedc6b86359570e2c2f18b | [
"MIT"
]
| null | null | null | rpi_animations/message.py | Anski-D/rpi_animations_old | b019a301ba777d76e3cedc6b86359570e2c2f18b | [
"MIT"
]
| null | null | null | rpi_animations/message.py | Anski-D/rpi_animations_old | b019a301ba777d76e3cedc6b86359570e2c2f18b | [
"MIT"
]
| null | null | null | from .item import Item
class Message(Item):
"""
Message feature object in the rpi_animations package.
"""
def __init__(self, group, screen_animator) -> None:
"""
Initialise Message object with sprite group and screen object. Run initial setup methods.
Args:
group (Group): Pygame sprite group to which the object will be added.
screen_animator (ScreenAnimator): Main package object controlling the animation.
"""
super().__init__(group, screen_animator)
# Store x position as float
self._x = float(self._rect.x)
# Set the flag that the message hasn't fully emerged
self._has_fully_emerged = False
def _setup_item(self) -> None:
"""
Run methods to setup the object.
Returns:
None
"""
self._set_text()
# Run parent method
super()._setup_item()
def _set_text(self) -> None:
"""
Set font, message text, and outline of text.
Returns:
None
"""
# Set font
self._font = self._settings.font
# Set the message text
self._text = self._settings.text
# Set the outline text
self._outline_text = self._font.render(
self._text,
self._settings.settings['text_aa'],
self._settings.outline_colour
)
def _set_item_content(self) -> None:
"""
Render the message text.
Returns:
None
"""
self.content = self._font.render(
self._text,
self._settings.settings['text_aa'],
self._settings.text_colour
)
def _place_item(self) -> None:
"""
Set the initial object position on the screen.
Returns:
None
"""
self._rect.midleft = self._screen_rect.midright
def _draw_outline(self) -> None:
"""
Draw the message text outline.
Returns:
None
"""
outline_width = self._settings.settings['outline_width']
self._screen.blit(self._outline_text, (self._rect.x - outline_width, self._rect.y - outline_width))
self._screen.blit(self._outline_text, (self._rect.x - outline_width, self._rect.y + outline_width))
self._screen.blit(self._outline_text, (self._rect.x + outline_width, self._rect.y - outline_width))
self._screen.blit(self._outline_text, (self._rect.x + outline_width, self._rect.y + outline_width))
def blit(self) -> None:
"""
Add the object to the pygame screen.
Returns:
None
"""
# Draw outline text
self._draw_outline()
# Draw the message
self._set_item_content()
# Run parent method
super().blit()
def update(self) -> None:
"""
Move the object position to the left during a frame update.
Returns:
None
"""
self._x -= self._settings.settings['text_speed'] / self._settings.settings['fps']
self._rect.x = self._x
def is_on_screen(self) -> bool:
"""
Determine whether the object is still on the screen.
Returns:
bool: True if still on screen, False otherwise.
"""
if self._rect.right <= self._screen_rect.left:
return False
return True
def has_just_emerged(self) -> bool:
"""
Determine whether the right side of the message is now visible on the screen.
Returns:
bool: True if right edge is now on screen, False otherwise.
"""
if not self._has_fully_emerged and self._rect.right <= self._screen_rect.right:
self._has_fully_emerged = True
return True
return False
| 27.147887 | 107 | 0.575357 | 3,829 | 0.993256 | 0 | 0 | 0 | 0 | 0 | 0 | 1,659 | 0.43035 |
d3bc12f8ef0d8afa0eabbca33671def2b9e2dfc8 | 4,293 | py | Python | styrobot/cogs/help.py | ThatRedKite/styrobot | c6c449aec99cb59c4695f739d59efe2def0e0064 | [
"MIT"
]
| 1 | 2021-08-02T23:19:31.000Z | 2021-08-02T23:19:31.000Z | styrobot/cogs/help.py | ThatRedKite/styrobot | c6c449aec99cb59c4695f739d59efe2def0e0064 | [
"MIT"
]
| null | null | null | styrobot/cogs/help.py | ThatRedKite/styrobot | c6c449aec99cb59c4695f739d59efe2def0e0064 | [
"MIT"
]
| 1 | 2021-07-28T02:26:54.000Z | 2021-07-28T02:26:54.000Z | import discord
from discord.ext import commands
from styrobot.util.contrib import info
import random
class BetterHelpCommand(commands.HelpCommand):
async def send_embed(self, embed):
embed.colour = discord.Colour.random()
await self.get_destination().send(embed=embed)
def blank_line(self, embed):
embed.add_field(name='_ _', value='_ _', inline=False)
def signature(self, command: commands.Command):
out = [command.qualified_name]
params = command.clean_params or {}
for name, param in params.items():
# slightly copied from discord.py
greedy = isinstance(param.annotation, commands.converter._Greedy)
if param.default is not param.empty:
should_print = param.default if isinstance(param.default, str) else param.default is not None
if should_print:
out.append(f'[{name}={param.default}]{"..." if greedy else ""}')
else:
out.append(f'[{name}]')
elif param.kind == param.VAR_POSITIONAL:
out.append(f'<{name}...>')
elif greedy:
out.append(f'[{name}]...')
else:
out.append(f'<{name}>')
return ' '.join(out)
async def send_bot_help(self, mapping):
e = discord.Embed(title=info['name'])
if random.random() < 0.95:
e.add_field(name='I am', value=info['name'], inline=True)
else:
e.add_field(name='I am', value='an impostor', inline=True)
e.set_author(name='sus', icon_url='https://i.redd.it/0qtc8un3bz061.png')
e.add_field(name='Contribute at', value=info['repo'], inline=False)
e.add_field(name='I send you my cogs (pls respond)', value='_ _', inline=True)
cogs = [(cog, await self.filter_commands(mapping[cog])) for cog in mapping.keys()]
cogs = [x for x in cogs if len(x[1]) > 0]
for i, (cog, cmds) in enumerate(cogs):
if i % 2 == 0:
self.blank_line(e)
h = '\n'.join([cmd.name for cmd in cmds])
if cog is None:
e.add_field(name='builtin', value=h, inline=True)
else:
e.add_field(name=cog.qualified_name, value=h, inline=True)
if random.random() < 0.9:
e.set_footer(text='Made with ❤️')
else:
e.set_footer(text='Made with 🍆')
await self.send_embed(e)
async def send_cog_help(self, cog: commands.Cog):
e = discord.Embed(title=cog.qualified_name)
e.add_field(name='Cog', value=cog.qualified_name, inline=True)
e.add_field(name='`in_code`', value=f'`{cog.__class__.__name__}`', inline=True)
e.add_field(name='Commands', value='_ _', inline=False)
for cmd in await self.filter_commands(cog.get_commands()):
e.add_field(name=cmd, value=(cmd.help or '[no help]'), inline=False)
await self.send_embed(e)
async def send_group_help(self, group: commands.Group):
e = discord.Embed(title=group.qualified_name)
e.add_field(name='Command Group', value=group.qualified_name, inline=True)
e.add_field(name='Help', value=(group.help or '[no help]'), inline=False)
e.add_field(name='Subcommands', value='_ _', inline=False)
for command in await self.filter_commands(group.commands):
command: commands.Command
e.add_field(name=self.signature(command), value=(command.help or '[no help]'), inline=False)
await self.send_embed(e)
async def send_command_help(self, command: commands.Command):
e = discord.Embed(title=(command.qualified_name or command.name))
e.add_field(name='Name', value=(command.qualified_name or command.name), inline=False)
e.add_field(name='Signature', value=(self.signature(command)), inline=False)
e.add_field(name='Help', value=(command.help or '[no help]'), inline=False)
await self.send_embed(e)
class HelpCog(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
help_command = BetterHelpCommand()
help_command.cog = self
self.bot.help_command = help_command
def setup(bot):
bot.add_cog(HelpCog(bot))
| 44.257732 | 109 | 0.610063 | 4,145 | 0.963953 | 0 | 0 | 0 | 0 | 2,850 | 0.662791 | 500 | 0.116279 |
d3bcd85e84067fe1c97d2fef2c0994e569b7ca18 | 1,527 | py | Python | misc/Queue_hello.py | benhunter/py-stuff | a04f94851370e08a65792a53a6207f3146eb130b | [
"MIT"
]
| 3 | 2017-05-22T03:14:21.000Z | 2019-05-24T11:44:15.000Z | misc/Queue_hello.py | benhunter/py-stuff | a04f94851370e08a65792a53a6207f3146eb130b | [
"MIT"
]
| null | null | null | misc/Queue_hello.py | benhunter/py-stuff | a04f94851370e08a65792a53a6207f3146eb130b | [
"MIT"
]
| null | null | null | # Testing with threading and queue modules for Thread-based parallelism
import threading, queue, time
# The worker thread gets jobs off the queue. When the queue is empty, it
# assumes there will be no more work and exits.
# (Realistically workers will run until terminated.)
def worker():
print('Running worker')
time.sleep(0.1)
while True:
try:
arg = q.get(block=False) # False to terminate Thread when no work is available
except queue.Empty:
print('Worker', threading.currentThread(), end=' ')
print('queue empty')
break
else:
print('Worker', threading.currentThread(), end=' ')
print('running with argument', arg)
work_func(arg) # do the work
time.sleep(0.5)
            q.task_done()
# Work function that processes the arguments
def work_func(arg):
print('Working on', arg)
print('Square is', arg**2)
print('Cube is', arg**3)
# Create the shared work queue
q = queue.Queue()
# Begin adding work to the queue
for i in range(20):
q.put(i)
threadPool = []
# Start a pool of 5 workers
for i in range(5):
t = threading.Thread(target=worker, name='worker %i' % (i + 1))
t.start()
threadPool.append(t)
# time.sleep(5) # testing if workers die before work is queued - yes they do die
# q.join()
for i in range(20):
q.put(i+20)
for t in threadPool:
t.join()
# Give threads time to run
# print('Main thread sleeping')
# time.sleep(5)
print('Main thread finished') | 26.327586 | 90 | 0.629339 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 728 | 0.476752 |
d3bea2de7d4525c6881fb3abdb31815d971e7131 | 506 | py | Python | tests/conftest.py | Beanxx/alonememo | aa90bcca6a5dcaa41305b162ac5d6dbe8d0d2562 | [
"MIT"
]
| null | null | null | tests/conftest.py | Beanxx/alonememo | aa90bcca6a5dcaa41305b162ac5d6dbe8d0d2562 | [
"MIT"
]
| null | null | null | tests/conftest.py | Beanxx/alonememo | aa90bcca6a5dcaa41305b162ac5d6dbe8d0d2562 | [
"MIT"
]
| null | null | null | import pytest
from pymongo import MongoClient
import app as flask_app
test_database_name = 'spartatest'
client = MongoClient('localhost', 27017)
db = client.get_database(test_database_name)
@pytest.fixture
def app():
test_app = flask_app.create_app(test_database_name)
    # Generator-style fixture: execution runs up to the yield and pauses there,
    # then resumes after the yield once it is called again.
    # The app object is not destroyed at the yield, it is only suspended.
yield test_app
    # Everything from here on runs only after all tests have completed.
client.drop_database(test_database_name)
    print('Test database dropped')
| 20.24 | 55 | 0.727273 | 0 | 0 | 439 | 0.673313 | 455 | 0.697853 | 0 | 0 | 298 | 0.457055 |
d3bfaf1d9fa752290f67cc0958281b146d4daff0 | 98 | py | Python | threader/__init__.py | mwoolweaver/threader | fdb4fe9ab71d3c85146969f716d10b78f970323e | [
"MIT"
]
| 34 | 2017-07-24T20:54:06.000Z | 2022-03-18T13:10:11.000Z | threader/__init__.py | mwoolweaver/threader | fdb4fe9ab71d3c85146969f716d10b78f970323e | [
"MIT"
]
| 2 | 2019-05-28T07:21:15.000Z | 2019-07-23T21:45:43.000Z | threader/__init__.py | mwoolweaver/threader | fdb4fe9ab71d3c85146969f716d10b78f970323e | [
"MIT"
]
| 8 | 2019-05-28T06:49:02.000Z | 2022-02-04T22:59:09.000Z | """Tools to quickly create twitter threads."""
from .thread import Threader
__version__ = "0.1.1" | 24.5 | 46 | 0.734694 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 0.540816 |
d3bfd6a64622fae1b5dc880f345c000e85f77a5b | 1,080 | py | Python | src/utility/count_pages.py | WikiCommunityHealth/wikimedia-revert | b584044d8b6a61a79d98656db356bf1f74d23ee0 | [
"MIT"
]
| null | null | null | src/utility/count_pages.py | WikiCommunityHealth/wikimedia-revert | b584044d8b6a61a79d98656db356bf1f74d23ee0 | [
"MIT"
]
| null | null | null | src/utility/count_pages.py | WikiCommunityHealth/wikimedia-revert | b584044d8b6a61a79d98656db356bf1f74d23ee0 | [
"MIT"
]
| null | null | null | # count numbers of pages from the Mediawiki history dumps
import bz2
import subprocess
import os
from datetime import datetime
inizio = datetime.now()
dataset_folder = '/home/gandelli/dev/data/it/'
totali = set()
revisioni = set()
revert = set()
ns0 = set()
for year in range(2001, 2021):
dump_in = bz2.open(dataset_folder+'/it' + str(year) + '.tsv.bz2', 'r')
line = dump_in.readline()
print(year)
while line != '':
line = dump_in.readline().rstrip().decode('utf-8')[:-1]
values = line.split('\t')
if len(values) < 2:
continue
if values[23] != '':
page = int(values[23])
totali.add(page)
if values[28] == '0':
ns0.add(page)
if values[1] == 'revision':
revisioni.add(page)
if values[64] == 'true' and values[67] == 'true':
revert.add(page)
print('total pages ', len(totali))
print('total pages ns0', len(ns0))
print('total revisions ns0', len(revisioni))
print('total revert ns0', len(revert) ) | 23.478261 | 74 | 0.566667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 213 | 0.197222 |
d3c078904bb9cd81a5346502975e431e6b94a34e | 6,395 | py | Python | livescore/LivescoreCommon.py | TechplexEngineer/frc-livescore | dedf68218a1a8e2f8a463ded835ea2a7d4b51b78 | [
"MIT"
]
| null | null | null | livescore/LivescoreCommon.py | TechplexEngineer/frc-livescore | dedf68218a1a8e2f8a463ded835ea2a7d4b51b78 | [
"MIT"
]
| null | null | null | livescore/LivescoreCommon.py | TechplexEngineer/frc-livescore | dedf68218a1a8e2f8a463ded835ea2a7d4b51b78 | [
"MIT"
]
| null | null | null | import colorsys
import cv2
from PIL import Image
import pkg_resources
from .LivescoreBase import LivescoreBase
from .details import Alliance, OngoingMatchDetails
class LivescoreCommon(LivescoreBase):
def __init__(self, game_year, **kwargs):
super(LivescoreCommon, self).__init__(game_year, **kwargs)
self._match_key = None
self._match_name = None
def _getMatchKeyName(self, img, debug_img):
if self._match_key is None:
tl = self._transformPoint((220, 6))
br = self._transformPoint((570, 43))
raw_match_name = self._parseRawMatchName(self._getImgCropThresh(img, tl, br))
self._match_key = self._getMatchKey(raw_match_name)
if self._match_key:
self._match_name = raw_match_name
else:
self._match_name = None
if self._debug:
box = self._cornersToBox(tl, br)
self._drawBox(debug_img, box, (0, 255, 0))
return self._match_key, self._match_name
def _getTimeAndMode(self, img, debug_img):
# Check for match under review
review_point1 = self._transformPoint((624, 93))
review_sample1 = img[review_point1[1], review_point1[0], :]
hsvL = colorsys.rgb_to_hsv(float(review_sample1[2])/255, float(review_sample1[1])/255, float(review_sample1[0])/255)
review_point2 = self._transformPoint((1279 - 624, 93))
review_sample2 = img[review_point2[1], review_point2[0], :]
hsvR = colorsys.rgb_to_hsv(float(review_sample2[2])/255, float(review_sample2[1])/255, float(review_sample2[0])/255)
if 0.116 < hsvL[0] < 0.216 and 0.116 < hsvR[0] < 0.216:
return 0, 'post_match'
# Find time remaining
horiz_center = self._TEMPLATE_SHAPE[0]/2
tl = self._transformPoint((horiz_center-25, 56))
br = self._transformPoint((horiz_center+25, 82))
time_remaining = self._parseDigits(self._getImgCropThresh(img, tl, br))
if self._debug:
# draw a green box for time
box = self._cornersToBox(tl, br)
self._drawBox(debug_img, box, (0, 255, 0))
# Determine mode: 'pre_match', 'auto', 'teleop', or 'post_match'
mode_point = self._transformPoint((520, 70))
mode_point2 = self._transformPoint((581, 70))
mode_sample = img[mode_point[1], mode_point[0], :]
mode_sample2 = img[mode_point2[1], mode_point2[0], :]
hsv1 = colorsys.rgb_to_hsv(float(mode_sample[2])/255, float(mode_sample[1])/255, float(mode_sample[0])/255)
hsv2 = colorsys.rgb_to_hsv(float(mode_sample2[2])/255, float(mode_sample2[1])/255, float(mode_sample2[0])/255)
if time_remaining is None:
return None, None
if time_remaining == 0:
if hsv1[1] > 0.6 and hsv2[1] > 0.6: # Both saturated
mode = 'post_match'
elif hsv1[1] > 0.6: # First saturated
mode = 'auto' # End of auton
else:
mode = 'pre_match'
elif time_remaining <= 15 and hsv2[1] < 0.6:
mode = 'auto'
else:
mode = 'teleop'
if self._debug:
box = self._cornersToBox(tl, br)
self._drawBox(debug_img, box, (0, 255, 0))
cv2.circle(debug_img, review_point1, 2, (0, 255, 0), -1)
cv2.circle(debug_img, review_point2, 2, (0, 255, 0), -1)
cv2.circle(debug_img, mode_point, 2, (0, 255, 0), -1)
cv2.circle(debug_img, mode_point2, 2, (0, 255, 0), -1)
return time_remaining, mode
def _getFlipped(self, img, debug_img):
# Sample point to determine red/blue side
color_point = self._transformPoint((520, 95))
color_sample = img[color_point[1], color_point[0], :]
is_flipped = color_sample[0] > color_sample[2] # More blue than red
if self._debug:
cv2.circle(debug_img, color_point, 2, (0, 255, 0), -1)
return is_flipped
def _getScores(self, img, debug_img, is_flipped):
# Left score limits
left_tl = self._transformPoint((520, 110))
left_br = self._transformPoint((634, 155))
# Right score limits
right_tl = self._transformPoint((644, 110))
right_br = self._transformPoint((760, 155))
left_score = self._parseDigits(self._getImgCropThresh(img, left_tl, left_br, white=True))
right_score = self._parseDigits(self._getImgCropThresh(img, right_tl, right_br, white=True))
if is_flipped:
red_score = right_score
blue_score = left_score
else:
red_score = left_score
blue_score = right_score
if self._debug:
left_box = self._cornersToBox(left_tl, left_br)
right_box = self._cornersToBox(right_tl, right_br)
self._drawBox(debug_img, left_box, (255, 255, 0) if is_flipped else (255, 0, 255))
self._drawBox(debug_img, right_box, (255, 0, 255) if is_flipped else (255, 255, 0))
return red_score, blue_score
def _getMatchDetails(self, img, force_find_overlay):
debug_img = None
if self._debug:
debug_img = img.copy()
time_remaining, mode = self._getTimeAndMode(img, debug_img)
if self._is_new_overlay or force_find_overlay:
self._match_key = None
match_key, match_name = self._getMatchKeyName(img, debug_img)
is_flipped = self._getFlipped(img, debug_img)
red_score, blue_score = self._getScores(img, debug_img, is_flipped)
box = self._cornersToBox(self._transformPoint((0, 0)), self._transformPoint((1280, 170)))
self._drawBox(debug_img, box, (255, 255, 0))
if self._debug:
cv2.imshow("Match Details", debug_img)
cv2.waitKey()
if match_key is not None and red_score is not None \
and blue_score is not None and time_remaining is not None:
return OngoingMatchDetails(
match_key=match_key,
match_name=match_name,
mode=mode,
time=time_remaining,
red=Alliance(
score=red_score,
),
blue=Alliance(
score=blue_score,
)
)
else:
return None
| 39.475309 | 124 | 0.603597 | 6,229 | 0.974042 | 0 | 0 | 0 | 0 | 0 | 0 | 359 | 0.056138 |
d3c0c248eab748f6973cc1f7d32930648b9e6320 | 1,825 | py | Python | challenges/challenge.py | Tech-With-Tim/models | 221fce614776df01b151e73071c788c3ce57dc52 | [
"MIT"
]
| 2 | 2021-07-09T18:53:15.000Z | 2021-08-06T06:21:14.000Z | challenges/challenge.py | Tech-With-Tim/models | 221fce614776df01b151e73071c788c3ce57dc52 | [
"MIT"
]
| 8 | 2021-07-09T13:08:07.000Z | 2021-09-12T20:25:08.000Z | challenges/challenge.py | Tech-With-Tim/models | 221fce614776df01b151e73071c788c3ce57dc52 | [
"MIT"
]
| 4 | 2021-07-09T12:32:20.000Z | 2021-07-29T15:19:25.000Z | from postDB import Model, Column, types
from datetime import datetime
import utils
class Challenge(Model):
"""
Challenge class to store the challenge details
Database Attributes:
Attributes stored in the `challenges` table.
:param int id: The challenge Snowflake ID.
:param str title: The challenge title.
:param int author_id: The challenge author's Discord ID.
:param str description: A description.
:param List[str] example_in: Example input.
:param List[str] example_out: Example output.
:param List[int] language_ids: The languages you can use to complete this challenge.
:param :class:`datetime` released_at: The time this challenge was released at.
:param bool deleted: Whether or not this challenge has been deleted.
:param str slug: The URL slug this challenge relates to.
"""
id = Column(types.Integer(big=True), primary_key=True)
title = Column(types.String, unique=True)
author_id = Column(
types.ForeignKey("users", "id", sql_type=types.Integer(big=True)),
)
description = Column(types.String)
example_in = Column(types.Array(types.String))
example_out = Column(types.Array(types.String))
# Implicit ForeignKey to ChallengeLanguage.id
language_ids = Column(types.Array(types.Integer(big=True)))
released_at = Column(types.DateTime, nullable=True)
deleted = Column(types.Boolean, default=False)
slug = Column(types.String, unique=True)
@property
def created_at(self) -> datetime:
"""Returns the time the challenge was created"""
return utils.snowflake_time(self.id)
| 40.555556 | 101 | 0.633973 | 1,738 | 0.952329 | 0 | 0 | 149 | 0.081644 | 0 | 0 | 1,039 | 0.569315 |
d3c2f371f8e9bd53dfa26410d72fcf0c4b952e00 | 1,004 | py | Python | settings.py | embrace-inpe/cycle-slip-correction | c465dd4d45ea7df63a18749e26ba4bf0aa27eb59 | [
"MIT"
]
| 6 | 2019-05-20T21:23:41.000Z | 2021-06-23T15:00:30.000Z | settings.py | embrace-inpe/cycle-slip-correction | c465dd4d45ea7df63a18749e26ba4bf0aa27eb59 | [
"MIT"
]
| null | null | null | settings.py | embrace-inpe/cycle-slip-correction | c465dd4d45ea7df63a18749e26ba4bf0aa27eb59 | [
"MIT"
]
| 5 | 2018-12-27T16:46:45.000Z | 2020-09-14T13:44:00.000Z | """
Commom settings to all applications
"""
A = 40.3
TECU = 1.0e16
C = 299792458
F1 = 1.57542e9
F2 = 1.22760e9
factor_1 = (F1 - F2) / (F1 + F2) / C
factor_2 = (F1 * F2) / (F2 - F1) / C
DIFF_TEC_MAX = 0.05
LIMIT_STD = 7.5
plot_it = True
REQUIRED_VERSION = 3.01
CONSTELLATIONS = ['G', 'R']
COLUMNS_IN_RINEX = {'3.03': {'G': {'L1': 'L1C', 'L2': 'L2W', 'C1': 'C1C', 'P1': 'C1W', 'P2': 'C2W'},
'R': {'L1': 'L1C', 'L2': 'L2C', 'C1': 'C1C', 'P1': 'C1P', 'P2': 'C2P'}
},
'3.02': {'G': {'L1': 'L1', 'L2': 'L2', 'C1': 'C1C', 'P1': 'C1W', 'P2': 'C2W'},
'R': {'L1': 'L1', 'L2': 'L2', 'C1': 'C1C', 'P1': 'C1P', 'P2': 'C2P'}
},
'3.01': {'G': {'L1': 'L1', 'L2': 'L2', 'C1': 'C1C', 'P1': 'C1W', 'P2': 'C2W'},
'R': {'L1': 'L1', 'L2': 'L2', 'C1': 'C1C', 'P1': 'C1P', 'P2': 'C2P'}
}
}
| 33.466667 | 100 | 0.351594 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 347 | 0.345618 |
d3c36036476de94ac751c017398b3c5474c873f2 | 51 | py | Python | io_almacen/channel/__init__.py | xyla-io/io_almacen | 76725391b496fe3f778d013fc680ae80637eb74b | [
"MIT"
]
| null | null | null | io_almacen/channel/__init__.py | xyla-io/io_almacen | 76725391b496fe3f778d013fc680ae80637eb74b | [
"MIT"
]
| null | null | null | io_almacen/channel/__init__.py | xyla-io/io_almacen | 76725391b496fe3f778d013fc680ae80637eb74b | [
"MIT"
]
| null | null | null | from .channel_io import Channel, channel_entity_url | 51 | 51 | 0.882353 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d3c3d276986b71cc9d8aae788f2dcd9c3f2eb96a | 1,009 | py | Python | tests/api/test_libcoap_api.py | ggravlingen/ikeatradfri | 9eef5317ab770de874c407449489604b2fdf35f1 | [
"MIT"
]
| 726 | 2017-04-12T22:55:39.000Z | 2020-09-02T20:47:13.000Z | tests/api/test_libcoap_api.py | ggravlingen/ikeatradfri | 9eef5317ab770de874c407449489604b2fdf35f1 | [
"MIT"
]
| 248 | 2017-04-12T21:45:10.000Z | 2020-09-03T08:48:37.000Z | tests/api/test_libcoap_api.py | ggravlingen/ikeatradfri | 9eef5317ab770de874c407449489604b2fdf35f1 | [
"MIT"
]
| 140 | 2017-04-12T20:02:57.000Z | 2020-09-02T08:54:23.000Z | """Test API utilities."""
import json
from pytradfri.api.libcoap_api import APIFactory
from pytradfri.gateway import Gateway
def test_constructor_timeout_passed_to_subprocess(monkeypatch):
"""Test that original timeout is passed to subprocess."""
capture = {}
def capture_args(*args, **kwargs):
capture.update(kwargs)
return json.dumps([])
monkeypatch.setattr("subprocess.check_output", capture_args)
api = APIFactory("anything", timeout=20, psk="abc")
api.request(Gateway().get_devices())
assert capture["timeout"] == 20
def test_custom_timeout_passed_to_subprocess(monkeypatch):
"""Test that custom timeout is passed to subprocess."""
capture = {}
def capture_args(*args, **kwargs):
capture.update(kwargs)
return json.dumps([])
monkeypatch.setattr("subprocess.check_output", capture_args)
api = APIFactory("anything", psk="abc")
api.request(Gateway().get_devices(), timeout=1)
assert capture["timeout"] == 1
| 28.027778 | 64 | 0.698712 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 235 | 0.232904 |
d3c44721938c2e001d9a0ea64b9e887be6780370 | 1,293 | py | Python | scrape_tvz.py | awordforthat/rhymes | b7d47b48a9b641e4736ed04058a183afc0a83b04 | [
"MIT"
]
| null | null | null | scrape_tvz.py | awordforthat/rhymes | b7d47b48a9b641e4736ed04058a183afc0a83b04 | [
"MIT"
]
| null | null | null | scrape_tvz.py | awordforthat/rhymes | b7d47b48a9b641e4736ed04058a183afc0a83b04 | [
"MIT"
]
| 1 | 2021-02-16T03:06:38.000Z | 2021-02-16T03:06:38.000Z | # scrapes Townes van Zandt lyrics
# sample code so I don't have to remember all of this stuff
# the next time I want to source some verses
from bs4 import BeautifulSoup as soup
import requests
import string
punctuation_trans_table = str.maketrans("", "", string.punctuation)
def strip_punctuation(s):
return s.translate(punctuation_trans_table)
base_url = "http://ippc2.orst.edu/coopl/lyrics/"
index = requests.get(base_url + "albums.html")
parsed_index = soup(index.text)
all_links = parsed_index.find_all("a") # get all <a> tags
links = [l for l in all_links if l.text] # filter out image links
def to_filename(s, path="texts/townes_van_zandt/"):
'''Quick and dirty snake-casing'''
s = s.replace("&", "and") # special case, "Poncho & Lefty"
s = strip_punctuation(s)
s = s.lower()
s = s.replace(" ", "_")
s = path + s + ".txt"
return s
def process_link(link):
title = link.text
f = open(to_filename(title), "w")
remote_file = link.get("href")
song_file = requests.get(base_url + remote_file)
verses = [l for l in soup(song_file.text).find_all("font")
if l.get("size")]
for verse in verses:
if verse.text:
f.writelines("\n".join(verse.stripped_strings))
            f.write("\n\n")
    f.close()
| 28.108696 | 67 | 0.657386 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 381 | 0.294664 |
d3c48e47d2fa33e8114041e17aa2a33b9c9c1809 | 895 | py | Python | chapter04/ifelse.py | persevere-in-coding-persist-in-learning/python2 | b207d0040232abae63638784b34a950b932bef77 | [
"Apache-2.0"
]
| 3 | 2020-08-05T01:15:41.000Z | 2020-08-05T09:28:36.000Z | chapter04/ifelse.py | persevere-in-coding-persist-in-learning/python2 | b207d0040232abae63638784b34a950b932bef77 | [
"Apache-2.0"
]
| null | null | null | chapter04/ifelse.py | persevere-in-coding-persist-in-learning/python2 | b207d0040232abae63638784b34a950b932bef77 | [
"Apache-2.0"
]
| null | null | null | # coding=utf-8
"""
A study of the if / elif / else control structures
Version: 0.1
Author: huijz
Date: 2020-08-24
"""
# Example 1: basic usage of if
flag = False
name = 'huijz'
if name == 'python':  # check whether the variable equals 'python'
    flag = True  # set the flag to True when the condition holds
    print 'welcome boss'  # and print a welcome message
else:
    print name  # print the variable when the condition does not hold
# Example 2: using elif
num = 5
if num == 3:  # check the value of num
    print 'boss'
elif num == 2:
    print 'user'
elif num == 1:
    print 'worker'
elif num < 0:  # printed when the value is less than zero
    print 'error'
else:
    print 'road'  # printed when none of the conditions hold
# Example 3: several conditions in one if statement
num = 9
if 0 <= num <= 10:  # check whether the value lies between 0 and 10
    print 'hello'
# Output: hello
num = 10
if num < 0 or num > 10:  # check whether the value is below 0 or above 10
    print 'hello'
else:
    print 'unDefine'
# Output: unDefine
num = 8
# check whether the value lies between 0 and 5 or between 10 and 15
if (0 <= num <= 5) or (10 <= num <= 15):
    print 'hello'
else:
    print 'unDefine'
# Output: unDefine
# Example 4: var = 100
var = 100
if var == 100: print "the value of var is 100"
print "Good bye!"
| 16.886792 | 41 | 0.606704 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 701 | 0.606926 |
d3c5d75262328f54482b5a9f8b47cfdc49c36760 | 445 | py | Python | setup.py | korymath/JANN | 98468a2e90a6b55ccb15e905ee10a1d1130cf5d8 | [
"MIT"
]
| 39 | 2018-09-25T21:40:38.000Z | 2022-01-19T23:26:51.000Z | setup.py | korymath/JANN | 98468a2e90a6b55ccb15e905ee10a1d1130cf5d8 | [
"MIT"
]
| 22 | 2018-09-25T21:36:46.000Z | 2021-09-07T16:03:41.000Z | setup.py | korymath/JANN | 98468a2e90a6b55ccb15e905ee10a1d1130cf5d8 | [
"MIT"
]
| 9 | 2018-09-26T00:38:35.000Z | 2020-02-27T05:59:03.000Z | from setuptools import setup
from setuptools import find_packages
setup(
name="Jann",
version="4.0.0",
description="Jann is a Nearest Neighbour retrieval-based chatbot.",
author="Kory Mathewson",
author_email="[email protected]",
license="MIT",
url="https://github.com/korymath/jann",
packages=find_packages(),
setup_requires=[
"pytest-runner"
],
tests_require=[
"pytest"
],
)
| 20.227273 | 71 | 0.647191 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 165 | 0.370787 |
d3c6c4df7fb938e9c1ce540f827eb9b023f7dd26 | 7,895 | py | Python | tests/scanner/scanners/ke_version_scanner_test.py | pombredanne/forseti-security | 68a9a88243460065e00b6c131b3d9abd0331fb37 | [
"Apache-2.0"
]
| 1 | 2018-03-26T08:15:21.000Z | 2018-03-26T08:15:21.000Z | tests/scanner/scanners/ke_version_scanner_test.py | pombredanne/forseti-security | 68a9a88243460065e00b6c131b3d9abd0331fb37 | [
"Apache-2.0"
]
| null | null | null | tests/scanner/scanners/ke_version_scanner_test.py | pombredanne/forseti-security | 68a9a88243460065e00b6c131b3d9abd0331fb37 | [
"Apache-2.0"
]
| null | null | null | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""KE Version Rule Scanner Tests."""
import unittest
import mock
from tests import unittest_utils
from google.cloud.security.common.gcp_type import (
ke_cluster as ke_cluster_type)
from google.cloud.security.common.gcp_type import (
organization as organization_type)
from google.cloud.security.common.gcp_type import project as project_type
from google.cloud.security.scanner.scanners import ke_version_scanner
# pylint: disable=bad-indentation
class FakeProjectDao(object):
def get_project(self, project_id, snapshot_timestamp=0):
return project_type.Project(project_id=project_id)
class FakeOrgDao(object):
def find_ancestors(self, resource_id, snapshot_timestamp=0):
return [organization_type.Organization(organization_id=123456)]
class KeVersionScannerTest(unittest_utils.ForsetiTestCase):
def tearDown(self):
self.org_patcher.stop()
self.project_patcher.stop()
def setUp(self):
# patch the daos
self.org_patcher = mock.patch(
'google.cloud.security.common.data_access.'
'org_resource_rel_dao.OrgResourceRelDao')
self.mock_org_rel_dao = self.org_patcher.start()
self.mock_org_rel_dao.return_value = FakeOrgDao()
self.project_patcher = mock.patch(
'google.cloud.security.common.data_access.'
'project_dao.ProjectDao')
self.mock_project_dao = self.project_patcher.start()
self.mock_project_dao.return_value = FakeProjectDao()
self.server_config = {
'defaultClusterVersion': '1.7.11-gke.1',
'validNodeVersions': [
'1.8.6-gke.0',
'1.7.11-gke.1',
'1.7.10-gke.1',
'1.6.13-gke.1',
],
'defaultImageType': 'COS',
'validImageTypes': [
'UBUNTU',
'COS'
],
'validMasterVersions': [
'1.8.6-gke.0',
'1.7.11-gke.1'
]
}
self.ke_clusters = {
# The main backend service.
'master-version-invalid': ke_cluster_type.KeCluster.from_dict(
'foo', self.server_config,
{
'name': 'master-version-invalid',
'nodePools': [{
'name': 'default-pool',
'version': '1.6.13-gke.1'
}],
'initialClusterVersion': '1.6.13-gke.1',
'currentMasterVersion': '1.6.13-gke.1',
'currentNodeVersion': '1.6.13-gke.1'
}),
'node-version-invalid': ke_cluster_type.KeCluster.from_dict(
'foo', self.server_config,
{
'name': 'node-version-invalid',
'nodePools': [{
'name': 'default-pool',
'version': '1.8.4-gke.1'
}],
'initialClusterVersion': '1.8.4-gke.1',
'currentMasterVersion': '1.8.6-gke.0',
'currentNodeVersion': '1.8.4-gke.1'
}),
'node-version-not-allowed': ke_cluster_type.KeCluster.from_dict(
'foo', self.server_config,
{
'name': 'node-version-not-allowed',
'nodePools': [{
'name': 'default-pool',
'version': '1.7.10-gke.1'
}],
'initialClusterVersion': '1.7.10-gke.1',
'currentMasterVersion': '1.7.11-gke.1',
'currentNodeVersion': '1.7.10-gke.1'
}),
'multiple-node-pools': ke_cluster_type.KeCluster.from_dict(
'foo', self.server_config,
{
'name': 'multiple-node-pools',
'nodePools': [{
'name': 'default-pool',
'version': '1.7.11-gke.1'
}, {
'name': 'secondary-pool',
'version': '1.7.11-gke.1'
}],
'initialClusterVersion': '1.7.11-gke.1',
'currentMasterVersion': '1.7.11-gke.1',
'currentNodeVersion': '1.7.11-gke.1'
})
}
self.scanner = ke_version_scanner.KeVersionScanner(
{}, {}, '',
unittest_utils.get_datafile_path(
__file__, 'ke_version_scanner_test_data.yaml'))
self.scanner._retrieve = mock.Mock(
return_value=self.ke_clusters.values())
@mock.patch.object(
ke_version_scanner.KeVersionScanner,
'_output_results_to_db', autospec=True)
def test_run_scanner(self, mock_output_results):
self.scanner.run()
expected_violations = [
{'resource_id': 'node-version-not-allowed',
'resource_type': 'ke',
'rule_index': 2,
'rule_name': 'Disallowed node pool version',
'violation_data': {'cluster_name': 'node-version-not-allowed',
'node_pool_name': 'default-pool',
'project_id': 'foo',
'violation_reason': (
"Node pool version 1.7.10-gke.1 is not "
"allowed (['>= 1.6.13-gke.1', "
"'>= 1.7.11-gke.1', '>= 1.8.4-gke.1', "
"'>= 1.9.*']).")},
'violation_type': 'KE_VERSION_VIOLATION'},
{'resource_id': 'master-version-invalid',
'resource_type': 'ke',
'rule_index': 1,
'rule_name': 'Unsupported master version',
'violation_data': {'cluster_name': 'master-version-invalid',
'node_pool_name': '',
'project_id': 'foo',
'violation_reason': (
"Master version 1.6.13-gke.1 is not "
"supported (['1.7.11-gke.1', "
"'1.8.6-gke.0']).")},
'violation_type': 'KE_VERSION_VIOLATION'},
{'resource_id': 'node-version-invalid',
'resource_type': 'ke',
'rule_index': 0,
'rule_name': 'Unsupported node pool version',
'violation_data': {'cluster_name': 'node-version-invalid',
'node_pool_name': 'default-pool',
'project_id': 'foo',
'violation_reason': (
"Node pool version 1.8.4-gke.1 is not "
"supported (['1.6.13-gke.1', "
"'1.7.10-gke.1', '1.7.11-gke.1', "
"'1.8.6-gke.0']).")},
'violation_type': 'KE_VERSION_VIOLATION'}]
mock_output_results.assert_called_once_with(mock.ANY,
expected_violations)
if __name__ == '__main__':
unittest.main()
| 41.119792 | 76 | 0.500823 | 6,767 | 0.857125 | 0 | 0 | 2,536 | 0.321216 | 0 | 0 | 3,175 | 0.402153 |
d3c71f0ccce66077dfdcd88c05a9aa625f2426c0 | 1,147 | py | Python | metrics/utils.py | edwardyehuang/iSeg | 256b0f7fdb6e854fe026fa8df41d9a4a55db34d5 | [
"MIT"
]
| 4 | 2021-12-13T09:49:26.000Z | 2022-02-19T11:16:50.000Z | metrics/utils.py | edwardyehuang/iSeg | 256b0f7fdb6e854fe026fa8df41d9a4a55db34d5 | [
"MIT"
]
| 1 | 2021-07-28T10:40:56.000Z | 2021-08-09T07:14:06.000Z | metrics/utils.py | edwardyehuang/iSeg | 256b0f7fdb6e854fe026fa8df41d9a4a55db34d5 | [
"MIT"
]
| null | null | null | # ================================================================
# MIT License
# Copyright (c) 2021 edwardyehuang (https://github.com/edwardyehuang)
# ================================================================
import tensorflow as tf
from iseg.metrics.seg_metric_wrapper import SegMetricWrapper
from iseg.metrics.mean_iou import MeanIOU
class SegMetricBuilder:
def __init__(self, num_class, ignore_label):
self.num_class = num_class
self.ignore_label = ignore_label
self.__metrics = []
def add(self, prefix="", use_iou=True, pre_compute_fn=None):
metrics_list = []
if prefix is None:
prefix = ""
if prefix != "":
prefix = prefix + "_"
if use_iou:
iou_metric = SegMetricWrapper(
MeanIOU(self.num_class), num_class=self.num_class, ignore_label=self.ignore_label, name=prefix + "IOU"
)
iou_metric.add_pre_compute_fn(pre_compute_fn)
metrics_list.append(iou_metric)
self.__metrics.append(metrics_list)
@property
def metrics(self):
return self.__metrics
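# Minimal usage sketch (added; the numbers below are placeholders, not values taken
# from the repository): one mean-IoU metric is registered per call to add().
#   builder = SegMetricBuilder(num_class=21, ignore_label=255)
#   builder.add("val")              # registers a SegMetricWrapper named "val_IOU"
#   builder.add("test")
#   metric_lists = builder.metrics  # list with one metric list per add() call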
| 26.068182 | 118 | 0.569311 | 797 | 0.694856 | 0 | 0 | 62 | 0.054054 | 0 | 0 | 228 | 0.198779 |
d3c7eb72e9d8627f04182ce89238416d18909674 | 1,436 | py | Python | src/core/stats.py | dynaryu/vaws | f6ed9b75408f7ce6100ed59b7754f745e59be152 | [
"BSD-3-Clause"
]
| null | null | null | src/core/stats.py | dynaryu/vaws | f6ed9b75408f7ce6100ed59b7754f745e59be152 | [
"BSD-3-Clause"
]
| null | null | null | src/core/stats.py | dynaryu/vaws | f6ed9b75408f7ce6100ed59b7754f745e59be152 | [
"BSD-3-Clause"
]
| null | null | null | import math
def lognormal_mean(m, stddev):
""" compute mean of log x with mean and std. of x
Args:
m: mean of x
stddev: standard deviation of x
Returns: mean of log x
"""
return math.log(m) - (0.5 * math.log(1.0 + (stddev * stddev) / (m * m)))
def lognormal_stddev(m, stddev):
""" compute std. of log x with mean and std. of x
Args:
m: mean of x
stddev: standard deviation of x
Returns: std. of log x
"""
return math.sqrt(math.log((stddev * stddev) / (m * m) + 1))
def lognormal_underlying_mean(m, stddev):
""" compute mean of x with mean and std of log x
Args:
m: mean of log x
stddev: std of log x
Returns:
"""
# if m == 0 or stddev == 0:
# print '{}'.format('why ???')
# return 0
return math.exp(m + 0.5 * stddev * stddev)
def lognormal_underlying_stddev(m, stddev):
""" compute std of x with mean and std of log x
Args:
m: mean of log x
stddev: std of log x
Returns: std of x
"""
# if m == 0 or stddev == 0:
# print '{}'.format('strange why???')
# return 0
return math.sqrt((math.exp(stddev**2.0) - 1.0) *
math.exp(2.0*m + stddev**2.0))
#return lognormal_underlying_mean(m, stddev) * \
# math.sqrt((math.exp(stddev * stddev) - 1.0))
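# Illustrative round-trip check (an addition, not part of the original module):
# converting the mean/std of x into the parameters of log(x) and back should
# recover the original values.
if __name__ == "__main__":
    m, sd = 10.0, 2.0
    mu = lognormal_mean(m, sd)       # mean of log(x)
    sigma = lognormal_stddev(m, sd)  # std of log(x)
    # both prints should be very close to the inputs, 10.0 and 2.0
    print(lognormal_underlying_mean(mu, sigma))
    print(lognormal_underlying_stddev(mu, sigma))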
| 23.16129 | 77 | 0.521588 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 891 | 0.620474 |
d3c82c2d822564092119880c7f993bd3fd1d721b | 5,720 | py | Python | vim.d/vimfiles/bundle/taghighlight/plugin/TagHighlight/module/languages.py | lougxing/gbox | f28402d97cacd22b5e564003af72c4022908cb4d | [
"MIT"
]
| null | null | null | vim.d/vimfiles/bundle/taghighlight/plugin/TagHighlight/module/languages.py | lougxing/gbox | f28402d97cacd22b5e564003af72c4022908cb4d | [
"MIT"
]
| 13 | 2020-01-28T22:30:33.000Z | 2022-03-02T14:57:16.000Z | vim.d/vimfiles/bundle/taghighlight/plugin/TagHighlight/module/languages.py | lougxing/gbox | f28402d97cacd22b5e564003af72c4022908cb4d | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# Tag Highlighter:
# Author: A. S. Budden <abudden _at_ gmail _dot_ com>
# Copyright: Copyright (C) 2009-2013 A. S. Budden
# Permission is hereby granted to use and distribute this code,
# with or without modifications, provided that this copyright
# notice is copied with it. Like anything else that's free,
# the TagHighlight plugin is provided *as is* and comes with no
# warranty of any kind, either expressed or implied. By using
# this plugin, you agree that in no event will the copyright
# holder be liable for any damages resulting from the use
# of this software.
# ---------------------------------------------------------------------
import os
import glob
from .config import config
from .loaddata import LoadDataFile, LoadFile, GlobData
from .debug import Debug
class Languages():
registry = {}
def __init__(self, options):
self.options = options
self.kinds = None
language_list_entries = ['SkipList','Priority']
# Import language specific modules: this will make them be parsed
# and will add to the registry
self.defaults = LoadDataFile('language_defaults.txt')
for entry in language_list_entries:
if entry in self.defaults:
if not isinstance(self.defaults[entry], list):
self.defaults[entry] = self.defaults[entry].split(',')
for language_file in GlobData('languages/*.txt'):
language_dict = LoadDataFile(language_file)
for entry in language_list_entries:
if entry in language_dict:
if not isinstance(language_dict[entry], list):
language_dict[entry] = language_dict[entry].split(',')
language_dict['Filename'] = language_file
if 'ReservedKeywords' in language_dict:
# This is some weird python magic that takes a list of
# strings containing space-separated items and produces
# a single list of those items.
language_dict['ReservedKeywords'] = \
[item for sublist in language_dict['ReservedKeywords'] for item in sublist.split(' ')]
else:
language_dict['ReservedKeywords'] = []
language_dict = self.VerifyLanguage(language_dict)
self.registry[language_dict['FriendlyName']] = language_dict
def ReadConfigFile(self, filename):
result = {}
fh = open(filename, 'r')
list_entries = ['SkipList','Priority']
key = None
for line in fh:
if line.strip().endswith(':') and line[0] not in [' ','\t',':','#']:
key = line.strip()[:-1]
result[key] = []
elif key is not None and line.startswith('\t'):
result[key] += [line.strip()]
elif ':' in line and line[0] not in [' ','\t',':','#']:
# End of the previous list, so reset key
key = None
parts = line.strip().split(':',1)
if parts[0] in list_entries:
if ',' in parts[1]:
result[parts[0]] = parts[1].split(',')
else:
result[parts[0]] = [parts[1]]
else:
result[parts[0]] = parts[1]
fh.close()
return result
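    # Illustrative note (added): ReadConfigFile expects a simple plain-text layout.
    # In the hypothetical snippet below, a bare "Kinds:" line starts a list that is
    # filled by the tab-indented lines following it, while "Key:value" lines become
    # plain string entries (SkipList and Priority values are further split on commas):
    #
    #     FriendlyName:C
    #     SkipList:.git,.svn
    #     Kinds:
    #     <tab>f function
    #     <tab>v variable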
def VerifyLanguage(self, language_dict):
required_keys = [
'FriendlyName',
'CTagsName',
'PythonExtensionMatcher',
'VimExtensionMatcher',
'Suffix',
'SkipList',
'IsKeyword',
'Priority',
]
for key in required_keys:
if key not in language_dict:
if key in self.defaults:
language_dict[key] = self.defaults[key]
else:
raise Exception("Language data from file {filename} is " \
"missing required key {key} (no default " \
"available).".format(filename=language_dict['Filename'],
key=key))
return language_dict
def GetAllLanguages(self):
return list(self.registry.keys())
def GetAllLanguageHandlers(self):
return list(self.registry.values())
def GetLanguageHandler(self, name):
return self.registry[name]
def GenerateExtensionTable(self):
results = {}
for handler in list(self.registry.values()):
extensions = handler.GetVimMatcher()
suffix = handler.GetSuffix()
results[extensions] = suffix
return results
def GenerateFullKindList(self):
self.LoadKindList()
kinds = set()
for language in list(self.kinds.keys()):
kinds |= set(self.kinds[language].values())
return sorted(list(kinds))
def GetKindList(self, language=None):
"""Explicit list of kinds exported from ctags help."""
if self.kinds is None:
kind_import = LoadDataFile('kinds.txt')
# Generate the kind database with 'ctags_' prefix on the keys
self.kinds = {}
for key in kind_import:
self.kinds[key] = {}
for kind in kind_import[key]:
self.kinds[key]['ctags_'+kind] = kind_import[key][kind]
if language is None:
return self.kinds
elif language in self.kinds:
return self.kinds[language]
else:
return None
| 38.648649 | 110 | 0.544755 | 4,827 | 0.843881 | 0 | 0 | 0 | 0 | 0 | 0 | 1,601 | 0.279895 |
d3c84323f9dc3dcd909b2c9adb14b8efc078f1c5 | 6,099 | py | Python | archive/bayes_sensor.py | robmarkcole/HASS-data-science | 7edd07a1519682683b42d140d6268a87d91522ec | [
"MIT"
]
| 11 | 2018-01-21T02:37:02.000Z | 2022-01-20T03:32:40.000Z | archive/bayes_sensor.py | robmarkcole/HASS-data-science | 7edd07a1519682683b42d140d6268a87d91522ec | [
"MIT"
]
| null | null | null | archive/bayes_sensor.py | robmarkcole/HASS-data-science | 7edd07a1519682683b42d140d6268a87d91522ec | [
"MIT"
]
| 8 | 2017-12-19T14:05:33.000Z | 2021-12-08T09:54:06.000Z | """
Bayes sensor code split out from
https://github.com/home-assistant/home-assistant/blob/dev/homeassistant/components/binary_sensor/bayesian.py
This module is used to explore the sensor.
"""
from collections import OrderedDict
from const import *
def update_probability(prior, prob_true, prob_false):
"""Update probability using Bayes' rule."""
numerator = prob_true * prior
denominator = numerator + prob_false * (1 - prior)
probability = numerator / denominator
return probability
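# Illustrative check of the update above (added; the numbers are made up):
# with a prior of 0.2 and an observation seen 90% of the time when the
# hypothesis is true but only 10% of the time when it is false,
#   update_probability(0.2, 0.9, 0.1)
#     = (0.9 * 0.2) / (0.9 * 0.2 + 0.1 * 0.8)
#     = 0.18 / 0.26
#     ~= 0.692
# so a single strong observation lifts the probability from 0.20 to about 0.69.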
def setup_platform(config):
"""Set up the Bayesian Binary sensor.
Modified from async_setup_platform."""
name = config[CONF_NAME]
observations = config[CONF_OBSERVATIONS]
prior = config[CONF_PRIOR]
probability_threshold = config[CONF_PROBABILITY_THRESHOLD]
device_class = config[CONF_DEVICE_CLASS]
return BayesianBinarySensor(
name, prior, observations, probability_threshold, device_class)
class BinarySensorDevice(): # Entity
"""Represent a binary sensor."""
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return None
@property
def state(self):
"""Return the state of the binary sensor."""
return STATE_ON if self.is_on else STATE_OFF
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return None
class BayesianBinarySensor(BinarySensorDevice):
"""Representation of a Bayesian sensor.
Removed some methods I don't think will be needed for this investigation.
"""
def __init__(self, name, prior, observations, probability_threshold,
device_class):
"""Initialize the Bayesian sensor."""
self._name = name
self._observations = observations
self._probability_threshold = probability_threshold
self._device_class = device_class
self._deviation = False
self.prior = prior
self.probability = prior
self.current_obs = OrderedDict({})
        # Entity ids that this sensor needs to observe
to_observe = set(obs['entity_id'] for obs in self._observations)
self.entity_obs = dict.fromkeys(to_observe, [])
# Append observations
for ind, obs in enumerate(self._observations):
obs['id'] = ind
self.entity_obs[obs['entity_id']].append(obs)
self.watchers = {
'numeric_state': self._process_numeric_state,
'state': self._process_state
}
# @asyncio.coroutine
def async_added_to_hass(self):
"""Call when entity about to be added."""
        # @callback  # Home Assistant decorator; not available in this stripped-down copy
# pylint: disable=invalid-name
def async_threshold_sensor_state_listener(entity, old_state,
new_state):
"""Handle sensor state changes."""
if new_state.state == STATE_UNKNOWN:
return
entity_obs_list = self.entity_obs[entity]
for entity_obs in entity_obs_list:
platform = entity_obs['platform']
self.watchers[platform](entity_obs)
prior = self.prior
for obs in self.current_obs.values():
prior = update_probability(
prior, obs['prob_true'], obs['prob_false'])
self.probability = prior # Updates prior for each observation.
# self.hass.async_add_job(self.async_update_ha_state, True)
entities = [obs['entity_id'] for obs in self._observations]
# async_track_state_change(
# self.hass, entities, async_threshold_sensor_state_listener)
def _update_current_obs(self, entity_observation, should_trigger):
"""Update current observation for single entity."""
obs_id = entity_observation['id']
if should_trigger:
prob_true = entity_observation['prob_given_true']
prob_false = entity_observation.get(
'prob_given_false', 1 - prob_true)
# Update prob_true and prob_false
self.current_obs[obs_id] = {
'prob_true': prob_true,
'prob_false': prob_false
}
else:
self.current_obs.pop(obs_id, None)
def _process_numeric_state(self, entity_observation):
"""Add entity to current_obs if numeric state conditions are met (regular sensor)."""
entity = entity_observation['entity_id']
should_trigger = condition.async_numeric_state(
self.hass, entity,
entity_observation.get('below'),
entity_observation.get('above'), None, entity_observation)
self._update_current_obs(entity_observation, should_trigger)
def _process_state(self, entity_observation):
"""Add entity to current observations if state conditions are met (binary sensor)."""
entity = entity_observation['entity_id']
should_trigger = condition.state(
self.hass, entity, entity_observation.get('to_state'))
self._update_current_obs(entity_observation, should_trigger)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return true if sensor is on."""
return self._deviation
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def device_class(self):
"""Return the sensor class of the sensor."""
return self._device_class
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
return {
ATTR_OBSERVATIONS: [val for val in self.current_obs.values()],
ATTR_PROBABILITY: round(self.probability, 2),
ATTR_PROBABILITY_THRESHOLD: self._probability_threshold,
}
#@asyncio.coroutine
def async_update(self):
"""Get the latest data and update the states."""
self._deviation = bool(self.probability > self._probability_threshold)
| 33.510989 | 108 | 0.643384 | 5,146 | 0.843745 | 0 | 0 | 1,897 | 0.311035 | 0 | 0 | 1,773 | 0.290703 |
d3c8f408394af973ef52e2eab96bcf7c6c3f5ac5 | 27,212 | py | Python | CoarseNet/MinutiaeNet_utils.py | khaihp98/minutiae | afb7feff33ef86a673f899006aded486964f18dc | [
"MIT"
]
| null | null | null | CoarseNet/MinutiaeNet_utils.py | khaihp98/minutiae | afb7feff33ef86a673f899006aded486964f18dc | [
"MIT"
]
| null | null | null | CoarseNet/MinutiaeNet_utils.py | khaihp98/minutiae | afb7feff33ef86a673f899006aded486964f18dc | [
"MIT"
]
| null | null | null | import os
import glob
import shutil
import logging
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage, misc, signal, spatial
from skimage.filters import gaussian, gabor_kernel
import cv2
import math
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def re_mkdir(path):
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
def init_log(output_dir):
re_mkdir(output_dir)
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(message)s',
datefmt='%Y%m%d-%H:%M:%S',
filename=os.path.join(output_dir, 'log.log'),
filemode='w')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
logging.getLogger('').addHandler(console)
return logging
def copy_file(path_s, path_t):
shutil.copy(path_s, path_t)
def get_files_in_folder(folder, file_ext=None):
files = glob.glob(folder + file_ext)
files_name = []
for i in files:
_, name = os.path.split(i)
name, ext = os.path.splitext(name)
files_name.append(name)
return np.asarray(files), np.asarray(files_name)
def point_rot(points, theta, b_size, a_size):
cosA = np.cos(theta)
sinA = np.sin(theta)
b_center = [b_size[1] / 2.0, b_size[0] / 2.0]
a_center = [a_size[1] / 2.0, a_size[0] / 2.0]
points = np.dot(points - b_center, np.array([[cosA, -sinA], [sinA, cosA]])) + a_center
return points
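# Illustrative note (added): point_rot re-maps 2-D point coordinates when an image
# of size b_size is rotated by theta radians into an image of size a_size -- the
# points are rotated about the centre of the original image and then re-anchored
# at the centre of the new one, e.g. (made-up shapes):
#   new_pts = point_rot(np.array([[10.0, 20.0]]), np.pi / 2, (480, 320), (320, 480))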
def mnt_reader(file_name):
f = open(file_name)
minutiae = []
for i, line in enumerate(f):
if i < 4 or len(line) == 0: continue
w, h, o = [float(x) for x in line.split()]
w, h = int(round(w)), int(round(h))
minutiae.append([w, h, o])
f.close()
return minutiae
def mnt_writer(mnt, image_name, image_size, file_name):
f = open(file_name, 'w')
f.write('%s\n' % (image_name))
f.write('%d %d %d\n' % (mnt.shape[0], image_size[0], image_size[1]))
for i in range(mnt.shape[0]):
f.write('%d %d %.6f %.4f\n' % (mnt[i, 0], mnt[i, 1], mnt[i, 2], mnt[i, 3]))
f.close()
return
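# Illustrative note (added): mnt_writer emits a small plain-text format -- the image
# name, then a header with the minutia count and the two image dimensions, then one
# "x y orientation score" row per minutia (the values below are placeholders):
#   sample_image
#   2 480 320
#   103 57 1.570796 0.9123
#   211 190 0.785398 0.8341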
def gabor_fn(ksize, sigma, theta, Lambda, psi, gamma):
sigma_x = sigma
sigma_y = float(sigma) / gamma
# Bounding box
nstds = 3
xmax = ksize[0] / 2
ymax = ksize[1] / 2
xmin = -xmax
ymin = -ymax
(y, x) = np.meshgrid(np.arange(ymin, ymax + 1), np.arange(xmin, xmax + 1))
# Rotation
x_theta = x * np.cos(theta) + y * np.sin(theta)
y_theta = -x * np.sin(theta) + y * np.cos(theta)
gb_cos = np.exp(-.5 * (x_theta ** 2 / sigma_x ** 2 + y_theta ** 2 / sigma_y ** 2)) * np.cos(
2 * np.pi / Lambda * x_theta + psi)
gb_sin = np.exp(-.5 * (x_theta ** 2 / sigma_x ** 2 + y_theta ** 2 / sigma_y ** 2)) * np.sin(
2 * np.pi / Lambda * x_theta + psi)
return gb_cos, gb_sin
def gabor_bank(stride=2, Lambda=8):
filters_cos = np.ones([25, 25, 180 // stride], dtype=float)
filters_sin = np.ones([25, 25, 180 // stride], dtype=float)
for n, i in enumerate(range(-90, 90, stride)):
theta = i * np.pi / 180.
kernel_cos, kernel_sin = gabor_fn((24, 24), 4.5, -theta, Lambda, 0, 0.5)
filters_cos[..., n] = kernel_cos
filters_sin[..., n] = kernel_sin
filters_cos = np.reshape(filters_cos, [25, 25, 1, -1])
filters_sin = np.reshape(filters_sin, [25, 25, 1, -1])
return filters_cos, filters_sin
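# Illustrative note (added): with the default 2-degree stride the bank contains
# 180 // 2 = 90 orientation-selective kernels, already shaped for conv2d-style use:
#   cos_bank, sin_bank = gabor_bank(stride=2, Lambda=8)
#   cos_bank.shape  ->  (25, 25, 1, 90)   # kernel_h, kernel_w, in_channels, orientations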
def gaussian2d(shape=(5, 5), sigma=0.5):
"""
2D gaussian mask - should give the same result as MATLAB's
fspecial('gaussian',[shape],[sigma])
"""
m, n = [(ss - 1.) / 2. for ss in shape]
y, x = np.ogrid[-m:m + 1, -n:n + 1]
h = np.exp(-(x * x + y * y) / (2. * sigma * sigma))
h[h < np.finfo(h.dtype).eps * h.max()] = 0
sumh = h.sum()
if sumh != 0:
h /= sumh
return h
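# Illustrative note (added): the kernel is normalised to sum to 1, matching
# MATLAB's fspecial('gaussian', [5 5], 0.5):
#   k = gaussian2d((5, 5), 0.5)
#   k.shape  ->  (5, 5)
#   k.sum()  ->  1.0 (up to floating-point rounding)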
def gausslabel(length=180, stride=2):
gaussian_pdf = signal.gaussian(length + 1, 3)
label = np.reshape(np.arange(stride / 2, length, stride), [1, 1, -1, 1])
y = np.reshape(np.arange(stride / 2, length, stride), [1, 1, 1, -1])
delta = np.array(np.abs(label - y), dtype=int)
delta = np.minimum(delta, length - delta) + length // 2
return gaussian_pdf[delta]
def angle_delta(A, B, max_D=np.pi * 2):
delta = np.abs(A - B)
delta = np.minimum(delta, max_D - delta)
return delta
def fmeasure(P, R):
return 2 * P * R / (P + R + 1e-10)
def distance(y_true, y_pred, max_D=16, max_O=np.pi / 6):
D = spatial.distance.cdist(y_true[:, :2], y_pred[:, :2], 'euclidean')
O = spatial.distance.cdist(np.reshape(y_true[:, 2], [-1, 1]), np.reshape(y_pred[:, 2], [-1, 1]), angle_delta)
return (D <= max_D) * (O <= max_O)
def metric_P_R_F(y_true, y_pred, maxd=16, maxo=np.pi / 6):
# Calculate Precision, Recall, F-score
if y_pred.shape[0] == 0 or y_true.shape[0] == 0:
return 0, 0, 0, 0, 0
y_true, y_pred = np.array(y_true), np.array(y_pred)
total_gt, total = float(y_true.shape[0]), float(y_pred.shape[0])
# Using L2 loss
dis = spatial.distance.cdist(y_pred[:, :2], y_true[:, :2], 'euclidean')
mindis, idx = dis.min(axis=1), dis.argmin(axis=1)
# Change to adapt to new annotation: old version. When training, comment it
# y_pred[:,2] = -y_pred[:,2]
angle = abs(np.mod(y_pred[:, 2], 2 * np.pi) - y_true[idx, 2])
angle = np.asarray([angle, 2 * np.pi - angle]).min(axis=0)
# Satisfy the threshold
tmp = (mindis <= maxd) & (angle <= maxo)
# print('mindis,idx,angle,tmp=%s,%s,%s,%s'%(mindis,idx,angle,tmp))
precision = len(np.unique(idx[(mindis <= maxd) & (angle <= maxo)])) / float(y_pred.shape[0])
recall = len(np.unique(idx[(mindis <= maxd) & (angle <= maxo)])) / float(y_true.shape[0])
# print('pre=%f/ %f'%(len(np.unique(idx[(mindis <= maxd) & (angle<=maxo)])),float(y_pred.shape[0])))
# print('recall=%f/ %f'%(len(np.unique(idx[(mindis <= maxd) & (angle<=maxo)])),float(y_true.shape[0])))
if recall != 0:
loc = np.mean(mindis[(mindis <= maxd) & (angle <= maxo)])
ori = np.mean(angle[(mindis <= maxd) & (angle <= maxo)])
else:
loc = 0
ori = 0
return precision, recall, fmeasure(precision, recall), loc, ori
def nms(mnt):
if mnt.shape[0] == 0:
return mnt
# sort score
mnt_sort = mnt.tolist()
mnt_sort.sort(key=lambda x: x[3], reverse=True)
mnt_sort = np.array(mnt_sort)
# cal distance
inrange = distance(mnt_sort, mnt_sort, max_D=16, max_O=np.pi / 6).astype(np.float32)
keep_list = np.ones(mnt_sort.shape[0])
for i in range(mnt_sort.shape[0]):
if keep_list[i] == 0:
continue
keep_list[i + 1:] = keep_list[i + 1:] * (1 - inrange[i, i + 1:])
return mnt_sort[keep_list.astype(np.bool), :]
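# Illustrative sketch (added): minutiae are rows of [x, y, orientation(rad), score];
# nms keeps the higher-scoring of any two detections closer than 16 px and pi/6 rad.
# The coordinates below are made up and the helper name is hypothetical.
def _nms_demo():
    mnt = np.array([[10.0, 10.0, 0.00, 0.9],
                    [12.0, 11.0, 0.05, 0.5],     # near-duplicate of the first row
                    [100.0, 100.0, 1.00, 0.8]])
    return nms(mnt)  # expected to keep the rows scored 0.9 and 0.8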
def fuse_nms(mnt, mnt_set_2):
if mnt.shape[0] == 0:
return mnt
# sort score
all_mnt = np.concatenate((mnt, mnt_set_2))
mnt_sort = all_mnt.tolist()
mnt_sort.sort(key=lambda x: x[3], reverse=True)
mnt_sort = np.array(mnt_sort)
# cal distance
inrange = distance(mnt_sort, mnt_sort, max_D=16, max_O=2 * np.pi).astype(np.float32)
keep_list = np.ones(mnt_sort.shape[0])
for i in range(mnt_sort.shape[0]):
if keep_list[i] == 0:
continue
keep_list[i + 1:] = keep_list[i + 1:] * (1 - inrange[i, i + 1:])
return mnt_sort[keep_list.astype(np.bool), :]
def py_cpu_nms(det, thresh):
if det.shape[0] == 0:
return det
dets = det.tolist()
dets.sort(key=lambda x: x[3], reverse=True)
dets = np.array(dets)
box_sz = 25
x1 = np.reshape(dets[:, 0], [-1, 1]) - box_sz
y1 = np.reshape(dets[:, 1], [-1, 1]) - box_sz
x2 = np.reshape(dets[:, 0], [-1, 1]) + box_sz
y2 = np.reshape(dets[:, 1], [-1, 1]) + box_sz
scores = dets[:, 2]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return dets[keep, :]
def draw_minutiae(image, minutiae, fname, saveimage=False, r=15, drawScore=False):
image = np.squeeze(image)
fig = plt.figure()
plt.imshow(image, cmap='gray')
# plt.hold(True)
# Check if no minutiae
if minutiae.shape[0] > 0:
plt.plot(minutiae[:, 0], minutiae[:, 1], 'rs', fillstyle='none', linewidth=1)
for x, y, o, s in minutiae:
plt.plot([x, x + r * np.cos(o)], [y, y + r * np.sin(o)], 'r-')
if drawScore == True:
plt.text(x - 10, y - 10, '%.2f' % s, color='yellow', fontsize=4)
plt.axis([0, image.shape[1], image.shape[0], 0])
plt.axis('off')
if saveimage:
plt.savefig(fname, dpi=500, bbox_inches='tight', pad_inches=0)
plt.close(fig)
else:
plt.show()
return
def draw_minutiae_overlay(image, minutiae, mnt_gt, fname, saveimage=False, r=15, drawScore=False):
image = np.squeeze(image)
fig = plt.figure()
plt.imshow(image, cmap='gray')
    # plt.hold(True)
if mnt_gt.shape[1] > 3:
mnt_gt = mnt_gt[:, :3]
if mnt_gt.shape[0] > 0:
plt.plot(mnt_gt[:, 0], mnt_gt[:, 1], 'bs', fillstyle='none', linewidth=1)
for x, y, o in mnt_gt:
plt.plot([x, x + r * np.cos(o)], [y, y + r * np.sin(o)], 'b-')
    if minutiae.shape[0] > 0:
        plt.plot(minutiae[:, 0], minutiae[:, 1], 'rs', fillstyle='none', linewidth=1)
        for mnt_row in minutiae:
            x, y, o = mnt_row[:3]
            plt.plot([x, x + r * np.cos(o)], [y, y + r * np.sin(o)], 'r-')
            if drawScore == True and len(mnt_row) > 3:
                plt.text(x - 10, y - 10, '%.2f' % mnt_row[3], color='yellow', fontsize=4)
plt.axis([0, image.shape[1], image.shape[0], 0])
plt.axis('off')
if saveimage:
plt.savefig(fname, dpi=500, bbox_inches='tight')
plt.close(fig)
else:
plt.show()
return
def draw_minutiae_overlay_with_score(image, minutiae, mnt_gt, fname, saveimage=False, r=15):
image = np.squeeze(image)
fig = plt.figure()
plt.imshow(image, cmap='gray')
    # plt.hold(True)
if mnt_gt.shape[0] > 0:
plt.plot(mnt_gt[:, 0], mnt_gt[:, 1], 'bs', fillstyle='none', linewidth=1)
if mnt_gt.shape[1] > 3:
for x, y, o, s in mnt_gt:
plt.plot([x, x + r * np.cos(o)], [y, y + r * np.sin(o)], 'b-')
plt.text(x - 10, y - 5, '%.2f' % s, color='green', fontsize=4)
else:
for x, y, o in mnt_gt:
plt.plot([x, x + r * np.cos(o)], [y, y + r * np.sin(o)], 'b-')
if minutiae.shape[0] > 0:
plt.plot(minutiae[:, 0], minutiae[:, 1], 'rs', fillstyle='none', linewidth=1)
for x, y, o, s in minutiae:
plt.plot([x, x + r * np.cos(o)], [y, y + r * np.sin(o)], 'r-')
plt.text(x - 10, y - 10, '%.2f' % s, color='yellow', fontsize=4)
plt.axis([0, image.shape[1], image.shape[0], 0])
plt.axis('off')
if saveimage:
plt.savefig(fname, dpi=500, bbox_inches='tight')
plt.close(fig)
else:
plt.show()
return
def draw_ori_on_img(img, ori, mask, fname, saveimage=False, coh=None, stride=16):
ori = np.squeeze(ori)
# mask = np.squeeze(np.round(mask))
img = np.squeeze(img)
ori = ndimage.zoom(ori, np.array(img.shape) / np.array(ori.shape, dtype=float), order=0)
if mask.shape != img.shape:
mask = ndimage.zoom(mask, np.array(img.shape) / np.array(mask.shape, dtype=float), order=0)
if coh is None:
coh = np.ones_like(img)
fig = plt.figure()
plt.imshow(img, cmap='gray')
    # plt.hold(True)
for i in range(stride, img.shape[0], stride):
for j in range(stride, img.shape[1], stride):
if mask[i, j] == 0:
continue
x, y, o, r = j, i, ori[i, j], coh[i, j] * (stride * 0.9)
plt.plot([x, x + r * np.cos(o)], [y, y + r * np.sin(o)], 'r-')
plt.axis([0, img.shape[1], img.shape[0], 0])
plt.axis('off')
if saveimage:
plt.savefig(fname, bbox_inches='tight')
plt.close(fig)
else:
plt.show()
return
def local_constrast_enhancement(img):
img = img.astype(np.float32)
meanV = cv2.blur(img, (15, 15))
normalized = img - meanV
var = abs(normalized)
var = cv2.blur(var, (15, 15))
normalized = normalized / (var + 10) * 0.75
normalized = np.clip(normalized, -1, 1)
normalized = (normalized + 1) * 127.5
return normalized
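# Illustrative sketch (added): local_constrast_enhancement normalises each pixel by a
# 15x15 local mean/deviation and rescales into [0, 255]. The random input image and
# the helper name below are made up for illustration.
def _local_contrast_demo():
    img = (np.random.rand(64, 64) * 255).astype(np.uint8)
    out = local_constrast_enhancement(img)
    return out.min(), out.max()  # bounded by the final clip to [-1, 1] -> [0, 255]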
def get_quality_map_ori_dict(img, dict, spacing, dir_map=None, block_size=16):
if img.dtype == 'uint8':
img = img.astype(np.float)
img = FastEnhanceTexture(img)
h, w = img.shape
blkH, blkW = dir_map.shape
quality_map = np.zeros((blkH, blkW), dtype=np.float)
fre_map = np.zeros((blkH, blkW), dtype=np.float)
ori_num = len(dict)
# dir_map = math.pi/2 - dir_map
dir_ind = dir_map * ori_num / math.pi
dir_ind = dir_ind.astype(np.int)
dir_ind = dir_ind % ori_num
patch_size = np.sqrt(dict[0].shape[1])
patch_size = patch_size.astype(np.int)
pad_size = (patch_size - block_size) // 2
img = np.lib.pad(img, (pad_size, pad_size), 'symmetric')
for i in range(0, blkH):
for j in range(0, blkW):
ind = dir_ind[i, j]
patch = img[i * block_size:i * block_size + patch_size, j * block_size:j * block_size + patch_size]
patch = patch.reshape(patch_size * patch_size, )
patch = patch - np.mean(patch)
patch = patch / (np.linalg.norm(patch) + 0.0001)
patch[patch > 0.05] = 0.05
patch[patch < -0.05] = -0.05
simi = np.dot(dict[ind], patch)
similar_ind = np.argmax(abs(simi))
quality_map[i, j] = np.max(abs(simi))
fre_map[i, j] = 1. / spacing[ind][similar_ind]
quality_map = gaussian(quality_map, sigma=2)
return quality_map, fre_map
def FastEnhanceTexture(img, sigma=2.5, show=False):
img = img.astype(np.float32)
h, w = img.shape
h2 = 2 ** nextpow2(h)
w2 = 2 ** nextpow2(w)
FFTsize = np.max([h2, w2])
x, y = np.meshgrid(range(-FFTsize // 2, FFTsize // 2), range(-FFTsize // 2, FFTsize // 2))
r = np.sqrt(x * x + y * y) + 0.0001
    r = r / FFTsize
    L = 1. / (1 + (2 * math.pi * r * sigma) ** 4)  # low-pass transfer function
img_low = LowpassFiltering(img, L)
gradim1 = compute_gradient_norm(img)
gradim1 = LowpassFiltering(gradim1, L)
gradim2 = compute_gradient_norm(img_low)
gradim2 = LowpassFiltering(gradim2, L)
diff = gradim1 - gradim2
ar1 = np.abs(gradim1)
    diff[ar1 > 1] = diff[ar1 > 1] / ar1[ar1 > 1]
diff[ar1 <= 1] = 0
cmin = 0.3
cmax = 0.7
weight = (diff - cmin) / (cmax - cmin)
weight[diff < cmin] = 0
weight[diff > cmax] = 1
u = weight * img_low + (1 - weight) * img
temp = img - u
lim = 20
temp1 = (temp + lim) * 255 / (2 * lim)
temp1[temp1 < 0] = 0
temp1[temp1 > 255] = 255
v = temp1
if show:
plt.imshow(v, cmap='gray')
plt.show()
return v
def compute_gradient_norm(input):
input = input.astype(np.float32)
Gx, Gy = np.gradient(input)
out = np.sqrt(Gx * Gx + Gy * Gy) + 0.000001
return out
def LowpassFiltering(img, L):
h, w = img.shape
h2, w2 = L.shape
img = cv2.copyMakeBorder(img, 0, h2 - h, 0, w2 - w, cv2.BORDER_CONSTANT, value=0)
img_fft = np.fft.fft2(img)
img_fft = np.fft.fftshift(img_fft)
img_fft = img_fft * L
rec_img = np.fft.ifft2(np.fft.fftshift(img_fft))
rec_img = np.real(rec_img)
rec_img = rec_img[:h, :w]
return rec_img
def nextpow2(x):
return int(math.ceil(math.log(x, 2)))
def construct_dictionary(ori_num=30):
ori_dict = []
s = []
for i in range(ori_num):
ori_dict.append([])
s.append([])
patch_size2 = 16
patch_size = 32
dict_all = []
spacing_all = []
ori_all = []
Y, X = np.meshgrid(range(-patch_size2, patch_size2), range(-patch_size2, patch_size2))
for spacing in range(6, 13):
for valley_spacing in range(3, spacing // 2):
ridge_spacing = spacing - valley_spacing
for k in range(ori_num):
theta = np.pi / 2 - k * np.pi / ori_num
X_r = X * np.cos(theta) - Y * np.sin(theta)
for offset in range(0, spacing - 1, 2):
X_r_offset = X_r + offset + ridge_spacing / 2
X_r_offset = np.remainder(X_r_offset, spacing)
Y1 = np.zeros((patch_size, patch_size))
Y2 = np.zeros((patch_size, patch_size))
Y1[X_r_offset <= ridge_spacing] = X_r_offset[X_r_offset <= ridge_spacing]
Y2[X_r_offset > ridge_spacing] = X_r_offset[X_r_offset > ridge_spacing] - ridge_spacing
element = -np.sin(2 * math.pi * (Y1 / ridge_spacing / 2)) + np.sin(
2 * math.pi * (Y2 / valley_spacing / 2))
element = element.reshape(patch_size * patch_size, )
element = element - np.mean(element)
element = element / np.linalg.norm(element)
ori_dict[k].append(element)
s[k].append(spacing)
dict_all.append(element)
spacing_all.append(1.0 / spacing)
ori_all.append(theta)
for i in range(len(ori_dict)):
ori_dict[i] = np.asarray(ori_dict[i])
        s[i] = np.asarray(s[i])
dict_all = np.asarray(dict_all)
dict_all = np.transpose(dict_all)
spacing_all = np.asarray(spacing_all)
ori_all = np.asarray(ori_all)
return ori_dict, s, dict_all, ori_all, spacing_all
def get_maps_STFT(img, patch_size=64, block_size=16, preprocess=False):
assert len(img.shape) == 2
nrof_dirs = 16
ovp_size = (patch_size - block_size) // 2
if preprocess:
img = FastEnhanceTexture(img, sigma=2.5, show=False)
img = np.lib.pad(img, (ovp_size, ovp_size), 'symmetric')
h, w = img.shape
blkH = (h - patch_size) // block_size + 1
blkW = (w - patch_size) // block_size + 1
local_info = np.empty((blkH, blkW), dtype=object)
x, y = np.meshgrid(range(-patch_size // 2, patch_size // 2), range(-patch_size // 2, patch_size // 2))
x = x.astype(np.float32)
y = y.astype(np.float32)
r = np.sqrt(x * x + y * y) + 0.0001
RMIN = 3 # min allowable ridge spacing
RMAX = 18 # maximum allowable ridge spacing
FLOW = patch_size // RMAX
FHIGH = patch_size // RMIN
    dRLow = 1. / (1 + (r / FHIGH) ** 4)  # low-pass radial mask
    dRHigh = 1. / (1 + (FLOW / r) ** 4)  # high-pass radial mask
dBPass = dRLow * dRHigh # bandpass
dir = np.arctan2(y, x)
dir[dir < 0] = dir[dir < 0] + math.pi
    dir_ind = np.floor(dir / (math.pi / nrof_dirs))
dir_ind = dir_ind.astype(np.int, copy=False)
dir_ind[dir_ind == nrof_dirs] = 0
dir_ind_list = []
for i in range(nrof_dirs):
tmp = np.argwhere(dir_ind == i)
dir_ind_list.append(tmp)
sigma = patch_size // 3
    weight = np.exp(-(x * x + y * y) / (sigma * sigma))
for i in range(0, blkH):
for j in range(0, blkW):
patch = img[i * block_size:i * block_size + patch_size, j * block_size:j * block_size + patch_size].copy()
local_info[i, j] = local_STFT(patch, weight, dBPass)
local_info[i, j].analysis(r, dir_ind_list)
# get the ridge flow from the local information
dir_map, fre_map = get_ridge_flow_top(local_info)
dir_map = smooth_dir_map(dir_map)
return dir_map, fre_map
def smooth_dir_map(dir_map, sigma=2.0, mask=None):
cos2Theta = np.cos(dir_map * 2)
sin2Theta = np.sin(dir_map * 2)
if mask is not None:
assert (dir_map.shape[0] == mask.shape[0])
assert (dir_map.shape[1] == mask.shape[1])
cos2Theta[mask == 0] = 0
sin2Theta[mask == 0] = 0
cos2Theta = gaussian(cos2Theta, sigma, multichannel=False, mode='reflect')
sin2Theta = gaussian(sin2Theta, sigma, multichannel=False, mode='reflect')
dir_map = np.arctan2(sin2Theta, cos2Theta) * 0.5
return dir_map
def get_ridge_flow_top(local_info):
blkH, blkW = local_info.shape
dir_map = np.zeros((blkH, blkW)) - 10
fre_map = np.zeros((blkH, blkW)) - 10
for i in range(blkH):
for j in range(blkW):
if local_info[i, j].ori is None:
continue
dir_map[i, j] = local_info[i, j].ori[0] # + math.pi*0.5
fre_map[i, j] = local_info[i, j].fre[0]
return dir_map, fre_map
class local_STFT:
def __init__(self, patch, weight=None, dBPass=None):
if weight is not None:
patch = patch * weight
patch = patch - np.mean(patch)
norm = np.linalg.norm(patch)
patch = patch / (norm + 0.000001)
f = np.fft.fft2(patch)
fshift = np.fft.fftshift(f)
if dBPass is not None:
fshift = dBPass * fshift
self.patch_FFT = fshift
self.patch = patch
self.ori = None
self.fre = None
self.confidence = None
self.patch_size = patch.shape[0]
def analysis(self, r, dir_ind_list=None, N=2):
assert (dir_ind_list is not None)
energy = np.abs(self.patch_FFT)
energy = energy / (np.sum(energy) + 0.00001)
nrof_dirs = len(dir_ind_list)
ori_interval = math.pi / nrof_dirs
ori_interval2 = ori_interval / 2
pad_size = 1
dir_norm = np.zeros((nrof_dirs + 2,))
for i in range(nrof_dirs):
tmp = energy[dir_ind_list[i][:, 0], dir_ind_list[i][:, 1]]
dir_norm[i + 1] = np.sum(tmp)
dir_norm[0] = dir_norm[nrof_dirs]
dir_norm[nrof_dirs + 1] = dir_norm[1]
# smooth dir_norm
smoothed_dir_norm = dir_norm
for i in range(1, nrof_dirs + 1):
smoothed_dir_norm[i] = (dir_norm[i - 1] + dir_norm[i] * 4 + dir_norm[i + 1]) / 6
smoothed_dir_norm[0] = smoothed_dir_norm[nrof_dirs]
smoothed_dir_norm[nrof_dirs + 1] = smoothed_dir_norm[1]
den = np.sum(smoothed_dir_norm[1:nrof_dirs + 1]) + 0.00001 # verify if den == 1
smoothed_dir_norm = smoothed_dir_norm / den # normalization if den == 1, this line can be removed
ori = []
fre = []
confidence = []
wenergy = energy * r
for i in range(1, nrof_dirs + 1):
if smoothed_dir_norm[i] > smoothed_dir_norm[i - 1] and smoothed_dir_norm[i] > smoothed_dir_norm[i + 1]:
tmp_ori = (i - pad_size) * ori_interval + ori_interval2 + math.pi / 2
ori.append(tmp_ori)
confidence.append(smoothed_dir_norm[i])
tmp_fre = np.sum(wenergy[dir_ind_list[i - pad_size][:, 0], dir_ind_list[i - pad_size][:, 1]]) / \
dir_norm[i]
tmp_fre = 1 / (tmp_fre + 0.00001)
fre.append(tmp_fre)
if len(confidence) > 0:
confidence = np.asarray(confidence)
fre = np.asarray(fre)
ori = np.asarray(ori)
ind = confidence.argsort()[::-1]
confidence = confidence[ind]
fre = fre[ind]
ori = ori[ind]
if len(confidence) >= 2 and confidence[0] / confidence[1] > 2.0:
self.ori = [ori[0]]
self.fre = [fre[0]]
self.confidence = [confidence[0]]
elif len(confidence) > N:
fre = fre[:N]
ori = ori[:N]
confidence = confidence[:N]
self.ori = ori
self.fre = fre
self.confidence = confidence
else:
self.ori = ori
self.fre = fre
self.confidence = confidence
def get_features_of_topN(self, N=2):
if self.confidence is None:
self.border_wave = None
return
candi_num = len(self.ori)
candi_num = np.min([candi_num, N])
patch_size = self.patch_FFT.shape
for i in range(candi_num):
kernel = gabor_kernel(self.fre[i], theta=self.ori[i], sigma_x=10, sigma_y=10)
kernel_f = np.fft.fft2(kernel.real, patch_size)
kernel_f = np.fft.fftshift(kernel_f)
patch_f = self.patch_FFT * kernel_f
patch_f = np.fft.ifftshift(patch_f) # *np.sqrt(np.abs(fshift)))
rec_patch = np.real(np.fft.ifft2(patch_f))
plt.subplot(121), plt.imshow(self.patch, cmap='gray')
plt.title('Input patch'), plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(rec_patch, cmap='gray')
plt.title('filtered patch'), plt.xticks([]), plt.yticks([])
plt.show()
def reconstruction(self, weight=None):
f_ifft = np.fft.ifftshift(self.patch_FFT) # *np.sqrt(np.abs(fshift)))
rec_patch = np.real(np.fft.ifft2(f_ifft))
if weight is not None:
rec_patch = rec_patch * weight
return rec_patch
def gabor_filtering(self, theta, fre, weight=None):
patch_size = self.patch_FFT.shape
kernel = gabor_kernel(fre, theta=theta, sigma_x=4, sigma_y=4)
f = kernel.real
f = f - np.mean(f)
f = f / (np.linalg.norm(f) + 0.0001)
kernel_f = np.fft.fft2(f, patch_size)
kernel_f = np.fft.fftshift(kernel_f)
patch_f = self.patch_FFT * kernel_f
patch_f = np.fft.ifftshift(patch_f) # *np.sqrt(np.abs(fshift)))
rec_patch = np.real(np.fft.ifft2(patch_f))
if weight is not None:
rec_patch = rec_patch * weight
return rec_patch
def show_orientation_field(img, dir_map, mask=None, fname=None):
h, w = img.shape[:2]
if mask is None:
mask = np.ones((h, w), dtype=np.uint8)
blkH, blkW = dir_map.shape
blk_size = h / blkH
R = blk_size / 2 * 0.8
fig, ax = plt.subplots(1)
ax.imshow(img, cmap='gray')
for i in range(blkH):
y0 = i * blk_size + blk_size / 2
y0 = int(y0)
for j in range(blkW):
x0 = j * blk_size + blk_size / 2
x0 = int(x0)
ori = dir_map[i, j]
if mask[y0, x0] == 0:
continue
if ori < -9:
continue
x1 = x0 - R * math.cos(ori)
x2 = x0 + R * math.cos(ori)
y1 = y0 - R * math.sin(ori)
y2 = y0 + R * math.sin(ori)
plt.plot([x1, x2], [y1, y2], 'r-', lw=0.5)
plt.axis('off')
if fname is not None:
fig.savefig(fname, dpi=500, bbox_inches='tight', pad_inches=0)
plt.close()
else:
plt.show(block=True)
| 32.785542 | 118 | 0.561921 | 5,101 | 0.187454 | 0 | 0 | 0 | 0 | 0 | 0 | 1,487 | 0.054645 |
d3c9a9f08cb2ab991b3fa5be8156332e24b37380 | 52 | py | Python | config/paths.py | fusic-com/flask-todo | 909ce22132ed081feca02e2fb255afa08b59611d | [
"MIT"
]
| 34 | 2015-01-08T07:11:54.000Z | 2021-08-28T23:55:25.000Z | config/paths.py | spacecode-live/flask-todo | 909ce22132ed081feca02e2fb255afa08b59611d | [
"MIT"
]
| null | null | null | config/paths.py | spacecode-live/flask-todo | 909ce22132ed081feca02e2fb255afa08b59611d | [
"MIT"
]
| 13 | 2015-02-10T09:48:53.000Z | 2021-03-02T15:23:21.000Z | from settings import VAR_DIR
CACHE=VAR_DIR/'cache'
| 13 | 28 | 0.807692 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.134615 |
d3c9f4c940421bb8e75ec41e434f5dfd39d574c9 | 1,687 | py | Python | Android.py | ChakradharG/Sudoku-Core | 5963db235cecec4cc6682380c30b7af10a3c4d11 | [
"MIT"
]
| null | null | null | Android.py | ChakradharG/Sudoku-Core | 5963db235cecec4cc6682380c30b7af10a3c4d11 | [
"MIT"
]
| 1 | 2022-02-10T07:19:40.000Z | 2022-02-10T07:19:40.000Z | Android.py | ChakradharG/Sudoku-Solver | 5963db235cecec4cc6682380c30b7af10a3c4d11 | [
"MIT"
]
| null | null | null | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' #To suppress warnings thrown by tensorflow
from time import sleep
import numpy as np
from cv2 import cv2
import pyautogui as pg
import Sudoku_Core as SC
import OCR
s = 513//9 #Size of board//9
fs = 25 #Size of the final image
def getBoard():
pg.click(266, 740)
sleep(1)
pg.click(266, 930) #Changing the difficulty to expert
sleep(2)
image = pg.screenshot(region=(10, 187, 513, 513))
image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2GRAY)
_,image = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY_INV)
return image
def readBoard(image):
for i in range(9):
for j in range(9):
subImage = image[i*s + 3: (i+1)*s - 3, j*s + 3: (j+1)*s - 3] #(+3, -3) is a hack to remove border contours
contour, _ = cv2.findContours(subImage, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if contour != []:
(x, y, w, h) = cv2.boundingRect(contour[0])
img = cv2.resize(subImage[y: y+h, x: x+w], (fs, fs), interpolation=cv2.INTER_AREA)
else:
img = np.zeros((fs,fs), dtype='uint8')
SC.board[i][j] = OCR.model.predict(img.reshape(1, fs, fs, 1)).argmax()
def outputBoard():
for ((posY, posX), v) in SC.moves.items():
posX = 42 + posX * 57
posY = 216 + posY * 57
pg.moveTo(posX, posY, 0.1)
pg.click()
# vX = 42 + 55*(v-1)
# vY = 843
# pg.moveTo(vX, vY, 0.1) #To use the numpad in the app
# pg.click()
pg.typewrite(str(v)) #To send numbers from the keyboard
def main():
image = getBoard()
readBoard(image)
print('Got the board, now solving')
if SC.solve(0, 0):
outputBoard()
else:
print('Couldn\'t solve')
input('Press any key to exit')
if __name__ == '__main__':
main()
| 24.449275 | 109 | 0.653231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 402 | 0.238293 |
d3cb3a07ae3bcc910cc22e9e664b83887e73f8fe | 3,570 | py | Python | app/model.py | kurapikaaaa/CITS3403Project | 8958219845d5251830f2abd7c58dfd87d97b8c4a | [
"MIT"
]
| 1 | 2021-08-04T12:50:57.000Z | 2021-08-04T12:50:57.000Z | app/model.py | kurapikaaaa/CITS3403Project | 8958219845d5251830f2abd7c58dfd87d97b8c4a | [
"MIT"
]
| null | null | null | app/model.py | kurapikaaaa/CITS3403Project | 8958219845d5251830f2abd7c58dfd87d97b8c4a | [
"MIT"
]
| 1 | 2021-08-12T10:40:28.000Z | 2021-08-12T10:40:28.000Z | from app import db, login
from flask_login import UserMixin
from datetime import datetime
from flask import url_for, redirect
from werkzeug.security import generate_password_hash, check_password_hash
class users(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
username = db.Column(db.String(100), nullable=False, unique=True)
password = db.Column(db.String(96), nullable=False)
email = db.Column(db.String(128), nullable=False, unique=True)
firstname = db.Column(db.String(130), nullable=False)
lastname = db.Column(db.String(130), nullable=False)
lastLogin = db.Column(db.DateTime)
isActive = db.Column(db.Boolean)
isAdmin = db.Column(db.Boolean)
noteHighScore = db.Column(db.Integer)
KeyHighScore = db.Column(db.Integer)
submit = db.relationship("submission", backref="submitter")
###################################################
def __init__(self):
self.isActive = True
self.isAdmin = False
self.noteHighScore = 0
self.lastLogin = None
self.KeyHighScore = 0
def set_password(self, pwd):
self.password = generate_password_hash(pwd, method="sha384")
def check_password(self, pwd):
return check_password_hash(self.password, pwd)
def is_active(self):
return self.isActive
def validate(self):
if self.username and self.email and self.firstname and self.lastname:
return True
else:
return False
def getSubmissions(self):
res = submission.query.filter_by(creater_id=self.id).all()
return res
def __repr__(self):
return '<user %r>' % self.username
class submission(db.Model):
__tablename__ = 'submission'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
createdAt = db.Column(db.DateTime, nullable=False)
markedAt = db.Column(db.DateTime)
feedback = db.Column(db.Boolean)
totalmark = db.Column(db.Integer)
difficulty = db.Column(db.String(30), nullable=False)
passed = db.Column(db.Boolean)
creater_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=False)
answers = db.relationship("answer", backref="submission")
def __init__(self):
self.createdAt = datetime.utcnow()
self.markedAt = None
self.feedback = False
self.totalmark = None
self.marked = False
self.passed = False
def validate(self):
if self.difficulty and self.creater_id and self.createdAt:
return True
def __repr__(self):
return '<submission %r>' % self.id
class answer(db.Model):
__tablename__ = 'answer'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
answerSeq = db.Column(db.Integer)
submittedAnswer = db.Column(db.String(400))
feedback = db.Column(db.String(400))
markreceived = db.Column(db.Boolean)
submissionId = db.Column(db.Integer, db.ForeignKey("submission.id"))
def __init__(self):
self.feedback = None
self.markreceived = False
def validate(self):
if self.answerSeq and self.submittedAnswer and self.submissionId:
return True
else:
print("missingfield")
return False
def __repr__(self):
return '<ans>'
@login.user_loader
def load_user(usr_id):
return users.query.get(int(usr_id))
@login.unauthorized_handler
def unauthorized():
return redirect(url_for("auth.login"))
| 30.254237 | 81 | 0.654622 | 3,174 | 0.889076 | 0 | 0 | 171 | 0.047899 | 0 | 0 | 215 | 0.060224 |
d3cb4fc2b23e4f4fb2c765f3d7673f2b43240708 | 19,911 | py | Python | bert_multitask_learning/top.py | akashnd/bert-multitask-learning | aee5be006ef6a3feadf0c751a6f9b42c24c3fd21 | [
"Apache-2.0"
]
| 1 | 2021-07-11T14:07:59.000Z | 2021-07-11T14:07:59.000Z | bert_multitask_learning/top.py | akashnd/bert-multitask-learning | aee5be006ef6a3feadf0c751a6f9b42c24c3fd21 | [
"Apache-2.0"
]
| null | null | null | bert_multitask_learning/top.py | akashnd/bert-multitask-learning | aee5be006ef6a3feadf0c751a6f9b42c24c3fd21 | [
"Apache-2.0"
]
| null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: source_nbs/12_top.ipynb (unless otherwise specified).
__all__ = ['empty_tensor_handling_loss', 'nan_loss_handling', 'create_dummy_if_empty', 'BaseTop', 'SequenceLabel',
'Classification', 'PreTrain', 'Seq2Seq', 'MultiLabelClassification', 'MaskLM']
# Cell
import logging
from functools import partial
from typing import Dict, Tuple, Union
import tensorflow as tf
import tensorflow_addons as tfa
import transformers
from transformers.modeling_tf_utils import TFSharedEmbeddings
from tensorflow_addons.layers.crf import CRF
from tensorflow_addons.text.crf import crf_log_likelihood
from .params import BaseParams
from .utils import gather_indexes
@tf.function
def empty_tensor_handling_loss(labels, logits, loss_fn):
if tf.equal(tf.size(labels), 0):
return 0.0
if tf.equal(tf.size(tf.shape(labels)), 0):
return 0.0
if tf.equal(tf.shape(labels)[0], 0):
return 0.0
else:
return tf.reduce_mean(loss_fn(
labels, logits, from_logits=True))
@tf.function
def nan_loss_handling(loss):
if tf.math.is_nan(loss):
return 0.0
else:
return loss
@tf.function
def create_dummy_if_empty(inp_tensor: tf.Tensor) -> tf.Tensor:
shape_tensor = tf.shape(inp_tensor)
if tf.equal(shape_tensor[0], 0):
data_type = inp_tensor.dtype
dummy_shape_first_dim = tf.convert_to_tensor([1], dtype=tf.int32)
dummy_shape = tf.concat(
[dummy_shape_first_dim, shape_tensor[1:]], axis=0)
dummy_tensor = tf.zeros(dummy_shape, dtype=data_type)
return dummy_tensor
else:
return inp_tensor
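# Illustrative sketch (added, not used by the library): the helpers above guard the
# task tops against zero-length batches. An empty [0, 4] tensor is replaced by an
# all-zero [1, 4] dummy; non-empty tensors pass through unchanged.
def _create_dummy_if_empty_demo():
    dummy = create_dummy_if_empty(tf.zeros([0, 4]))
    kept = create_dummy_if_empty(tf.ones([2, 4]))
    return tf.shape(dummy), tf.shape(kept)  # [1 4] and [2 4]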
class BaseTop(tf.keras.Model):
def __init__(self, params: BaseParams, problem_name: str) -> None:
super(BaseTop, self).__init__(name=problem_name)
self.params = params
self.problem_name = problem_name
def call(self, inputs: Tuple[Dict], mode: str):
raise NotImplementedError
# Cell
class SequenceLabel(tf.keras.Model):
def __init__(self, params: BaseParams, problem_name: str):
super(SequenceLabel, self).__init__(name=problem_name)
self.params = params
self.problem_name = problem_name
num_classes = self.params.num_classes[self.problem_name]
self.dense = tf.keras.layers.Dense(num_classes, activation=None)
self.dropout = tf.keras.layers.Dropout(1-params.dropout_keep_prob)
if self.params.crf:
self.crf = CRF(num_classes)
self.metric_fn = tf.keras.metrics.Accuracy(
name='{}_acc'.format(self.problem_name)
)
else:
self.metric_fn = tf.keras.metrics.SparseCategoricalAccuracy(
name='{}_acc'.format(self.problem_name))
def return_crf_result(self, labels: tf.Tensor, logits: tf.Tensor, mode: str, input_mask: tf.Tensor):
input_mask.set_shape([None, None])
logits = create_dummy_if_empty(logits)
input_mask = create_dummy_if_empty(input_mask)
viterbi_decoded, potentials, sequence_length, chain_kernel = self.crf(
logits, input_mask)
if mode != tf.estimator.ModeKeys.PREDICT:
loss = -crf_log_likelihood(potentials,
labels, sequence_length, chain_kernel)[0]
loss = tf.reduce_mean(loss)
loss = nan_loss_handling(loss)
self.add_loss(loss)
acc = self.metric_fn(
labels, viterbi_decoded, sample_weight=input_mask)
self.add_metric(acc)
# make the crf prediction has the same shape as non-crf prediction
return tf.one_hot(viterbi_decoded, name='%s_predict' % self.problem_name, depth=self.params.num_classes[self.problem_name])
def call(self, inputs, mode):
training = (mode == tf.estimator.ModeKeys.TRAIN)
feature, hidden_feature = inputs
hidden_feature = hidden_feature['seq']
if mode != tf.estimator.ModeKeys.PREDICT:
labels = feature['{}_label_ids'.format(self.problem_name)]
# sometimes the length of labels dose not equal to length of inputs
# that's caused by tf.data.experimental.bucket_by_sequence_length in multi problem scenario
pad_len = tf.shape(input=hidden_feature)[
1] - tf.shape(input=labels)[1]
# top, bottom, left, right
pad_tensor = [[0, 0], [0, pad_len]]
labels = tf.pad(tensor=labels, paddings=pad_tensor)
else:
labels = None
hidden_feature = self.dropout(hidden_feature, training)
if self.params.crf:
return self.return_crf_result(labels, hidden_feature, mode, feature['model_input_mask'])
logits = self.dense(hidden_feature)
if mode != tf.estimator.ModeKeys.PREDICT:
loss = empty_tensor_handling_loss(
labels, logits,
tf.keras.losses.sparse_categorical_crossentropy)
self.add_loss(loss)
acc = self.metric_fn(
labels, logits, sample_weight=feature['model_input_mask'])
self.add_metric(acc)
return tf.nn.softmax(
logits, name='%s_predict' % self.problem_name)
# Cell
class Classification(tf.keras.layers.Layer):
def __init__(self, params: BaseParams, problem_name: str) -> None:
super(Classification, self).__init__(name=problem_name)
self.params = params
self.problem_name = problem_name
num_classes = self.params.num_classes[self.problem_name]
self.dense = tf.keras.layers.Dense(num_classes, activation=None)
self.metric_fn = tf.keras.metrics.SparseCategoricalAccuracy(
name='{}_acc'.format(self.problem_name))
self.dropout = tf.keras.layers.Dropout(1-params.dropout_keep_prob)
def call(self, inputs, mode):
training = (mode == tf.estimator.ModeKeys.TRAIN)
feature, hidden_feature = inputs
hidden_feature = hidden_feature['pooled']
if mode != tf.estimator.ModeKeys.PREDICT:
labels = feature['{}_label_ids'.format(self.problem_name)]
else:
labels = None
hidden_feature = self.dropout(hidden_feature, training)
logits = self.dense(hidden_feature)
if mode != tf.estimator.ModeKeys.PREDICT:
# labels = tf.squeeze(labels)
# convert labels to one-hot to use label_smoothing
one_hot_labels = tf.one_hot(
labels, depth=self.params.num_classes[self.problem_name])
loss_fn = partial(tf.keras.losses.categorical_crossentropy,
from_logits=True, label_smoothing=self.params.label_smoothing)
loss = empty_tensor_handling_loss(
one_hot_labels, logits,
loss_fn)
loss = nan_loss_handling(loss)
self.add_loss(loss)
acc = self.metric_fn(labels, logits)
self.add_metric(acc)
return tf.nn.softmax(
logits, name='%s_predict' % self.problem_name)
# Cell
class PreTrain(tf.keras.Model):
def __init__(self, params: BaseParams, problem_name: str, input_embeddings: tf.Tensor=None, share_embedding=True):
super(PreTrain, self).__init__(name=problem_name)
self.params = params
self.nsp = transformers.models.bert.modeling_tf_bert.TFBertNSPHead(
self.params.bert_config)
if share_embedding is False:
self.vocab_size = self.params.bert_config.vocab_size
self.share_embedding = False
else:
word_embedding_weight = input_embeddings.word_embeddings
self.vocab_size = word_embedding_weight.shape[0]
embedding_size = word_embedding_weight.shape[-1]
share_valid = (self.params.bert_config.hidden_size ==
embedding_size)
if not share_valid and self.params.share_embedding:
logging.warning(
'Share embedding is enabled but hidden_size != embedding_size')
self.share_embedding = self.params.share_embedding & share_valid
if self.share_embedding:
self.share_embedding_layer = TFSharedEmbeddings(
vocab_size=word_embedding_weight.shape[0], hidden_size=word_embedding_weight.shape[1])
self.share_embedding_layer.build([1])
self.share_embedding_layer.weight = word_embedding_weight
else:
self.share_embedding_layer = tf.keras.layers.Dense(self.vocab_size)
def call(self,
inputs: Tuple[Dict[str, Dict[str, tf.Tensor]], Dict[str, Dict[str, tf.Tensor]]],
mode: str) -> Tuple[tf.Tensor, tf.Tensor]:
features, hidden_features = inputs
# compute logits
nsp_logits = self.nsp(hidden_features['pooled'])
# masking is done inside the model
seq_hidden_feature = hidden_features['seq']
if mode != tf.estimator.ModeKeys.PREDICT:
positions = features['masked_lm_positions']
# gather_indexes will flatten the seq hidden_states, we need to reshape
# back to 3d tensor
input_tensor = gather_indexes(seq_hidden_feature, positions)
shape_tensor = tf.shape(positions)
shape_list = tf.concat(
[shape_tensor, [seq_hidden_feature.shape.as_list()[-1]]], axis=0)
input_tensor = tf.reshape(input_tensor, shape=shape_list)
# set_shape to determin rank
input_tensor.set_shape(
[None, None, seq_hidden_feature.shape.as_list()[-1]])
else:
input_tensor = seq_hidden_feature
if self.share_embedding:
mlm_logits = self.share_embedding_layer(
input_tensor, mode='linear')
else:
mlm_logits = self.share_embedding_layer(input_tensor)
if mode != tf.estimator.ModeKeys.PREDICT:
nsp_labels = features['next_sentence_label_ids']
mlm_labels = features['masked_lm_ids']
mlm_labels.set_shape([None, None])
# compute loss
nsp_loss = empty_tensor_handling_loss(
nsp_labels, nsp_logits,
tf.keras.losses.sparse_categorical_crossentropy)
mlm_loss_layer = transformers.modeling_tf_utils.TFMaskedLanguageModelingLoss()
# mlm_loss = tf.reduce_mean(
# mlm_loss_layer.compute_loss(mlm_labels, mlm_logits))
# add a useless from_logits argument to match the function signature of keras losses.
def loss_fn_wrapper(labels, logits, from_logits=True):
return mlm_loss_layer.compute_loss(labels, logits)
mlm_loss = empty_tensor_handling_loss(
mlm_labels,
mlm_logits,
loss_fn_wrapper
)
loss = nsp_loss + mlm_loss
self.add_loss(loss)
return (tf.sigmoid(nsp_logits), tf.nn.softmax(mlm_logits))
# Cell
class Seq2Seq(tf.keras.Model):
def __init__(self, params: BaseParams, problem_name: str, input_embeddings: tf.keras.layers.Layer):
super(Seq2Seq, self).__init__(name=problem_name)
# self.params = params
# self.problem_name = problem_name
# # if self.params.init_weight_from_huggingface:
# # self.decoder = load_transformer_model(
# # self.params.transformer_decoder_model_name,
# # self.params.transformer_decoder_model_loading)
# # else:
# # self.decoder = load_transformer_model(
# # self.params.bert_decoder_config, self.params.transformer_decoder_model_loading)
# # TODO: better implementation
# logging.warning(
# 'Seq2Seq model is not well supported yet. Bugs are expected.')
# config = self.params.bert_decoder_config
# # some hacky approach to share embeddings from encoder to decoder
# word_embedding_weight = input_embeddings.word_embeddings
# self.vocab_size = word_embedding_weight.shape[0]
# self.share_embedding_layer = TFSharedEmbeddings(
# vocab_size=word_embedding_weight.shape[0], hidden_size=word_embedding_weight.shape[1])
# self.share_embedding_layer.build([1])
# self.share_embedding_layer.weight = word_embedding_weight
# # self.decoder = TFBartDecoder(
# # config=config, embed_tokens=self.share_embedding_layer)
# self.decoder = TFBartDecoderForConditionalGeneration(
# config=config, embedding_layer=self.share_embedding_layer)
# self.decoder.set_bos_id(self.params.bos_id)
# self.decoder.set_eos_id(self.params.eos_id)
# self.metric_fn = tf.keras.metrics.SparseCategoricalAccuracy(
# name='{}_acc'.format(self.problem_name))
raise NotImplementedError
def _seq2seq_label_shift_right(self, labels: tf.Tensor, eos_id: int) -> tf.Tensor:
batch_eos_ids = tf.fill([tf.shape(labels)[0], 1], eos_id)
batch_eos_ids = tf.cast(batch_eos_ids, dtype=tf.int64)
decoder_lable = labels[:, 1:]
decoder_lable = tf.concat([decoder_lable, batch_eos_ids], axis=1)
return decoder_lable
def call(self,
inputs: Tuple[Dict[str, Dict[str, tf.Tensor]], Dict[str, Dict[str, tf.Tensor]]],
mode: str):
features, hidden_features = inputs
encoder_mask = features['model_input_mask']
if mode == tf.estimator.ModeKeys.PREDICT:
input_ids = None
decoder_padding_mask = None
else:
input_ids = features['%s_label_ids' % self.problem_name]
decoder_padding_mask = features['{}_mask'.format(
self.problem_name)]
if mode == tf.estimator.ModeKeys.PREDICT:
return self.decoder.generate(eos_token_id=self.params.eos_id, encoder_hidden_states=hidden_features['seq'])
else:
decoder_output = self.decoder(input_ids=input_ids,
encoder_hidden_states=hidden_features['seq'],
encoder_padding_mask=encoder_mask,
decoder_padding_mask=decoder_padding_mask,
decode_max_length=self.params.decode_max_seq_len,
mode=mode)
loss = decoder_output.loss
logits = decoder_output.logits
self.add_loss(loss)
decoder_label = self._seq2seq_label_shift_right(
features['%s_label_ids' % self.problem_name], eos_id=self.params.eos_id)
acc = self.metric_fn(decoder_label, logits)
self.add_metric(acc)
return logits
# Cell
class MultiLabelClassification(tf.keras.Model):
def __init__(self, params: BaseParams, problem_name: str) -> None:
super(MultiLabelClassification, self).__init__(name=problem_name)
self.params = params
self.problem_name = problem_name
self.dense = tf.keras.layers.Dense(
self.params.num_classes[problem_name])
self.dropout = tf.keras.layers.Dropout(
1-self.params.dropout_keep_prob
)
# self.metric_fn = tfa.metrics.F1Score(
# num_classes=self.params.num_classes[problem_name],
# threshold=self.params.multi_cls_threshold,
# average='macro',
# name='{}_f1'.format(problem_name))
def call(self, inputs, mode):
training = (mode == tf.estimator.ModeKeys.TRAIN)
feature, hidden_feature = inputs
hidden_feature = hidden_feature['pooled']
if mode != tf.estimator.ModeKeys.PREDICT:
labels = feature['{}_label_ids'.format(self.problem_name)]
else:
labels = None
hidden_feature = self.dropout(hidden_feature, training)
logits = self.dense(hidden_feature)
if mode != tf.estimator.ModeKeys.PREDICT:
labels = tf.cast(labels, tf.float32)
# use weighted loss
label_weights = self.params.multi_cls_positive_weight
def _loss_fn_wrapper(x, y, from_logits=True):
return tf.nn.weighted_cross_entropy_with_logits(x, y, pos_weight=label_weights, name='{}_loss'.format(self.problem_name))
loss = empty_tensor_handling_loss(
labels, logits, _loss_fn_wrapper)
loss = nan_loss_handling(loss)
self.add_loss(loss)
# labels = create_dummy_if_empty(labels)
# logits = create_dummy_if_empty(logits)
# f1 = self.metric_fn(labels, logits)
# self.add_metric(f1)
return tf.nn.sigmoid(
logits, name='%s_predict' % self.problem_name)
# Cell
class MaskLM(tf.keras.Model):
"""Multimodal MLM top layer.
"""
def __init__(self, params: BaseParams, problem_name: str, input_embeddings: tf.keras.layers.Layer=None, share_embedding=True) -> None:
super(MaskLM, self).__init__(name=problem_name)
self.params = params
self.problem_name = problem_name
if share_embedding is False:
self.vocab_size = self.params.bert_config.vocab_size
self.share_embedding = False
else:
word_embedding_weight = input_embeddings.word_embeddings
self.vocab_size = word_embedding_weight.shape[0]
embedding_size = word_embedding_weight.shape[-1]
share_valid = (self.params.bert_config.hidden_size ==
embedding_size)
if not share_valid and self.params.share_embedding:
logging.warning(
'Share embedding is enabled but hidden_size != embedding_size')
self.share_embedding = self.params.share_embedding & share_valid
if self.share_embedding:
self.share_embedding_layer = TFSharedEmbeddings(
vocab_size=self.vocab_size, hidden_size=word_embedding_weight.shape[1])
self.share_embedding_layer.build([1])
self.share_embedding_layer.weight = word_embedding_weight
else:
self.share_embedding_layer = tf.keras.layers.Dense(self.vocab_size)
def call(self, inputs, mode):
features, hidden_features = inputs
# masking is done inside the model
seq_hidden_feature = hidden_features['seq']
if mode != tf.estimator.ModeKeys.PREDICT:
positions = features['masked_lm_positions']
# gather_indexes will flatten the seq hidden_states, we need to reshape
# back to 3d tensor
input_tensor = gather_indexes(seq_hidden_feature, positions)
shape_tensor = tf.shape(positions)
shape_list = tf.concat([shape_tensor, [seq_hidden_feature.shape.as_list()[-1]]], axis=0)
input_tensor = tf.reshape(input_tensor, shape=shape_list)
# set_shape to determin rank
input_tensor.set_shape(
[None, None, seq_hidden_feature.shape.as_list()[-1]])
else:
input_tensor = seq_hidden_feature
if self.share_embedding:
mlm_logits = self.share_embedding_layer(
input_tensor, mode='linear')
else:
mlm_logits = self.share_embedding_layer(input_tensor)
if mode != tf.estimator.ModeKeys.PREDICT:
mlm_labels = features['masked_lm_ids']
mlm_labels.set_shape([None, None])
# compute loss
mlm_loss = empty_tensor_handling_loss(
mlm_labels,
mlm_logits,
tf.keras.losses.sparse_categorical_crossentropy
)
loss = nan_loss_handling(mlm_loss)
self.add_loss(loss)
return tf.nn.softmax(mlm_logits)
| 42.095137 | 138 | 0.638491 | 18,177 | 0.912912 | 0 | 0 | 951 | 0.047763 | 0 | 0 | 3,444 | 0.17297 |
d3cc0271bb0d934fe7034974b1385e41735a694e | 447 | py | Python | strings/#387/strings.py | sharmarkei/DSA-Practice | c98e9f5ae1824d86f02d1002d908dc24c8be8812 | [
"MIT"
]
| null | null | null | strings/#387/strings.py | sharmarkei/DSA-Practice | c98e9f5ae1824d86f02d1002d908dc24c8be8812 | [
"MIT"
]
| null | null | null | strings/#387/strings.py | sharmarkei/DSA-Practice | c98e9f5ae1824d86f02d1002d908dc24c8be8812 | [
"MIT"
]
| null | null | null | class Solution(object):
def firstUniqChar(self, s):
"""
:type s: str
:rtype: int
"""
dict_1 = {}
for i in s:
if i not in dict_1:
dict_1[i] = 1
else:
dict_1[i] += 1
print(dict_1)
for idx, val in enumerate(s):
if dict_1[val] == 1:
return idx
return -1
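# Illustrative usage (added): firstUniqChar returns the index of the first
# non-repeating character, or -1 if there is none. Helper name is made up.
def _first_uniq_char_demo():
    sol = Solution()
    assert sol.firstUniqChar("leetcode") == 0
    assert sol.firstUniqChar("loveleetcode") == 2
    assert sol.firstUniqChar("aabb") == -1
    return sol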
| 20.318182 | 37 | 0.364653 | 438 | 0.979866 | 0 | 0 | 0 | 0 | 0 | 0 | 56 | 0.12528 |
d3cc0f069903b9e861ac782e53bdcec6efa743dd | 3,332 | py | Python | challenge/utils/cancellation_code.py | AlonViz/IML.HUJI | 107f7c20b8bd64d41452e4a5b66abe843af7eb18 | [
"MIT"
]
| null | null | null | challenge/utils/cancellation_code.py | AlonViz/IML.HUJI | 107f7c20b8bd64d41452e4a5b66abe843af7eb18 | [
"MIT"
]
| null | null | null | challenge/utils/cancellation_code.py | AlonViz/IML.HUJI | 107f7c20b8bd64d41452e4a5b66abe843af7eb18 | [
"MIT"
]
| null | null | null | import re
def process_cancellation_code(code):
regex_days_before = "^(([0-9]+)D)(([0-9]+)N|([0-9]+)P)"
regex_no_show = "(([0-9]+)P|([0-9]+)N)"
options = re.split("_", code)
final = []
for option in options:
days_match = re.match(regex_days_before, option)
if days_match:
days_before = None if days_match.group(2) is None else int(days_match.group(2))
nights_to_pay = None if days_match.group(4) is None else int(days_match.group(4))
percentage = None if days_match.group(5) is None else int(days_match.group(5))
final.append([days_before, nights_to_pay, percentage])
continue
no_show_match = re.match(regex_no_show, option)
if no_show_match:
nights_to_pay = None if no_show_match.group(3) is None else int(no_show_match.group(3))
percentage = None if no_show_match.group(2) is None else int(no_show_match.group(2))
final.append([0, nights_to_pay, percentage])
return final
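# Illustrative example (added): the policy string below is made up. Options are
# separated by "_"; each parses to [days_before, nights_to_pay, percentage], and a
# bare "2N"/"100P" option is treated as a no-show fee (days_before = 0).
def _process_cancellation_code_demo():
    parsed = process_cancellation_code("30D100P_2N")
    # -> [[30, None, 100], [0, 2, None]]
    return parsed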
def evaluate_cancellation_code(cancellation_code: str, booking_time_before: int, stay_duration: int) -> float:
"""
gives a numerical value to given cancellation code, return expected fine in percentage
:return:
"""
cancellations = process_cancellation_code(cancellation_code)
p = min(7, booking_time_before)
chosen_p = min([lst for lst in cancellations if lst[0] > p], key=lambda tup: tup[0], default=[None, None, None])
expected_fine = 0 if chosen_p[0] is None else chosen_p[2] if chosen_p[1] is None else chosen_p[1] / stay_duration
return expected_fine
def filter(cancellation_code: str, booking_time_before: int, stay_duration: int) -> list:
    cancellations = process_cancellation_code(cancellation_code)
    filtered = [i for i in cancellations if i[0] < booking_time_before]
    prec_only = []
    for i in filtered:
        if i[2] is not None:
            prec_only.append([i[0], i[2]])
        else:
            prec_only.append([i[0], i[1] / stay_duration])
    # return the per-option [days_before, fine] pairs instead of dropping them
    return prec_only
def no_show(cancellation_code: str) -> int:
"""
returns 1 if the cancellation code contains a no-show fee, and 0 otherwise
"""
cancellations = process_cancellation_code(cancellation_code)
return any(lst for lst in cancellations if lst[0] == 0)
def fine_after_x_days(cancellation_code: str, booking_time_before: int, stay_duration: int, days: int):
"""
returns the expected fine in percentages after 'days' days from reservation.
"""
time_before_reservation = booking_time_before - days
if time_before_reservation < 0:
return 0
cancellations = process_cancellation_code(cancellation_code)
# convert cancellation policy to format (Days, Percentage)
percentage_cancellations = []
for cancel in cancellations:
if cancel[1] is None:
percentage_cancellations.append((cancel[0], cancel[2]))
else:
percentage_cancellations.append((cancel[0], cancel[1] / stay_duration))
if not percentage_cancellations:
return 0
# return the fine associated with the smallest number of days larger than time_before_reservation
fines = [x for x in percentage_cancellations if x[0] > time_before_reservation]
if not fines:
return 0
return min(fines, key=lambda x: x[0])[1]
| 40.634146 | 117 | 0.680972 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 513 | 0.153962 |
d3cd8f9eafbfda626f2013905a1df1f02a7ae23e | 1,163 | py | Python | acronym/scoring.py | sigma67/acronym | b197d12aa843fbf0e74efb67361f74b8157cc3e1 | [
"MIT"
]
| 340 | 2018-03-30T21:00:54.000Z | 2022-03-25T20:05:45.000Z | acronym/scoring.py | sigma67/acronym | b197d12aa843fbf0e74efb67361f74b8157cc3e1 | [
"MIT"
]
| 12 | 2018-03-30T15:48:05.000Z | 2020-07-16T08:27:02.000Z | acronym/scoring.py | sigma67/acronym | b197d12aa843fbf0e74efb67361f74b8157cc3e1 | [
"MIT"
]
| 29 | 2018-03-30T16:55:34.000Z | 2022-02-25T03:20:26.000Z | import re
regex = re.compile('[^a-zA-Z]')
def score_word(word, corpus=None):
word = regex.sub('', word) # leave only alpha
score = 0
consec_bonus = 2
for i, letter in enumerate(word):
if letter.islower():
continue
        if i > 0 and word[i-1].isupper():  # bonus only when following a capital letter
score += consec_bonus
if i == 0:
score += 10
elif (i == 1) or (i == len(word)-1):
score += 3
else:
score += 1
if (i >= 1) and (corpus is not None) and (word[i:].lower() in corpus):
score += len(word[i:])-1
return score
def score_acronym(capitalized_acronym, corpus=None):
"""
For each capitalized letter in the acronym:
* 10 points if first letter in a word (with exception of first letter)
* 3 point if second or last letter in a word
* 1 point otherwise
* N bonus points if begins an N-length valid sub-word
(ex: multiVariable -> 8 bonus points)
* 2 bonus points if immediately following a capitalizd letter
"""
return sum([score_word(word, corpus=corpus) for word in capitalized_acronym.split(' ')]) - 10
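# Illustrative worked example (added): each word is scored independently and the
# results are summed, minus a flat 10. With no corpus, "Foo Bar" scores
# 10 (capital F starting a word) + 10 (capital B) - 10 = 10. Helper name is made up.
def _score_acronym_demo():
    assert score_acronym("Foo Bar") == 10
    return score_acronym("Foo Bar")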
| 31.432432 | 97 | 0.575236 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 430 | 0.369733 |
d3ce35364812f96b726436b7cd0cab140d019f97 | 956 | py | Python | e2e_test.py | bartossh/hebbian_mirror | 2d080ae7a707845e0922894e5cee2ad7b0119e8f | [
"MIT"
]
| 2 | 2019-11-15T09:10:19.000Z | 2019-12-26T15:05:16.000Z | e2e_test.py | bartOssh/hebbian_mirror | 2d080ae7a707845e0922894e5cee2ad7b0119e8f | [
"MIT"
]
| 1 | 2019-11-07T11:06:09.000Z | 2019-11-07T11:06:09.000Z | e2e_test.py | bartOssh/hebbian_mirror | 2d080ae7a707845e0922894e5cee2ad7b0119e8f | [
"MIT"
]
| null | null | null | import requests
num_of_iter = 2
data = open('./assets/test.jpg', 'rb').read()
for i in range(0, num_of_iter):
res = requests.get(
url='http://0.0.0.0:8000/recognition/object/boxes_names'
)
print("\n RESPONSE GET boxes names for test number {}: \n {}"
.format(i, res.__dict__))
res = requests.post(url='http://0.0.0.0:8000/recognition/object/boxes',
data=data,
headers={'Content-Type': 'application/octet-stream'})
print("\n RESPONSE POST to boxes, test num {} \n Sending buffer length: {},\n Received {}"
.format(i, len(data), res.__dict__))
res = requests.post(url='http://0.0.0.0:8000/recognition/object/image',
data=data,
headers={'Content-Type': 'application/octet-stream'})
print("\n RESPONSE POST to image, test num {} \n Sending buffer length: {},\n Received {}"
.format(i, len(data), res))
| 43.454545 | 94 | 0.58159 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 470 | 0.491632 |
d3ce5432cde433f90fde37eb3f5e56f8a23b111c | 7,698 | py | Python | appendix/auc_accuracy/train_nn_metric.py | rit-git/tagging | b075ce1553492be7088026b67f525a529bf03770 | [
"Apache-2.0"
]
| 7 | 2020-11-21T03:45:34.000Z | 2022-03-25T00:40:20.000Z | appendix/auc_accuracy/train_nn_metric.py | rit-git/tagging | b075ce1553492be7088026b67f525a529bf03770 | [
"Apache-2.0"
]
| null | null | null | appendix/auc_accuracy/train_nn_metric.py | rit-git/tagging | b075ce1553492be7088026b67f525a529bf03770 | [
"Apache-2.0"
]
| 5 | 2020-09-21T15:07:21.000Z | 2021-06-02T20:25:36.000Z | import argparse
import os
import torch
import torch.nn as nn
from torchtext.data import TabularDataset, BucketIterator
from torchtext.data import Field
from torchtext.vocab import Vectors, GloVe
from tqdm import tqdm, trange
import sys
import os
sys.path.insert(0, "../../pyfunctor")
sys.path.insert(0, "../../model")
from cnn import CNNModel
from lstm import LSTMModel
from bilstm import BILSTMModel
from sklearn import metrics
import csv_handler as csv_handler
import transform as transform
import time
#from util.weight import WeightClassCSV
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def load_data(path, batch_size, max_seq_length, glove="840B", emb_size=300):
TEXT = Field(sequential=True, fix_length=max_seq_length, lower=True)
LABEL = Field(sequential=False, use_vocab=False)
ID = Field(sequential=False, use_vocab=False)
data_fields = [("id", ID),
("sent", TEXT),
("label", LABEL)]
train_path = os.path.join(path, "train.csv")
train = TabularDataset(path=train_path, format="csv", skip_header=False,
fields=data_fields)
test_path = os.path.join(path, "dev.csv")
test = TabularDataset(path=test_path, format="csv", skip_header=False,
fields=data_fields)
TEXT.build_vocab(train, vectors=GloVe(name=glove, dim=emb_size))
LABEL.build_vocab(train)
vocab_size = len(TEXT.vocab)
vocab_weights = TEXT.vocab.vectors
train_iter = BucketIterator(dataset=train, batch_size=batch_size,
sort_key=lambda x: x.id, shuffle=True, repeat=False)
test_iter = BucketIterator(dataset=test, batch_size=batch_size,
sort_key=lambda x: x.id, shuffle=False, repeat=False)
return train_iter, test_iter, vocab_size, vocab_weights
def F1(predicts, golds):
true_predict = 0
true = 0
predict = 0
for i in range(len(predicts)):
if predicts[i] == 1:
predict += 1
if golds[i] == 1:
true += 1
if predicts[i] == 1 and golds[i] == 1:
true_predict += 1
precision = (true_predict+0.0)/(predict+0.0) if predict>0 else 0
recall = (true_predict+0.0)/(true+0.0) if true>0 else 0
f1 = (2*precision*recall)/(precision+recall) if predict>0 and true>0 else 0
return precision, recall, f1
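# Illustrative worked example (added, not called by the script): F1() expects parallel
# 0/1 lists. With predictions [1, 0, 1] against golds [1, 1, 0] there is one true
# positive out of two predicted and two gold positives, so P = R = F1 = 0.5.
def _f1_demo():
    return F1([1, 0, 1], [1, 1, 0])  # -> (0.5, 0.5, 0.5)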
if __name__ == "__main__":
start_time = time.time()
parser = argparse.ArgumentParser()
parser.add_argument("--dataset",
default=None,
type=str,
required=True,
help="Dataset folder")
parser.add_argument("--model",
default=None,
type=str,
required=True,
help="Model type: CNN, LSTM or BILSTM")
parser.add_argument("--glove",
default="840B",
type=str,
help="Golve version (6B, 42B, 840B)")
parser.add_argument("--emb_size",
default=300,
type=int,
help="Golve embedding size (100, 200, 300)")
parser.add_argument("--max_seq_length",
default=256,
type=int,
help="Maximum sequence length")
parser.add_argument("--num_epoch",
default=9,
type=int,
help="Number of training epoch")
parser.add_argument("--batch_size",
default=32,
type=int,
help="Batch size")
parser.add_argument("--lr",
default=1e-4,
type=float,
help="Learning rate")
parser.add_argument("--fix_emb",
default=False,
type=bool,
help="Fix embedding layer")
parser.add_argument("--log_file",
default=False,
type=str,
required=True,
help="log file path")
args = parser.parse_args()
# Load data
print("Loading data ...")
train_iter, test_iter, vocab_size, vocab_weights = load_data(args.dataset,
args.batch_size, args.max_seq_length, glove=args.glove, emb_size=args.emb_size)
# Initialize model
assert args.model in ["CNN", "LSTM", "BILSTM"], "Only support CNN, LSTM or BILSTM."
if args.model == "CNN":
model = CNNModel(vocab_size, args.emb_size, args.max_seq_length,
weights=vocab_weights, fix_emb_weight=args.fix_emb)
elif args.model == "LSTM":
model = LSTMModel(vocab_size, args.emb_size, args.max_seq_length,
weights=vocab_weights, fix_emb_weight=args.fix_emb)
else:
model = BILSTMModel(vocab_size, args.emb_size, args.max_seq_length,
weights=vocab_weights, fix_emb_weight=args.fix_emb)
model = model.to(device)
# Train
print("Training %s ..." % args.model)
params = filter(lambda p: p.requires_grad, model.parameters())
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
loss_func = nn.CrossEntropyLoss()
#label_weight = WeightClassCSV(args.dataset + "/train.csv").get_weights(['0', '1'])
#loss_func = nn.CrossEntropyLoss(weight = torch.tensor(label_weight).to(device))
model.train()
for epoch in trange(args.num_epoch, desc="Epoch"):
total_loss = 0
for idx, batch in enumerate(tqdm(train_iter, desc="Iteration")):
inputs, labels = batch.sent, batch.label
inputs = inputs.to(device)
labels = labels.to(device)
logits = model(inputs)
loss = loss_func(logits, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
total_loss += loss.data.item()
print("\tEpoch %d, total loss: %f" % (epoch, total_loss))
train_finish_time = time.time()
train_overall_time = train_finish_time - start_time
# Evaluate
print("Evaluating ...")
model.eval()
predicts = []
golds = []
predicted_proba = []
with torch.no_grad():
for idx, batch in enumerate(tqdm(test_iter, desc="Iteration")):
inputs, labels = batch.sent, batch.label
inputs = inputs.to(device)
logits = model(inputs)
predicted_proba += list(logits.data.cpu().numpy())
predict = torch.argmax(logits, dim=1).data.cpu().numpy()
predicts += list(predict)
golds += list(labels.data.cpu().numpy())
precision, recall, f1 = F1(predicts, golds)
print("Precision: %f, Recall: %f, F1: %f" % (precision, recall, f1))
train_time = train_overall_time
test_time = time.time() - train_finish_time
print(metrics.classification_report(golds, predicts))
(precision, recall, fscore, support) = metrics.precision_recall_fscore_support(golds, predicts)
log_row = []
log_row.append(args.dataset)
log_row.append(precision[1])
log_row.append(recall[1])
log_row.append(fscore[1])
log_row.append(train_time)
log_row.append(test_time)
pos_predicted = transform.map_func(predicted_proba, lambda p : p[1])
auc = metrics.roc_auc_score(golds, pos_predicted)
log_row.append(auc)
accuracy = metrics.accuracy_score(golds, predicts)
log_row.append(accuracy)
csv_handler.append_row(args.log_file, log_row)
| 34.990909 | 99 | 0.587685 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 914 | 0.118732 |
d3ce6f7210df816909e214cda327fef650ba334a | 1,566 | py | Python | setup.py | teamproserve/pinkopy | 48842ac26aff90728482f7cac2977f56d5fc579f | [
"MIT"
]
| null | null | null | setup.py | teamproserve/pinkopy | 48842ac26aff90728482f7cac2977f56d5fc579f | [
"MIT"
]
| null | null | null | setup.py | teamproserve/pinkopy | 48842ac26aff90728482f7cac2977f56d5fc579f | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
from setuptools import setup, find_packages
import sys
try:
import pypandoc
readme = pypandoc.convert('README.md', 'rst')
except(IOError, ImportError):
with open('README.md') as f:
readme = f.read()
install_requires = [
'cachetools>=1.1.5',
'requests>=2.7.0',
'xmltodict>=0.9.2',
]
tests_require = [
'pytest',
'requests-mock==0.7.0'
]
setup(
name='pinkopy',
version='2.1.3-dev',
description='Python wrapper for Commvault api',
long_description=readme,
author='Herkermer Sherwood',
author_email='[email protected]',
url='https://github.com/teamproserve/pinkopy',
download_url='https://github.com/teamproserve/pinkopy/archive/2.1.3-dev.zip',
packages=find_packages(),
platforms=['all'],
license='MIT',
install_requires=install_requires,
setup_requires=['pytest-runner'],
tests_require=tests_require,
classifiers=[
'Development Status :: 4 - Beta',
'License :: Other/Proprietary License',
'License :: OSI Approved :: MIT License',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Unix',
'Operating System :: POSIX',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Utilities',
],
)
| 27.964286 | 81 | 0.621328 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 832 | 0.53129 |
d3cf16476f10f947ae96e115903c726cd418feaf | 24,050 | py | Python | scss/extension/core.py | xen0n/pyScss | 86712d21fe7c3abdd7e593973fb35010422f1a41 | [
"MIT"
]
| null | null | null | scss/extension/core.py | xen0n/pyScss | 86712d21fe7c3abdd7e593973fb35010422f1a41 | [
"MIT"
]
| null | null | null | scss/extension/core.py | xen0n/pyScss | 86712d21fe7c3abdd7e593973fb35010422f1a41 | [
"MIT"
]
| null | null | null | """Extension for built-in Sass functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from itertools import product
import math
import os.path
from pathlib import PurePosixPath
from six.moves import xrange
from scss.extension import Extension
from scss.namespace import Namespace
from scss.source import SourceFile
from scss.types import (
Arglist, Boolean, Color, List, Null, Number, String, Map, expect_type)
class CoreExtension(Extension):
name = 'core'
namespace = Namespace()
def handle_import(self, name, compilation, rule):
"""Implementation of the core Sass import mechanism, which just looks
for files on disk.
"""
# TODO virtually all of this is the same as the django stuff, except
# for the bit that actually looks for and tries to open the file.
# would be much easier if you could just stick an object in the search
# path that implements the pathlib API. the only problem is what to do
# when one path is a child of another, so the same file has two names,
# but tbh i'm not actually sure that's something worth protecting
# against...? like, the only cost is that we'll parse twice (or, later
# on, not respect single-import), and the fix is to just Not Do That
# TODO i think with the new origin semantics, i've made it possible to
# import relative to the current file even if the current file isn't
# anywhere in the search path. is that right?
path = PurePosixPath(name)
if path.suffix:
search_exts = [path.suffix]
else:
search_exts = ['.scss', '.sass']
relative_to = path.parent
basename = path.stem
search_path = [] # tuple of (origin, start_from)
if relative_to.is_absolute():
relative_to = PurePosixPath(*relative_to.parts[1:])
elif rule.source_file.origin:
# Search relative to the current file first, only if not doing an
# absolute import
search_path.append(
rule.source_file.origin / rule.source_file.relpath.parent)
search_path.extend(compilation.compiler.search_path)
for prefix, suffix in product(('_', ''), search_exts):
filename = prefix + basename + suffix
for origin in search_path:
relpath = relative_to / filename
# Lexically (ignoring symlinks!) eliminate .. from the part
# of the path that exists within Sass-space. pathlib
# deliberately doesn't do this, but os.path does.
relpath = PurePosixPath(os.path.normpath(str(relpath)))
if rule.source_file.key == (origin, relpath):
# Avoid self-import
# TODO is this what ruby does?
continue
path = origin / relpath
if not path.exists():
continue
# All good!
# TODO if this file has already been imported, we'll do the
# source preparation twice. make it lazy.
return SourceFile.read(origin, relpath)
# Alias to make the below declarations less noisy
ns = CoreExtension.namespace
# ------------------------------------------------------------------------------
# Color creation
def _interpret_percentage(n, relto=1., clamp=True):
expect_type(n, Number, unit='%')
if n.is_unitless:
ret = n.value / relto
else:
ret = n.value / 100
if clamp:
if ret < 0:
return 0
elif ret > 1:
return 1
return ret
@ns.declare
def rgba(r, g, b, a):
r = _interpret_percentage(r, relto=255)
g = _interpret_percentage(g, relto=255)
b = _interpret_percentage(b, relto=255)
a = _interpret_percentage(a, relto=1)
return Color.from_rgb(r, g, b, a)
@ns.declare
def rgb(r, g, b, type='rgb'):
return rgba(r, g, b, Number(1.0))
@ns.declare
def rgba_(color, a=None):
if a is None:
alpha = 1
else:
alpha = _interpret_percentage(a)
return Color.from_rgb(*color.rgba[:3], alpha=alpha)
@ns.declare
def rgb_(color):
return rgba_(color, a=Number(1))
@ns.declare
def hsla(h, s, l, a):
return Color.from_hsl(
h.value / 360 % 1,
# Ruby sass treats plain numbers for saturation and lightness as though
# they were percentages, just without the %
_interpret_percentage(s, relto=100),
_interpret_percentage(l, relto=100),
alpha=a.value,
)
@ns.declare
def hsl(h, s, l):
return hsla(h, s, l, Number(1))
@ns.declare
def hsla_(color, a=None):
return rgba_(color, a)
@ns.declare
def hsl_(color):
return rgba_(color, a=Number(1))
@ns.declare
def mix(color1, color2, weight=Number(50, "%")):
"""
Mixes together two colors. Specifically, takes the average of each of the
RGB components, optionally weighted by the given percentage.
The opacity of the colors is also considered when weighting the components.
    The weight specifies the amount of the first color that should be included
    in the returned color. 50% means that half the first color and half the
    second color should be used; 25% means that a quarter of the first color
    and three quarters of the second color should be used.
For example:
mix(#f00, #00f) => #7f007f
mix(#f00, #00f, 25%) => #3f00bf
mix(rgba(255, 0, 0, 0.5), #00f) => rgba(63, 0, 191, 0.75)
"""
# This algorithm factors in both the user-provided weight
# and the difference between the alpha values of the two colors
# to decide how to perform the weighted average of the two RGB values.
#
# It works by first normalizing both parameters to be within [-1, 1],
# where 1 indicates "only use color1", -1 indicates "only use color 0",
# and all values in between indicated a proportionately weighted average.
#
# Once we have the normalized variables w and a,
# we apply the formula (w + a)/(1 + w*a)
# to get the combined weight (in [-1, 1]) of color1.
# This formula has two especially nice properties:
#
# * When either w or a are -1 or 1, the combined weight is also that
# number (cases where w * a == -1 are undefined, and handled as a
# special case).
#
# * When a is 0, the combined weight is w, and vice versa
#
# Finally, the weight of color1 is renormalized to be within [0, 1]
# and the weight of color2 is given by 1 minus the weight of color1.
#
# Algorithm from the Sass project: http://sass-lang.com/
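    # Illustrative check (not part of the original comment): for
    # mix(#f00, #00f, 25%), p = 0.25 so w = -0.5; with equal alphas a = 0,
    # the combined weight is (w + a)/(1 + w*a) = -0.5, giving w1 = 0.25 and
    # w2 = 0.75, which reproduces the #3f00bf example in the docstring.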
p = _interpret_percentage(weight)
# Scale weight to [-1, 1]
w = p * 2 - 1
# Compute difference in alpha channels
a = color1.alpha - color2.alpha
# Weight of first color
if w * a == -1:
# Avoid zero-div case
scaled_weight1 = w
else:
scaled_weight1 = (w + a) / (1 + w * a)
# Unscale back to [0, 1] and get the weight of the other color
w1 = (scaled_weight1 + 1) / 2
w2 = 1 - w1
# Do the scaling. Note that alpha isn't scaled by alpha, as that wouldn't
# make much sense; it uses the original untwiddled weight, p.
channels = [
ch1 * w1 + ch2 * w2
for (ch1, ch2) in zip(color1.rgba[:3], color2.rgba[:3])]
alpha = color1.alpha * p + color2.alpha * (1 - p)
return Color.from_rgb(*channels, alpha=alpha)
# ------------------------------------------------------------------------------
# Color inspection
@ns.declare
def red(color):
r, g, b, a = color.rgba
return Number(r * 255)
@ns.declare
def green(color):
r, g, b, a = color.rgba
return Number(g * 255)
@ns.declare
def blue(color):
r, g, b, a = color.rgba
return Number(b * 255)
@ns.declare_alias('opacity')
@ns.declare
def alpha(color):
return Number(color.alpha)
@ns.declare
def hue(color):
h, s, l = color.hsl
return Number(h * 360, "deg")
@ns.declare
def saturation(color):
h, s, l = color.hsl
return Number(s * 100, "%")
@ns.declare
def lightness(color):
h, s, l = color.hsl
return Number(l * 100, "%")
@ns.declare
def ie_hex_str(color):
c = Color(color).value
return String("#{3:02X}{0:02X}{1:02X}{2:02X}".format(
int(round(c[0])),
int(round(c[1])),
int(round(c[2])),
int(round(c[3] * 255)),
))
# ------------------------------------------------------------------------------
# Color modification
@ns.declare_alias('fade-in')
@ns.declare_alias('fadein')
@ns.declare
def opacify(color, amount):
r, g, b, a = color.rgba
if amount.is_simple_unit('%'):
amt = amount.value / 100
else:
amt = amount.value
return Color.from_rgb(
r, g, b,
alpha=a + amt)
@ns.declare_alias('fade-out')
@ns.declare_alias('fadeout')
@ns.declare
def transparentize(color, amount):
r, g, b, a = color.rgba
if amount.is_simple_unit('%'):
amt = amount.value / 100
else:
amt = amount.value
return Color.from_rgb(
r, g, b,
alpha=a - amt)
@ns.declare
def lighten(color, amount):
return adjust_color(color, lightness=amount)
@ns.declare
def darken(color, amount):
return adjust_color(color, lightness=-amount)
@ns.declare
def saturate(color, amount):
return adjust_color(color, saturation=amount)
@ns.declare
def desaturate(color, amount):
return adjust_color(color, saturation=-amount)
@ns.declare
def greyscale(color):
h, s, l = color.hsl
return Color.from_hsl(h, 0, l, alpha=color.alpha)
@ns.declare
def grayscale(color):
if isinstance(color, Number):
# grayscale(n) and grayscale(n%) are CSS3 filters and should be left
# intact, but only when using the "a" spelling
return String.unquoted("grayscale(%s)" % (color.render(),))
else:
return greyscale(color)
@ns.declare_alias('spin')
@ns.declare
def adjust_hue(color, degrees):
h, s, l = color.hsl
delta = degrees.value / 360
return Color.from_hsl((h + delta) % 1, s, l, alpha=color.alpha)
@ns.declare
def complement(color):
h, s, l = color.hsl
return Color.from_hsl((h + 0.5) % 1, s, l, alpha=color.alpha)
@ns.declare
def invert(color):
"""Returns the inverse (negative) of a color. The red, green, and blue
values are inverted, while the opacity is left alone.
"""
r, g, b, a = color.rgba
return Color.from_rgb(1 - r, 1 - g, 1 - b, alpha=a)
@ns.declare
def adjust_lightness(color, amount):
return adjust_color(color, lightness=amount)
@ns.declare
def adjust_saturation(color, amount):
return adjust_color(color, saturation=amount)
@ns.declare
def scale_lightness(color, amount):
return scale_color(color, lightness=amount)
@ns.declare
def scale_saturation(color, amount):
return scale_color(color, saturation=amount)
@ns.declare
def adjust_color(
color, red=None, green=None, blue=None,
hue=None, saturation=None, lightness=None, alpha=None):
do_rgb = red or green or blue
do_hsl = hue or saturation or lightness
if do_rgb and do_hsl:
raise ValueError(
"Can't adjust both RGB and HSL channels at the same time")
zero = Number(0)
a = color.alpha + (alpha or zero).value
if do_rgb:
r, g, b = color.rgba[:3]
channels = [
current + (adjustment or zero).value / 255
for (current, adjustment) in zip(color.rgba, (red, green, blue))]
return Color.from_rgb(*channels, alpha=a)
else:
h, s, l = color.hsl
h = (h + (hue or zero).value / 360) % 1
s += _interpret_percentage(saturation or zero, relto=100, clamp=False)
l += _interpret_percentage(lightness or zero, relto=100, clamp=False)
return Color.from_hsl(h, s, l, a)
def _scale_channel(channel, scaleby):
if scaleby is None:
return channel
expect_type(scaleby, Number)
if not scaleby.is_simple_unit('%'):
raise ValueError("Expected percentage, got %r" % (scaleby,))
factor = scaleby.value / 100
if factor > 0:
# Add x% of the remaining range, up to 1
return channel + (1 - channel) * factor
else:
# Subtract x% of the existing channel. We add here because the factor
# is already negative
return channel * (1 + factor)
@ns.declare
def scale_color(
color, red=None, green=None, blue=None,
saturation=None, lightness=None, alpha=None):
do_rgb = red or green or blue
do_hsl = saturation or lightness
if do_rgb and do_hsl:
raise ValueError(
"Can't scale both RGB and HSL channels at the same time")
scaled_alpha = _scale_channel(color.alpha, alpha)
if do_rgb:
channels = [
_scale_channel(channel, scaleby)
for channel, scaleby in zip(color.rgba, (red, green, blue))]
return Color.from_rgb(*channels, alpha=scaled_alpha)
else:
channels = [
_scale_channel(channel, scaleby)
for channel, scaleby
in zip(color.hsl, (None, saturation, lightness))]
return Color.from_hsl(*channels, alpha=scaled_alpha)
@ns.declare
def change_color(
color, red=None, green=None, blue=None,
hue=None, saturation=None, lightness=None, alpha=None):
do_rgb = red or green or blue
do_hsl = hue or saturation or lightness
if do_rgb and do_hsl:
raise ValueError(
"Can't change both RGB and HSL channels at the same time")
if alpha is None:
alpha = color.alpha
else:
alpha = alpha.value
if do_rgb:
channels = list(color.rgba[:3])
if red:
channels[0] = _interpret_percentage(red, relto=255)
if green:
channels[1] = _interpret_percentage(green, relto=255)
if blue:
channels[2] = _interpret_percentage(blue, relto=255)
return Color.from_rgb(*channels, alpha=alpha)
else:
channels = list(color.hsl)
if hue:
expect_type(hue, Number, unit=None)
channels[0] = (hue.value / 360) % 1
# Ruby sass treats plain numbers for saturation and lightness as though
# they were percentages, just without the %
if saturation:
channels[1] = _interpret_percentage(saturation, relto=100)
if lightness:
channels[2] = _interpret_percentage(lightness, relto=100)
return Color.from_hsl(*channels, alpha=alpha)
# ------------------------------------------------------------------------------
# String functions
@ns.declare_alias('e')
@ns.declare_alias('escape')
@ns.declare
def unquote(*args):
arg = List.from_maybe_starargs(args).maybe()
if isinstance(arg, String):
return String(arg.value, quotes=None)
else:
return String(arg.render(), quotes=None)
@ns.declare
def quote(*args):
arg = List.from_maybe_starargs(args).maybe()
if isinstance(arg, String):
return String(arg.value, quotes='"')
else:
return String(arg.render(), quotes='"')
@ns.declare
def str_length(string):
expect_type(string, String)
# nb: can't use `len(string)`, because that gives the Sass list length,
# which is 1
return Number(len(string.value))
# TODO this and several others should probably also require integers
# TODO and assert that the indexes are valid
@ns.declare
def str_insert(string, insert, index):
expect_type(string, String)
expect_type(insert, String)
expect_type(index, Number, unit=None)
py_index = index.to_python_index(len(string.value), check_bounds=False)
return String(
string.value[:py_index] + insert.value + string.value[py_index:],
quotes=string.quotes)
@ns.declare
def str_index(string, substring):
expect_type(string, String)
expect_type(substring, String)
# 1-based indexing, with 0 for failure
return Number(string.value.find(substring.value) + 1)
@ns.declare
def str_slice(string, start_at, end_at=None):
expect_type(string, String)
expect_type(start_at, Number, unit=None)
py_start_at = start_at.to_python_index(len(string.value))
if end_at is None:
py_end_at = None
else:
expect_type(end_at, Number, unit=None)
# Endpoint is inclusive, unlike Python
py_end_at = end_at.to_python_index(len(string.value)) + 1
return String(
string.value[py_start_at:py_end_at],
quotes=string.quotes)
@ns.declare
def to_upper_case(string):
expect_type(string, String)
return String(string.value.upper(), quotes=string.quotes)
@ns.declare
def to_lower_case(string):
expect_type(string, String)
return String(string.value.lower(), quotes=string.quotes)
# ------------------------------------------------------------------------------
# Number functions
@ns.declare
def percentage(value):
expect_type(value, Number, unit=None)
return value * Number(100, unit='%')
ns.set_function('abs', 1, Number.wrap_python_function(abs))
ns.set_function('round', 1, Number.wrap_python_function(round))
ns.set_function('ceil', 1, Number.wrap_python_function(math.ceil))
ns.set_function('floor', 1, Number.wrap_python_function(math.floor))
# ------------------------------------------------------------------------------
# List functions
def __parse_separator(separator, default_from=None):
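    """Map a separator name ('comma', 'space', or 'auto') to the boolean
    use_comma flag. 'auto' inherits the separator of ``default_from`` when it
    has two or more items, and otherwise defaults to comma.
    """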
if separator is None:
separator = 'auto'
separator = String.unquoted(separator).value
if separator == 'comma':
return True
elif separator == 'space':
return False
elif separator == 'auto':
if not default_from:
return True
elif len(default_from) < 2:
return True
else:
return default_from.use_comma
else:
raise ValueError('Separator must be auto, comma, or space')
# TODO get the compass bit outta here
@ns.declare_alias('-compass-list-size')
@ns.declare
def length(*lst):
if len(lst) == 1 and isinstance(lst[0], (list, tuple, List)):
lst = lst[0]
return Number(len(lst))
@ns.declare
def set_nth(list, n, value):
expect_type(n, Number, unit=None)
py_n = n.to_python_index(len(list))
return List(
tuple(list[:py_n]) + (value,) + tuple(list[py_n + 1:]),
use_comma=list.use_comma)
# TODO get the compass bit outta here
@ns.declare_alias('-compass-nth')
@ns.declare
def nth(lst, n):
"""Return the nth item in the list."""
expect_type(n, (String, Number), unit=None)
if isinstance(n, String):
if n.value.lower() == 'first':
i = 0
elif n.value.lower() == 'last':
i = -1
else:
raise ValueError("Invalid index %r" % (n,))
else:
# DEVIATION: nth treats lists as circular lists
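        # e.g. nth((a b c), 4) wraps around and returns a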
i = n.to_python_index(len(lst), circular=True)
return lst[i]
@ns.declare
def join(lst1, lst2, separator=String.unquoted('auto')):
expect_type(separator, String)
ret = []
ret.extend(List.from_maybe(lst1))
ret.extend(List.from_maybe(lst2))
if separator.value == 'comma':
use_comma = True
elif separator.value == 'space':
use_comma = False
elif separator.value == 'auto':
# The Sass docs are slightly misleading here, but the algorithm is: use
# the delimiter from the first list that has at least 2 items, or
# default to spaces.
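        # e.g. joining a one-item comma list with a three-item space list
        # yields a space-separated result.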
if len(lst1) > 1:
use_comma = lst1.use_comma
elif len(lst2) > 1:
use_comma = lst2.use_comma
else:
use_comma = False
else:
raise ValueError("separator for join() must be comma, space, or auto")
return List(ret, use_comma=use_comma)
@ns.declare
def min_(*lst):
if len(lst) == 1 and isinstance(lst[0], (list, tuple, List)):
lst = lst[0]
return min(lst)
@ns.declare
def max_(*lst):
if len(lst) == 1 and isinstance(lst[0], (list, tuple, List)):
lst = lst[0]
return max(lst)
@ns.declare
def append(lst, val, separator=None):
ret = []
ret.extend(List.from_maybe(lst))
ret.append(val)
use_comma = __parse_separator(separator, default_from=lst)
return List(ret, use_comma=use_comma)
@ns.declare
def index(lst, val):
for i in xrange(len(lst)):
if lst.value[i] == val:
return Number(i + 1)
return Boolean(False)
@ns.declare
def zip_(*lists):
return List(
[List(zipped) for zipped in zip(*lists)],
use_comma=True)
# TODO need a way to use "list" as the arg name without shadowing the builtin
@ns.declare
def list_separator(list):
if list.use_comma:
return String.unquoted('comma')
else:
return String.unquoted('space')
# ------------------------------------------------------------------------------
# Map functions
@ns.declare
def map_get(map, key):
return map.to_dict().get(key, Null())
@ns.declare
def map_merge(*maps):
key_order = []
index = {}
for map in maps:
for key, value in map.to_pairs():
if key not in index:
key_order.append(key)
index[key] = value
pairs = [(key, index[key]) for key in key_order]
return Map(pairs, index=index)
@ns.declare
def map_keys(map):
return List(
[k for (k, v) in map.to_pairs()],
use_comma=True)
@ns.declare
def map_values(map):
return List(
[v for (k, v) in map.to_pairs()],
use_comma=True)
@ns.declare
def map_has_key(map, key):
return Boolean(key in map.to_dict())
# DEVIATIONS: these do not exist in ruby sass
@ns.declare
def map_get3(map, key, default):
return map.to_dict().get(key, default)
@ns.declare
def map_get_nested3(map, keys, default=Null()):
for key in keys:
map = map.to_dict().get(key, None)
if map is None:
return default
return map
@ns.declare
def map_merge_deep(*maps):
pairs = []
keys = set()
for map in maps:
for key, value in map.to_pairs():
keys.add(key)
for key in keys:
values = [map.to_dict().get(key, None) for map in maps]
values = [v for v in values if v is not None]
if all(isinstance(v, Map) for v in values):
pairs.append((key, map_merge_deep(*values)))
else:
pairs.append((key, values[-1]))
return Map(pairs)
# ------------------------------------------------------------------------------
# Meta functions
@ns.declare
def type_of(obj): # -> bool, number, string, color, list
return String(obj.sass_type_name)
@ns.declare
def unit(number): # -> px, em, cm, etc.
numer = '*'.join(sorted(number.unit_numer))
denom = '*'.join(sorted(number.unit_denom))
if denom:
ret = numer + '/' + denom
else:
ret = numer
return String.unquoted(ret)
@ns.declare
def unitless(value):
if not isinstance(value, Number):
raise TypeError("Expected number, got %r" % (value,))
return Boolean(value.is_unitless)
@ns.declare
def comparable(number1, number2):
left = number1.to_base_units()
right = number2.to_base_units()
return Boolean(
left.unit_numer == right.unit_numer
and left.unit_denom == right.unit_denom)
@ns.declare
def keywords(value):
"""Extract named arguments, as a map, from an argument list."""
expect_type(value, Arglist)
return value.extract_keywords()
# ------------------------------------------------------------------------------
# Miscellaneous
@ns.declare
def if_(condition, if_true, if_false=Null()):
return if_true if condition else if_false
| 27.052868 | 80 | 0.614012 | 2,774 | 0.115343 | 0 | 0 | 17,612 | 0.732308 | 0 | 0 | 6,551 | 0.272391 |
d3d01a6d5b6d4e91e1847f49e77097d90f67ce9c | 906 | py | Python | pypy/module/cpyext/test/test_iterator.py | wdv4758h/mu-client-pypy | d2fcc01f0b4fe3ffa232762124e3e6d38ed3a0cf | [
"Apache-2.0",
"OpenSSL"
]
| 34 | 2015-07-09T04:53:27.000Z | 2021-07-19T05:22:27.000Z | pypy/module/cpyext/test/test_iterator.py | wdv4758h/mu-client-pypy | d2fcc01f0b4fe3ffa232762124e3e6d38ed3a0cf | [
"Apache-2.0",
"OpenSSL"
]
| 6 | 2015-05-30T17:20:45.000Z | 2017-06-12T14:29:23.000Z | pypy/module/cpyext/test/test_iterator.py | wdv4758h/mu-client-pypy | d2fcc01f0b4fe3ffa232762124e3e6d38ed3a0cf | [
"Apache-2.0",
"OpenSSL"
]
| 11 | 2015-09-07T14:26:08.000Z | 2020-04-10T07:20:41.000Z | from pypy.module.cpyext.test.test_api import BaseApiTest
class TestIterator(BaseApiTest):
def test_check_iter(self, space, api):
assert api.PyIter_Check(space.iter(space.wrap("a")))
assert api.PyIter_Check(space.iter(space.newlist([])))
assert not api.PyIter_Check(space.w_type)
assert not api.PyIter_Check(space.wrap(2))
def test_getIter(self, space, api):
w_iter = api.PyObject_GetIter(space.wrap([1, 2, 3]))
assert space.unwrap(api.PyIter_Next(w_iter)) == 1
assert space.unwrap(api.PyIter_Next(w_iter)) == 2
assert space.unwrap(api.PyIter_Next(w_iter)) == 3
assert api.PyIter_Next(w_iter) is None
assert not api.PyErr_Occurred()
    def test_iternext_error(self, space, api):
assert api.PyIter_Next(space.w_None) is None
assert api.PyErr_Occurred() is space.w_TypeError
api.PyErr_Clear()
| 39.391304 | 62 | 0.684327 | 846 | 0.933775 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.003311 |
d3d143abd1287d1ebf9fec072e925b1f0bce15d1 | 21,407 | py | Python | capsule_em/experiment.py | jrmendeshurb/google-research | f9fa8cdd2fb77975b524371fd29df008b9dc6cf4 | [
"Apache-2.0"
]
| 6 | 2019-12-16T04:23:57.000Z | 2021-12-09T14:17:14.000Z | capsule_em/experiment.py | jrmendeshurb/google-research | f9fa8cdd2fb77975b524371fd29df008b9dc6cf4 | [
"Apache-2.0"
]
| 13 | 2020-01-28T22:19:53.000Z | 2022-02-10T00:39:26.000Z | capsule_em/experiment.py | ZachT1711/google-research | 662e6837a3efa0c40b11cb4122447c4b028d2115 | [
"Apache-2.0"
]
| 1 | 2020-03-05T09:24:01.000Z | 2020-03-05T09:24:01.000Z | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The runners."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import time
import numpy as np
from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf
from capsule_em import model as f_model
from capsule_em.mnist import mnist_record
from capsule_em.norb import norb_record
from tensorflow.contrib import tfprof as contrib_tfprof
from tensorflow.python import debug as tf_debug
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('num_prime_capsules', 32,
'Number of first layer capsules.')
tf.app.flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate')
tf.app.flags.DEFINE_integer('routing_iteration', 3,
'Number of iterations for softmax routing')
tf.app.flags.DEFINE_float(
'routing_rate', 1,
'ratio for combining routing logits and routing feedback')
tf.app.flags.DEFINE_float('decay_rate', 0.96, 'ratio for learning rate decay')
tf.app.flags.DEFINE_integer('decay_steps', 20000,
'number of steps for learning rate decay')
tf.app.flags.DEFINE_bool('normalize_kernels', False,
'Normalize the capsule weight kernels')
tf.app.flags.DEFINE_integer('num_second_atoms', 16,
'number of capsule atoms for the second layer')
tf.app.flags.DEFINE_integer('num_primary_atoms', 16,
'number of capsule atoms for the first layer')
tf.app.flags.DEFINE_integer('num_start_conv', 32,
'number of channels for the start layer')
tf.app.flags.DEFINE_integer('kernel_size', 5,
'kernel size for the start layer.')
tf.app.flags.DEFINE_integer(
'routing_iteration_prime', 1,
'number of routing iterations for primary capsules.')
tf.app.flags.DEFINE_integer('max_steps', 2000000,
'Number of steps to run trainer.')
tf.app.flags.DEFINE_string('data_dir', '/datasets/mnist/',
'Directory for storing input data')
tf.app.flags.DEFINE_string('summary_dir',
'/tmp/tensorflow/mnist/logs/mnist_with_summaries',
'Summaries log directory')
tf.app.flags.DEFINE_bool('train', True, 'train or test.')
tf.app.flags.DEFINE_integer(
'checkpoint_steps', 1500,
'number of steps before saving a training checkpoint.')
tf.app.flags.DEFINE_bool('verbose_image', False, 'whether to show images.')
tf.app.flags.DEFINE_bool('multi', True,
'whether to use multiple digit dataset.')
tf.app.flags.DEFINE_bool('eval_once', False,
'whether to evaluate once on the ckpnt file.')
tf.app.flags.DEFINE_integer('eval_size', 24300,
'number of examples to evaluate.')
tf.app.flags.DEFINE_string(
'ckpnt',
'/tmp/tensorflow/mnist/logs/mnist_with_summaries/train/model.ckpnt',
'The checkpoint to load and evaluate once.')
tf.app.flags.DEFINE_integer('keep_ckpt', 5, 'number of examples to evaluate.')
tf.app.flags.DEFINE_bool(
'clip_lr', False, 'whether to clip learning rate to not go bellow 1e-5.')
tf.app.flags.DEFINE_integer('stride_1', 2,
'stride for the first convolutinal layer.')
tf.app.flags.DEFINE_integer('kernel_2', 9,
'kernel size for the secon convolutinal layer.')
tf.app.flags.DEFINE_integer('stride_2', 2,
'stride for the second convolutinal layer.')
tf.app.flags.DEFINE_string('padding', 'VALID',
'the padding method for conv layers.')
tf.app.flags.DEFINE_integer('extra_caps', 2, 'number of extra conv capsules.')
tf.app.flags.DEFINE_string('caps_dims', '32,32',
'output dim for extra conv capsules.')
tf.app.flags.DEFINE_string('caps_strides', '2,1',
'stride for extra conv capsules.')
tf.app.flags.DEFINE_string('caps_kernels', '3,3',
'kernel size for extra conv capsuls.')
tf.app.flags.DEFINE_integer('extra_conv', 0, 'number of extra conv layers.')
tf.app.flags.DEFINE_string('conv_dims', '', 'output dim for extra conv layers.')
tf.app.flags.DEFINE_string('conv_strides', '', 'stride for extra conv layers.')
tf.app.flags.DEFINE_string('conv_kernels', '',
'kernel size for extra conv layers.')
tf.app.flags.DEFINE_bool('leaky', False, 'Use leaky routing.')
tf.app.flags.DEFINE_bool('staircase', False, 'Use staircase decay.')
tf.app.flags.DEFINE_integer('num_gpus', 1, 'number of gpus to train.')
tf.app.flags.DEFINE_bool('adam', True, 'Use Adam optimizer.')
tf.app.flags.DEFINE_bool('pooling', False, 'Pooling after convolution.')
tf.app.flags.DEFINE_bool('use_caps', True, 'Use capsule layers.')
tf.app.flags.DEFINE_integer(
'extra_fc', 512, 'number of units in the extra fc layer in no caps mode.')
tf.app.flags.DEFINE_bool('dropout', False, 'Dropout before last layer.')
tf.app.flags.DEFINE_bool('tweak', False, 'During eval recons from tweaked rep.')
tf.app.flags.DEFINE_bool('softmax', False, 'softmax loss in no caps.')
tf.app.flags.DEFINE_bool('c_dropout', False, 'dropout after conv capsules.')
tf.app.flags.DEFINE_bool(
'distort', True,
'distort mnist images by cropping to 24 * 24 and rotating by 15 degrees.')
tf.app.flags.DEFINE_bool('restart', False, 'Clean train checkpoints.')
tf.app.flags.DEFINE_bool('use_em', True,
'If set use em capsules with em routing.')
tf.app.flags.DEFINE_float('final_beta', 0.01, 'Temperature at the sigmoid.')
tf.app.flags.DEFINE_bool('eval_ensemble', False, 'eval over aggregated logits.')
tf.app.flags.DEFINE_string('part1', 'ok', 'ok')
tf.app.flags.DEFINE_string('part2', 'ok', 'ok')
tf.app.flags.DEFINE_bool('debug', False, 'If set use tfdbg wrapper.')
tf.app.flags.DEFINE_bool('reduce_mean', False,
'If set normalize mean of each image.')
tf.app.flags.DEFINE_float('loss_rate', 1.0,
'classification to regularization rate.')
tf.app.flags.DEFINE_integer('batch_size', 64, 'Batch size.')
tf.app.flags.DEFINE_integer('norb_pixel', 48, 'Batch size.')
tf.app.flags.DEFINE_bool('patching', True, 'If set use patching for eval.')
tf.app.flags.DEFINE_string('data_set', 'norb', 'the data set to use.')
tf.app.flags.DEFINE_string('cifar_data_dir', '/tmp/cifar10_data',
"""Path to the CIFAR-10 data directory.""")
tf.app.flags.DEFINE_string('norb_data_dir', '/tmp/smallNORB/',
"""Path to the norb data directory.""")
tf.app.flags.DEFINE_string('affnist_data_dir', '/tmp/affnist_data',
"""Path to the affnist data directory.""")
num_classes = {
'mnist': 10,
'cifar10': 10,
'mnist_multi': 10,
'svhn': 10,
'affnist': 10,
'expanded_mnist': 10,
'norb': 5,
}
def get_features(train, total_batch):
"""Return batched inputs."""
print(FLAGS.data_set)
batch_size = total_batch // max(1, FLAGS.num_gpus)
split = 'train' if train else 'test'
features = []
for i in xrange(FLAGS.num_gpus):
with tf.device('/cpu:0'):
with tf.name_scope('input_tower_%d' % (i)):
if FLAGS.data_set == 'norb':
features += [
norb_record.inputs(
train_dir=FLAGS.norb_data_dir,
batch_size=batch_size,
split=split,
multi=FLAGS.multi,
image_pixel=FLAGS.norb_pixel,
distort=FLAGS.distort,
patching=FLAGS.patching,
)
]
elif FLAGS.data_set == 'affnist':
features += [
mnist_record.inputs(
train_dir=FLAGS.affnist_data_dir,
batch_size=batch_size,
split=split,
multi=FLAGS.multi,
shift=0,
height=40,
train_file='test.tfrecords')
]
elif FLAGS.data_set == 'expanded_mnist':
features += [
mnist_record.inputs(
train_dir=FLAGS.data_dir,
batch_size=batch_size,
split=split,
multi=FLAGS.multi,
height=40,
train_file='train_6shifted_6padded_mnist.tfrecords',
shift=6)
]
else:
if train and not FLAGS.distort:
shift = 2
else:
shift = 0
features += [
mnist_record.inputs(
train_dir=FLAGS.data_dir,
batch_size=batch_size,
split=split,
multi=FLAGS.multi,
shift=shift,
distort=FLAGS.distort)
]
print(features)
return features
def run_training():
"""Train."""
with tf.Graph().as_default():
# Input images and labels.
features = get_features(True, FLAGS.batch_size)
model = f_model.multi_gpu_model
print('so far so good!')
result = model(features)
param_stats = contrib_tfprof.model_analyzer.print_model_analysis(
tf.get_default_graph(),
tfprof_options=contrib_tfprof.model_analyzer
.TRAINABLE_VARS_PARAMS_STAT_OPTIONS)
sys.stdout.write('total_params: %d\n' % param_stats.total_parameters)
contrib_tfprof.model_analyzer.print_model_analysis(
tf.get_default_graph(),
tfprof_options=contrib_tfprof.model_analyzer.FLOAT_OPS_OPTIONS)
merged = result['summary']
train_step = result['train']
# test_writer = tf.summary.FileWriter(FLAGS.summary_dir + '/test')
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
if FLAGS.debug:
sess = tf_debug.LocalCLIDebugWrapperSession(sess, ui_type='curses')
sess.add_tensor_filter('has_inf_or_nan', tf_debug.has_inf_or_nan)
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
sess.run(init_op)
saver = tf.train.Saver(max_to_keep=FLAGS.keep_ckpt)
if tf.gfile.Exists(FLAGS.summary_dir + '/train'):
ckpt = tf.train.get_checkpoint_state(FLAGS.summary_dir + '/train/')
print(ckpt)
if (not FLAGS.restart) and ckpt and ckpt.model_checkpoint_path:
print('hesllo')
saver.restore(sess, ckpt.model_checkpoint_path)
prev_step = int(
ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
else:
print('what??')
tf.gfile.DeleteRecursively(FLAGS.summary_dir + '/train')
tf.gfile.MakeDirs(FLAGS.summary_dir + '/train')
prev_step = 0
else:
tf.gfile.MakeDirs(FLAGS.summary_dir + '/train')
prev_step = 0
train_writer = tf.summary.FileWriter(FLAGS.summary_dir + '/train',
sess.graph)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
try:
step = 0
for i in range(prev_step, FLAGS.max_steps):
step += 1
summary, _ = sess.run([merged, train_step])
train_writer.add_summary(summary, i)
if (i + 1) % FLAGS.checkpoint_steps == 0:
saver.save(
sess,
os.path.join(FLAGS.summary_dir + '/train', 'model.ckpt'),
global_step=i + 1)
except tf.errors.OutOfRangeError:
print('Done training for %d steps.' % step)
finally:
# When done, ask the threads to stop.
coord.request_stop()
train_writer.close()
# Wait for threads to finish.
coord.join(threads)
sess.close()
def run_eval():
"""Evaluate on test or validation."""
with tf.Graph().as_default():
# Input images and labels.
features = get_features(False, 5)
model = f_model.multi_gpu_model
result = model(features)
merged = result['summary']
correct_prediction_sum = result['correct']
almost_correct_sum = result['almost']
saver = tf.train.Saver()
test_writer = tf.summary.FileWriter(FLAGS.summary_dir + '/test')
seen_step = -1
time.sleep(3 * 60)
paused = 0
while paused < 360:
ckpt = tf.train.get_checkpoint_state(FLAGS.summary_dir + '/train/')
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoin
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
else:
time.sleep(2 * 60)
paused += 2
continue
while seen_step == int(global_step):
time.sleep(2 * 60)
ckpt = tf.train.get_checkpoint_state(FLAGS.summary_dir + '/train/')
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
paused += 2
if paused > 360:
test_writer.close()
return
paused = 0
seen_step = int(global_step)
print(seen_step)
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
saver.restore(sess, ckpt.model_checkpoint_path)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
try:
total_tp = 0
total_almost = 0
for i in range(FLAGS.eval_size // 5):
summary_j, tp, almost = sess.run(
[merged, correct_prediction_sum, almost_correct_sum])
total_tp += tp
total_almost += almost
total_false = FLAGS.eval_size - total_tp
total_almost_false = FLAGS.eval_size - total_almost
summary_tp = tf.Summary.FromString(summary_j)
summary_tp.value.add(tag='correct_prediction', simple_value=total_tp)
summary_tp.value.add(tag='wrong_prediction', simple_value=total_false)
summary_tp.value.add(
tag='almost_wrong_prediction', simple_value=total_almost_false)
test_writer.add_summary(summary_tp, global_step)
print('write done')
except tf.errors.OutOfRangeError:
print('Done eval for %d steps.' % i)
finally:
# When done, ask the threads to stop.
coord.request_stop()
# Wait for threads to finish.
coord.join(threads)
sess.close()
test_writer.close()
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum()
def eval_ensemble(ckpnts):
"""Evaluate on an ensemble of checkpoints."""
with tf.Graph().as_default():
first_features = get_features(False, 100)[0]
h = first_features['height']
d = first_features['depth']
features = {
'images': tf.placeholder(tf.float32, shape=(100, d, h, h)),
'labels': tf.placeholder(tf.float32, shape=(100, 10)),
'recons_image': tf.placeholder(tf.float32, shape=(100, d, h, h)),
'recons_label': tf.placeholder(tf.int32, shape=(100)),
'height': first_features['height'],
'depth': first_features['depth']
}
model = f_model.multi_gpu_model
result = model([features])
logits = result['logits']
config = tf.ConfigProto(allow_soft_placement=True)
# saver.restore(sess, tf.train.latest_checkpoint(FLAGS.ckpnt))
batch_logits = np.zeros((FLAGS.eval_size // 100, 100, 10), dtype=np.float32)
batch_recons_label = np.zeros((FLAGS.eval_size // 100, 100),
dtype=np.float32)
batch_labels = np.zeros((FLAGS.eval_size // 100, 100, 10), dtype=np.float32)
batch_images = np.zeros((FLAGS.eval_size // 100, 100, d, h, h),
dtype=np.float32)
batch_recons_image = np.zeros((FLAGS.eval_size // 100, 100, d, h, h),
dtype=np.float32)
saver = tf.train.Saver()
sess = tf.Session(config=config)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
try:
for i in range(FLAGS.eval_size // 100):
(batch_recons_label[i, Ellipsis], batch_labels[i, Ellipsis], batch_images[i, Ellipsis],
batch_recons_image[i, Ellipsis]) = sess.run([
first_features['recons_label'], first_features['labels'],
first_features['images'], first_features['recons_image']
])
for ckpnt in ckpnts:
saver.restore(sess, ckpnt)
for i in range(FLAGS.eval_size // 100):
logits_i = sess.run(
logits,
feed_dict={
features['recons_label']: batch_recons_label[i, Ellipsis],
features['labels']: batch_labels[i, Ellipsis],
features['images']: batch_images[i, Ellipsis],
features['recons_image']: batch_recons_image[i, Ellipsis]
})
# batch_logits[i, ...] += softmax(logits_i)
batch_logits[i, Ellipsis] += logits_i
except tf.errors.OutOfRangeError:
print('Done eval for %d steps.' % i)
finally:
# When done, ask the threads to stop.
coord.request_stop()
# Wait for threads to finish.
coord.join(threads)
sess.close()
batch_pred = np.argmax(batch_logits, axis=2)
total_wrong = np.sum(np.not_equal(batch_pred, batch_recons_label))
print(total_wrong)
def eval_once(ckpnt):
"""Evaluate on one checkpoint once."""
ptches = np.zeros((14, 14, 32, 32))
for i in range(14):
for j in range(14):
ind_x = i * 2
ind_y = j * 2
for k in range(5):
for h in range(5):
ptches[i, j, ind_x + k, ind_y + h] = 1
ptches = np.reshape(ptches, (14 * 14, 32, 32))
with tf.Graph().as_default():
features = get_features(False, 1)[0]
if FLAGS.patching:
features['images'] = features['cc_images']
features['recons_label'] = features['cc_recons_label']
features['labels'] = features['cc_labels']
model = f_model.multi_gpu_model
result = model([features])
# merged = result['summary']
correct_prediction_sum = result['correct']
# almost_correct_sum = result['almost']
# mid_act = result['mid_act']
logits = result['logits']
saver = tf.train.Saver()
test_writer = tf.summary.FileWriter(FLAGS.summary_dir + '/test_once')
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.per_process_gpu_memory_fraction = 0.3
sess = tf.Session(config=config)
# saver.restore(sess, tf.train.latest_checkpoint(FLAGS.ckpnt))
saver.restore(sess, ckpnt)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
i = 0
try:
total_tp = 0
for i in range(FLAGS.eval_size):
#, g_ac, ac
lb, tp, lg = sess.run([
features['recons_label'],
correct_prediction_sum,
logits,
])
if FLAGS.patching:
batched_lg = np.sum(lg / np.sum(lg, axis=1, keepdims=True), axis=0)
batch_pred = np.argmax(batched_lg)
tp = np.equal(batch_pred, lb[0])
total_tp += tp
total_false = FLAGS.eval_size - total_tp
print('false:{}, true:{}'.format(total_false, total_tp))
# summary_tp = tf.Summary.FromString(summary_j)
# summary_tp.value.add(tag='correct_prediction', simple_value=total_tp)
# summary_tp.value.add(tag='wrong_prediction', simple_value=total_false)
# summary_tp.value.add(
# tag='almost_wrong_prediction', simple_value=total_almost_false)
# test_writer.add_summary(summary_tp, i + 1)
except tf.errors.OutOfRangeError:
print('Done eval for %d steps.' % i)
finally:
# When done, ask the threads to stop.
coord.request_stop()
# Wait for threads to finish.
coord.join(threads)
sess.close()
test_writer.close()
def main(_):
if FLAGS.eval_ensemble:
if tf.gfile.Exists(FLAGS.summary_dir + '/test_ensemble'):
tf.gfile.DeleteRecursively(FLAGS.summary_dir + '/test_ensemble')
tf.gfile.MakeDirs(FLAGS.summary_dir + '/test_ensemble')
ensem = []
for i in range(1, 12):
f_name = '/tmp/cifar10/{}{}{}-600000'.format(FLAGS.part1, i, FLAGS.part2)
if tf.train.checkpoint_exists(f_name):
ensem += [f_name]
print(len(ensem))
eval_ensemble(ensem)
elif FLAGS.eval_once:
if tf.gfile.Exists(FLAGS.summary_dir + '/test_once'):
tf.gfile.DeleteRecursively(FLAGS.summary_dir + '/test_once')
tf.gfile.MakeDirs(FLAGS.summary_dir + '/test_once')
eval_once(FLAGS.ckpnt)
elif FLAGS.train:
run_training()
else:
if tf.gfile.Exists(FLAGS.summary_dir + '/test_once'):
tf.gfile.DeleteRecursively(FLAGS.summary_dir + '/test_once')
tf.gfile.MakeDirs(FLAGS.summary_dir + '/test_once')
if tf.gfile.Exists(FLAGS.summary_dir + '/test'):
tf.gfile.DeleteRecursively(FLAGS.summary_dir + '/test')
tf.gfile.MakeDirs(FLAGS.summary_dir + '/test')
run_eval()
if __name__ == '__main__':
tf.app.run()
| 40.390566 | 95 | 0.633998 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,113 | 0.285561 |
d3d3a5b087e35b140a4cca72077a3d96a9f4d93b | 42,865 | py | Python | grafana/common/dashboards/aggregated/client_subnet_statistics_detail.py | MikeAT/visualizer | 946b98d82eaf7ec508861115585afd683fc49e5c | [
"MIT"
]
| 6 | 2021-03-03T17:52:24.000Z | 2022-02-10T11:45:22.000Z | grafana/common/dashboards/aggregated/client_subnet_statistics_detail.py | Acidburn0zzz/visualizer | 20fba91f0d26b98531f97f643c8329640d1c0d11 | [
"MIT"
]
| 1 | 2021-04-29T12:34:04.000Z | 2021-04-29T14:50:17.000Z | grafana/common/dashboards/aggregated/client_subnet_statistics_detail.py | Acidburn0zzz/visualizer | 20fba91f0d26b98531f97f643c8329640d1c0d11 | [
"MIT"
]
| 2 | 2021-04-27T14:02:03.000Z | 2021-11-12T10:34:32.000Z | # Copyright 2021 Internet Corporation for Assigned Names and Numbers.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at https://mozilla.org/MPL/2.0/.
#
# Developed by Sinodun IT (sinodun.com)
#
# Aggregation client subnet statistics
import textwrap
import grafanalib.core as GCore
import grafanacommon as GCommon
def query_classification_chart(chart_title, yaxis_label, prefix_field, agginfo, nodesel):
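    """Build a stacked horizontal bar chart of query-classification rates
    (queries per second) for the 40 busiest values of ``prefix_field``.
    """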
return GCommon.BarChart(
title = chart_title,
orientation = GCommon.BAR_CHART_ORIENTATION_HORIZONTAL,
layout = GCommon.BarChartLayout(
barmode = GCommon.BAR_CHART_LAYOUT_MODE_STACK,
showlegend = True,
xaxis = GCommon.BarChartAxis(
title = 'Queries per second',
),
yaxis = GCommon.BarChartAxis(
autotick = False,
axtype = GCommon.BAR_CHART_AXIS_TYPE_CATEGORY,
tickmargin = 110,
title = yaxis_label,
),
),
traces = [
GCommon.BarChartTrace(
name = 'AForA',
x = 'AForA',
y = 'AForAPrefix',
text = 'AForA',
),
GCommon.BarChartTrace(
name = 'AForRoot',
x = 'AForRoot',
y = 'AForRootPrefix',
text = 'AForRoot',
),
GCommon.BarChartTrace(
name = 'FunnyQueryClass',
x = 'FunnyQueryClass',
y = 'FunnyQueryClassPrefix',
text = 'FunnyQueryClass',
),
GCommon.BarChartTrace(
name = 'FunnyQueryType',
x = 'FunnyQueryType',
y = 'FunnyQueryTypePrefix',
text = 'FunnyQueryType',
),
GCommon.BarChartTrace(
name = 'Localhost',
x = 'Localhost',
y = 'LocalhostPrefix',
text = 'Localhost',
),
GCommon.BarChartTrace(
name = 'NonAuthTld',
x = 'NonAuthTld',
y = 'NonAuthTldPrefix',
text = 'NonAuthTld',
),
GCommon.BarChartTrace(
name = 'Ok',
x = 'Ok',
y = 'OkPrefix',
text = 'Ok',
),
GCommon.BarChartTrace(
name = 'RFC1918Ptr',
x = 'RFC1918Ptr',
y = 'RFC1918PtrPrefix',
text = 'RFC1918Ptr',
),
GCommon.BarChartTrace(
name = 'RootServersNet',
x = 'RootServersNet',
y = 'RootServersNetPrefix',
text = 'RootServersNet',
),
GCommon.BarChartTrace(
name = 'SrcPortZero',
x = 'SrcPortZero',
y = 'SrcPortZeroPrefix',
text = 'SrcPortZero',
),
],
targets = [
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Prefix AS AForAPrefix,
AForA,
Count
FROM
(
SELECT
{prefix_field} AS Prefix,
sum(Count) AS Count,
sum(AForACount)/($to - $from) AS AForA
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY Count DESC
LIMIT 40
)
ORDER BY Count ASC
""".format(
prefix_field=prefix_field,
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'A'
),
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Prefix AS AForRootPrefix,
AForRoot,
Count
FROM
(
SELECT
{prefix_field} AS Prefix,
sum(Count) AS Count,
sum(AForRootCount)/($to - $from) AS AForRoot
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY Count DESC
LIMIT 40
)
ORDER BY Count ASC
""".format(
prefix_field=prefix_field,
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'B'
),
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Prefix AS FunnyQueryClassPrefix,
FunnyQueryClass,
Count
FROM
(
SELECT
{prefix_field} AS Prefix,
sum(Count) AS Count,
sum(FunnyQueryClassCount)/($to - $from) AS FunnyQueryClass
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY Count DESC
LIMIT 40
)
ORDER BY Count ASC
""".format(
prefix_field=prefix_field,
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'C'
),
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Prefix AS FunnyQueryTypePrefix,
FunnyQueryType,
Count
FROM
(
SELECT
{prefix_field} AS Prefix,
sum(Count) AS Count,
sum(FunnyQueryTypeCount)/($to - $from) AS FunnyQueryType
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY Count DESC
LIMIT 40
)
                    ORDER BY Count ASC
""".format(
prefix_field=prefix_field,
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'D'
),
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Prefix AS LocalhostPrefix,
Localhost,
Count
FROM
(
SELECT
{prefix_field} AS Prefix,
sum(Count) AS Count,
sum(LocalhostCount)/($to - $from) AS Localhost
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY Count DESC
LIMIT 40
)
ORDER BY Count ASC
""".format(
prefix_field=prefix_field,
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'E'
),
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Prefix AS NonAuthTldPrefix,
NonAuthTld,
Count
FROM
(
SELECT
{prefix_field} AS Prefix,
sum(Count) AS Count,
sum(NonAuthTldCount)/($to - $from) AS NonAuthTld
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY Count DESC
LIMIT 40
)
ORDER BY Count ASC
""".format(
prefix_field=prefix_field,
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'F'
),
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Prefix AS OkPrefix,
Ok,
TotalCount
FROM
(
SELECT
{prefix_field} AS Prefix,
sum(Count) AS TotalCount,
sum(Count -
(AForACount +
AForRootCount +
FunnyQueryClassCount +
FunnyQueryTypeCount +
LocalhostCount +
NonAuthTldCount +
RFC1918PtrCount +
RootServersNetCount +
SrcPortZeroCount))/($to - $from) AS Ok
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY TotalCount DESC
LIMIT 40
)
ORDER BY TotalCount ASC
""".format(
prefix_field=prefix_field,
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'G'
),
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Prefix AS RFC1918PtrPrefix,
RFC1918Ptr,
Count
FROM
(
SELECT
{prefix_field} AS Prefix,
sum(Count) AS Count,
sum(RFC1918PtrCount)/($to - $from) AS RFC1918Ptr
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY Count DESC
LIMIT 40
)
ORDER BY Count ASC
""".format(
prefix_field=prefix_field,
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'H'
),
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Prefix AS RootServersNetPrefix,
RootServersNet,
Count
FROM
(
SELECT
{prefix_field} AS Prefix,
sum(Count) AS Count,
sum(RootServersNetCount)/($to - $from) AS RootServersNet
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY Count DESC
LIMIT 40
)
ORDER BY Count ASC
""".format(
prefix_field=prefix_field,
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'I'
),
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Prefix AS SrcPortZeroPrefix,
SrcPortZero,
Count
FROM
(
SELECT
{prefix_field} AS Prefix,
sum(Count) AS Count,
sum(SrcPortZeroCount)/($to - $from) AS SrcPortZero
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY Count DESC
LIMIT 40
)
ORDER BY Count ASC
""".format(
prefix_field=prefix_field,
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'J'
),
],
)
def dash(myuid, agginfo, nodesel, **kwargs):
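    """Assemble the 'Client subnet statistics detail' dashboard: busiest fixed
    subnets, RCODE breakdowns by ASN / AS subnet / fixed subnet, root abusers,
    and query-classification charts.
    """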
return GCommon.Dashboard(
title = "Client subnet statistics detail",
tags = [
agginfo['graph_tag']
],
uid = myuid,
rows = [
GCore.Row(
height = GCore.Pixels(50),
panels = [
GCommon.HTMLPanel('grafana/common/dashboards/aggregated/client_subnet_statistics_header.html', transparent=True),
],
),
GCore.Row(
height = GCore.Pixels(GCore.DEFAULT_ROW_HEIGHT.num * 2),
panels = [
GCommon.BarChart(
title = 'Clients by fixed subnet',
orientation = GCommon.BAR_CHART_ORIENTATION_HORIZONTAL,
layout = GCommon.BarChartLayout(
xaxis = GCommon.BarChartAxis(
title = 'Queries per second',
),
yaxis = GCommon.BarChartAxis(
autotick = False,
axtype = GCommon.BAR_CHART_AXIS_TYPE_CATEGORY,
tickmargin = 110,
title = 'Fixed Subnet',
),
),
traces = [
GCommon.BarChartTrace(
name = 'Subnet',
color = '#A352CC',
x = 'QPS',
y = 'Subnet',
text = 'QPS',
),
],
targets = [
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'BusiestClientSubnets' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Subnet,
QPS
FROM
(
SELECT
Prefix AS Subnet,
sum(Count)/($to - $from) AS QPS
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY QPS DESC
LIMIT 30
)
ORDER BY QPS ASC""".format(
nodesel=nodesel)),
refId = 'A'
)
],
),
],
),
GCore.Row(
height = GCore.Pixels(GCore.DEFAULT_ROW_HEIGHT.num * 2),
panels = [
GCommon.BarChart(
title = 'RCODE by clients by ASN',
orientation = GCommon.BAR_CHART_ORIENTATION_HORIZONTAL,
layout = GCommon.BarChartLayout(
barmode = GCommon.BAR_CHART_LAYOUT_MODE_STACK,
showlegend = True,
xaxis = GCommon.BarChartAxis(
title = 'Queries per second',
),
yaxis = GCommon.BarChartAxis(
autotick = False,
axtype = GCommon.BAR_CHART_AXIS_TYPE_CATEGORY,
tickmargin = 110,
title = 'ASN',
),
),
autotrace = True,
targets = [
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'BusiestClientSubnets' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
notEmpty(rcodeText) ? rcodeText : concat('RCODE', toString(rcode)) AS DisplayRcode,
sum(rcodeCount) / ($to - $from) AS rcodeCount,
ClientASN
FROM
(
SELECT
ClientASN,
rcode,
sum(rcodeCount) AS rcodeCount,
any(sCount) AS sCount
FROM
(
SELECT
ClientASN,
sum(RcodeMap.Count) AS sCount
FROM $table
ARRAY JOIN RcodeMap
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY
ClientASN
ORDER BY sCount DESC, ClientASN ASC
LIMIT 30
) AS ClientASNCounts
ALL LEFT JOIN
(
SELECT
ClientASN,
RcodeMap.ResponseRcode AS rcode,
sum(RcodeMap.Count) AS rcodeCount
FROM $table
ARRAY JOIN RcodeMap
WHERE
$timeFilter
AND NodeID IN {nodesel}
GROUP BY
ClientASN,
rcode
UNION ALL
(
SELECT
ClientASN,
rcode,
CAST(0 AS UInt64) AS rcodeCount
FROM
(
SELECT
0 AS Zero,
ClientASN
FROM $table
WHERE
$timeFilter
AND NodeID IN {nodesel}
GROUP BY ClientASN
) AS ZeroClientASN
ALL LEFT JOIN
(
SELECT
0 AS Zero,
RcodeMap.ResponseRcode AS rcode
FROM $table
ARRAY JOIN RcodeMap
WHERE
$timeFilter
AND NodeID IN {nodesel}
GROUP BY rcode
) AS ZeroRcode USING Zero
)
) AS ClientASNRcodeCounts USING ClientASN
GROUP BY
ClientASN,
rcode
) AS ClientASNRcodeCountsTotal
ALL INNER JOIN
(
SELECT
value_name AS rcodeText,
toUInt16(value) AS rcode
FROM {nodeinfo_database}.iana_text
WHERE registry_name = 'RCODE'
) AS ClientASNNameCountsTotal USING rcode
GROUP BY
ClientASN,
rcode,
rcodeText
ORDER BY
sum(sCount) ASC,
rcodeText ASC,
ClientASN DESC""".format(
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'A'
)
],
),
],
),
GCore.Row(
height = GCore.Pixels(GCore.DEFAULT_ROW_HEIGHT.num * 2),
panels = [
GCommon.BarChart(
title = 'RCODE by clients by AS subnet',
orientation = GCommon.BAR_CHART_ORIENTATION_HORIZONTAL,
layout = GCommon.BarChartLayout(
barmode = GCommon.BAR_CHART_LAYOUT_MODE_STACK,
showlegend = True,
xaxis = GCommon.BarChartAxis(
title = 'Queries per second',
),
yaxis = GCommon.BarChartAxis(
autotick = False,
axtype = GCommon.BAR_CHART_AXIS_TYPE_CATEGORY,
tickmargin = 110,
title = 'AS Subnet',
),
),
autotrace = True,
targets = [
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'BGPPrefix' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
notEmpty(rcodeText) ? rcodeText : concat('RCODE', toString(rcode)) AS DisplayRcode,
sum(rcodeCount) / ($to - $from) AS rcodeCount,
Prefix
FROM
(
SELECT
Prefix,
rcode,
sum(rcodeCount) AS rcodeCount,
any(sCount) AS sCount
FROM
(
SELECT
Prefix,
sum(RcodeMap.Count) AS sCount
FROM $table
ARRAY JOIN RcodeMap
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY
Prefix
ORDER BY sCount DESC, Prefix ASC
LIMIT 30
) AS PrefixCount
ALL LEFT JOIN
(
SELECT
Prefix,
RcodeMap.ResponseRcode AS rcode,
sum(RcodeMap.Count) AS rcodeCount
FROM $table
ARRAY JOIN RcodeMap
WHERE
$timeFilter
AND NodeID IN {nodesel}
GROUP BY
Prefix,
rcode
UNION ALL
(
SELECT
Prefix,
rcode,
CAST(0 AS UInt64) AS rcodeCount
FROM
(
SELECT
0 AS Zero,
Prefix
FROM $table
WHERE
$timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
                                ) AS ZeroPrefix
ALL LEFT JOIN
(
SELECT
0 AS Zero,
RcodeMap.ResponseRcode AS rcode
FROM $table
ARRAY JOIN RcodeMap
WHERE
$timeFilter
AND NodeID IN {nodesel}
GROUP BY rcode
) AS ZeroRcode USING Zero
)
) AS PrefixRcodeCounts USING Prefix
GROUP BY
Prefix,
rcode
) AS PrefixRcodeCountsTotal
ALL INNER JOIN
(
SELECT
value_name AS rcodeText,
toUInt16(value) AS rcode
FROM {nodeinfo_database}.iana_text
WHERE registry_name = 'RCODE'
) AS PrefixNameCountsTotal USING rcode
GROUP BY
Prefix,
rcode,
rcodeText
ORDER BY
sum(sCount) ASC,
rcodeText ASC,
Prefix DESC""".format(
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'A'
)
],
),
],
),
GCore.Row(
height = GCore.Pixels(GCore.DEFAULT_ROW_HEIGHT.num * 2),
panels = [
GCommon.BarChart(
title = 'RCODE by clients by fixed subnet',
orientation = GCommon.BAR_CHART_ORIENTATION_HORIZONTAL,
layout = GCommon.BarChartLayout(
barmode = GCommon.BAR_CHART_LAYOUT_MODE_STACK,
showlegend = True,
xaxis = GCommon.BarChartAxis(
title = 'Queries per second',
),
yaxis = GCommon.BarChartAxis(
autotick = False,
axtype = GCommon.BAR_CHART_AXIS_TYPE_CATEGORY,
tickmargin = 110,
title = 'Fixed Subnet',
),
),
autotrace = True,
targets = [
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'BusiestClientSubnets' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
notEmpty(rcodeText) ? rcodeText : concat('RCODE', toString(rcode)) AS DisplayRcode,
sum(rcodeCount) / ($to - $from) AS rcodeCount,
Prefix
FROM
(
SELECT
Prefix,
rcode,
sum(rcodeCount) AS rcodeCount,
any(sCount) AS sCount
FROM
(
SELECT
Prefix,
sum(RcodeMap.Count) AS sCount
FROM $table
ARRAY JOIN RcodeMap
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY
Prefix
ORDER BY sCount DESC, Prefix ASC
LIMIT 30
) AS PrefixCount
ALL LEFT JOIN
(
SELECT
Prefix,
RcodeMap.ResponseRcode AS rcode,
sum(RcodeMap.Count) AS rcodeCount
FROM $table
ARRAY JOIN RcodeMap
WHERE
$timeFilter
AND NodeID IN {nodesel}
GROUP BY
Prefix,
rcode
UNION ALL
(
SELECT
Prefix,
rcode,
CAST(0 AS UInt64) AS rcodeCount
FROM
(
SELECT
0 AS Zero,
Prefix
FROM $table
WHERE
$timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
) AS ZeroPrefix
ALL LEFT JOIN
(
SELECT
0 AS Zero,
RcodeMap.ResponseRcode AS rcode
FROM $table
ARRAY JOIN RcodeMap
WHERE
$timeFilter
AND NodeID IN {nodesel}
GROUP BY rcode
) AS ZeroRcode USING Zero
)
) AS PrefixRcodeCounts USING Prefix
GROUP BY
Prefix,
rcode
) AS PrefixRcodeCountsTotal
ALL INNER JOIN
(
SELECT
value_name AS rcodeText,
toUInt16(value) AS rcode
FROM {nodeinfo_database}.iana_text
WHERE registry_name = 'RCODE'
) AS PrefixNameCountsTotal USING rcode
GROUP BY
Prefix,
rcode,
rcodeText
ORDER BY
sum(sCount) ASC,
rcodeText ASC,
Prefix DESC""".format(
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'A'
)
],
),
],
),
GCore.Row(
height = GCore.Pixels(GCore.DEFAULT_ROW_HEIGHT.num * 2),
panels = [
GCommon.BarChart(
title = 'Root abusers by fixed subnet',
orientation = GCommon.BAR_CHART_ORIENTATION_HORIZONTAL,
layout = GCommon.BarChartLayout(
xaxis = GCommon.BarChartAxis(
title = 'Queries per second',
),
yaxis = GCommon.BarChartAxis(
autotick = False,
axtype = GCommon.BAR_CHART_AXIS_TYPE_CATEGORY,
tickmargin = 110,
title = 'Fixed Subnet',
),
),
traces = [
GCommon.BarChartTrace(
name = 'Subnet',
color = '#A352CC',
x = 'QPS',
y = 'Subnet',
text = 'QPS',
),
],
targets = [
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
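                            # The query below ranks fixed client subnets by root-abuse queries per
                            # second, keeps the top 40, and re-orders them ascending for the
                            # horizontal bar chart.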
query = textwrap.dedent("""\
SELECT
Subnet,
QPS
FROM
(
SELECT
FixedPrefix AS Subnet,
sum(RootAbuseCount)/($to - $from) AS QPS
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY FixedPrefix
ORDER BY QPS DESC
LIMIT 40
)
ORDER BY QPS ASC""".format(
nodesel=nodesel)),
refId = 'A'
)
],
),
],
),
GCore.Row(
height = GCore.Pixels(GCore.DEFAULT_ROW_HEIGHT.num * 2),
panels = [
query_classification_chart(
'Query classification by busiest fixed subnet',
'Fixed Subnet',
'FixedPrefix',
agginfo,
nodesel)
],
),
GCore.Row(
height = GCore.Pixels(GCore.DEFAULT_ROW_HEIGHT.num * 2),
panels = [
query_classification_chart(
'Query classification by busiest ASN',
'ASN',
'ClientASN',
agginfo,
nodesel)
],
),
GCore.Row(
height = GCore.Pixels(GCore.DEFAULT_ROW_HEIGHT.num * 2),
panels = [
query_classification_chart(
'Query classification by busiest AS subnet',
'AS subnet',
'ASPrefix',
agginfo,
nodesel)
],
),
]
)
| 46.04189 | 133 | 0.30778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 27,147 | 0.633314 |
d3d41214e53cc3ba9f42c3c82841438366d8ce1d | 2,812 | py | Python | pylearn2/neuroimaging_utils/tutorials/nice/jobman/simple_train.py | rdevon/pylearn2 | f7b9a6ea0e2498176b47202f5bb83aec4976e1dd | [
"BSD-3-Clause"
]
| 1 | 2017-10-29T06:18:35.000Z | 2017-10-29T06:18:35.000Z | pylearn2/neuroimaging_utils/tutorials/nice/jobman/simple_train.py | rdevon/pylearn2 | f7b9a6ea0e2498176b47202f5bb83aec4976e1dd | [
"BSD-3-Clause"
]
| null | null | null | pylearn2/neuroimaging_utils/tutorials/nice/jobman/simple_train.py | rdevon/pylearn2 | f7b9a6ea0e2498176b47202f5bb83aec4976e1dd | [
"BSD-3-Clause"
]
| null | null | null | """
Module to train a simple NICE model for demo.
"""
from jobman.tools import expand
from jobman.tools import flatten
import logging
import nice_experiment
import numpy as np
from os import path
from pylearn2.config import yaml_parse
from pylearn2.neuroimaging_utils.datasets import MRI
from pylearn2.neuroimaging_utils.dataset_utils import mri_nifti
from pylearn2.scripts.jobman.experiment import ydict
from pylearn2.utils import serial
logging.basicConfig(format="[%(module)s:%(levelname)s]:%(message)s")
logger = logging.getLogger(__name__)
yaml_file = nice_experiment.yaml_file
def main(args):
logger.info("Getting dataset info for %s" % args.dataset_name)
data_path = serial.preprocess("${PYLEARN2_NI_PATH}/" + args.dataset_name)
mask_file = path.join(data_path, "mask.npy")
mask = np.load(mask_file)
input_dim = (mask == 1).sum()
if input_dim % 2 == 1:
input_dim -= 1
mri = MRI.MRI_Standard(which_set="full",
dataset_name=args.dataset_name,
unit_normalize=True,
even_input=True,
apply_mask=True)
variance_map_file = path.join(data_path, "variance_map.npy")
mri_nifti.save_variance_map(mri, variance_map_file)
user = path.expandvars("$USER")
save_path = serial.preprocess("/export/mialab/users/%s/pylearn2_outs/%s"
% (user, "nice_jobman_test"))
file_params = {"save_path": save_path,
"variance_map_file": variance_map_file
}
yaml_template = open(yaml_file).read()
hyperparams = expand(flatten(nice_experiment.default_hyperparams(input_dim=input_dim)),
dict_type=ydict)
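    # Fill the YAML template: a matching, non-empty command-line argument
    # overrides the template default, and an L1 decay of 0.0 removes the
    # weight-decay term from the cost entirely.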
for param in hyperparams:
if hasattr(args, param) and getattr(args, param):
val = getattr(args, param)
logger.info("Filling %s with %r" % (param, val))
hyperparams[param] = type(hyperparams[param])(val)
elif param == "weight_decay":
val = getattr(args, "l1_decay")
if val == 0.0:
hyperparams["weight_decay"] = ""
else:
hyperparams["weight_decay"] = {
"__builder__": "pylearn2.costs.mlp.L1WeightDecay",
"coeffs": {"z": val}
}
for param in file_params:
yaml_template = yaml_template.replace("%%(%s)s" % param, file_params[param])
yaml = yaml_template % hyperparams
print yaml
logger.info("Training")
train = yaml_parse.load(yaml)
train.main_loop()
if __name__ == "__main__":
parser = nice_experiment.make_argument_parser()
args = parser.parse_args()
if args.verbose:
logger.setLevel(logging.DEBUG)
main(args)
| 34.292683 | 91 | 0.629445 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 435 | 0.154694 |
d3d4d6dda5b57fc2a5aa6672a9cd0393e4d62ee6 | 6,199 | py | Python | _Framework/Layer.py | isfopo/MacroPushScript | 46c440aa3f6325d8767e88252c5520d76a9fa634 | [
"MIT"
]
| null | null | null | _Framework/Layer.py | isfopo/MacroPushScript | 46c440aa3f6325d8767e88252c5520d76a9fa634 | [
"MIT"
]
| null | null | null | _Framework/Layer.py | isfopo/MacroPushScript | 46c440aa3f6325d8767e88252c5520d76a9fa634 | [
"MIT"
]
| null | null | null | #Embedded file name: /Users/versonator/Jenkins/live/output/Live/mac_64_static/Release/python-bundle/MIDI Remote Scripts/_Framework/Layer.py
u"""
Module implementing resource-based access to controls through a unified,
dynamic interface.
"""
from __future__ import absolute_import, print_function, unicode_literals
from builtins import str
from builtins import object
from future.utils import raise_
from itertools import repeat
from .ControlElement import ControlElementClient
from .Util import nop
from .Resource import ExclusiveResource, CompoundResource
from .Disconnectable import Disconnectable
class LayerError(Exception):
pass
class UnhandledControlError(LayerError):
pass
class SimpleLayerOwner(Disconnectable):
u"""
Simple owner that grabs a given layer until it's disconnected
"""
def __init__(self, layer = None):
self._layer = layer
self._layer.grab(self)
def disconnect(self):
self._layer.release(self)
class LayerClient(ControlElementClient):
u"""
    Client of the individual controls that delivers the controls to the
layer owner.
"""
def __init__(self, layer = None, layer_client = None, *a, **k):
super(LayerClient, self).__init__(*a, **k)
assert layer_client
assert layer
self.layer_client = layer_client
self.layer = layer
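    # set_control_element() below resolves the owner's handler in two steps:
    # first a `set_<name>` method, then a `<name>` attribute exposing
    # set_control_element(); a missing handler only raises for non-private
    # (not underscore-prefixed) control names.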
def set_control_element(self, control_element, grabbed):
layer = self.layer
owner = self.layer_client
assert owner
assert control_element in layer._control_to_names, u'Control not in layer: %s' % (control_element,)
names = layer._control_to_names[control_element]
if not grabbed:
control_element = None
for name in names:
try:
handler = getattr(owner, u'set_' + name)
except AttributeError:
try:
control = getattr(owner, name)
handler = control.set_control_element
except AttributeError:
if name[0] != u'_':
raise_(UnhandledControlError, u'Component %s has no handler for control_element %s' % (str(owner), name))
else:
handler = nop
handler(control_element or None)
layer._name_to_controls[name] = control_element
class LayerBase(object):
pass
class CompoundLayer(LayerBase, CompoundResource):
u"""
A compound resource takes two layers and makes them look like one,
grabbing both of them. Both can have different priorities
thought.
"""
def _get_priority(self):
assert self.first.priority == self.second.priority
return self.first.priority
def _set_priority(self, priority):
self.first.priority = priority
self.second.priority = priority
priority = property(_get_priority, _set_priority)
def __getattr__(self, key):
try:
return getattr(self.first, key)
except AttributeError:
return getattr(self.second, key)
class Layer(LayerBase, ExclusiveResource):
u"""
A layer provides a convenient interface to control resources. In a
layer, you can group several controls by name. The layer itself
is an exclusive resource. When grabbing the layer, it will try to
    grab all controls and will forward them to its own owner when it
    receives them, and will take them back from the owner when they are
    released. The layer will give and take away the controls from its
client using methods of the form::
client.set[control-name](control)
Where [control-name] is the name the control was given in this
layer. This way, layers are a convenient way to provide controls
to components indirectly, with automatic handling of competition
for them.
Note that [control-name] can not be any of the following reserved
names: priority, grab, release, on_received, on_lost, owner,
get_owner
If [control-name] starts with an underscore (_) it is considered
private. It is grabbed but it is not delivered to the client.
"""
def __init__(self, priority = None, **controls):
super(Layer, self).__init__()
self._priority = priority
self._name_to_controls = dict(zip(iter(controls.keys()), repeat(None)))
self._control_to_names = dict()
self._control_clients = dict()
for name, control in controls.items():
assert control is not None, name
self._control_to_names.setdefault(control, []).append(name)
def __add__(self, other):
return CompoundLayer(self, other)
def _get_priority(self):
return self._priority
def _set_priority(self, priority):
if priority != self._priority:
if self.owner:
raise RuntimeError(u"Cannot change priority of a layer while it's owned")
self._priority = priority
priority = property(_get_priority, _set_priority)
def __getattr__(self, name):
u""" Provides access to controls """
try:
return self._name_to_controls[name]
except KeyError:
raise AttributeError
def grab(self, client, *a, **k):
if client == self.owner:
self.on_received(client, *a, **k)
return True
return super(Layer, self).grab(client, *a, **k)
def on_received(self, client, *a, **k):
u""" Override from ExclusiveResource """
for control in self._control_to_names.keys():
k.setdefault(u'priority', self._priority)
control.resource.grab(self._get_control_client(client), *a, **k)
def on_lost(self, client):
u""" Override from ExclusiveResource """
for control in self._control_to_names.keys():
control.resource.release(self._get_control_client(client))
def _get_control_client(self, client):
try:
control_client = self._control_clients[client]
except KeyError:
control_client = self._control_clients[client] = LayerClient(layer_client=client, layer=self)
return control_client
| 33.874317 | 139 | 0.658816 | 5,575 | 0.899339 | 0 | 0 | 0 | 0 | 0 | 0 | 1,878 | 0.302952 |
d3d6cda09c480bcbc5eba0a289993557df421803 | 1,529 | py | Python | src/retrieve_exons_sequence_genomes.py | naturalis/brassicaceae-hybseq-pipeline | b71462d308b8a4adbc370691bf085d44666914d7 | [
"MIT"
]
| 5 | 2020-04-22T12:08:07.000Z | 2021-09-03T01:56:44.000Z | src/retrieve_exons_sequence_genomes.py | naturalis/brassicaceae-hybseq-pipeline | b71462d308b8a4adbc370691bf085d44666914d7 | [
"MIT"
]
| 1 | 2020-09-16T11:29:15.000Z | 2020-09-16T11:29:15.000Z | src/retrieve_exons_sequence_genomes.py | naturalis/brassicaceae-hybseq-pipeline | b71462d308b8a4adbc370691bf085d44666914d7 | [
"MIT"
]
| 1 | 2020-09-16T14:05:08.000Z | 2020-09-16T14:05:08.000Z | # retrieve_exons_sequence_genomes.py
# This script is to retrieve exons from sequenced genomes which are also present in the reference genome (A. thaliana).
# To identify the contigs from the sequenced genomes, each contig has to be retrieved from A. thaliana first.
# Then, for each sequence query of A. thaliana, the query can be BLAT against the database reference.
# In this case, the database reference will be S. irio and A. lyrata.
# Made by: Elfy Ly
# Date: 19 May 2020
import os
from Bio import SeqIO
path_to_at_exons_dir = "/mnt/c/Users/elfyl/PycharmProjects/brassicaceae-hybseq-pipeline-offline/results/exons"
path_to_at_dir = "/mnt/c/Users/elfyl/PycharmProjects/brassicaceae-hybseq-pipeline-offline/data/reference_genomes"
path_to_at_reference = path_to_at_dir + "/ref-at.fasta"
# Create exons_AT Directory if don't exist
if not os.path.exists(path_to_at_exons_dir):
os.mkdir(path_to_at_exons_dir)
print("Directory ", path_to_at_exons_dir, " Created ")
else:
print("Directory ", path_to_at_exons_dir, " already exists")
# Create new files for every sequence query of the reference genome A. thaliana
count_id = 0
for seq_record in SeqIO.parse(path_to_at_reference, "fasta"):
f = open(path_to_at_exons_dir + "/" + seq_record.id + ".txt", "w+")
print("New text file created: " + seq_record.id + ".fa")
seq_id = seq_record.id
seq_seq = str(seq_record.seq)
f.write(">" + seq_id + "\n" + seq_seq)
f.close()
count_id += 1
print("Number of sequence records: " + str(count_id))
| 41.324324 | 119 | 0.743623 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 929 | 0.607587 |
d3d7b8b121c459940512749ce36dcfbad947c964 | 1,136 | py | Python | lexical/lexical.py | xmeng17/Malicious-URL-Detection | f286aeb50570455486b470cbc2db9aa0fae99b8f | [
"MIT"
]
| null | null | null | lexical/lexical.py | xmeng17/Malicious-URL-Detection | f286aeb50570455486b470cbc2db9aa0fae99b8f | [
"MIT"
]
| null | null | null | lexical/lexical.py | xmeng17/Malicious-URL-Detection | f286aeb50570455486b470cbc2db9aa0fae99b8f | [
"MIT"
]
| null | null | null | import re
class lexical(object):
'''Lexical Features:
Top Level domain (str)
Number of dots in hostname (int)
Average token length of hostname (float)
Max token length of hostname (int)
Average token length of path (float)
Max token length of path (int)
'''
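    # Note: lexical() below returns the last five of these features; the top
    # level domain is not computed here. Illustrative call with a made-up URL:
    #   lexical().lexical("www.example.com", "path/to/page.php?id=1")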
def __init__(self):
pass
def lexical(self,hostname,path):
dot_num=self.dots(hostname)
arr_host=self.split(hostname)
arr_path=self.split(path)
avg_host=self.avg(arr_host)
max_host=self.max(arr_host)
avg_path=self.avg(arr_path)
max_path=self.max(arr_path)
return dot_num,avg_host,max_host,avg_path,max_path
def dots(self,hostname):
# returns number of dots
return hostname.count('.')
def split(self,string):
# returns a list split by ‘/’, ‘?’, ‘.’, ‘=’, ‘-’ and ‘_’
return re.split('/|\?|\.|=|-|_', string)
def avg(self,arr):
# returns average token length
return sum(len(token) for token in arr)/len(arr)
def max(self,arr):
# returns max token length
return max(len(token) for token in arr) | 26.418605 | 70 | 0.617958 | 1,148 | 0.989655 | 0 | 0 | 0 | 0 | 0 | 0 | 418 | 0.360345 |
d3d87b798d29e52210031306b4e2f4fee10a8cd2 | 992 | py | Python | stacker/tests/providers/aws/test_interactive.py | GoodRx/stacker | 0cf1df67b4ae5aeda5845442c84905909101c238 | [
"BSD-2-Clause"
]
| 1 | 2021-11-06T17:01:01.000Z | 2021-11-06T17:01:01.000Z | stacker/tests/providers/aws/test_interactive.py | GoodRx/stacker | 0cf1df67b4ae5aeda5845442c84905909101c238 | [
"BSD-2-Clause"
]
| null | null | null | stacker/tests/providers/aws/test_interactive.py | GoodRx/stacker | 0cf1df67b4ae5aeda5845442c84905909101c238 | [
"BSD-2-Clause"
]
| 1 | 2021-11-06T17:00:53.000Z | 2021-11-06T17:00:53.000Z | import unittest
from ....providers.aws.interactive import requires_replacement
def generate_resource_change(replacement=True):
resource_change = {
"Action": "Modify",
"Details": [],
"LogicalResourceId": "Fake",
"PhysicalResourceId": "arn:aws:fake",
"Replacement": "True" if replacement else "False",
"ResourceType": "AWS::Fake",
"Scope": ["Properties"],
}
return {
"ResourceChange": resource_change,
"Type": "Resource",
}
class TestInteractiveProvider(unittest.TestCase):
def test_requires_replacement(self):
changeset = [
generate_resource_change(),
generate_resource_change(replacement=False),
generate_resource_change(),
]
replacement = requires_replacement(changeset)
self.assertEqual(len(replacement), 2)
for resource in replacement:
self.assertEqual(resource["ResourceChange"]["Replacement"], "True")
| 30.060606 | 79 | 0.634073 | 477 | 0.480847 | 0 | 0 | 0 | 0 | 0 | 0 | 221 | 0.222782 |
d3d9152a0002e3e05bd42184c7b5ca8570672123 | 1,046 | py | Python | setup.py | digicert/digicert_express | 292fb4e7f8a39e53c384a79c50a9488c51258f97 | [
"MIT"
]
| 2 | 2017-03-03T20:37:29.000Z | 2018-06-01T22:22:15.000Z | setup.py | digicert/digicert_express | 292fb4e7f8a39e53c384a79c50a9488c51258f97 | [
"MIT"
]
| null | null | null | setup.py | digicert/digicert_express | 292fb4e7f8a39e53c384a79c50a9488c51258f97 | [
"MIT"
]
| 2 | 2018-01-26T07:11:42.000Z | 2019-03-06T23:30:39.000Z | from setuptools import setup, find_packages
def readme():
with open('README.rst') as f:
return f.read()
setup(
name='digicert-express',
version='1.1dev2',
description='Express Install for DigiCert, Inc.',
long_description=readme(),
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: MIT License',
'Topic :: Security',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
url='https://github.com/digicert/digicert_express',
author='DigiCert, Inc.',
author_email='[email protected]',
license='MIT',
zip_safe=False,
packages=find_packages(exclude=['tests.*', '*.tests.*', '*.tests', 'tests', 'scripts']),
include_package_data=True,
install_requires=[
'python-augeas',
'requests>=2.8.1',
'ndg-httpsclient',
'pyasn1',
'pyOpenSSL' # prefer OS install but we can try here, too
],
)
| 29.885714 | 92 | 0.605163 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 536 | 0.512428 |
d3d9c66d64ebac5543f8099d6066695442fe0072 | 11,392 | py | Python | pytorch/plane.py | NunoEdgarGFlowHub/autoregressive-energy-machines | eb5517a513cf4e99db674fa41170f018e212f1e2 | [
"MIT"
]
| 83 | 2019-04-12T10:23:23.000Z | 2022-03-30T12:40:43.000Z | pytorch/plane.py | sten2lu/autoregressive-energy-machines | eb5517a513cf4e99db674fa41170f018e212f1e2 | [
"MIT"
]
| null | null | null | pytorch/plane.py | sten2lu/autoregressive-energy-machines | eb5517a513cf4e99db674fa41170f018e212f1e2 | [
"MIT"
]
| 11 | 2019-04-12T11:26:00.000Z | 2020-05-12T01:14:21.000Z | import argparse
import json
import numpy as np
import os
import torch
import data_
import models
import utils
from matplotlib import cm, pyplot as plt
from tensorboardX import SummaryWriter
from torch import optim
from torch.utils import data
from tqdm import tqdm
from utils import io
parser = argparse.ArgumentParser()
# CUDA
parser.add_argument('--use_gpu', type=bool, default=True, help='Whether to use GPU.')
# data
parser.add_argument('--dataset_name', type=str, default='spirals',
help='Name of dataset to use.')
parser.add_argument('--n_data_points', default=int(1e6),
help='Number of unique data points in training set.')
parser.add_argument('--batch_size', type=int, default=256,
help='Size of batch used for training.')
parser.add_argument('--num_workers', type=int, default=0,
help='Number of workers used in data loaders.')
# MADE
parser.add_argument('--n_residual_blocks_made', default=4,
help='Number of residual blocks in MADE.')
parser.add_argument('--hidden_dim_made', default=256,
help='Dimensionality of hidden layers in MADE.')
parser.add_argument('--activation_made', default='relu',
help='Activation function for MADE.')
parser.add_argument('--use_batch_norm_made', default=False,
help='Whether to use batch norm in MADE.')
parser.add_argument('--dropout_probability_made', default=None,
help='Dropout probability for MADE.')
# energy net
parser.add_argument('--context_dim', default=64,
help='Dimensionality of context vector.')
parser.add_argument('--n_residual_blocks_energy_net', default=4,
help='Number of residual blocks in energy net.')
parser.add_argument('--hidden_dim_energy_net', default=128,
help='Dimensionality of hidden layers in energy net.')
parser.add_argument('--energy_upper_bound', default=0,
help='Max value for output of energy net.')
parser.add_argument('--activation_energy_net', default='relu',
help='Activation function for energy net.')
parser.add_argument('--use_batch_norm_energy_net', default=False,
help='Whether to use batch norm in energy net.')
parser.add_argument('--dropout_probability_energy_net', default=None,
help='Dropout probability for energy net.')
parser.add_argument('--scale_activation', default='softplus',
help='Activation to use for scales in proposal mixture components.')
parser.add_argument('--apply_context_activation', default=False,
help='Whether to apply activation to context vector.')
# proposal
parser.add_argument('--n_mixture_components', default=10,
help='Number of proposal mixture components (per dimension).')
parser.add_argument('--proposal_component', default='gaussian',
help='Type of location-scale family distribution '
'to use in proposal mixture.')
parser.add_argument('--n_proposal_samples_per_input', default=20,
help='Number of proposal samples used to estimate '
'normalizing constant during training.')
parser.add_argument('--n_proposal_samples_per_input_validation', default=100,
help='Number of proposal samples used to estimate '
'normalizing constant during validation.')
parser.add_argument('--mixture_component_min_scale', default=1e-3,
help='Minimum scale for proposal mixture components.')
# optimization
parser.add_argument('--learning_rate', default=5e-4,
help='Learning rate for Adam.')
parser.add_argument('--n_total_steps', default=int(4e5),
help='Number of total training steps.')
parser.add_argument('--alpha_warm_up_steps', default=5000,
help='Number of warm-up steps for AEM density.')
parser.add_argument('--hard_alpha_warm_up', default=True,
help='Whether to use a hard warm up for alpha')
# logging and checkpoints
parser.add_argument('--monitor_interval', default=100,
help='Interval in steps at which to report training stats.')
parser.add_argument('--visualize_interval', default=10000,
help='Interval in steps at which to report training stats.')
parser.add_argument('--save_interval', default=10000,
help='Interval in steps at which to save model.')
# reproducibility
parser.add_argument('--seed', default=1638128,
help='Random seed for PyTorch and NumPy.')
args = parser.parse_args()
torch.manual_seed(args.seed)
np.random.seed(args.seed)
if args.use_gpu and torch.cuda.is_available():
device = torch.device('cuda')
torch.set_default_tensor_type('torch.cuda.FloatTensor')
else:
device = torch.device('cpu')
# Generate data
train_dataset = data_.load_plane_dataset(args.dataset_name, args.n_data_points)
train_loader = data_.InfiniteLoader(
dataset=train_dataset,
batch_size=args.batch_size,
shuffle=True,
drop_last=True,
num_epochs=None
)
# Generate test grid data
n_points_per_axis = 512
bounds = np.array([
[-4, 4],
[-4, 4]
])
grid_dataset = data_.TestGridDataset(n_points_per_axis=n_points_per_axis, bounds=bounds)
grid_loader = data.DataLoader(
dataset=grid_dataset,
batch_size=1000,
drop_last=False
)
# various dimensions for autoregressive and energy nets
dim = 2 # D
output_dim_multiplier = args.context_dim + 3 * args.n_mixture_components # K + 3M
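# Per input dimension the MADE emits a K-dimensional context vector for the
# energy net plus three parameters (mixture weight, location, scale) for each
# of the M proposal mixture components, matching context_dim + 3 * n_mixture_components.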
# Create MADE
made = models.ResidualMADE(
input_dim=dim,
n_residual_blocks=args.n_residual_blocks_made,
hidden_dim=args.hidden_dim_made,
output_dim_multiplier=output_dim_multiplier,
conditional=False,
activation=utils.parse_activation(args.activation_made),
use_batch_norm=args.use_batch_norm_made,
dropout_probability=args.dropout_probability_made
).to(device)
# create energy net
energy_net = models.ResidualEnergyNet(
input_dim=(args.context_dim + 1),
n_residual_blocks=args.n_residual_blocks_energy_net,
hidden_dim=args.hidden_dim_energy_net,
energy_upper_bound=args.energy_upper_bound,
activation=utils.parse_activation(args.activation_energy_net),
use_batch_norm=args.use_batch_norm_energy_net,
dropout_probability=args.dropout_probability_energy_net
).to(device)
# create AEM
aem = models.AEM(
autoregressive_net=made,
energy_net=energy_net,
context_dim=args.context_dim,
n_proposal_mixture_components=args.n_mixture_components,
proposal_component_family=args.proposal_component,
n_proposal_samples_per_input=args.n_proposal_samples_per_input,
mixture_component_min_scale=args.mixture_component_min_scale,
apply_context_activation=args.apply_context_activation
).to(device)
# make optimizer
parameters = list(made.parameters()) + list(energy_net.parameters())
optimizer = optim.Adam(parameters, lr=args.learning_rate)
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, args.n_total_steps)
# create summary writer and write to log directory
timestamp = io.get_timestamp()
log_dir = os.path.join(io.get_log_root(), args.dataset_name, timestamp)
writer = SummaryWriter(log_dir=log_dir)
filename = os.path.join(log_dir, 'config.json')
with open(filename, 'w') as file:
json.dump(vars(args), file)
# Training loop
tbar = tqdm(range(args.n_total_steps))
alpha = 0
for step in tbar:
aem.train()
scheduler.step(step)
optimizer.zero_grad()
# training step
batch = next(train_loader).to(device)
log_density, log_proposal_density, _, log_normalizer = aem(batch)
mean_log_density = torch.mean(log_density)
mean_log_proposal_density = torch.mean(log_proposal_density)
mean_log_normalizer = torch.mean(log_normalizer)
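    # The proposal log-likelihood is always maximized; the energy-model (AEM)
    # log-likelihood is gated by alpha, which is switched on after the warm-up
    # (hard) or ramped in linearly (soft).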
if args.alpha_warm_up_steps is not None:
if args.hard_alpha_warm_up:
alpha = float(step > args.alpha_warm_up_steps)
else:
alpha = torch.Tensor([min(step / args.alpha_warm_up_steps, 1)])
loss = - (alpha * mean_log_density + mean_log_proposal_density)
else:
loss = - (mean_log_density + mean_log_proposal_density)
loss.backward()
optimizer.step()
if (step + 1) % args.monitor_interval == 0:
s = 'Loss: {:.4f}, log p: {:.4f}, log q: {:.4f}'.format(
loss.item(),
mean_log_density.item(),
mean_log_proposal_density.item()
)
tbar.set_description(s)
# write summaries
summaries = {
'loss': loss.detach(),
'log-prob-aem': mean_log_density.detach(),
'log-prob-proposal': mean_log_proposal_density.detach(),
'log-normalizer': mean_log_normalizer.detach(),
'learning-rate': torch.Tensor(scheduler.get_lr()),
}
for summary, value in summaries.items():
writer.add_scalar(tag=summary, scalar_value=value, global_step=step)
if (step + 1) % args.visualize_interval == 0:
# Plotting
aem.eval()
aem.set_n_proposal_samples_per_input_validation(
args.n_proposal_samples_per_input_validation)
log_density_np = []
log_proposal_density_np = []
for batch in grid_loader:
batch = batch.to(device)
log_density, log_proposal_density, unnormalized_log_density, log_normalizer = aem(
batch)
log_density_np = np.concatenate((
log_density_np, utils.tensor2numpy(log_density)
))
log_proposal_density_np = np.concatenate((
log_proposal_density_np, utils.tensor2numpy(log_proposal_density)
))
fig, axs = plt.subplots(1, 3, figsize=(7.5, 2.5))
axs[0].hist2d(train_dataset.data[:, 0], train_dataset.data[:, 1],
range=bounds, bins=512, cmap=cm.viridis, rasterized=False)
axs[0].set_xticks([])
axs[0].set_yticks([])
axs[1].pcolormesh(grid_dataset.X, grid_dataset.Y,
np.exp(log_proposal_density_np).reshape(grid_dataset.X.shape))
axs[1].set_xlim(bounds[0])
axs[1].set_ylim(bounds[1])
axs[1].set_xticks([])
axs[1].set_yticks([])
axs[2].pcolormesh(grid_dataset.X, grid_dataset.Y,
np.exp(log_density_np).reshape(grid_dataset.X.shape))
axs[2].set_xlim(bounds[0])
axs[2].set_ylim(bounds[1])
axs[2].set_xticks([])
axs[2].set_yticks([])
plt.tight_layout()
path = os.path.join(io.get_output_root(), 'pytorch', '{}.png'.format(args.dataset_name))
if not os.path.exists(path):
os.makedirs(io.get_output_root())
plt.savefig(path, dpi=300)
writer.add_figure(tag='test-grid', figure=fig, global_step=step)
plt.close()
if (step + 1) % args.save_interval == 0:
path = os.path.join(io.get_checkpoint_root(), 'pytorch', '{}.t'.format(args.dataset_name))
if not os.path.exists(path):
os.makedirs(io.get_checkpoint_root())
torch.save(aem.state_dict(), path)
path = os.path.join(io.get_checkpoint_root(),
'pytorch', '{}-{}.t'.format(args.dataset_name, timestamp))
torch.save(aem.state_dict(), path)
| 39.147766 | 98 | 0.675298 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,762 | 0.242451 |
d3d9fd962e6f2a91b7a5a73a99c81d54531258d8 | 2,266 | py | Python | music/music.py | spacerunaway/world_recoder | fcafe0d910511cfd043735cf451564febb558e40 | [
"MIT"
]
| null | null | null | music/music.py | spacerunaway/world_recoder | fcafe0d910511cfd043735cf451564febb558e40 | [
"MIT"
]
| null | null | null | music/music.py | spacerunaway/world_recoder | fcafe0d910511cfd043735cf451564febb558e40 | [
"MIT"
]
| null | null | null | import sys
sys.path.append('../utils')
from utils import *
from doubly_linkedlist import *
def link_chords(chordprogression):
"""
    A chord progression is a sequence of chords.
    Each item in a valid chord progression must be one of the following:
    1: a chord name (str) present in the CHORD dict
    2: a key (type Key)
    and a piece of music has to carry a start signal and an end signal.
>>> c_p1 = [START,C_Major,'C','Am','F','G','C','Am','F','G7',END]
>>> c_p2 = [START,C_Major,'C','Am','F','G','C','Am','F','G',G_Major,'Em','C','D','D7','G',END]
>>> l1 = link_chords(c_p1)
>>> l1
start - C - Am - F - G - C - Am - F - G7 - end
>>> l2 = link_chords(c_p2)
>>> l2
start - C - Am - F - G - C - Am - F - G - Em - C - D - D7 - G - end
>>> l2[8].key is C_Major
True
>>> l2[8].chord == CHORD['G']
True
>>> l2[9].key is G_Major
True
>>> l2[9].chord == CHORD['Em']
True
>>> c_p3 = [C_Major,C_Major,START,'C',END,START,START,END,'F',G_Major]
>>> l3 = link_chords(c_p3)
>>> l3
start - C - end - start - start - end - F
"""
key = None
res = LinkedList()
for item in chordprogression:
if type(item) is Major_Scale or type(item) is minor_Scale:
key = item
else:
if item not in CHORD:
chord = item
else:
chord = CHORD[item]
node = LinkedChord(chord,key,item)
res.append(node)
return res
def parse_chordprogression(chordprogression):
link_chords(chordprogression)
cpd(chordprogression)
class Music(object):
melody = []
chordprogression = []
rhythm = []
def __init__(self,title,composer,key_signature,metre,arranger=''):
self.title = title
self.composer = composer
self.arranger = arranger
        self.key = key_signature
self.metre = metre
def add_subtitle(self,subtitle):
self.subtitle = subtitle
def add_chordprogression(self,chordprogression):
self.chordprogression = chordprogression
def add_tags(self,tags):
self.tags = tags
class Info(object):
def __init__(self,key,beat,tempo=90,rhythmtype=''):
self.key = key
self.beat = beat
self.tempo = tempo
self.rhythmtype = rhythmtype
| 29.815789 | 98 | 0.573698 | 706 | 0.311562 | 0 | 0 | 0 | 0 | 0 | 0 | 948 | 0.418358 |
d3da2efce64cb5f88a134e97d2a4092ee8ea80bb | 5,777 | py | Python | azure-mgmt/tests/test_mgmt_documentdb.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
]
| 4 | 2016-06-17T23:25:29.000Z | 2022-03-30T22:37:45.000Z | azure-mgmt/tests/test_mgmt_documentdb.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
]
| 54 | 2016-03-25T17:25:01.000Z | 2018-10-22T17:27:54.000Z | azure-mgmt/tests/test_mgmt_documentdb.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
]
| 3 | 2016-05-03T20:49:46.000Z | 2017-10-05T21:05:27.000Z | # coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import unittest
import azure.mgmt.documentdb
from msrestazure.azure_exceptions import CloudError
from testutils.common_recordingtestcase import record
from tests.mgmt_testcase import HttpStatusCode, AzureMgmtTestCase
import logging
#logging.basicConfig(level=logging.DEBUG)
class MgmtDocDBTest(AzureMgmtTestCase):
def setUp(self):
super(MgmtDocDBTest, self).setUp()
self.client = self.create_mgmt_client(
azure.mgmt.documentdb.DocumentDB
)
# I don't record resource group creation, since it's another package
if not self.is_playback():
self.create_resource_group()
@record
def test_accounts_create(self):
account_name = self.get_resource_name('pydocdbtst')
self.assertFalse(self.client.database_accounts.check_name_exists(account_name))
async_docdb_create = self.client.database_accounts.create_or_update(
self.group_name,
account_name,
{
'location': self.region,
'locations': [{
'location_name': self.region
}]
}
)
account = async_docdb_create.result()
self.assertIsNotNone(account)
# Rest API issue
# self.assertEqual(account.name, account_name)
def test_accounts_features(self):
account_name = self.get_resource_name('pydocdbtest')
if not self.is_playback():
async_docdb_create = self.client.database_accounts.create_or_update(
self.group_name,
account_name,
{
'location': self.region,
'locations': [{
'location_name': self.region
}]
}
)
async_docdb_create.wait()
with self.recording():
account = self.client.database_accounts.get(
self.group_name,
account_name
)
self.assertEqual(account.name, account_name)
my_accounts = list(self.client.database_accounts.list_by_resource_group(self.group_name))
self.assertEqual(len(my_accounts), 1)
self.assertEqual(my_accounts[0].name, account_name)
my_accounts = list(self.client.database_accounts.list())
self.assertTrue(len(my_accounts) >= 1)
self.assertTrue(any(db.name == account_name for db in my_accounts))
# I guess we can make this test with no error, need to check with DocDB team
# This is an interesting test anyway, this implies that the serialization works
# and error message is available. Since this method does not return an object
# (i.e. no deserialization to test), this is a complete test.
# We are NOT here to test the RestAPI, but the Swagger file and Python code.
with self.assertRaises(CloudError) as cm:
async_change = self.client.database_accounts.failover_priority_change(
self.group_name,
account_name,
[{
'location_name': self.region,
'failover_priority': 0
}]
)
async_change.wait()
self.assertIn('Failover priorities must be unique', cm.exception.message)
my_keys = self.client.database_accounts.list_keys(
self.group_name,
account_name
)
self.assertIsNotNone(my_keys.primary_master_key)
self.assertIsNotNone(my_keys.secondary_master_key)
self.assertIsNotNone(my_keys.primary_readonly_master_key)
self.assertIsNotNone(my_keys.secondary_readonly_master_key)
my_keys = self.client.database_accounts.list_read_only_keys(
self.group_name,
account_name
)
self.assertIsNotNone(my_keys.primary_readonly_master_key)
self.assertIsNotNone(my_keys.secondary_readonly_master_key)
async_regenerate = self.client.database_accounts.regenerate_key(
self.group_name,
account_name,
"primary"
)
async_regenerate.wait()
def test_accounts_delete(self):
account_name = self.get_resource_name('pydocumentdbtst')
if not self.is_playback():
async_docdb_create = self.client.database_accounts.create_or_update(
self.group_name,
account_name,
{
'location': self.region,
'locations': [{
'location_name': self.region
}]
}
)
async_docdb_create.wait()
with self.recording():
# Current implementation of msrestazure does not support 404 as a end of LRO delete
# https://github.com/Azure/msrestazure-for-python/issues/7
async_delete = self.client.database_accounts.delete(self.group_name, account_name)
try:
async_delete.wait()
except CloudError as err:
if err.response.status_code != 404:
raise
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| 37.512987 | 101 | 0.570885 | 5,043 | 0.872642 | 0 | 0 | 668 | 0.115591 | 0 | 0 | 1,317 | 0.227894 |
d3da88558c778364e49a959d5f0d8f942db1cc43 | 3,758 | py | Python | config.py | somritabanerjee/speedplusbaseline | 5913c611d8c182ad8070abcf5f1baffc554dfd90 | [
"MIT"
]
| null | null | null | config.py | somritabanerjee/speedplusbaseline | 5913c611d8c182ad8070abcf5f1baffc554dfd90 | [
"MIT"
]
| null | null | null | config.py | somritabanerjee/speedplusbaseline | 5913c611d8c182ad8070abcf5f1baffc554dfd90 | [
"MIT"
]
| null | null | null | import argparse
PROJROOTDIR = {'mac': '/Users/taehapark/SLAB/speedplusbaseline',
'linux': '/home/somrita/Documents/Satellite_Pose_Estimation/speedplusbaseline'}
DATAROOTDIR = {'mac': '/Users/taehapark/SLAB/speedplus/data/datasets',
'linux': '/home/somrita/Documents/Satellite_Pose_Estimation/dataset'}
parser = argparse.ArgumentParser('Configurations for SPEED+ Baseline Study')
# ------------------------------------------------------------------------------------------
# Basic directories and names
parser.add_argument('--seed', type=int, default=2021)
parser.add_argument('--projroot', type=str, default=PROJROOTDIR['linux'])
parser.add_argument('--dataroot', type=str, default=DATAROOTDIR['linux'])
parser.add_argument('--dataname', type=str, default='speedplus')
parser.add_argument('--savedir', type=str, default='checkpoints/synthetic/krn')
parser.add_argument('--resultfn', type=str, default='')
parser.add_argument('--logdir', type=str, default='log/synthetic/krn')
parser.add_argument('--pretrained', type=str, default='')
# ------------------------------------------------------------------------------------------
# Model config.
parser.add_argument('--model_name', type=str, default='krn')
parser.add_argument('--input_shape', nargs='+', type=int, default=(224, 224))
parser.add_argument('--num_keypoints', type=int, default=11) # KRN-specific
parser.add_argument('--num_classes', type=int, default=5000) # SPN-specific
parser.add_argument('--num_neighbors', type=int, default=5) # SPN-specific
parser.add_argument('--keypts_3d_model', type=str, default='src/utils/tangoPoints.mat')
parser.add_argument('--attitude_class', type=str, default='src/utils/attitudeClasses.mat')
# ------------------------------------------------------------------------------------------
# Training config.
parser.add_argument('--start_over', dest='auto_resume', action='store_false', default=True)
parser.add_argument('--randomize_texture', dest='randomize_texture', action='store_true', default=False)
parser.add_argument('--perform_dann', dest='dann', action='store_true', default=False)
parser.add_argument('--texture_alpha', type=float, default=0.5)
parser.add_argument('--texture_ratio', type=float, default=0.5)
parser.add_argument('--use_fp16', dest='fp16', action='store_true', default=False)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--max_epochs', type=int, default=75)
parser.add_argument('--num_workers', type=int, default=8)
parser.add_argument('--test_epoch', type=int, default=-1)
parser.add_argument('--optimizer', type=str, default='rmsprop')
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight_decay', type=float, default=5e-5)
parser.add_argument('--lr_decay_alpha', type=float, default=0.96)
parser.add_argument('--lr_decay_step', type=int, default=1)
# ------------------------------------------------------------------------------------------
# Dataset-related inputs
parser.add_argument('--train_domain', type=str, default='synthetic')
parser.add_argument('--test_domain', type=str, default='lightbox')
parser.add_argument('--train_csv', type=str, default='train.csv')
parser.add_argument('--test_csv', type=str, default='lightbox.csv')
# ------------------------------------------------------------------------------------------
# Other miscellaneous settings
parser.add_argument('--gpu_id', type=int, default=0)
parser.add_argument('--no_cuda', dest='use_cuda', action='store_false', default=True)
# End
cfg = parser.parse_args() | 58.71875 | 104 | 0.631453 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,736 | 0.461948 |
d3dacb32ea41d2fb0546ec04640a3b17315faa08 | 118,963 | py | Python | h1/api/insight_project_journal_api.py | hyperonecom/h1-client-python | 4ce355852ba3120ec1b8f509ab5894a5c08da730 | [
"MIT"
]
| null | null | null | h1/api/insight_project_journal_api.py | hyperonecom/h1-client-python | 4ce355852ba3120ec1b8f509ab5894a5c08da730 | [
"MIT"
]
| null | null | null | h1/api/insight_project_journal_api.py | hyperonecom/h1-client-python | 4ce355852ba3120ec1b8f509ab5894a5c08da730 | [
"MIT"
]
| null | null | null | """
HyperOne
HyperOne API # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from h1.api_client import ApiClient, Endpoint as _Endpoint
from h1.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from h1.model.event import Event
from h1.model.inline_response400 import InlineResponse400
from h1.model.insight_project_journal_create import InsightProjectJournalCreate
from h1.model.insight_project_journal_credential_patch import InsightProjectJournalCredentialPatch
from h1.model.insight_project_journal_transfer import InsightProjectJournalTransfer
from h1.model.insight_project_journal_update import InsightProjectJournalUpdate
from h1.model.journal import Journal
from h1.model.journal_credential import JournalCredential
from h1.model.resource_service import ResourceService
from h1.model.tag import Tag
from h1.model.tag_array import TagArray
class InsightProjectJournalApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __insight_project_journal_create(
self,
project_id,
location_id,
insight_project_journal_create,
**kwargs
):
"""Create insight/journal # noqa: E501
Create journal # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_create(project_id, location_id, insight_project_journal_create, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
insight_project_journal_create (InsightProjectJournalCreate):
Keyword Args:
x_idempotency_key (str): Idempotency key. [optional]
x_dry_run (str): Dry run. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Journal
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['insight_project_journal_create'] = \
insight_project_journal_create
return self.call_with_http_info(**kwargs)
self.insight_project_journal_create = _Endpoint(
settings={
'response_type': (Journal,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal',
'operation_id': 'insight_project_journal_create',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'insight_project_journal_create',
'x_idempotency_key',
'x_dry_run',
],
'required': [
'project_id',
'location_id',
'insight_project_journal_create',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'insight_project_journal_create':
(InsightProjectJournalCreate,),
'x_idempotency_key':
(str,),
'x_dry_run':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'x_idempotency_key': 'x-idempotency-key',
'x_dry_run': 'x-dry-run',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'insight_project_journal_create': 'body',
'x_idempotency_key': 'header',
'x_dry_run': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__insight_project_journal_create
)
def __insight_project_journal_credential_create(
self,
project_id,
location_id,
journal_id,
journal_credential,
**kwargs
):
"""Create insight/journal.credential # noqa: E501
Create insight/journal.credential # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_credential_create(project_id, location_id, journal_id, journal_credential, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
journal_credential (JournalCredential):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
JournalCredential
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['journal_credential'] = \
journal_credential
return self.call_with_http_info(**kwargs)
self.insight_project_journal_credential_create = _Endpoint(
settings={
'response_type': (JournalCredential,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/credential',
'operation_id': 'insight_project_journal_credential_create',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'journal_credential',
],
'required': [
'project_id',
'location_id',
'journal_id',
'journal_credential',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'journal_credential':
(JournalCredential,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'journal_credential': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__insight_project_journal_credential_create
)
def __insight_project_journal_credential_delete(
self,
project_id,
location_id,
journal_id,
credential_id,
**kwargs
):
"""Delete insight/journal.credential # noqa: E501
Delete insight/journal.credential # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_credential_delete(project_id, location_id, journal_id, credential_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
credential_id (str): credentialId
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Journal
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['credential_id'] = \
credential_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_credential_delete = _Endpoint(
settings={
'response_type': (Journal,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/credential/{credentialId}',
'operation_id': 'insight_project_journal_credential_delete',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'credential_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
'credential_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'credential_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'credential_id': 'credentialId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'credential_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_credential_delete
)
def __insight_project_journal_credential_get(
self,
project_id,
location_id,
journal_id,
credential_id,
**kwargs
):
"""Get insight/journal.credential # noqa: E501
Get insight/journal.credential # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_credential_get(project_id, location_id, journal_id, credential_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
credential_id (str): credentialId
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
JournalCredential
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['credential_id'] = \
credential_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_credential_get = _Endpoint(
settings={
'response_type': (JournalCredential,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/credential/{credentialId}',
'operation_id': 'insight_project_journal_credential_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'credential_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
'credential_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'credential_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'credential_id': 'credentialId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'credential_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_credential_get
)
def __insight_project_journal_credential_list(
self,
project_id,
location_id,
journal_id,
**kwargs
):
"""List insight/journal.credential # noqa: E501
List insight/journal.credential # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_credential_list(project_id, location_id, journal_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[JournalCredential]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_credential_list = _Endpoint(
settings={
'response_type': ([JournalCredential],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/credential',
'operation_id': 'insight_project_journal_credential_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_credential_list
)
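# --- Usage sketch (editor's illustration, not part of the generated client) ---
# Shows how the wrapper defined above is typically called. It assumes `api` is an
# already-constructed instance of this API class bound to an authenticated
# ApiClient; all ids below are hypothetical placeholders.
#
#   credentials = api.insight_project_journal_credential_list(
#       'my-project-id',    # project_id (path parameter `projectId`)
#       'my-location-id',   # location_id (path parameter `locationId`)
#       'my-journal-id',    # journal_id (path parameter `journalId`)
#   )
#   # -> list of JournalCredential models (see `response_type` above)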
def __insight_project_journal_credential_patch(
self,
project_id,
location_id,
journal_id,
credential_id,
insight_project_journal_credential_patch,
**kwargs
):
"""Update insight/journal.credential # noqa: E501
Update insight/journal.credential # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_credential_patch(project_id, location_id, journal_id, credential_id, insight_project_journal_credential_patch, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
credential_id (str): credentialId
insight_project_journal_credential_patch (InsightProjectJournalCredentialPatch):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
JournalCredential
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['credential_id'] = \
credential_id
kwargs['insight_project_journal_credential_patch'] = \
insight_project_journal_credential_patch
return self.call_with_http_info(**kwargs)
self.insight_project_journal_credential_patch = _Endpoint(
settings={
'response_type': (JournalCredential,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/credential/{credentialId}',
'operation_id': 'insight_project_journal_credential_patch',
'http_method': 'PATCH',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'credential_id',
'insight_project_journal_credential_patch',
],
'required': [
'project_id',
'location_id',
'journal_id',
'credential_id',
'insight_project_journal_credential_patch',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'credential_id':
(str,),
'insight_project_journal_credential_patch':
(InsightProjectJournalCredentialPatch,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'credential_id': 'credentialId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'credential_id': 'path',
'insight_project_journal_credential_patch': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__insight_project_journal_credential_patch
)
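# Usage sketch (editor's illustration, not generated code; assumes an `api`
# instance of this API class): updating a journal credential. `patch_body`
# stands for an InsightProjectJournalCredentialPatch model instance and is sent
# as the JSON request body (see `location_map` above).
#
#   patched = api.insight_project_journal_credential_patch(
#       'my-project-id', 'my-location-id', 'my-journal-id', 'my-credential-id',
#       patch_body,
#   )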
def __insight_project_journal_delete(
self,
project_id,
location_id,
journal_id,
**kwargs
):
"""Delete insight/journal # noqa: E501
Delete journal # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_delete(project_id, location_id, journal_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_delete = _Endpoint(
settings={
'response_type': None,
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}',
'operation_id': 'insight_project_journal_delete',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_delete
)
def __insight_project_journal_event_get(
self,
project_id,
location_id,
journal_id,
event_id,
**kwargs
):
"""Get insight/journal.event # noqa: E501
Get insight/journal.event # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_event_get(project_id, location_id, journal_id, event_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
event_id (str): eventId
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Event
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['event_id'] = \
event_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_event_get = _Endpoint(
settings={
'response_type': (Event,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/event/{eventId}',
'operation_id': 'insight_project_journal_event_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'event_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
'event_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'event_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'event_id': 'eventId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'event_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_event_get
)
def __insight_project_journal_event_list(
self,
project_id,
location_id,
journal_id,
**kwargs
):
"""List insight/journal.event # noqa: E501
List insight/journal.event # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_event_list(project_id, location_id, journal_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
Keyword Args:
limit (float): $limit. [optional] if omitted the server will use the default value of 100
skip (float): $skip. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[Event]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_event_list = _Endpoint(
settings={
'response_type': ([Event],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/event',
'operation_id': 'insight_project_journal_event_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'limit',
'skip',
],
'required': [
'project_id',
'location_id',
'journal_id',
],
'nullable': [
],
'enum': [
],
'validation': [
'limit',
]
},
root_map={
'validations': {
('limit',): {
'inclusive_maximum': 1000,
'inclusive_minimum': 1,
},
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'limit':
(float,),
'skip':
(float,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'limit': '$limit',
'skip': '$skip',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'limit': 'query',
'skip': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_event_list
)
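# Usage sketch (editor's illustration, not generated code; assumes an `api`
# instance of this API class): paging through journal events. `limit` maps to
# the `$limit` query parameter and is validated against the 1..1000 range
# declared in `root_map`; `skip` maps to `$skip`.
#
#   events = api.insight_project_journal_event_list(
#       'my-project-id', 'my-location-id', 'my-journal-id',
#       limit=100,
#       skip=0,
#   )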
def __insight_project_journal_get(
self,
project_id,
location_id,
journal_id,
**kwargs
):
"""Get insight/journal # noqa: E501
Returns a single journal # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_get(project_id, location_id, journal_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Journal
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_get = _Endpoint(
settings={
'response_type': (Journal,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}',
'operation_id': 'insight_project_journal_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_get
)
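# Usage sketch (editor's illustration, not generated code; assumes an `api`
# instance of this API class): fetching a single journal. Passing
# _return_http_data_only=False follows the usual generated-client convention of
# returning the HTTP status and headers alongside the data (an assumption here,
# inferred from the keyword documented above).
#
#   journal = api.insight_project_journal_get(
#       'my-project-id', 'my-location-id', 'my-journal-id')
#   data, status, headers = api.insight_project_journal_get(
#       'my-project-id', 'my-location-id', 'my-journal-id',
#       _return_http_data_only=False)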
def __insight_project_journal_list(
self,
project_id,
location_id,
**kwargs
):
"""List insight/journal # noqa: E501
List journal # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_list(project_id, location_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
Keyword Args:
name (str): Filter by name. [optional]
tag_value (str): Filter by tag.value. [optional]
tag_key (str): Filter by tag.key. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[Journal]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_list = _Endpoint(
settings={
'response_type': ([Journal],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal',
'operation_id': 'insight_project_journal_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'name',
'tag_value',
'tag_key',
],
'required': [
'project_id',
'location_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'name':
(str,),
'tag_value':
(str,),
'tag_key':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'name': 'name',
'tag_value': 'tag.value',
'tag_key': 'tag.key',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'name': 'query',
'tag_value': 'query',
'tag_key': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_list
)
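# Usage sketch (editor's illustration, not generated code; assumes an `api`
# instance of this API class): listing journals with the optional filters
# documented above. `tag_key`/`tag_value` are sent as the `tag.key`/`tag.value`
# query parameters (see `attribute_map`).
#
#   journals = api.insight_project_journal_list(
#       'my-project-id', 'my-location-id',
#       name='my-journal-name',
#       tag_key='env',
#       tag_value='production',
#   )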
def __insight_project_journal_log_get(
self,
project_id,
location_id,
journal_id,
**kwargs
):
"""Get insight/journal.log # noqa: E501
websocket is also supported # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_log_get(project_id, location_id, journal_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
Keyword Args:
since (datetime): since. [optional]
until (datetime): until. [optional]
follow (bool): follow. [optional] if omitted the server will use the default value of False
tail (float): tail. [optional]
tag (TagArray): tag. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_log_get = _Endpoint(
settings={
'response_type': None,
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/log',
'operation_id': 'insight_project_journal_log_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'since',
'until',
'follow',
'tail',
'tag',
],
'required': [
'project_id',
'location_id',
'journal_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'since':
(datetime,),
'until':
(datetime,),
'follow':
(bool,),
'tail':
(float,),
'tag':
(TagArray,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'since': 'since',
'until': 'until',
'follow': 'follow',
'tail': 'tail',
'tag': 'tag',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'since': 'query',
'until': 'query',
'follow': 'query',
'tail': 'query',
'tag': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_log_get
)
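# Usage sketch (editor's illustration, not generated code; assumes an `api`
# instance of this API class): reading the journal log. `since`/`until` take
# datetimes, `tail` limits the number of entries, and `follow=True` keeps the
# request open for new entries (the docstring notes websocket access is also
# supported).
#
#   api.insight_project_journal_log_get(
#       'my-project-id', 'my-location-id', 'my-journal-id',
#       tail=100,
#       follow=True,
#   )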
def __insight_project_journal_service_get(
self,
project_id,
location_id,
journal_id,
service_id,
**kwargs
):
"""Get insight/journal.service # noqa: E501
Get insight/journal.service # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_service_get(project_id, location_id, journal_id, service_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
service_id (str): serviceId
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
ResourceService
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['service_id'] = \
service_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_service_get = _Endpoint(
settings={
'response_type': (ResourceService,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/service/{serviceId}',
'operation_id': 'insight_project_journal_service_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'service_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
'service_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'service_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'service_id': 'serviceId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'service_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_service_get
)
def __insight_project_journal_service_list(
self,
project_id,
location_id,
journal_id,
**kwargs
):
"""List insight/journal.service # noqa: E501
List insight/journal.service # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_service_list(project_id, location_id, journal_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[ResourceService]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_service_list = _Endpoint(
settings={
'response_type': ([ResourceService],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/service',
'operation_id': 'insight_project_journal_service_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_service_list
)
def __insight_project_journal_tag_create(
self,
project_id,
location_id,
journal_id,
tag,
**kwargs
):
"""Create insight/journal.tag # noqa: E501
Create insight/journal.tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_tag_create(project_id, location_id, journal_id, tag, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
tag (Tag):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Tag
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['tag'] = \
tag
return self.call_with_http_info(**kwargs)
self.insight_project_journal_tag_create = _Endpoint(
settings={
'response_type': (Tag,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/tag',
'operation_id': 'insight_project_journal_tag_create',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'tag',
],
'required': [
'project_id',
'location_id',
'journal_id',
'tag',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'tag':
(Tag,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'tag': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__insight_project_journal_tag_create
)
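# Usage sketch (editor's illustration, not generated code; assumes an `api`
# instance of this API class): attaching a tag. `my_tag` stands for an instance
# of the generated Tag model; it is sent as the JSON body of the POST request.
#
#   created = api.insight_project_journal_tag_create(
#       'my-project-id', 'my-location-id', 'my-journal-id',
#       my_tag,
#   )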
def __insight_project_journal_tag_delete(
self,
project_id,
location_id,
journal_id,
tag_id,
**kwargs
):
"""Delete insight/journal.tag # noqa: E501
Delete insight/journal.tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_tag_delete(project_id, location_id, journal_id, tag_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
tag_id (str): tagId
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['tag_id'] = \
tag_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_tag_delete = _Endpoint(
settings={
'response_type': None,
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/tag/{tagId}',
'operation_id': 'insight_project_journal_tag_delete',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'tag_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
'tag_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'tag_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'tag_id': 'tagId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'tag_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_tag_delete
)
def __insight_project_journal_tag_get(
self,
project_id,
location_id,
journal_id,
tag_id,
**kwargs
):
"""Get insight/journal.tag # noqa: E501
Get insight/journal.tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_tag_get(project_id, location_id, journal_id, tag_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
tag_id (str): tagId
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Tag
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['tag_id'] = \
tag_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_tag_get = _Endpoint(
settings={
'response_type': (Tag,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/tag/{tagId}',
'operation_id': 'insight_project_journal_tag_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'tag_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
'tag_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'tag_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'tag_id': 'tagId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'tag_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_tag_get
)
def __insight_project_journal_tag_list(
self,
project_id,
location_id,
journal_id,
**kwargs
):
"""List insight/journal.tag # noqa: E501
List insight/journal.tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_tag_list(project_id, location_id, journal_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[Tag]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_tag_list = _Endpoint(
settings={
'response_type': ([Tag],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/tag',
'operation_id': 'insight_project_journal_tag_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_tag_list
)
def __insight_project_journal_tag_put(
self,
project_id,
location_id,
journal_id,
tag_array,
**kwargs
):
"""Replace insight/journal.tag # noqa: E501
Replace insight/journal.tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_tag_put(project_id, location_id, journal_id, tag_array, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
tag_array (TagArray):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[Tag]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['tag_array'] = \
tag_array
return self.call_with_http_info(**kwargs)
self.insight_project_journal_tag_put = _Endpoint(
settings={
'response_type': ([Tag],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/tag',
'operation_id': 'insight_project_journal_tag_put',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'tag_array',
],
'required': [
'project_id',
'location_id',
'journal_id',
'tag_array',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'tag_array':
(TagArray,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'tag_array': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__insight_project_journal_tag_put
)
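# Usage sketch (editor's illustration, not generated code; assumes an `api`
# instance of this API class): replacing the whole tag collection in one PUT
# request. `my_tags` stands for a TagArray model; the call returns the resulting
# list of Tag models.
#
#   tags = api.insight_project_journal_tag_put(
#       'my-project-id', 'my-location-id', 'my-journal-id',
#       my_tags,
#   )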
def __insight_project_journal_transfer(
self,
project_id,
location_id,
journal_id,
insight_project_journal_transfer,
**kwargs
):
"""Transfer insight/journal # noqa: E501
action transfer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_transfer(project_id, location_id, journal_id, insight_project_journal_transfer, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
insight_project_journal_transfer (InsightProjectJournalTransfer):
Keyword Args:
x_idempotency_key (str): Idempotency key. [optional]
x_dry_run (str): Dry run. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Journal
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['insight_project_journal_transfer'] = \
insight_project_journal_transfer
return self.call_with_http_info(**kwargs)
self.insight_project_journal_transfer = _Endpoint(
settings={
'response_type': (Journal,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/actions/transfer',
'operation_id': 'insight_project_journal_transfer',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'insight_project_journal_transfer',
'x_idempotency_key',
'x_dry_run',
],
'required': [
'project_id',
'location_id',
'journal_id',
'insight_project_journal_transfer',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'insight_project_journal_transfer':
(InsightProjectJournalTransfer,),
'x_idempotency_key':
(str,),
'x_dry_run':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'x_idempotency_key': 'x-idempotency-key',
'x_dry_run': 'x-dry-run',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'insight_project_journal_transfer': 'body',
'x_idempotency_key': 'header',
'x_dry_run': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__insight_project_journal_transfer
)
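# Usage sketch (editor's illustration, not generated code; assumes an `api`
# instance of this API class): invoking the transfer action. `transfer_body`
# stands for an InsightProjectJournalTransfer model; the optional keyword
# arguments are sent as the `x-idempotency-key` and `x-dry-run` request headers
# (see `location_map`).
#
#   journal = api.insight_project_journal_transfer(
#       'my-project-id', 'my-location-id', 'my-journal-id',
#       transfer_body,
#       x_idempotency_key='my-unique-key',
#       x_dry_run='true',
#   )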
def __insight_project_journal_update(
self,
project_id,
location_id,
journal_id,
insight_project_journal_update,
**kwargs
):
"""Update insight/journal # noqa: E501
Returns modified journal # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_update(project_id, location_id, journal_id, insight_project_journal_update, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
insight_project_journal_update (InsightProjectJournalUpdate):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Journal
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['insight_project_journal_update'] = \
insight_project_journal_update
return self.call_with_http_info(**kwargs)
self.insight_project_journal_update = _Endpoint(
settings={
'response_type': (Journal,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}',
'operation_id': 'insight_project_journal_update',
'http_method': 'PATCH',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'insight_project_journal_update',
],
'required': [
'project_id',
'location_id',
'journal_id',
'insight_project_journal_update',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'insight_project_journal_update':
(InsightProjectJournalUpdate,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'insight_project_journal_update': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__insight_project_journal_update
)
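# Usage sketch (editor's illustration, not generated code; assumes an `api`
# instance of this API class): updating a journal, here asynchronously. With
# async_req=True the call returns a thread-like object whose .get() yields the
# Journal result, as described in the docstring above.
#
#   thread = api.insight_project_journal_update(
#       'my-project-id', 'my-location-id', 'my-journal-id',
#       update_body,      # an InsightProjectJournalUpdate model
#       async_req=True,
#   )
#   journal = thread.get()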
| 37.362751 | 179 | 0.451342 | 117,868 | 0.990795 | 0 | 0 | 0 | 0 | 0 | 0 | 64,388 | 0.541244 |
d3db48db20a20bc47e28f0062af79ebd64f3fa41 | 811 | py | Python | forms/views.py | urchinpro/L2-forms | 37f33386984efbb2d1e92c73d915256247801109 | [
"MIT"
]
| null | null | null | forms/views.py | urchinpro/L2-forms | 37f33386984efbb2d1e92c73d915256247801109 | [
"MIT"
]
| null | null | null | forms/views.py | urchinpro/L2-forms | 37f33386984efbb2d1e92c73d915256247801109 | [
"MIT"
]
| null | null | null | from django.http import HttpResponse
from django.utils.module_loading import import_string
def pdf(request):
"""
Get the form's number (decimal type: 101.15 - where "101" is the form's group and "15" is the number itself).
Single digits 1-9 can't be used for the part after the point,
because the database field stores the value in decimal format xxx.yy - two digits after the dot - plus an active status.
Must use: 01, 02, 03-09, 10, 11, 12-19, 20, 21, 22-29, 30, 31.....
:param request:
:return:
"""
response = HttpResponse(content_type='application/pdf')
t = request.GET.get("type")
response['Content-Disposition'] = 'inline; filename="form-' + t + '.pdf"'
f = import_string('forms.forms' + t[0:3] + '.form_' + t[4:6])
response.write(f(request_data=request.GET))
return response
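# Example of the dynamic dispatch above (editor's illustration): for a request
# such as ?type=101.15, the view builds the dotted path from the slices t[0:3]
# and t[4:6], so the target must be importable as a callable taking request_data:
#
#   f = import_string('forms.forms' + '101.15'[0:3] + '.form_' + '101.15'[4:6])
#   # -> forms/forms101.py must define form_15(request_data=...)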
| 38.619048 | 103 | 0.670777 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 487 | 0.600493 |
d3dcc92c42ee28b5565e1b1bdf3f0bd8727161d9 | 5,087 | py | Python | main.py | code-aifarmer/Python-EXE-maker | 4b7436353c9a0d46b52543304209b057dcac51c1 | [
"MIT"
]
| 2 | 2021-01-26T10:19:15.000Z | 2021-06-27T03:38:00.000Z | main.py | code-aifarmer/Python-EXE-maker | 4b7436353c9a0d46b52543304209b057dcac51c1 | [
"MIT"
]
| null | null | null | main.py | code-aifarmer/Python-EXE-maker | 4b7436353c9a0d46b52543304209b057dcac51c1 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
import PySimpleGUI as sg
import cv2
import subprocess
import shutil
import os
import sys
# Demonstrates a number of PySimpleGUI features including:
# Default element size
# auto_size_buttons
# Button
# Dictionary return values
# update of elements in form (Text, Input)
def runCommand(cmd, timeout=None, window=None):
""" run shell command
@param cmd: command to execute
@param timeout: timeout for command execution
@return: (return code from command, command output)
"""
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output = ''
for line in p.stdout:
line = line.decode(errors='replace' if (sys.version_info) < (3, 5)
else 'backslashreplace').rstrip()
output += line
print(line)
if window:
window.Refresh()
retval = p.wait(timeout)
return (retval, output)
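# Usage sketch (editor's illustration): runCommand() echoes each line of output
# as it arrives (refreshing the window if one is supplied) and returns a
# (return code, combined output) tuple. Example, assuming pyinstaller is on PATH:
#
#   rc, output = runCommand('pyinstaller --version')
#   if rc != 0:
#       print('pyinstaller is not available')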
def w():
sg.theme('LightGreen')
layout = [[sg.Text(' Python EXE Creator', font='Any 15')],
[sg.Text('Source Python File'), sg.Input(key='-sourcefile-', size=(45, 1)),
sg.FileBrowse(file_types=(("Python Files", "*.py"),))],
[sg.Text('Icon File'), sg.Input(key='-iconfile-', size=(45, 1)),
sg.FileBrowse(file_types=(("Icon Files", "*.ico"),))],
[sg.Frame('Output', font='Any 15', layout=[
[sg.Output(size=(65, 15), font='Courier 10')]])],
[sg.Button('Make EXE', bind_return_key=True),
sg.Button('Quit', button_color=('white', 'firebrick3'))],
]
window = sg.Window('PySimpleGUI EXE Maker', layout, auto_size_text=False, auto_size_buttons=False,
default_element_size=(20, 1), text_justification='right')
# ---===--- Loop taking in user input --- #
while True:
event, values = window.read()
if event in ('Exit', 'Quit', None):
break
source_file = values['-sourcefile-']
icon_file = values['-iconfile-']
icon_option = '-i "{}"'.format(icon_file) if icon_file else ''
source_path, source_filename = os.path.split(source_file)
workpath_option = '--workpath "{}"'.format(source_path)
dispath_option = '--distpath "{}"'.format(source_path)
specpath_option = '--specpath "{}"'.format(source_path)
folder_to_remove = os.path.join(source_path, source_filename[:-3])
file_to_remove = os.path.join(source_path, source_filename[:-3] + '.spec')
command_line = 'pyinstaller -wF --clean "{}" {} {} {} {}'.format(source_file, icon_option, workpath_option,
dispath_option, specpath_option)
if event == 'Make EXE':
try:
print(command_line)
print('Making EXE...the program has NOT locked up...')
window.refresh()
# print('Running command {}'.format(command_line))
out, err = runCommand(command_line, window=window)
shutil.rmtree(folder_to_remove)
os.remove(file_to_remove)
print('**** DONE ****')
except:
sg.PopupError('Something went wrong',
'close this window and copy command line from text printed out in main window',
'Here is the output from the run', out)
print('Copy and paste this line into the command prompt to manually run PyInstaller:\n\n', command_line)
layout = [[sg.Text('Enter Your Passcode')],
[sg.Input('', size=(10, 1), key='input')],
[sg.Button('1'), sg.Button('2'), sg.Button('3')],
[sg.Button('4'), sg.Button('5'), sg.Button('6')],
[sg.Button('7'), sg.Button('8'), sg.Button('9')],
[sg.Button('Submit'), sg.Button('0'), sg.Button('Clear')],
[sg.Text('', size=(15, 1), font=('Helvetica', 18),
text_color='red', key='out')],
]
window = sg.Window('Keypad', layout,
default_button_element_size=(5, 2),
auto_size_buttons=False,
grab_anywhere=False)
# Loop forever reading the form's values, updating the Input field
keys_entered = ''
while True:
event, values = window.read() # read the form
if event == sg.WIN_CLOSED: # if the X button clicked, just exit
break
if event == 'Clear': # clear keys if clear button
keys_entered = ''
elif event in '1234567890':
keys_entered = values['input'] # get what's been entered so far
keys_entered += event # add the new digit
elif event == 'Submit':
keys_entered = values['input']
if values['input']=='123456':
            sg.popup('输入正确')  # popup text: "Correct passcode"
w()
else:
            sg.popup('输入错误')  # popup text: "Wrong passcode"
window['out'].update(keys_entered) # output the final string
# change the form to reflect current key string
window['input'].update(keys_entered)
window.close()
| 38.832061 | 120 | 0.569294 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,628 | 0.319028 |
d3dd8c075e4b425fd099e7113200bcfa2c88d3c5 | 152 | py | Python | seisflows/system/lsf_sm.py | jpvantassel/seisflows | 5155ec177b5df0218e1fb5204f1fcd6969c66f20 | [
"BSD-2-Clause"
]
| 97 | 2016-11-18T21:19:28.000Z | 2022-03-31T15:02:42.000Z | seisflows/system/lsf_sm.py | SuwenJunliu/seisflows | 14d246691acf8e8549487a5db7c7cd877d23a2ae | [
"BSD-2-Clause"
]
| 30 | 2017-02-21T14:54:14.000Z | 2021-08-30T01:44:39.000Z | seisflows/system/lsf_sm.py | SuwenJunliu/seisflows | 14d246691acf8e8549487a5db7c7cd877d23a2ae | [
"BSD-2-Clause"
]
| 78 | 2017-03-01T15:32:29.000Z | 2022-01-31T09:09:17.000Z | #
# This is Seisflows
#
# See LICENCE file
#
###############################################################################
raise NotImplementedError
| 16.888889 | 79 | 0.348684 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 119 | 0.782895 |
d3ddd574dde8899b673c876fe79246ef6fe9f23e | 938 | py | Python | data/objects/sample.py | predictive-analytics-lab/tiny-comparison-framework | 8ae482a2e69aa5affe94bcd7982e53ad69228d43 | [
"Apache-2.0"
]
| null | null | null | data/objects/sample.py | predictive-analytics-lab/tiny-comparison-framework | 8ae482a2e69aa5affe94bcd7982e53ad69228d43 | [
"Apache-2.0"
]
| null | null | null | data/objects/sample.py | predictive-analytics-lab/tiny-comparison-framework | 8ae482a2e69aa5affe94bcd7982e53ad69228d43 | [
"Apache-2.0"
]
| null | null | null | from data.objects.data import Data
class Sample(Data):
"""
A way to sample from a dataset for testing purposes.
"""
def __init__(self, data, num = 100):
self.data = data
self.dataset_name = data.get_dataset_name()
self.class_attr = data.get_class_attribute()
self.positive_class_val = data.get_positive_class_val("") # sigh
self.sensitive_attrs = data.get_sensitive_attributes()
self.privileged_class_names = data.get_privileged_class_names("") # sigh
self.categorical_features = data.get_categorical_features()
self.features_to_keep = data.get_features_to_keep()
self.missing_val_indicators = data.get_missing_val_indicators()
self.num_to_sample = num
def data_specific_processing(self, dataframe):
dataframe = self.data.data_specific_processing(dataframe)
return dataframe.sample(n = self.num_to_sample, replace=True)
| 42.636364 | 80 | 0.711087 | 901 | 0.960554 | 0 | 0 | 0 | 0 | 0 | 0 | 84 | 0.089552 |
d3de757442c04a58c632f23911d3bb3230eadbab | 572 | py | Python | parkrundata/views.py | remarkablerocket/parkrundata | c717b59771629d6308ec093e29fd373981726fde | [
"BSD-3-Clause"
]
| null | null | null | parkrundata/views.py | remarkablerocket/parkrundata | c717b59771629d6308ec093e29fd373981726fde | [
"BSD-3-Clause"
]
| null | null | null | parkrundata/views.py | remarkablerocket/parkrundata | c717b59771629d6308ec093e29fd373981726fde | [
"BSD-3-Clause"
]
| null | null | null | # -*- coding: utf-8 -*-
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from .models import Country, Event
from .serializers import CountrySerializer, EventSerializer
class CountryViewSet(viewsets.ModelViewSet):
queryset = Country.objects.all()
serializer_class = CountrySerializer
permission_classes = [IsAuthenticatedOrReadOnly]
class EventViewSet(viewsets.ModelViewSet):
queryset = Event.objects.all()
serializer_class = EventSerializer
permission_classes = [IsAuthenticatedOrReadOnly]
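# Editorial usage sketch (not part of this module): these viewsets are typically
# registered with a DRF router in urls.py, for example:
#
#     from rest_framework import routers
#     router = routers.DefaultRouter()
#     router.register(r'countries', CountryViewSet)
#     router.register(r'events', EventViewSet)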
| 28.6 | 64 | 0.798951 | 344 | 0.601399 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.04021 |
d3df3020e02d0033dd7ab9554f7528acd2742527 | 21,764 | py | Python | spearmint/models/gp_classifier.py | jatinarora2409/Spearmint | a209eb8aa7d5d93f2fdca6cff50dc17a94d926ab | [
"RSA-MD"
]
| null | null | null | spearmint/models/gp_classifier.py | jatinarora2409/Spearmint | a209eb8aa7d5d93f2fdca6cff50dc17a94d926ab | [
"RSA-MD"
]
| null | null | null | spearmint/models/gp_classifier.py | jatinarora2409/Spearmint | a209eb8aa7d5d93f2fdca6cff50dc17a94d926ab | [
"RSA-MD"
]
| null | null | null | # -*- coding: utf-8 -*-
# Spearmint
#
# Academic and Non-Commercial Research Use Software License and Terms
# of Use
#
# Spearmint is a software package to perform Bayesian optimization
# according to specific algorithms (the “Software”). The Software is
# designed to automatically run experiments (thus the code name
# 'spearmint') in a manner that iteratively adjusts a number of
# parameters so as to minimize some objective in as few runs as
# possible.
#
# The Software was developed by Ryan P. Adams, Michael Gelbart, and
# Jasper Snoek at Harvard University, Kevin Swersky at the
# University of Toronto (“Toronto”), and Hugo Larochelle at the
# Université de Sherbrooke (“Sherbrooke”), which assigned its rights
# in the Software to Socpra Sciences et Génie
# S.E.C. (“Socpra”). Pursuant to an inter-institutional agreement
# between the parties, it is distributed for free academic and
# non-commercial research use by the President and Fellows of Harvard
# College (“Harvard”).
#
# Using the Software indicates your agreement to be bound by the terms
# of this Software Use Agreement (“Agreement”). Absent your agreement
# to the terms below, you (the “End User”) have no rights to hold or
# use the Software whatsoever.
#
# Harvard agrees to grant hereunder the limited non-exclusive license
# to End User for the use of the Software in the performance of End
# User’s internal, non-commercial research and academic use at End
# User’s academic or not-for-profit research institution
# (“Institution”) on the following terms and conditions:
#
# 1. NO REDISTRIBUTION. The Software remains the property Harvard,
# Toronto and Socpra, and except as set forth in Section 4, End User
# shall not publish, distribute, or otherwise transfer or make
# available the Software to any other party.
#
# 2. NO COMMERCIAL USE. End User shall not use the Software for
# commercial purposes and any such use of the Software is expressly
# prohibited. This includes, but is not limited to, use of the
# Software in fee-for-service arrangements, core facilities or
# laboratories or to provide research services to (or in collaboration
# with) third parties for a fee, and in industry-sponsored
# collaborative research projects where any commercial rights are
# granted to the sponsor. If End User wishes to use the Software for
# commercial purposes or for any other restricted purpose, End User
# must execute a separate license agreement with Harvard.
#
# Requests for use of the Software for commercial purposes, please
# contact:
#
# Office of Technology Development
# Harvard University
# Smith Campus Center, Suite 727E
# 1350 Massachusetts Avenue
# Cambridge, MA 02138 USA
# Telephone: (617) 495-3067
# Facsimile: (617) 495-9568
# E-mail: [email protected]
#
# 3. OWNERSHIP AND COPYRIGHT NOTICE. Harvard, Toronto and Socpra own
# all intellectual property in the Software. End User shall gain no
# ownership to the Software. End User shall not remove or delete and
# shall retain in the Software, in any modifications to Software and
# in any Derivative Works, the copyright, trademark, or other notices
# pertaining to Software as provided with the Software.
#
# 4. DERIVATIVE WORKS. End User may create and use Derivative Works,
# as such term is defined under U.S. copyright laws, provided that any
# such Derivative Works shall be restricted to non-commercial,
# internal research and academic use at End User’s Institution. End
# User may distribute Derivative Works to other Institutions solely
# for the performance of non-commercial, internal research and
# academic use on terms substantially similar to this License and
# Terms of Use.
#
# 5. FEEDBACK. In order to improve the Software, comments from End
# Users may be useful. End User agrees to provide Harvard with
# feedback on the End User’s use of the Software (e.g., any bugs in
# the Software, the user experience, etc.). Harvard is permitted to
# use such information provided by End User in making changes and
# improvements to the Software without compensation or an accounting
# to End User.
#
# 6. NON ASSERT. End User acknowledges that Harvard, Toronto and/or
# Sherbrooke or Socpra may develop modifications to the Software that
# may be based on the feedback provided by End User under Section 5
# above. Harvard, Toronto and Sherbrooke/Socpra shall not be
# restricted in any way by End User regarding their use of such
# information. End User acknowledges the right of Harvard, Toronto
# and Sherbrooke/Socpra to prepare, publish, display, reproduce,
# transmit and or use modifications to the Software that may be
# substantially similar or functionally equivalent to End User’s
# modifications and/or improvements if any. In the event that End
# User obtains patent protection for any modification or improvement
# to Software, End User agrees not to allege or enjoin infringement of
# End User’s patent against Harvard, Toronto or Sherbrooke or Socpra,
# or any of the researchers, medical or research staff, officers,
# directors and employees of those institutions.
#
# 7. PUBLICATION & ATTRIBUTION. End User has the right to publish,
# present, or share results from the use of the Software. In
# accordance with customary academic practice, End User will
# acknowledge Harvard, Toronto and Sherbrooke/Socpra as the providers
# of the Software and may cite the relevant reference(s) from the
# following list of publications:
#
# Practical Bayesian Optimization of Machine Learning Algorithms
# Jasper Snoek, Hugo Larochelle and Ryan Prescott Adams
# Neural Information Processing Systems, 2012
#
# Multi-Task Bayesian Optimization
# Kevin Swersky, Jasper Snoek and Ryan Prescott Adams
# Advances in Neural Information Processing Systems, 2013
#
# Input Warping for Bayesian Optimization of Non-stationary Functions
# Jasper Snoek, Kevin Swersky, Richard Zemel and Ryan Prescott Adams
# Preprint, arXiv:1402.0929, http://arxiv.org/abs/1402.0929, 2013
#
# Bayesian Optimization and Semiparametric Models with Applications to
# Assistive Technology Jasper Snoek, PhD Thesis, University of
# Toronto, 2013
#
# 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS." TO THE FULLEST
# EXTENT PERMITTED BY LAW, HARVARD, TORONTO AND SHERBROOKE AND SOCPRA
# HEREBY DISCLAIM ALL WARRANTIES OF ANY KIND (EXPRESS, IMPLIED OR
# OTHERWISE) REGARDING THE SOFTWARE, INCLUDING BUT NOT LIMITED TO ANY
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, OWNERSHIP, AND NON-INFRINGEMENT. HARVARD, TORONTO AND
# SHERBROOKE AND SOCPRA MAKE NO WARRANTY ABOUT THE ACCURACY,
# RELIABILITY, COMPLETENESS, TIMELINESS, SUFFICIENCY OR QUALITY OF THE
# SOFTWARE. HARVARD, TORONTO AND SHERBROOKE AND SOCPRA DO NOT WARRANT
# THAT THE SOFTWARE WILL OPERATE WITHOUT ERROR OR INTERRUPTION.
#
# 9. LIMITATIONS OF LIABILITY AND REMEDIES. USE OF THE SOFTWARE IS AT
# END USER’S OWN RISK. IF END USER IS DISSATISFIED WITH THE SOFTWARE,
# ITS EXCLUSIVE REMEDY IS TO STOP USING IT. IN NO EVENT SHALL
# HARVARD, TORONTO OR SHERBROOKE OR SOCPRA BE LIABLE TO END USER OR
# ITS INSTITUTION, IN CONTRACT, TORT OR OTHERWISE, FOR ANY DIRECT,
# INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR OTHER
# DAMAGES OF ANY KIND WHATSOEVER ARISING OUT OF OR IN CONNECTION WITH
# THE SOFTWARE, EVEN IF HARVARD, TORONTO OR SHERBROOKE OR SOCPRA IS
# NEGLIGENT OR OTHERWISE AT FAULT, AND REGARDLESS OF WHETHER HARVARD,
# TORONTO OR SHERBROOKE OR SOCPRA IS ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGES.
#
# 10. INDEMNIFICATION. To the extent permitted by law, End User shall
# indemnify, defend and hold harmless Harvard, Toronto and Sherbrooke
# and Socpra, their corporate affiliates, current or future directors,
# trustees, officers, faculty, medical and professional staff,
# employees, students and agents and their respective successors,
# heirs and assigns (the "Indemnitees"), against any liability,
# damage, loss or expense (including reasonable attorney's fees and
# expenses of litigation) incurred by or imposed upon the Indemnitees
# or any one of them in connection with any claims, suits, actions,
# demands or judgments arising from End User’s breach of this
# Agreement or its Institution’s use of the Software except to the
# extent caused by the gross negligence or willful misconduct of
# Harvard, Toronto or Sherbrooke or Socpra. This indemnification
# provision shall survive expiration or termination of this Agreement.
#
# 11. GOVERNING LAW. This Agreement shall be construed and governed by
# the laws of the Commonwealth of Massachusetts regardless of
# otherwise applicable choice of law standards.
#
# 12. NON-USE OF NAME. Nothing in this License and Terms of Use shall
# be construed as granting End Users or their Institutions any rights
# or licenses to use any trademarks, service marks or logos associated
# with the Software. You may not use the terms “Harvard” or
# “University of Toronto” or “Université de Sherbrooke” or “Socpra
# Sciences et Génie S.E.C.” (or a substantially similar term) in any
# way that is inconsistent with the permitted uses described
# herein. You agree not to use any name or emblem of Harvard, Toronto
# or Sherbrooke, or any of their subdivisions for any purpose, or to
# falsely suggest any relationship between End User (or its
# Institution) and Harvard, Toronto and/or Sherbrooke, or in any
# manner that would infringe or violate any of their rights.
#
# 13. End User represents and warrants that it has the legal authority
# to enter into this License and Terms of Use on behalf of itself and
# its Institution.
import copy
import sys, logging
import numpy as np
import numpy.random as npr
import scipy.linalg as spla
import scipy.optimize as spo
import scipy.io as sio
import scipy.stats as sps
try:
import scipy.weave as weave
except ImportError:
import weave
from .gp import GP
from ..utils.param import Param as Hyperparameter
from ..kernels import Matern52, Noise, Scale, SumKernel, TransformKernel
from ..sampling.slice_sampler import SliceSampler
from ..sampling.whitened_prior_slice_sampler import WhitenedPriorSliceSampler
from ..sampling.elliptical_slice_sampler import EllipticalSliceSampler
from ..utils import priors
from ..transformations import BetaWarp, Transformer
try:
module = sys.modules['__main__'].__file__
log = logging.getLogger(module)
except:
log = logging.getLogger()
print 'Not running from main.'
class GPClassifier(GP):
def __init__(self, num_dims, **options):
self.counts = None
log.debug('GP Classifier initialized with options: %s' % (options))
self.ess_thinning = int(options.get("ess-thinning", 10))
self._set_likelihood(options)
self.prior_whitening = options.get('prior-whitening', True)
sigmoid = options.get("sigmoid", "probit")
if not self.noiseless:
if sigmoid == "probit":
self.sigmoid = sps.norm.cdf
self.sigmoid_derivative = sps.norm.pdf # not used
self.sigmoid_inverse = sps.norm.ppf
elif sigmoid == "logistic":
self.sigmoid = sps.logistic.cdf
self.sigmoid_derivative = sps.logistic.pdf
self.sigmoid_inverse = sps.logistic.ppf
else:
raise Exception("Only probit and logistic sigmoids are supported")
else:
# If no noise we use the step function and ignore the "sigmoid" argument.
# (This is the step function likelihood)
# assert options['likelihood'] == 'STEP'
self.sigmoid = lambda x: np.greater_equal(x, 0)
self.sigmoid_derivative = lambda x: 0.
self.sigmoid_inverse = lambda x: 0.
# The constraint is that p=s(f) > 1-epsilon
        # where s is the sigmoid, f is the latent function value, and p is the binomial probability.
        # This is only needed in more complicated situations. In the main situation where this is used
        # we want f>0, which is equivalent to epsilon=0.5 for the sigmoids we use.
# The point is: do not set epsilon unless you know what you are doing!
# (and do not confuse it with delta, the min constraint confidence)
self._one_minus_epsilon = 1.0 - float(options.get("epsilon", 0.5))
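        # Worked check (editorial note, not original Spearmint code): for the probit or
        # logistic sigmoid, sigmoid_inverse(1 - 0.5) = sigmoid_inverse(0.5) = 0, so the
        # default epsilon of 0.5 turns the constraint s(f) > 1 - epsilon into simply f > 0.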
self.latent_values_list = []
super(GPClassifier, self).__init__(num_dims, **options)
def _set_likelihood(self, options):
self.likelihood = options.get('likelihood', 'binomial').lower()
if self.likelihood.lower() == "binomial":
self.noiseless = False
elif self.likelihood.lower() == "step":
self.noiseless = True
else:
raise Exception("GP classifier only supports step or binomial likelihood, not %s" % (options['likelihood']))
def _reset(self):
super(GPClassifier, self)._reset()
# Reset the latent values
if self.counts is not None:
initial_latent_vals = self.counts - 0.5
else:
initial_latent_vals = np.zeros(0)
self.latent_values.initial_value = initial_latent_vals
self.latent_values.reset_value()
self._latent_values_list = []
def _set_latent_values_from_dict(self, latent_values_dict):
# Read in the latent values. For pre-existing data, just load them in
# For new data, set them to a default.
default_latent_values = self.counts - 0.5
latent_values = np.zeros(self._inputs.shape[0])
for i in xrange(self._inputs.shape[0]):
key = str(hash(self._inputs[i].tostring()))
if key in latent_values_dict:
latent_values[i] = latent_values_dict[key]
else:
latent_values[i] = default_latent_values[i]
self.latent_values.value = latent_values
def _burn_samples(self, num_samples):
# sys.stderr.write('GPClassifer: burning %s: ' % ', '.join(self.params.keys()))
# sys.stderr.write('%04d/%04d' % (0, num_samples))
for i in xrange(num_samples):
# sys.stderr.write('\b'*9+'%04d/%04d' % (i, num_samples))
for sampler in self._samplers:
sampler.sample(self)
self.latent_values_sampler.sample(self)
self.chain_length += 1
# sys.stderr.write('\n')
def _collect_samples(self, num_samples):
# sys.stderr.write('GPClassifer: sampling %s: ' % ', '.join(self.params.keys()))
# sys.stderr.write('%04d/%04d' % (0, num_samples))
hypers_list = []
latent_values_list = []
for i in xrange(num_samples):
# sys.stderr.write('\b'*9+'%04d/%04d' % (i, num_samples))
for sampler in self._samplers:
sampler.sample(self)
self.latent_values_sampler.sample(self)
current_dict = self.to_dict()
hypers_list.append(current_dict['hypers'])
latent_values_list.append(current_dict['latent values'])
self.chain_length += 1
# sys.stderr.write('\n')
return hypers_list, latent_values_list
def _build(self):
self.params = {}
self.latent_values = None
# Build the transformer
beta_warp = BetaWarp(self.num_dims)
beta_alpha, beta_beta = beta_warp.hypers
self.params['beta_alpha'] = beta_alpha
self.params['beta_beta'] = beta_beta
transformer = Transformer(self.num_dims)
transformer.add_layer(beta_warp)
# Build the component kernels
input_kernel = Matern52(self.num_dims)
ls = input_kernel.hypers
self.params['ls'] = ls
# Now apply the transformation.
transform_kernel = TransformKernel(input_kernel, transformer)
# Add some perturbation for stability
stability_noise = Noise(self.num_dims)
# Finally make a noisy version if necessary
# In a classifier GP the notion of "noise" is really just the scale.
if self.noiseless:
self._kernel = SumKernel(transform_kernel, stability_noise)
else:
scaled_kernel = Scale(transform_kernel)
self._kernel = SumKernel(scaled_kernel, stability_noise)
amp2 = scaled_kernel.hypers
self.params['amp2'] = amp2
# Build the mean function (just a constant mean for now)
self.mean = Hyperparameter(
initial_value = 0.0,
prior = priors.Gaussian(0.0,1.0),
name = 'mean'
)
self.params['mean'] = self.mean
        # Build the latent values. Empty for now until the GP gets data.
self.latent_values = Hyperparameter(
initial_value = np.array([]),
name = 'latent values'
)
# Build the samplers
to_sample = [self.mean] if self.noiseless else [self.mean, amp2]
self._samplers.append(SliceSampler(*to_sample, compwise=False, thinning=self.thinning))
self._samplers.append(WhitenedPriorSliceSampler(ls, beta_alpha, beta_beta, compwise=True, thinning=self.thinning))
self.latent_values_sampler = EllipticalSliceSampler(self.latent_values, thinning=self.ess_thinning)
@property
def values(self):
if self.pending is None or len(self._fantasy_values_list) < self.num_states:
return self.observed_values
if self.num_fantasies == 1:
return np.append(self.latent_values.value, self._fantasy_values_list[self.state].flatten(), axis=0)
else:
return np.append(np.tile(self.latent_values.value[:,None], (1,self.num_fantasies)), self._fantasy_values_list[self.state], axis=0)
@property
def observed_values(self):
if self.latent_values is not None:
return self.latent_values.value
else:
return np.array([])
def set_state(self, state):
self.state = state
self._set_params_from_dict(self._hypers_list[state])
self._set_latent_values_from_dict(self._latent_values_list[state])
def pi(self, pred, compute_grad=False):
return super(GPClassifier, self).pi( pred, compute_grad=compute_grad,
C=self.sigmoid_inverse(self._one_minus_epsilon) )
def fit(self, inputs, counts, pending=None, hypers=None, reburn=False, fit_hypers=True):
# Set the data for the GP
self._inputs = inputs
self.counts = counts
# Reset the GP
self._reset()
# Initialize the GP with hypers if provided
if hypers:
self.from_dict(hypers)
if fit_hypers:
# Burn samples (if needed)
num_samples = self.burnin if reburn or self.chain_length < self.burnin else 0
self._burn_samples(num_samples)
# Now collect some samples
self._hypers_list, self._latent_values_list = self._collect_samples(self.mcmc_iters)
# Now we have more states
self.num_states = self.mcmc_iters
elif not self._hypers_list:
# Just use the current hypers as the only state
current_dict = self.to_dict()
self._hypers_list = [current_dict['hypers']]
self._latent_values_list = [current_dict['latent values']]
self.num_states = 1
# Set pending data and generate corresponding fantasies
if pending is not None:
self.pending = pending
self._fantasy_values_list = self._collect_fantasies(pending)
# Get caching ready
if self.caching:
self._prepare_cache()
# Set the hypers to the final state of the chain
self.set_state(len(self._hypers_list)-1)
return self.to_dict()
def log_binomial_likelihood(self, y=None):
# If no data, don't do anything
if not self.has_data:
return 0.0
if y is None:
y = self.latent_values.value
p = self.sigmoid(y)
# Note on the below: the obvious implementation would be
# return np.sum( pos*np.log(p) + neg*np.log(1-p) )
# The problem is, if pos = 0, and p=0, we will get a 0*-Inf = nan
# This messes things up. So we use the safer implementation below that ignores
# the term entirely if the counts are 0.
pos = self.counts # positive counts
neg = 1 - pos
with np.errstate(divide='ignore'): # suppress warnings about log(0)
return np.sum( pos[pos>0]*np.log(p[pos>0]) ) + np.sum( neg[neg>0]*np.log(1-p[neg>0]) )
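    # Editorial illustration (not original Spearmint code) of the masking above, with
    # made-up numbers: for counts pos = [1, 0] and probabilities p = [0.8, 0.0], the
    # naive pos*np.log(p) + neg*np.log(1-p) hits 0 * log(0) = 0 * (-inf) = nan in the
    # second entry, while the masked sums give log(0.8) + log(1.0) as intended.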
def to_dict(self):
gp_dict = {}
gp_dict['hypers'] = {}
for name, hyper in self.params.iteritems():
gp_dict['hypers'][name] = hyper.value
# Save the latent values as a dict with keys as hashes of the data
# so that each latent value is associated with its input
# then when we load them in we know which ones are which
gp_dict['latent values'] = {str(hash(self._inputs[i].tostring())) : self.latent_values.value[i]
for i in xrange(self._inputs.shape[0])}
gp_dict['chain length'] = self.chain_length
return gp_dict
def from_dict(self, gp_dict):
self._set_params_from_dict(gp_dict['hypers'])
self._set_latent_values_from_dict(gp_dict['latent values'])
self.chain_length = gp_dict['chain length']
| 43.354582 | 142 | 0.684111 | 11,149 | 0.510626 | 0 | 0 | 635 | 0.029083 | 0 | 0 | 12,354 | 0.565815 |
d3e0c86fec4a82ec7ab6e46d19afaf6635bc9e88 | 1,125 | py | Python | pycom_lopy4_LoRaBattMonitor/transmitter/main.py | AidanTek/Fab-Cre8_IoT | 3d358a484aea2e2a50d6dbef443e9a2757ef9ab8 | [
"MIT"
]
| null | null | null | pycom_lopy4_LoRaBattMonitor/transmitter/main.py | AidanTek/Fab-Cre8_IoT | 3d358a484aea2e2a50d6dbef443e9a2757ef9ab8 | [
"MIT"
]
| null | null | null | pycom_lopy4_LoRaBattMonitor/transmitter/main.py | AidanTek/Fab-Cre8_IoT | 3d358a484aea2e2a50d6dbef443e9a2757ef9ab8 | [
"MIT"
]
| null | null | null | from machine import Pin, ADC
from network import LoRa
import socket
from utime import sleep
import machine  # needed below for machine.deepsleep()
import pycom    # needed below for pycom.rgbled()
# Use a pin for a 'config' mode
configPin = Pin('P21', Pin.IN, Pin.PULL_UP)
# Create an ADC object
adc = ADC()
# vbatt pin:
vbatt = adc.channel(attn=1, pin='P16')
def battConversion():
adcVoltage = vbatt()
voltage = adcVoltage*3*1.334/4095
return voltage
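# Rough sanity check (editorial note; assumes the 1/3 divider and ~1.334 V ADC
# full-scale implied by the constants above): a raw reading of 2048 gives
# 2048 * 3 * 1.334 / 4095 ≈ 2.0 V.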
# Initialise LoRa in LoRa mode
# For Europe, use LoRa.EU868
lora = LoRa(mode=LoRa.LORA, region=LoRa.EU868)
# Create a raw LoRa socket
s = socket.socket(socket.AF_LORA, socket.SOCK_RAW)
# Check the Config pin:
configMode = not configPin()
if not configMode:
print('Reading Battery')
pycom.rgbled(0x0000FF)
message = 'Battery Status: {}'.format(battConversion())
print(message)
sleep(2)
print('Sending battery status estimate...')
pycom.rgbled(0xFF0000)
sleep(2)
s.setblocking(True)
# Send some data
s.send(message)
print('Message Sent!')
pycom.rgbled(0x00FF00)
sleep(2)
print('Going to sleep')
machine.deepsleep(300000)
# Otherwise, we are in 'config' so exit to REPL
print('Config Mode')
| 20.454545 | 59 | 0.686222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 362 | 0.321778 |
d3e103df05be9ac935fd9148b0ac418dab33e33f | 7,209 | py | Python | scrapi/harvesters/lwbin.py | wearpants/scrapi | b1619a1212d9fc7e1f2247336fc2e4a3d453a4bb | [
"Apache-2.0"
]
| 34 | 2015-10-06T20:08:43.000Z | 2022-03-16T12:46:24.000Z | scrapi/harvesters/lwbin.py | jgw4sq/twilio | 796e97dc6a8fdb8983fd736b328ad907bb1ff73e | [
"Apache-2.0"
]
| 100 | 2015-09-10T19:57:32.000Z | 2016-06-22T03:09:51.000Z | scrapi/harvesters/lwbin.py | jgw4sq/twilio | 796e97dc6a8fdb8983fd736b328ad907bb1ff73e | [
"Apache-2.0"
]
| 32 | 2015-09-09T21:28:54.000Z | 2019-05-09T03:18:02.000Z | """
A Lake Winnipeg Basin Information Network (BIN) harvester for the SHARE project
Example API request: http://130.179.67.140/api/3/action/package_search?q= (problematic)
http://130.179.67.140/api/3/action/current_package_list_with_resources (currently using)
It oddly returns 5 more datasets than all searchable ones on LWBIN data hub.
Known issues:
1 -- Five datasets can be searched but cannot be accessed via LWBIN.
Clicking on the searching result would result in linking to a redirected page like this:
http://130.179.67.140/user/login?came_from=http://130.179.67.140/dataset/mpca-surface-water-data-access-interactive-map
Within each dataset there are resources that contain urls to source pages. For future work, consider using resource
urls as canonical urls.
2 -- Resource properties contained in the raw metadata of the datasets are not added to the normalized metadata at this
point.
3 -- Using single-name contributors as filters can return an invalid query. This is an issue with the frontend, not with scrapi.
"""
from __future__ import unicode_literals
import json
import logging
from dateutil.parser import parse
from scrapi import requests
from scrapi.base import JSONHarvester
from scrapi.linter.document import RawDocument
from scrapi.base.helpers import build_properties, datetime_formatter, parse_name
logger = logging.getLogger(__name__)
ORGANIZATIONS = (
"organization", "fund", "canada", "agriculture", "commitee", "international", "council", "office", "of",
"observation", "institute", "lwbin", "cocorahs", "usgs", "nsidc"
)
def is_organization(name):
"""Return a boolean to indicate if the name passed to the function is an organization
"""
words = name.split(' ')
return any(word.strip(";").lower() in ORGANIZATIONS for word in words)
def clean_authors(authors):
"""Cleam authors list.
"""
authors = authors.strip().replace('<span class="author-names">', '').replace('</span>', '')
authors = authors.split(',')
new_authors = []
for author in authors:
if is_organization(author):
new_authors.append(author)
else:
if ' and ' in author or ' <em>et al.</em>' in author:
split_name = author.replace(' <em>et al.</em>', '').split(' and ')
new_authors.extend(split_name)
else:
new_authors.append(author)
return new_authors
def process_contributors(authors, emails):
"""Process authors and add author emails
    If there are multiple authors and one email, add the email as a separate contributor entry.
"""
emails = emails.split(',')
authors = clean_authors(authors)
contributor_list = []
append_emails = len(authors) == 1 and len(emails) == 1 and not emails[0] == u'' # append the email to the author only when 1 record is observed
for i, author in enumerate(authors):
if is_organization(author):
contributor = {
'name': author
}
else:
contributor = parse_name(author)
if append_emails:
contributor['email'] = emails[i]
contributor_list.append(contributor)
if not append_emails and emails[0] != u'':
for email in emails:
contributor = {
'name': '',
'email': email
}
contributor_list.append(contributor)
return contributor_list
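# Editorial example (hypothetical input, not part of the harvester): for
# authors='<span class="author-names">Jane Doe and John Smith</span>' and emails='',
# clean_authors() yields ['Jane Doe', 'John Smith'] and process_contributors()
# returns two parsed-name dicts with no email attached; with a single author and a
# single non-empty email, that email is attached to the author's dict instead.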
def process_licenses(license_title, license_url, license_id):
"""Process licenses to comply with the normalized schema
"""
if not license_url:
return []
else:
license = {
'uri': license_url,
'description': "{} ({})".format(license_title, license_id) or ""
}
return [license]
def construct_url(url, dataset_path, end_point):
"""
    :return: a url that directs back to the page on LWBIN Data Hub instead of the source page.
:param url: host url
:param dataset_path: parent path of all datasets
:param end_point: name of datasets
"""
return "/".join([url, dataset_path, end_point])
def process_object_uris(url, extras):
"""Extract doi from /extras, and return a list of object uris including /url and doi if it exists.
"""
doi = []
for d in extras:
if d['key'] == "DOI" or d['key'] == "DOI:":
doi.append(d['value'])
if doi == []:
return [url]
else:
        return [url] + doi  # list.extend() returns None; concatenate to include the DOIs
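# Editorial example (made-up values): process_object_uris('http://example.org/ds',
# [{'key': 'DOI', 'value': '10.1234/abc'}]) returns
# ['http://example.org/ds', '10.1234/abc'], while with no DOI entries it returns
# just ['http://example.org/ds'].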
class LWBINHarvester(JSONHarvester):
short_name = 'lwbin'
long_name = 'Lake Winnipeg Basin Information Network'
url = 'http://130.179.67.140'
dataset_path = "dataset" # dataset base url for constructing urls that go back to LWBIN instead of source pages.
DEFAULT_ENCODING = 'UTF-8'
record_encoding = None
@property
def schema(self):
return {
'title': ('/title', lambda x: x or ''),
'description': ('/notes'),
'providerUpdatedDateTime': ('/metadata_modified', datetime_formatter),
'uris': {
'canonicalUri': ('/name', lambda x: construct_url(self.url, self.dataset_path, x)), # Construct new urls directing to LWBIN
'objectUris': ('/url', '/extras', process_object_uris) # Default urls from the metadata directing to source pages
},
'contributors': ('/author', '/author_email', process_contributors),
'licenses': ('/license_title', '/license_url', '/license_id', process_licenses),
'tags': ('/tags', lambda x: [tag['name'].lower() for tag in (x or [])]),
'freeToRead': {
'startDate': ('/isopen', '/metadata_created', lambda x, y: parse(y).date().isoformat() if x else None)
},
'otherProperties': build_properties(
('maintainer', '/maintainer'),
('maintainerEmail', '/maintainer_email'),
('revisionTimestamp', ('/revision_timestamp', datetime_formatter)),
('id', '/id'),
('metadataCreated', ('/metadata_created', datetime_formatter)),
('state', '/state'),
('version', '/version'),
('creatorUserId', '/creator_user_id'),
('type', '/type'),
('numberOfResources', '/num_resources'),
('numberOfTags', '/num_tags'),
('name', '/name'),
('groups', '/groups'),
)
}
def harvest(self, start_date=None, end_date=None):
"""Returns a list of Rawdocuments (metadata)
Searching by time is not supported by LWBIN CKAN API. all datasets have to be scanned each time.
"""
base_url = 'http://130.179.67.140/api/3/action/current_package_list_with_resources'
records = requests.get(base_url).json()['result']
total = len(records) # Total number of documents
logger.info('{} documents to be harvested'.format(total))
return [
RawDocument({
'doc': json.dumps(record),
'source': self.short_name,
'docID': record['id'],
'filetype': 'json'
}) for record in records
]
| 36.226131 | 148 | 0.615758 | 2,761 | 0.382993 | 0 | 0 | 1,670 | 0.231655 | 0 | 0 | 3,312 | 0.459426 |
d3e19347ed0ddda8633be363dd6cfd4b345245b2 | 402 | py | Python | catalog/bindings/gmd/point.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
]
| null | null | null | catalog/bindings/gmd/point.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
]
| null | null | null | catalog/bindings/gmd/point.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
]
| null | null | null | from dataclasses import dataclass
from bindings.gmd.point_type import PointType
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class Point(PointType):
"""A Point is defined by a single coordinate tuple.
The direct position of a point is specified by the pos element which
is of type DirectPositionType.
"""
class Meta:
namespace = "http://www.opengis.net/gml"
| 23.647059 | 72 | 0.721393 | 262 | 0.651741 | 0 | 0 | 273 | 0.679104 | 0 | 0 | 224 | 0.557214 |
d3e28f994d4f8d390af434d713b2e934cf2435a9 | 1,050 | py | Python | hknweb/exams/migrations/0019_auto_20200413_0212.py | AndrewKe/hknweb | 8b0591625ffe0b2fa1f50fec453d674a03f52a2e | [
"MIT"
]
| null | null | null | hknweb/exams/migrations/0019_auto_20200413_0212.py | AndrewKe/hknweb | 8b0591625ffe0b2fa1f50fec453d674a03f52a2e | [
"MIT"
]
| null | null | null | hknweb/exams/migrations/0019_auto_20200413_0212.py | AndrewKe/hknweb | 8b0591625ffe0b2fa1f50fec453d674a03f52a2e | [
"MIT"
]
| null | null | null | # Generated by Django 2.2.8 on 2020-04-13 09:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('exams', '0018_auto_20200412_1715'),
]
operations = [
migrations.CreateModel(
name='ExamChoice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('exam_Choice', models.CharField(max_length=50)),
],
),
migrations.AlterField(
model_name='exam',
name='exam_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exams.ExamChoice'),
),
migrations.AlterField(
model_name='exam',
name='instructor',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exams.Instructor'),
),
migrations.DeleteModel(
name='CourseSemester',
),
]
| 30 | 114 | 0.590476 | 924 | 0.88 | 0 | 0 | 0 | 0 | 0 | 0 | 199 | 0.189524 |
d3e3af31f72741b010fae28e3067fb84c2fb37ac | 1,028 | py | Python | tools/linear_algebra/preconditioners/Jacobi.py | mathischeap/mifem | 3242e253fb01ca205a76568eaac7bbdb99e3f059 | [
"MIT"
]
| 1 | 2020-10-14T12:48:35.000Z | 2020-10-14T12:48:35.000Z | tools/linear_algebra/preconditioners/Jacobi.py | mathischeap/mifem | 3242e253fb01ca205a76568eaac7bbdb99e3f059 | [
"MIT"
]
| null | null | null | tools/linear_algebra/preconditioners/Jacobi.py | mathischeap/mifem | 3242e253fb01ca205a76568eaac7bbdb99e3f059 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
"""Jacobian preconditioner.
"""
from root.config.main import *
from scipy import sparse as spspa
from tools.linear_algebra.preconditioners.base import Preconditioner
class JacobiPreconditioner(Preconditioner):
""""""
def __init__(self, A):
""""""
super(JacobiPreconditioner, self).__init__(A)
self._freeze_self_()
@property
def invM(self):
A = self._A_.M
diag = A.diagonal()
if rAnk != mAster_rank:
DIAG = None
else:
DIAG = np.empty((sIze, self._A_.shape[0]))
cOmm.Gather(diag, DIAG, root=mAster_rank)
if rAnk == mAster_rank:
DIAG = np.sum(DIAG, axis=0)
DIAG = np.reciprocal(DIAG)
else:
DIAG = np.empty((self._A_.shape[0],))
cOmm.Bcast(DIAG, root=mAster_rank)
invM = spspa.dia_matrix((DIAG, 0), shape=self._A_.shape)
return invM
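    # Editorial note (not part of the original class): this is the classical Jacobi
    # (diagonal) preconditioner, M = diag(A), applied as z = M^{-1} r. For example,
    # if the assembled diagonal is [2., 4.], invM is the sparse diagonal matrix
    # diag([0.5, 0.25]), so invM @ r simply scales each entry of r.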
@property
def ___applying_method___(self):
return 'left_multiply_invM' | 25.073171 | 68 | 0.592412 | 836 | 0.81323 | 0 | 0 | 645 | 0.627432 | 0 | 0 | 86 | 0.083658 |
d3e577b90c506a8bda99f5b1083dfe14aebd03c5 | 904 | py | Python | social_redirects/models.py | JoshZero87/site | c8024b805ff5ff0e16f54dce7bf05097fd2f08e0 | [
"MIT"
]
| 4 | 2017-01-29T00:38:41.000Z | 2019-09-04T14:30:24.000Z | social_redirects/models.py | JoshZero87/site | c8024b805ff5ff0e16f54dce7bf05097fd2f08e0 | [
"MIT"
]
| 74 | 2017-10-02T04:42:54.000Z | 2022-01-13T00:44:16.000Z | social_redirects/models.py | JoshZero87/site | c8024b805ff5ff0e16f54dce7bf05097fd2f08e0 | [
"MIT"
]
| 3 | 2017-03-24T23:26:46.000Z | 2019-10-21T01:16:03.000Z | from django.contrib.sites.models import Site
from django.db import models
class Redirect(models.Model):
title = models.CharField(max_length=200)
description = models.CharField(max_length=1024, blank=True, null=True)
social_image = models.ImageField(null=True, blank=True)
old_path = models.CharField(max_length=200, db_index=True, verbose_name="Redirect From", help_text="This should be an absolute path, excluding the domain name. Example: '/events/search/'.")
new_path = models.CharField(max_length=200, blank=True, verbose_name="Redirect To", help_text="This can be either an absolute path (as above) or a full URL starting with 'http://'.")
site = models.ForeignKey(Site, models.CASCADE)
class Meta:
unique_together = (('site', 'old_path'),)
ordering = ('old_path',)
def __str__(self):
return "%s ---> %s" % (self.old_path, self.new_path)
| 45.2 | 193 | 0.710177 | 826 | 0.913717 | 0 | 0 | 0 | 0 | 0 | 0 | 242 | 0.267699 |
d3e869c2c7fff869303aff8cf0f763aad3c88462 | 7,767 | py | Python | response_model/python/population_subunits/coarse/analysis/few_cells_tf_analyse_all.py | googlearchive/rgc-models | 0dea94bbd54f591d82d95169e33d40bb55b6be94 | [
"Apache-2.0"
]
| 1 | 2018-09-18T16:47:09.000Z | 2018-09-18T16:47:09.000Z | response_model/python/population_subunits/coarse/analysis/few_cells_tf_analyse_all.py | google/rgc-models | 0dea94bbd54f591d82d95169e33d40bb55b6be94 | [
"Apache-2.0"
]
| null | null | null | response_model/python/population_subunits/coarse/analysis/few_cells_tf_analyse_all.py | google/rgc-models | 0dea94bbd54f591d82d95169e33d40bb55b6be94 | [
"Apache-2.0"
]
| 1 | 2022-01-12T12:44:17.000Z | 2022-01-12T12:44:17.000Z | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
'''Analysis file.'''
import sys
import os.path
import tensorflow as tf
from absl import app
from absl import flags
from absl import gfile
import cPickle as pickle
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pylab
import matplotlib.pyplot as plt
import numpy as np, h5py
import scipy.io as sio
from scipy import ndimage
import random
import re # regular expression matching
FLAGS = flags.FLAGS
flags.DEFINE_string('folder_name', 'experiment4', 'folder where to store all the data')
flags.DEFINE_string('save_location',
'/home/bhaishahster/',
'where to store logs and outputs?');
flags.DEFINE_string('data_location',
'/home/bhaishahster/data_breakdown/',
'where to take data from?')
flags.DEFINE_integer('n_b_in_c', 10, 'number of batches in one chunk of data')
flags.DEFINE_integer('np_randseed', 23, 'numpy RNG seed')
flags.DEFINE_integer('randseed', 65, 'python RNG seed')
flags.DEFINE_integer('ratio_SU', 2, 'ratio of subunits/cells')
flags.DEFINE_string('model_id', 'poisson', 'which model to fit')
FLAGS = flags.FLAGS
def main(argv):
print('\nCode started')
np.random.seed(FLAGS.np_randseed)
random.seed(FLAGS.randseed)
## Load data summary
filename = FLAGS.data_location + 'data_details.mat'
summary_file = gfile.Open(filename, 'r')
data_summary = sio.loadmat(summary_file)
cells = np.squeeze(data_summary['cells'])
if FLAGS.model_id == 'poisson' or FLAGS.model_id == 'logistic' or FLAGS.model_id == 'hinge':
cells_choose = (cells ==3287) | (cells ==3318 ) | (cells ==3155) | (cells ==3066)
if FLAGS.model_id == 'poisson_full':
cells_choose = np.array(np.ones(np.shape(cells)), dtype='bool')
n_cells = np.sum(cells_choose)
tot_spks = np.squeeze(data_summary['tot_spks'])
total_mask = np.squeeze(data_summary['totalMaskAccept_log']).T
tot_spks_chosen_cells = tot_spks[cells_choose]
chosen_mask = np.array(np.sum(total_mask[cells_choose,:],0)>0, dtype='bool')
print(np.shape(chosen_mask))
print(np.sum(chosen_mask))
stim_dim = np.sum(chosen_mask)
print('\ndataset summary loaded')
# use stim_dim, chosen_mask, cells_choose, tot_spks_chosen_cells, n_cells
# decide the number of subunits to fit
n_su = FLAGS.ratio_SU*n_cells
#batchsz = [100, 500, 1000, 100, 500, 1000, 100, 500, 1000, 1000, 1000, 5000, 10000, 5000, 10000]
#n_b_in_c = [10, 2, 1, 10, 2, 1, 10, 2, 1, 1, 1, 1, 1, 1, 1 ]
#step_sz = [0.0001, 0.0001, 0.0001, 0.01, 0.01, 0.01 , 1, 1, 1, 10, 100, 10, 10, 1, 1 ]
batchsz = [100, 500, 1000, 5000, 1000, 100, 500, 1000, 5000, 10000, 100, 500, 1000, 5000, 10000, 100, 500, 1000, 5000, 10000]
n_b_in_c = [10, 2, 1, 1, 1, 10, 2, 1, 1, 1, 10, 2, 1, 1, 1, 10, 2, 1, 1, 1 ]
step_sz = [0.1, 0.1, 0.1, 0.1, 0.1, 1 , 1, 1, 1, 1, 5, 5, 5, 5, 5, 10, 10, 10, 10, 10 ]
with tf.Session() as sess:
# Learn population model!
stim = tf.placeholder(tf.float32, shape=[None, stim_dim], name='stim')
resp = tf.placeholder(tf.float32, name='resp')
data_len = tf.placeholder(tf.float32, name='data_len')
# get filename
if FLAGS.model_id == 'poisson' or FLAGS.model_id == 'poisson_full':
w = tf.Variable(np.array(0.01 * np.random.randn(stim_dim, n_su), dtype='float32'))
a = tf.Variable(np.array(0.1 * np.random.rand(n_cells, 1, n_su), dtype='float32'))
if FLAGS.model_id == 'logistic' or FLAGS.model_id == 'hinge':
w = tf.Variable(np.array(0.01 * np.random.randn(stim_dim, n_su), dtype='float32'))
a = tf.Variable(np.array(0.01 * np.random.rand(n_su, n_cells), dtype='float32'))
b_init = np.random.randn(n_cells) #np.log((np.sum(response,0))/(response.shape[0]-np.sum(response,0)))
b = tf.Variable(b_init,dtype='float32')
plt.figure()
for icnt, ibatchsz in enumerate(batchsz):
in_b_in_c = n_b_in_c[icnt]
istep_sz = np.array(step_sz[icnt],dtype='double')
print(icnt)
if FLAGS.model_id == 'poisson':
short_filename = ('data_model=ASM_pop_batch_sz='+ str(ibatchsz) + '_n_b_in_c' + str(in_b_in_c) +
'_step_sz'+ str(istep_sz)+'_bg')
else:
short_filename = ('data_model='+ str(FLAGS.model_id) +'_batch_sz='+ str(ibatchsz) + '_n_b_in_c' + str(in_b_in_c) +
'_step_sz'+ str(istep_sz)+'_bg')
parent_folder = FLAGS.save_location + FLAGS.folder_name + '/'
save_location = parent_folder +short_filename + '/'
print(gfile.IsDirectory(save_location))
print(save_location)
save_filename = save_location + short_filename
#determine filelist
file_list = gfile.ListDirectory(save_location)
save_filename = save_location + short_filename
print('\nLoading: ', save_filename)
bin_files = []
meta_files = []
for file_n in file_list:
if re.search(short_filename + '.', file_n):
if re.search('.meta', file_n):
meta_files += [file_n]
else:
bin_files += [file_n]
#print(bin_files)
print(len(meta_files), len(bin_files), len(file_list))
# get latest iteration
iterations = np.array([])
for file_name in bin_files:
try:
iterations = np.append(iterations, int(file_name.split('/')[-1].split('-')[-1]))
except:
print('Could not load filename: ' + file_name)
iterations.sort()
print(iterations)
iter_plot = iterations[-1]
print(int(iter_plot))
# load tensorflow variables
saver_var = tf.train.Saver(tf.all_variables())
restore_file = save_filename + '-' + str(int(iter_plot))
saver_var.restore(sess, restore_file)
a_eval = a.eval()
print(np.exp(np.squeeze(a_eval)))
#print(np.shape(a_eval))
# get 2D region to plot
mask2D = np.reshape(chosen_mask, [40, 80])
nz_idx = np.nonzero(mask2D)
np.shape(nz_idx)
print(nz_idx)
ylim = np.array([np.min(nz_idx[0])-1, np.max(nz_idx[0])+1])
xlim = np.array([np.min(nz_idx[1])-1, np.max(nz_idx[1])+1])
w_eval = w.eval()
#plt.figure()
n_su = w_eval.shape[1]
for isu in np.arange(n_su):
xx = np.zeros((3200))
xx[chosen_mask] = w_eval[:, isu]
fig = plt.subplot(20, n_su, n_su * icnt + isu+1)
plt.imshow(np.reshape(xx, [40, 80]), interpolation='nearest', cmap='gray')
plt.ylim(ylim)
plt.xlim(xlim)
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
#if FLAGS.model_id == 'logistic' or FLAGS.model_id == 'hinge':
# plt.title(str(a_eval[isu, :]))
#else:
# plt.title(str(np.squeeze(np.exp(a_eval[:, 0, isu]))), fontsize=12)
if isu == 4:
plt.title('Iteration:' + str(int(iter_plot)) + ' batchSz:' + str(ibatchsz) + ' step size:' + str(istep_sz), fontsize=18)
plt.show()
plt.draw()
if __name__ == '__main__':
app.run()
| 38.450495 | 143 | 0.624179 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,459 | 0.316596 |
d3e9aaa50f57573a484dd6d782bbd14b01bbbceb | 2,074 | py | Python | wagtail/admin/views/pages/unpublish.py | brownaa/wagtail | c97bc56c6822eb1b6589d5c33e07f71acfc48845 | [
"BSD-3-Clause"
]
| 8,851 | 2016-12-09T19:01:45.000Z | 2022-03-31T04:45:06.000Z | wagtail/admin/views/pages/unpublish.py | brownaa/wagtail | c97bc56c6822eb1b6589d5c33e07f71acfc48845 | [
"BSD-3-Clause"
]
| 5,197 | 2016-12-09T19:24:37.000Z | 2022-03-31T22:17:55.000Z | wagtail/admin/views/pages/unpublish.py | brownaa/wagtail | c97bc56c6822eb1b6589d5c33e07f71acfc48845 | [
"BSD-3-Clause"
]
| 2,548 | 2016-12-09T18:16:55.000Z | 2022-03-31T21:34:38.000Z | from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404, redirect
from django.template.response import TemplateResponse
from django.urls import reverse
from django.utils.translation import gettext as _
from wagtail.admin import messages
from wagtail.admin.views.pages.utils import get_valid_next_url_from_request
from wagtail.core import hooks
from wagtail.core.models import Page, UserPagePermissionsProxy
def unpublish(request, page_id):
page = get_object_or_404(Page, id=page_id).specific
user_perms = UserPagePermissionsProxy(request.user)
if not user_perms.for_page(page).can_unpublish():
raise PermissionDenied
next_url = get_valid_next_url_from_request(request)
if request.method == 'POST':
include_descendants = request.POST.get("include_descendants", False)
for fn in hooks.get_hooks('before_unpublish_page'):
result = fn(request, page)
if hasattr(result, 'status_code'):
return result
page.unpublish(user=request.user)
if include_descendants:
for live_descendant_page in page.get_descendants().live().defer_streamfields().specific():
if user_perms.for_page(live_descendant_page).can_unpublish():
live_descendant_page.unpublish()
for fn in hooks.get_hooks('after_unpublish_page'):
result = fn(request, page)
if hasattr(result, 'status_code'):
return result
messages.success(request, _("Page '{0}' unpublished.").format(page.get_admin_display_title()), buttons=[
messages.button(reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit'))
])
if next_url:
return redirect(next_url)
return redirect('wagtailadmin_explore', page.get_parent().id)
return TemplateResponse(request, 'wagtailadmin/pages/confirm_unpublish.html', {
'page': page,
'next': next_url,
'live_descendant_count': page.get_descendants().live().count(),
})
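# Editorial sketch (not part of Wagtail itself): a project can intercept the view
# above by registering one of the hooks it calls, e.g. in its own wagtail_hooks.py:
#
#     from django.shortcuts import redirect
#     from wagtail.core import hooks
#
#     @hooks.register('before_unpublish_page')
#     def protect_homepage(request, page):      # hypothetical rule
#         if page.slug == 'home':
#             return redirect('wagtailadmin_explore', page.get_parent().id)
#
# Any hook that returns an object with a status_code short-circuits the unpublish.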
| 37.709091 | 112 | 0.696721 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 254 | 0.122469 |
d3e9c1e71145908db6a37bf71e7072535569119d | 14,783 | py | Python | bcipy/display/rsvp/display.py | mberkanbicer/BciPy | c18878ad6fc4d1f69e2091b8f029f3b9ab9a923a | [
"MIT"
]
| 32 | 2020-11-13T17:53:25.000Z | 2022-03-24T21:12:31.000Z | bcipy/display/rsvp/display.py | mberkanbicer/BciPy | c18878ad6fc4d1f69e2091b8f029f3b9ab9a923a | [
"MIT"
]
| 20 | 2020-12-02T17:40:42.000Z | 2022-03-16T16:38:05.000Z | bcipy/display/rsvp/display.py | mberkanbicer/BciPy | c18878ad6fc4d1f69e2091b8f029f3b9ab9a923a | [
"MIT"
]
| 10 | 2020-12-16T02:32:37.000Z | 2022-03-23T16:31:59.000Z | import logging
import os.path as path
from typing import List, Optional, Tuple
from psychopy import core, visual
from bcipy.acquisition.marker_writer import NullMarkerWriter, MarkerWriter
from bcipy.helpers.task import SPACE_CHAR
from bcipy.helpers.stimuli import resize_image
from bcipy.helpers.system_utils import get_screen_resolution
from bcipy.helpers.triggers import TriggerCallback, _calibration_trigger
class RSVPDisplay(object):
"""RSVP Display Object for inquiry Presentation.
    Animates an inquiry in RSVP. Mode should be determined outside.
"""
def __init__(
self,
window: visual.Window,
static_clock,
experiment_clock: core.Clock,
marker_writer: Optional[MarkerWriter] = None,
task_color: List[str] = ['white'],
task_font: str = 'Times',
task_pos: Tuple[float, float] = (-.8, .9),
task_height: float = 0.2,
task_text: str = '1/100',
info_color: List[str] = ['white'],
info_text: List[str] = ['Information Text'],
info_font: List[str] = ['Times'],
info_pos=[(.8, .9)],
info_height=[0.2],
stim_font='Times',
stim_pos=(-.8, .9),
stim_height=0.2,
stim_inquiry: List[str] = ['a'] * 10,
stim_colors: List[str] = ['white'] * 10,
stim_timing: List[float] = [1] * 10,
is_txt_stim: bool = True,
static_time: float = .05,
trigger_type: str = 'image',
space_char: SPACE_CHAR = SPACE_CHAR):
"""Initialize RSVP window parameters and objects.
PARAMETERS:
----------
# Experiment
window(visual.Window): PsychoPy Window
        static_clock: used as the static period (staticPeriod) to control timing between stimuli
experiment_clock(core.Clock): Clock used to timestamp experiment
marker_writer(MarkerWriter): object used to write triggers to
the daq stream.
# Task
task_color(list[string]): Color of the task string. Shares the
length of the task_text. If of length 1 the entire task
bar shares the same color.
task_font(string): Font of task string
task_pos(tuple): position of task string
task_height(float): height for task string
task_text(string): text of the task bar
# Info
info_text(list[string]): Text list for information texts
info_color(list[string]): Color of the information text string
info_font(list[string]): Font of the information text string
info_pos(list[tuple]): Position of the information text string
info_height(list[float]): Height of the information text string
# Stimuli
stim_height(float): height of the stimuli object
stim_pos(tuple): position of stimuli
stim_font(string): font of the stimuli
stim_inquiry(list[string]): list of elements to flash
stim_colors(list[string]): list of colors for stimuli
stim_timing(list[float]): timing for each letter flash
"""
self.window = window
self.refresh_rate = window.getActualFrameRate()
self.logger = logging.getLogger(__name__)
self.stimuli_inquiry = stim_inquiry
self.stimuli_colors = stim_colors
self.stimuli_timing = stim_timing
self.is_txt_stim = is_txt_stim
self.staticPeriod = static_clock
self.static_time = static_time
self.experiment_clock = experiment_clock
self.timing_clock = core.Clock()
# Used to handle writing the marker stimulus
self.marker_writer = marker_writer or NullMarkerWriter()
# Length of the stimuli (number of flashes)
self.stim_length = len(stim_inquiry)
# Informational Parameters
self.info_text = info_text
# Stim parameters
self.stimuli_font = stim_font
self.stimuli_height = stim_height
self.stimuli_pos = stim_pos
# Trigger Items
self.first_run = True
self.trigger_type = trigger_type
self.trigger_callback = TriggerCallback()
# Callback used on presentation of first stimulus.
self.first_stim_callback = lambda _sti: None
self.size_list_sti = []
self.space_char = space_char
self.task = visual.TextStim(win=self.window, color=task_color[0],
height=task_height,
text=task_text,
font=task_font, pos=task_pos,
wrapWidth=None, colorSpace='rgb',
opacity=1, depth=-6.0)
# Create multiple text objects based on input
self.text = []
for idx in range(len(self.info_text)):
self.text.append(visual.TextStim(
win=self.window,
color=info_color[idx],
height=info_height[idx],
text=self.info_text[idx],
font=info_font[idx],
pos=info_pos[idx],
wrapWidth=None, colorSpace='rgb',
opacity=1, depth=-6.0))
# Create Stimuli Object
if self.is_txt_stim:
self.sti = visual.TextStim(
win=self.window,
color='white',
height=self.stimuli_height,
text='+',
font=self.stimuli_font,
pos=self.stimuli_pos,
wrapWidth=None, colorSpace='rgb',
opacity=1, depth=-6.0)
else:
self.sti = visual.ImageStim(
win=self.window,
image=None,
mask=None,
pos=self.stimuli_pos,
ori=0.0)
def draw_static(self):
"""Draw static elements in a stimulus."""
self.task.draw()
for idx in range(len(self.text)):
self.text[idx].draw()
def schedule_to(self, ele_list=[], time_list=[], color_list=[]):
"""Schedule stimuli elements (works as a buffer).
Args:
ele_list(list[string]): list of elements of stimuli
time_list(list[float]): list of timings of stimuli
color_list(list[string]): colors of elements of stimuli
"""
self.stimuli_inquiry = ele_list
self.stimuli_timing = time_list
self.stimuli_colors = color_list
def update_task(self, text: str, color_list: List[str], pos: Tuple[float]):
"""Update Task Object.
PARAMETERS:
-----------
text: text for task
color_list: list of the colors for each char
pos: position of task
"""
self.task.text = text
self.task.color = color_list[0]
self.task.pos = pos
def do_inquiry(self):
"""Do inquiry.
        Animates an inquiry of flashing letters to achieve RSVP.
"""
# init an array for timing information
timing = []
if self.first_run:
            # play an inquiry start sound to help orient triggers
first_stim_timing = _calibration_trigger(
self.experiment_clock,
trigger_type=self.trigger_type, display=self.window,
on_trigger=self.marker_writer.push_marker)
timing.append(first_stim_timing)
self.first_stim_time = first_stim_timing[-1]
self.first_run = False
        # generate an inquiry (list of stimuli with meta information)
inquiry = self._generate_inquiry()
# do the inquiry
for idx in range(len(inquiry)):
self.is_first_stim = (idx == 0)
# set a static period to do all our stim setting.
# will warn if ISI value is violated.
self.staticPeriod.name = 'Stimulus Draw Period'
self.staticPeriod.start(self.stimuli_timing[idx])
# Reset the timing clock to start presenting
self.window.callOnFlip(
self.trigger_callback.callback,
self.experiment_clock,
inquiry[idx]['sti_label'])
self.window.callOnFlip(self.marker_writer.push_marker, inquiry[idx]['sti_label'])
if idx == 0 and callable(self.first_stim_callback):
self.first_stim_callback(inquiry[idx]['sti'])
# Draw stimulus for n frames
inquiry[idx]['sti'].draw()
self.draw_static()
self.window.flip()
core.wait((inquiry[idx]['time_to_present'] - 1) / self.refresh_rate)
# End static period
self.staticPeriod.complete()
# append timing information
if self.is_txt_stim:
timing.append(self.trigger_callback.timing)
else:
timing.append(self.trigger_callback.timing)
self.trigger_callback.reset()
# draw in static and flip once more
self.draw_static()
self.window.flip()
return timing
def _generate_inquiry(self):
"""Generate inquiry.
Generate stimuli for next RSVP inquiry.
"""
stim_info = []
for idx in range(len(self.stimuli_inquiry)):
current_stim = {}
# turn ms timing into frames! Much more accurate!
current_stim['time_to_present'] = int(self.stimuli_timing[idx] * self.refresh_rate)
# check if stimulus needs to use a non-default size
if self.size_list_sti:
this_stimuli_size = self.size_list_sti[idx]
else:
this_stimuli_size = self.stimuli_height
# Set the Stimuli attrs
if self.stimuli_inquiry[idx].endswith('.png'):
current_stim['sti'] = self.create_stimulus(mode='image', height_int=this_stimuli_size)
current_stim['sti'].image = self.stimuli_inquiry[idx]
current_stim['sti'].size = resize_image(
current_stim['sti'].image, current_stim['sti'].win.size, this_stimuli_size)
current_stim['sti_label'] = path.splitext(
path.basename(self.stimuli_inquiry[idx]))[0]
else:
# text stimulus
current_stim['sti'] = self.create_stimulus(mode='text', height_int=this_stimuli_size)
txt = self.stimuli_inquiry[idx]
# customize presentation of space char.
current_stim['sti'].text = txt if txt != SPACE_CHAR else self.space_char
current_stim['sti'].color = self.stimuli_colors[idx]
current_stim['sti_label'] = txt
# test whether the word will be too big for the screen
text_width = current_stim['sti'].boundingBox[0]
if text_width > self.window.size[0]:
monitor_width, monitor_height = get_screen_resolution()
text_height = current_stim['sti'].boundingBox[1]
# If we are in full-screen, text size in Psychopy norm units
# is monitor width/monitor height
if self.window.size[0] == monitor_width:
new_text_width = monitor_width / monitor_height
else:
# If not, text width is calculated relative to both
# monitor size and window size
new_text_width = (
self.window.size[1] / monitor_height) * (
monitor_width / monitor_height)
new_text_height = (text_height * new_text_width) / text_width
current_stim['sti'].height = new_text_height
stim_info.append(current_stim)
return stim_info
def update_task_state(self, text: str, color_list: List[str]) -> None:
"""Update task state.
Removes letters or appends to the right.
Args:
text(string): new text for task state
color_list(list[string]): list of colors for each
"""
task_state_text = visual.TextStim(
win=self.window, font=self.task.font, text=text)
x_task_position = task_state_text.boundingBox[0] / \
self.window.size[0] - 1
task_pos = (x_task_position, 1 - self.task.height)
self.update_task(text=text, color_list=color_list, pos=task_pos)
def wait_screen(self, message, color):
"""Wait Screen.
Args:
            message(string): message to be displayed while waiting
            color(string): color of the wait message text
        """
# Construct the wait message
wait_message = visual.TextStim(win=self.window, font=self.stimuli_font,
text=message,
height=.1,
color=color,
pos=(0, -.5),
wrapWidth=2,
colorSpace='rgb',
opacity=1, depth=-6.0)
# Try adding our BCI logo. Pass if not found.
try:
wait_logo = visual.ImageStim(
self.window,
image='bcipy/static/images/gui_images/bci_cas_logo.png',
pos=(0, .5),
mask=None,
ori=0.0)
wait_logo.size = resize_image(
'bcipy/static/images/gui_images/bci_cas_logo.png',
self.window.size, 1)
wait_logo.draw()
except Exception:
self.logger.debug('Cannot load logo image')
pass
# Draw and flip the screen.
wait_message.draw()
self.window.flip()
def create_stimulus(self, height_int: int, mode: str = 'text'):
"""Create Stimulus.
Returns a TextStim or ImageStim object.
Args:
height_int: The height of the stimulus
mode: "text" or "image", determines which to return
"""
if mode == 'text':
return visual.TextStim(
win=self.window,
color='white',
height=height_int,
text='+',
font=self.stimuli_font,
pos=self.stimuli_pos,
wrapWidth=None,
colorSpace='rgb',
opacity=1,
depth=-6.0)
if mode == 'image':
return visual.ImageStim(
win=self.window,
image=None,
mask=None,
units='',
pos=self.stimuli_pos,
size=(height_int, height_int),
ori=0.0)
| 37.330808 | 102 | 0.558412 | 14,367 | 0.97186 | 0 | 0 | 0 | 0 | 0 | 0 | 4,453 | 0.301224 |
d3e9f7de3f63d7f3de57a5c2272c7c0ae564d742 | 932 | py | Python | cloud/db/db.py | bother3000/Smart-IoT-Planting-System | 7c33f150850fb8c9bc250fa02bf306f02f7cafb8 | [
"MIT"
]
| 171 | 2017-09-22T08:25:18.000Z | 2022-02-28T07:56:41.000Z | cloud/db/db.py | bother3000/Smart-IoT-Planting-System | 7c33f150850fb8c9bc250fa02bf306f02f7cafb8 | [
"MIT"
]
| 2 | 2018-06-28T02:33:04.000Z | 2021-06-09T06:56:58.000Z | cloud/db/db.py | bother3000/Smart-IoT-Planting-System | 7c33f150850fb8c9bc250fa02bf306f02f7cafb8 | [
"MIT"
]
| 108 | 2017-10-03T20:11:52.000Z | 2022-03-19T15:21:48.000Z | #!/usr/bin/env python
import pymysql #Python3
db = pymysql.connect("localhost","sips","root","zaijian" )
cursor = db.cursor()
cursor.execute("SELECT VERSION()")
data = cursor.fetchone()
print ("Database version : %s " % data)
db.close()
def create_table():
db = pymysql.connect("localhost","sips","root","zaijian" )
cursor = db.cursor()
cursor.execute("DROP TABLE IF EXISTS EMPLOYEE")
sql = """CREATE TABLE EMPLOYEE (
FIRST_NAME CHAR(20) NOT NULL,
LAST_NAME CHAR(20),
AGE INT,
SEX CHAR(1),
INCOME FLOAT )"""
cursor.execute(sql)
db.close()
def db_insert():
db = pymysql.connect("localhost","sips","root","zaijian" )
cursor = db.cursor()
sql = """INSERT INTO EMPLOYEE(FIRST_NAME,
LAST_NAME, AGE, SEX, INCOME)
VALUES ('Mac', 'Mohan', 20, 'M', 2000)"""
try:
cursor.execute(sql)
db.commit()
except:
db.rollback()
db.close()
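# Example driver (assumption: the helpers above are meant to be run in this
# order when the script is executed directly):
if __name__ == '__main__':
    create_table()
    db_insert()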
| 23.897436 | 60 | 0.610515 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 487 | 0.522532 |
d3ea46bda3dee2d1a7eb7b7fac100d0a90820e25 | 14,363 | py | Python | sdk/python/pulumi_aws_native/amplify/_inputs.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
]
| 29 | 2021-09-30T19:32:07.000Z | 2022-03-22T21:06:08.000Z | sdk/python/pulumi_aws_native/amplify/_inputs.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
]
| 232 | 2021-09-30T19:26:26.000Z | 2022-03-31T23:22:06.000Z | sdk/python/pulumi_aws_native/amplify/_inputs.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
]
| 4 | 2021-11-10T19:42:01.000Z | 2022-02-05T10:15:49.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from ._enums import *
__all__ = [
'AppAutoBranchCreationConfigArgs',
'AppBasicAuthConfigArgs',
'AppCustomRuleArgs',
'AppEnvironmentVariableArgs',
'AppTagArgs',
'BranchBasicAuthConfigArgs',
'BranchEnvironmentVariableArgs',
'BranchTagArgs',
'DomainSubDomainSettingArgs',
]
@pulumi.input_type
class AppAutoBranchCreationConfigArgs:
def __init__(__self__, *,
auto_branch_creation_patterns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
basic_auth_config: Optional[pulumi.Input['AppBasicAuthConfigArgs']] = None,
build_spec: Optional[pulumi.Input[str]] = None,
enable_auto_branch_creation: Optional[pulumi.Input[bool]] = None,
enable_auto_build: Optional[pulumi.Input[bool]] = None,
enable_performance_mode: Optional[pulumi.Input[bool]] = None,
enable_pull_request_preview: Optional[pulumi.Input[bool]] = None,
environment_variables: Optional[pulumi.Input[Sequence[pulumi.Input['AppEnvironmentVariableArgs']]]] = None,
pull_request_environment_name: Optional[pulumi.Input[str]] = None,
stage: Optional[pulumi.Input['AppAutoBranchCreationConfigStage']] = None):
if auto_branch_creation_patterns is not None:
pulumi.set(__self__, "auto_branch_creation_patterns", auto_branch_creation_patterns)
if basic_auth_config is not None:
pulumi.set(__self__, "basic_auth_config", basic_auth_config)
if build_spec is not None:
pulumi.set(__self__, "build_spec", build_spec)
if enable_auto_branch_creation is not None:
pulumi.set(__self__, "enable_auto_branch_creation", enable_auto_branch_creation)
if enable_auto_build is not None:
pulumi.set(__self__, "enable_auto_build", enable_auto_build)
if enable_performance_mode is not None:
pulumi.set(__self__, "enable_performance_mode", enable_performance_mode)
if enable_pull_request_preview is not None:
pulumi.set(__self__, "enable_pull_request_preview", enable_pull_request_preview)
if environment_variables is not None:
pulumi.set(__self__, "environment_variables", environment_variables)
if pull_request_environment_name is not None:
pulumi.set(__self__, "pull_request_environment_name", pull_request_environment_name)
if stage is not None:
pulumi.set(__self__, "stage", stage)
@property
@pulumi.getter(name="autoBranchCreationPatterns")
def auto_branch_creation_patterns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "auto_branch_creation_patterns")
@auto_branch_creation_patterns.setter
def auto_branch_creation_patterns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "auto_branch_creation_patterns", value)
@property
@pulumi.getter(name="basicAuthConfig")
def basic_auth_config(self) -> Optional[pulumi.Input['AppBasicAuthConfigArgs']]:
return pulumi.get(self, "basic_auth_config")
@basic_auth_config.setter
def basic_auth_config(self, value: Optional[pulumi.Input['AppBasicAuthConfigArgs']]):
pulumi.set(self, "basic_auth_config", value)
@property
@pulumi.getter(name="buildSpec")
def build_spec(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "build_spec")
@build_spec.setter
def build_spec(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "build_spec", value)
@property
@pulumi.getter(name="enableAutoBranchCreation")
def enable_auto_branch_creation(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "enable_auto_branch_creation")
@enable_auto_branch_creation.setter
def enable_auto_branch_creation(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_auto_branch_creation", value)
@property
@pulumi.getter(name="enableAutoBuild")
def enable_auto_build(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "enable_auto_build")
@enable_auto_build.setter
def enable_auto_build(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_auto_build", value)
@property
@pulumi.getter(name="enablePerformanceMode")
def enable_performance_mode(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "enable_performance_mode")
@enable_performance_mode.setter
def enable_performance_mode(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_performance_mode", value)
@property
@pulumi.getter(name="enablePullRequestPreview")
def enable_pull_request_preview(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "enable_pull_request_preview")
@enable_pull_request_preview.setter
def enable_pull_request_preview(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_pull_request_preview", value)
@property
@pulumi.getter(name="environmentVariables")
def environment_variables(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AppEnvironmentVariableArgs']]]]:
return pulumi.get(self, "environment_variables")
@environment_variables.setter
def environment_variables(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AppEnvironmentVariableArgs']]]]):
pulumi.set(self, "environment_variables", value)
@property
@pulumi.getter(name="pullRequestEnvironmentName")
def pull_request_environment_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "pull_request_environment_name")
@pull_request_environment_name.setter
def pull_request_environment_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pull_request_environment_name", value)
@property
@pulumi.getter
def stage(self) -> Optional[pulumi.Input['AppAutoBranchCreationConfigStage']]:
return pulumi.get(self, "stage")
@stage.setter
def stage(self, value: Optional[pulumi.Input['AppAutoBranchCreationConfigStage']]):
pulumi.set(self, "stage", value)
@pulumi.input_type
class AppBasicAuthConfigArgs:
def __init__(__self__, *,
enable_basic_auth: Optional[pulumi.Input[bool]] = None,
password: Optional[pulumi.Input[str]] = None,
username: Optional[pulumi.Input[str]] = None):
if enable_basic_auth is not None:
pulumi.set(__self__, "enable_basic_auth", enable_basic_auth)
if password is not None:
pulumi.set(__self__, "password", password)
if username is not None:
pulumi.set(__self__, "username", username)
@property
@pulumi.getter(name="enableBasicAuth")
def enable_basic_auth(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "enable_basic_auth")
@enable_basic_auth.setter
def enable_basic_auth(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_basic_auth", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter
def username(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "username")
@username.setter
def username(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "username", value)
@pulumi.input_type
class AppCustomRuleArgs:
def __init__(__self__, *,
source: pulumi.Input[str],
target: pulumi.Input[str],
condition: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None):
pulumi.set(__self__, "source", source)
pulumi.set(__self__, "target", target)
if condition is not None:
pulumi.set(__self__, "condition", condition)
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter
def source(self) -> pulumi.Input[str]:
return pulumi.get(self, "source")
@source.setter
def source(self, value: pulumi.Input[str]):
pulumi.set(self, "source", value)
@property
@pulumi.getter
def target(self) -> pulumi.Input[str]:
return pulumi.get(self, "target")
@target.setter
def target(self, value: pulumi.Input[str]):
pulumi.set(self, "target", value)
@property
@pulumi.getter
def condition(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "condition")
@condition.setter
def condition(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "condition", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
@pulumi.input_type
class AppEnvironmentVariableArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class AppTagArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
value: pulumi.Input[str]):
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class BranchBasicAuthConfigArgs:
def __init__(__self__, *,
password: pulumi.Input[str],
username: pulumi.Input[str],
enable_basic_auth: Optional[pulumi.Input[bool]] = None):
pulumi.set(__self__, "password", password)
pulumi.set(__self__, "username", username)
if enable_basic_auth is not None:
pulumi.set(__self__, "enable_basic_auth", enable_basic_auth)
@property
@pulumi.getter
def password(self) -> pulumi.Input[str]:
return pulumi.get(self, "password")
@password.setter
def password(self, value: pulumi.Input[str]):
pulumi.set(self, "password", value)
@property
@pulumi.getter
def username(self) -> pulumi.Input[str]:
return pulumi.get(self, "username")
@username.setter
def username(self, value: pulumi.Input[str]):
pulumi.set(self, "username", value)
@property
@pulumi.getter(name="enableBasicAuth")
def enable_basic_auth(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "enable_basic_auth")
@enable_basic_auth.setter
def enable_basic_auth(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_basic_auth", value)
@pulumi.input_type
class BranchEnvironmentVariableArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class BranchTagArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
value: pulumi.Input[str]):
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class DomainSubDomainSettingArgs:
def __init__(__self__, *,
branch_name: pulumi.Input[str],
prefix: pulumi.Input[str]):
pulumi.set(__self__, "branch_name", branch_name)
pulumi.set(__self__, "prefix", prefix)
@property
@pulumi.getter(name="branchName")
def branch_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "branch_name")
@branch_name.setter
def branch_name(self, value: pulumi.Input[str]):
pulumi.set(self, "branch_name", value)
@property
@pulumi.getter
def prefix(self) -> pulumi.Input[str]:
return pulumi.get(self, "prefix")
@prefix.setter
def prefix(self, value: pulumi.Input[str]):
pulumi.set(self, "prefix", value)
| 34.609639 | 124 | 0.664555 | 13,540 | 0.9427 | 0 | 0 | 13,711 | 0.954606 | 0 | 0 | 2,111 | 0.146975 |
d3eaa974be46c94752b5084755d30c91ec1e2ca1 | 4,203 | py | Python | awsecommerceservice/models/item_lookup_request.py | nidaizamir/Test-PY | 26ea1019115a1de3b1b37a4b830525e164ac55ce | [
"MIT"
]
| null | null | null | awsecommerceservice/models/item_lookup_request.py | nidaizamir/Test-PY | 26ea1019115a1de3b1b37a4b830525e164ac55ce | [
"MIT"
]
| null | null | null | awsecommerceservice/models/item_lookup_request.py | nidaizamir/Test-PY | 26ea1019115a1de3b1b37a4b830525e164ac55ce | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
"""
awsecommerceservice
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
class ItemLookupRequest(object):
"""Implementation of the 'ItemLookupRequest' model.
TODO: type model description here.
Attributes:
condition (ConditionEnum): TODO: type description here.
id_type (IdTypeEnum): TODO: type description here.
merchant_id (string): TODO: type description here.
item_id (list of string): TODO: type description here.
response_group (list of string): TODO: type description here.
search_index (string): TODO: type description here.
variation_page (object): TODO: type description here.
related_item_page (object): TODO: type description here.
relationship_type (list of string): TODO: type description here.
include_reviews_summary (string): TODO: type description here.
truncate_reviews_at (int): TODO: type description here.
"""
# Create a mapping from Model property names to API property names
_names = {
"condition":'Condition',
"id_type":'IdType',
"merchant_id":'MerchantId',
"item_id":'ItemId',
"response_group":'ResponseGroup',
"search_index":'SearchIndex',
"variation_page":'VariationPage',
"related_item_page":'RelatedItemPage',
"relationship_type":'RelationshipType',
"include_reviews_summary":'IncludeReviewsSummary',
"truncate_reviews_at":'TruncateReviewsAt'
}
def __init__(self,
condition=None,
id_type=None,
merchant_id=None,
item_id=None,
response_group=None,
search_index=None,
variation_page=None,
related_item_page=None,
relationship_type=None,
include_reviews_summary=None,
truncate_reviews_at=None):
"""Constructor for the ItemLookupRequest class"""
# Initialize members of the class
self.condition = condition
self.id_type = id_type
self.merchant_id = merchant_id
self.item_id = item_id
self.response_group = response_group
self.search_index = search_index
self.variation_page = variation_page
self.related_item_page = related_item_page
self.relationship_type = relationship_type
self.include_reviews_summary = include_reviews_summary
self.truncate_reviews_at = truncate_reviews_at
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
condition = dictionary.get('Condition')
id_type = dictionary.get('IdType')
merchant_id = dictionary.get('MerchantId')
item_id = dictionary.get('ItemId')
response_group = dictionary.get('ResponseGroup')
search_index = dictionary.get('SearchIndex')
variation_page = dictionary.get('VariationPage')
related_item_page = dictionary.get('RelatedItemPage')
relationship_type = dictionary.get('RelationshipType')
include_reviews_summary = dictionary.get('IncludeReviewsSummary')
truncate_reviews_at = dictionary.get('TruncateReviewsAt')
# Return an object of this model
return cls(condition,
id_type,
merchant_id,
item_id,
response_group,
search_index,
variation_page,
related_item_page,
relationship_type,
include_reviews_summary,
truncate_reviews_at)
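# Illustrative usage (assumption: `payload` mirrors the API's wire-format keys;
# the ASIN below is a made-up placeholder):
#   payload = {'IdType': 'ASIN', 'ItemId': ['B00EXAMPLE'], 'ResponseGroup': ['Small']}
#   request = ItemLookupRequest.from_dictionary(payload)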
| 35.618644 | 83 | 0.623602 | 4,056 | 0.965025 | 0 | 0 | 1,613 | 0.383773 | 0 | 0 | 2,057 | 0.489412 |
d3eb09e186d2266dd713792bddd6301d09f60a0f | 6,372 | py | Python | data.py | zhaoyun630/R-NET-in-Keras | 425ed06ff5322cd5187b8e321865ab0459ec3825 | [
"MIT"
]
| 207 | 2017-07-12T18:14:38.000Z | 2021-08-01T20:25:44.000Z | data.py | zhaoyun630/R-NET-in-Keras | 425ed06ff5322cd5187b8e321865ab0459ec3825 | [
"MIT"
]
| 31 | 2017-08-20T08:30:48.000Z | 2021-03-03T17:47:46.000Z | data.py | zhaoyun630/R-NET-in-Keras | 425ed06ff5322cd5187b8e321865ab0459ec3825 | [
"MIT"
]
| 102 | 2017-07-28T11:19:49.000Z | 2021-08-01T20:26:31.000Z | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import cPickle as pickle
from keras import backend as K
from keras.utils import np_utils
from keras.preprocessing import sequence
from random import shuffle
import itertools
def load_dataset(filename):
with open(filename, 'rb') as f:
return pickle.load(f)
def padded_batch_input(input, indices=None, dtype=K.floatx(), maxlen=None):
if indices is None:
indices = np.arange(len(input))
batch_input = [input[i] for i in indices]
return sequence.pad_sequences(batch_input, maxlen, dtype, padding='post')
def categorical_batch_target(target, classes, indices=None, dtype=K.floatx()):
if indices is None:
indices = np.arange(len(target))
batch_target = [min(target[i], classes-1) for i in indices]
return np_utils.to_categorical(batch_target, classes).astype(dtype)
def lengthGroup(length):
if length < 150:
return 0
if length < 240:
return 1
if length < 380:
return 2
if length < 520:
return 3
if length < 660:
return 4
return 5
class BatchGen(object):
def __init__(self, inputs, targets=None, batch_size=None, stop=False,
shuffle=True, balance=False, dtype=K.floatx(),
flatten_targets=False, sort_by_length=False,
group=False, maxlen=None):
assert len(set([len(i) for i in inputs])) == 1
assert(not shuffle or not sort_by_length)
self.inputs = inputs
self.nb_samples = len(inputs[0])
self.batch_size = batch_size if batch_size else self.nb_samples
self.dtype = dtype
self.stop = stop
self.shuffle = shuffle
self.balance = balance
self.targets = targets
self.flatten_targets = flatten_targets
if isinstance(maxlen, (list, tuple)):
self.maxlen = maxlen
else:
self.maxlen = [maxlen] * len(inputs)
self.sort_by_length = None
if sort_by_length:
self.sort_by_length = np.argsort([-len(p) for p in inputs[0]])
# if self.targets and self.balance:
# self.class_weight = class_weight(self.targets)
self.generator = self._generator()
self._steps = -(-self.nb_samples // self.batch_size) # round up
self.groups = None
if group is not False:
indices = np.arange(self.nb_samples)
ff = lambda i: lengthGroup(len(inputs[0][i]))
indices = np.argsort([ff(i) for i in indices])
self.groups = itertools.groupby(indices, ff)
self.groups = {k: np.array(list(v)) for k, v in self.groups}
def _generator(self):
while True:
if self.shuffle:
permutation = np.random.permutation(self.nb_samples)
elif self.sort_by_length is not None:
permutation = self.sort_by_length
elif self.groups is not None:
# permutation = np.arange(self.nb_samples)
# tmp = permutation.copy()
# for id in self.group_ids:
# mask = (self.groups==id)
# tmp[mask] = np.random.permutation(permutation[mask])
# permutation = tmp
# import ipdb
# ipdb.set_trace()
for k, v in self.groups.items():
np.random.shuffle(v)
tmp = np.concatenate(self.groups.values())
batches = np.array_split(tmp, self._steps)
remainder = []
if len(batches[-1]) < self._steps:
remainder = batches[-1:]
batches = batches[:-1]
shuffle(batches)
batches += remainder
permutation = np.concatenate(batches)
else:
permutation = np.arange(self.nb_samples)
i = 0
longest = 767
while i < self.nb_samples:
if self.sort_by_length is not None:
bs = self.batch_size * 767 // self.inputs[0][permutation[i]].shape[0]
else:
bs = self.batch_size
indices = permutation[i : i + bs]
i = i + bs
# for i in range(0, self.nb_samples, self.batch_size):
# indices = permutation[i : i + self.batch_size]
batch_X = [padded_batch_input(x, indices, self.dtype, maxlen)
for x, maxlen in zip(self.inputs, self.maxlen)]
P = batch_X[0].shape[1]
if not self.targets:
yield batch_X
continue
batch_Y = [categorical_batch_target(target, P,
indices, self.dtype)
for target in self.targets]
if self.flatten_targets:
batch_Y = np.concatenate(batch_Y, axis=-1)
if not self.balance:
yield (batch_X, batch_Y)
continue
# batch_W = np.array([self.class_weight[y] for y in batch_targets])
batch_W = np.array([bs / self.batch_size for x in batch_X[0]]).astype(self.dtype)
yield (batch_X, batch_Y, batch_W)
if self.stop:
raise StopIteration
def __iter__(self):
return self.generator
def next(self):
return self.generator.next()
def __next__(self):
return self.generator.__next__()
def steps(self):
if self.sort_by_length is None:
return self._steps
print("Steps was called")
if self.shuffle:
permutation = np.random.permutation(self.nb_samples)
elif self.sort_by_length is not None:
permutation = self.sort_by_length
else:
permutation = np.arange(self.nb_samples)
i = 0
longest = 767
self._steps = 0
while i < self.nb_samples:
bs = self.batch_size * 767 // self.inputs[0][permutation[i]].shape[0]
i = i + bs
self._steps += 1
return self._steps
batch_gen = BatchGen # for backward compatibility
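# Illustrative usage (assumption: `contexts`/`questions` are lists of token-id
# arrays and `answer_starts`/`answer_ends` are lists of ints produced by the
# preprocessing step of this repo):
#   gen = BatchGen([contexts, questions],
#                  targets=[answer_starts, answer_ends],
#                  batch_size=32, shuffle=True)
#   model.fit_generator(gen, steps_per_epoch=gen.steps(), epochs=10)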
| 31.701493 | 97 | 0.557439 | 5,142 | 0.806968 | 2,750 | 0.431576 | 0 | 0 | 0 | 0 | 555 | 0.0871 |
d3ebad071ed8577b67556835d306ad97a7a130ad | 217 | py | Python | algoritmos/ajuste-curvas/caso-linear/Teste.py | mauriciomoccelin/metodos-numericos | 67bdb305d4db8a59943a17128ba2c06fefcc4a36 | [
"MIT"
]
| 3 | 2019-07-03T18:05:44.000Z | 2020-02-04T16:37:21.000Z | algoritmos/ajuste-curvas/caso-linear/Teste.py | mauriciomoccelin/metodos-numericos | 67bdb305d4db8a59943a17128ba2c06fefcc4a36 | [
"MIT"
]
| null | null | null | algoritmos/ajuste-curvas/caso-linear/Teste.py | mauriciomoccelin/metodos-numericos | 67bdb305d4db8a59943a17128ba2c06fefcc4a36 | [
"MIT"
]
| null | null | null | from RegressaoLinear import RegressaoLinear
planoCartesiano = {
0.5: 4.4,
2.8: 1.8,
4.2: 1.0,
6.7: 0.4,
8.3: 0.2
}
regressaoLinear = RegressaoLinear(planoCartesiano)
print(regressaoLinear.gerar_equacao())
| 16.692308 | 50 | 0.705069 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d3ec165a9fa9c3cf83a87cdf6db9f7df86904452 | 1,528 | py | Python | src/app.py | UBC-MDS/dsci_532_group19 | 9814f8f3bc2cf95c5855becf2d92265b8a97893d | [
"MIT"
]
| null | null | null | src/app.py | UBC-MDS/dsci_532_group19 | 9814f8f3bc2cf95c5855becf2d92265b8a97893d | [
"MIT"
]
| 27 | 2022-02-15T01:08:06.000Z | 2022-03-18T23:49:45.000Z | src/app.py | UBC-MDS/dsci_532_group19 | 9814f8f3bc2cf95c5855becf2d92265b8a97893d | [
"MIT"
]
| 2 | 2022-02-17T06:11:41.000Z | 2022-03-02T03:24:54.000Z | import dash
from dash import html
from dash import dcc
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output
from .layout import *
from .plot import *
# from layout import *
# from plot import *
app = dash.Dash(
__name__, external_stylesheets=[dbc.themes.BOOTSTRAP, "/css/button.css"]
)
app.title = "Data Science Salaries"
server = app.server
app.layout = html.Div(
[
dcc.Location(id="url", refresh=False),
topbar,
content,
# sidebar,
]
)
@app.callback(Output("scatter", "srcDoc"), Input("data_scientist", "value"))
def update(DS_identity):
rst = plot_sidebar(DS_identity)
return rst
@app.callback(
Output("world_map", "srcDoc"),
[Input("select-country", "value")],
)
def update(xcon):
return plot_map(xcon)
@app.callback(
Output("salary_heatmap", "srcDoc"),
[Input("xslider_1", "value"), Input("select-country", "value")],
)
def update(xmax, xcon):
return plot_salary_heatmap(xmax, xcon)
@app.callback(
Output("gender-boxplot", "srcDoc"),
[Input("xslider_1", "value"), Input("select-country", "value")],
)
def update(xmax, xcon):
return plot_gender_boxplot(xmax, xcon)
@app.callback(
Output("edu_histogram", "srcDoc"),
[
Input("xslider_1", "value"),
Input("select-country", "value"),
Input("select-stacking", "value")
]
)
def update(xmax, xcon, stack):
return plot_edu_histo(xmax, xcon, stack)
if __name__ == "__main__":
app.run_server(debug=True)
| 20.931507 | 76 | 0.655759 | 0 | 0 | 0 | 0 | 936 | 0.612565 | 0 | 0 | 407 | 0.266361 |
d3ed6b32008718ad48f1726c2f8858cca85dfe86 | 3,035 | py | Python | jaeger-cli/rpc.py | shwsun/jaeger-cli | cf8bb7a00184220c206ccd7468b89ce4ab5a706e | [
"Apache-2.0"
]
| null | null | null | jaeger-cli/rpc.py | shwsun/jaeger-cli | cf8bb7a00184220c206ccd7468b89ce4ab5a706e | [
"Apache-2.0"
]
| null | null | null | jaeger-cli/rpc.py | shwsun/jaeger-cli | cf8bb7a00184220c206ccd7468b89ce4ab5a706e | [
"Apache-2.0"
]
| null | null | null | # Copyright 2018 MassOpenCloud.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__all__ = [
'init',
'cleanup',
'set_defaults',
'add_extra_exmods',
'clear_extra_exmods',
'get_allowed_exmods',
'RequestContextSerializer',
'get_client',
'get_server',
'get_notifier',
'TRANSPORT_ALIASES',
]
import functools
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_service import periodic_task
from oslo_utils import importutils
from oslo_utils import timeutils
import nova.conf
import nova.context
import nova.exception
from nova.i18n import _
from nova import objects
profiler = importutils.try_import("osprofiler.profiler")
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
TRANSPORT = None
LEGACY_NOTIFIER = None
NOTIFICATION_TRANSPORT = None
NOTIFIER = None
ALLOWED_EXMODS = [
nova.exception.__name__,
]
EXTRA_EXMODS = []
# NOTE(markmc): The nova.openstack.common.rpc entries are for backwards compat
# with Havana rpc_backend configuration values. The nova.rpc entries are for
# compat with Essex values.
TRANSPORT_ALIASES = {
'nova.openstack.common.rpc.impl_kombu': 'rabbit',
'nova.openstack.common.rpc.impl_qpid': 'qpid',
'nova.openstack.common.rpc.impl_zmq': 'zmq',
'nova.rpc.impl_kombu': 'rabbit',
'nova.rpc.impl_qpid': 'qpid',
'nova.rpc.impl_zmq': 'zmq',
}
class RequestContextSerializer(messaging.Serializer):
"""Request context serializer and deserializer from Nova.rpc.
This is the original serializer from nova. Nothing is changed besides
the docstring.
"""
def __init__(self, base):
self._base = base
def serialize_entity(self, context, entity):
if not self._base:
return entity
return self._base.serialize_entity(context, entity)
def deserialize_entity(self, context, entity):
if not self._base:
return entity
return self._base.deserialize_entity(context, entity)
def serialize_context(self, context):
return context.to_dict()
def deserialize_context(self, context):
return nova.context.RequestContext.from_dict(context)
class ProfilerRequestContextSerializer(RequestContextSerializer):
"""Serializer and deserializer impl.
Serializer and deserializer impl based on Jaeger tracing metadata
propagation. For usage check out docs/how-to.md . This is the only impl that
is important.
"""
pass
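    # Minimal sketch of what this serializer typically does (assumption: the
    # real propagation logic is described in docs/how-to.md and relies on the
    # osprofiler `profiler` module imported above):
    #
    #   def serialize_context(self, context):
    #       ctx = super(ProfilerRequestContextSerializer, self).serialize_context(context)
    #       prof = profiler.get() if profiler else None
    #       if prof:
    #           ctx['trace_info'] = {'hmac_key': prof.hmac_key,
    #                                'base_id': prof.get_base_id(),
    #                                'parent_id': prof.get_id()}
    #       return ctx
    #
    #   def deserialize_context(self, context):
    #       trace_info = context.pop('trace_info', None)
    #       if trace_info and profiler:
    #           profiler.init(**trace_info)
    #       return super(ProfilerRequestContextSerializer, self).deserialize_context(context)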
| 27.590909 | 80 | 0.725206 | 1,086 | 0.357825 | 0 | 0 | 0 | 0 | 0 | 0 | 1,552 | 0.511367 |
d3ededa11ab3bf342dc8f952cbe323bf1951fb20 | 3,296 | py | Python | data_structure/tree/test_binarytree.py | lp1225/my_algorithm | c5995fd6ec8692a4f25280097b5c7f1459481b40 | [
"MIT"
]
| 1 | 2019-07-03T16:23:09.000Z | 2019-07-03T16:23:09.000Z | data_structure/tree/test_binarytree.py | lp1225/my_algorithm | c5995fd6ec8692a4f25280097b5c7f1459481b40 | [
"MIT"
]
| null | null | null | data_structure/tree/test_binarytree.py | lp1225/my_algorithm | c5995fd6ec8692a4f25280097b5c7f1459481b40 | [
"MIT"
]
| null | null | null | # 搜索二叉树
from queue import Queue
class Node(object):
"""节点"""
def __init__(self, data):
self.data = data
self.node_left = None
self.node_right = None
class BinaryTree(object):
def __init__(self):
self.root = None
def insert(tree, node):
"""插入节点"""
if tree.root == None:
tree.root = node
else:
        temp = tree.root  # a temporary cursor node is required
while temp != None:
if temp.data > node.data:
if temp.node_left == None:
temp.node_left = node
return
else:
temp = temp.node_left
else:
if temp.node_right == None:
temp.node_right = node
return
else:
temp = temp.node_right
def preorder(node):
"""先序遍历"""
if node != None:
print(node.data, end='')
preorder(node.node_left)
preorder(node.node_right)
def inorder(node):
"""中序遍历"""
if node != None:
inorder(node.node_left)
print(node.data, end='')
inorder(node.node_right)
def postorder(node):
"""后序遍历"""
if node != None:
postorder(node.node_left)
postorder(node.node_right)
print(node.data, end='')
def get_height(node):
"""得到最大高度k"""
if node == None:
return 0
max_left = get_height(node.node_left)
max_right = get_height(node.node_right)
max_value = max(max_left, max_right)
return max_value+1
def get_node(node, k):
"""得到k层的节点"""
if node == None:
return
if k == 1:
if node.data !=None:
print(node.data, end='')
get_node(node.node_left, k-1)
get_node(node.node_right, k-1)
def get_max(node):
"""查找最大值
在右子树中找
"""
if node != None:
while node.node_right != None:
node = node.node_right
return node.data
def get_min(node):
"""查找最小值"""
if node != None:
while node.node_left != None:
node = node.node_left
return node.data
def comorder(node):
    """Level-order (breadth-first) traversal."""
q = Queue()
q.put(node)
if node == None:
return
else:
while q.empty() != True:
node = q.get(0)
print(node.data, end='')
if node.node_left != None:
q.put(node.node_left)
if node.node_right != None:
q.put(node.node_right)
def Mirror(node):
"""反转二叉树,
顺序执行,nice
"""
if node == None:
return
if node.node_left == None and node.node_right == None:
return
temp = node.node_left
node.node_left = node.node_right
node.node_right = temp
Mirror(node.node_left)
Mirror(node.node_right)
if __name__ == '__main__':
tree = BinaryTree()
arr_test = [6, 3, 8, 2, 5, 1, 7]
for i in arr_test:
insert(tree, Node(i))
# preorder(tree.root)
# print()
# inorder(tree.root)
# print()
# get_node(tree.root, 3)
# print()
# result = get_height(tree.root)
# print(result)
# max_value = get_max(tree.root)
# print(max_value)
# min_value = get_min(tree.root)
# print(min_value)
comorder(tree.root)
Mirror(tree.root)
print()
comorder(tree.root)
| 22.575342 | 58 | 0.531553 | 231 | 0.06719 | 0 | 0 | 0 | 0 | 0 | 0 | 569 | 0.165503 |
d3eed0d68517bbf2b89eb59f3fef60d4cac6c141 | 703 | py | Python | Python-desenvolvimento/ex036.py | MarcosMaciel-MMRS/Desenvolvimento-python | 2b2fc54788da3ca110d495b9e80a494f2b31fb09 | [
"MIT"
]
| null | null | null | Python-desenvolvimento/ex036.py | MarcosMaciel-MMRS/Desenvolvimento-python | 2b2fc54788da3ca110d495b9e80a494f2b31fb09 | [
"MIT"
]
| null | null | null | Python-desenvolvimento/ex036.py | MarcosMaciel-MMRS/Desenvolvimento-python | 2b2fc54788da3ca110d495b9e80a494f2b31fb09 | [
"MIT"
]
| null | null | null | #empréstimos bancários. pegue o valor da casa, o salario da pessoa e em quanto tempo ela quer pagar.
#se as parcelas ficarem acima de 30% do salario, negue o imprestimo.
casa = float(input('Informe o valor da casa: R$'))
salario = float(input('informe seu salario: R$'))
tempo = int(input('Em quanto tempo planeja pagar: '))
parcela = casa/(tempo*12)#para fazer a conta com base em anos, levando em conta as parcelas mensais.
print('Para pagar um casa de R${:.2f} e em {}anos, suas parcelas ficariam de R${:.2f}'.format(casa, tempo, parcela))
if parcela >= (salario*30/100):
print('Com seu salário atual, não é possível efetuar esse empréstimo.')
else:
print('Empréstimo aprovado')
| 58.583333 | 117 | 0.709815 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 508 | 0.714487 |
d3f026b7191d98da19a4514bcacdc0c4c65fbbab | 433 | py | Python | UDEMY-Learn Python Programming Masterclass/Section 3-Stepping into the World of Python/exercise4.py | Sanjay9921/Python | 05ac161dd46f9b4731a5c14ff5ef52adb705e8e6 | [
"MIT"
]
| null | null | null | UDEMY-Learn Python Programming Masterclass/Section 3-Stepping into the World of Python/exercise4.py | Sanjay9921/Python | 05ac161dd46f9b4731a5c14ff5ef52adb705e8e6 | [
"MIT"
]
| null | null | null | UDEMY-Learn Python Programming Masterclass/Section 3-Stepping into the World of Python/exercise4.py | Sanjay9921/Python | 05ac161dd46f9b4731a5c14ff5ef52adb705e8e6 | [
"MIT"
]
| null | null | null | #Integer division
#You have a shop selling buns for $2.40 each. A customer comes in with $15, and would like to buy as many buns as possible.
#Complete the code to calculate how many buns the customer can afford.
#Note: Your customer won't be happy if you try to sell them part of a bun.
#Print only the result, any other text in the output will cause the checker to fail.
bun_price = 2.40
money = 15
print( money // bun_price ) | 36.083333 | 124 | 0.745958 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 369 | 0.852194 |
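# Worked example: 15 // 2.40 evaluates to 6.0, so the customer can afford 6 whole buns.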
d3f12db2d9a12b691dfde83d5ec0f772b55deb37 | 1,708 | py | Python | scripts/test_maths.py | paulscottrobson/Basic65816 | 167c25b1bbc2680f55acdf4bff771750acfe907c | [
"MIT"
]
| null | null | null | scripts/test_maths.py | paulscottrobson/Basic65816 | 167c25b1bbc2680f55acdf4bff771750acfe907c | [
"MIT"
]
| 4 | 2019-06-18T14:45:35.000Z | 2019-06-22T17:18:22.000Z | scripts/test_maths.py | paulscottrobson/Basic65816 | 167c25b1bbc2680f55acdf4bff771750acfe907c | [
"MIT"
]
| 1 | 2021-03-18T04:31:44.000Z | 2021-03-18T04:31:44.000Z | # *******************************************************************************************
# *******************************************************************************************
#
# Name : test_maths.py
# Purpose : Create lots of variables/arrays and arithmetic/bitwise.
# Date : 10th June 2019
# Author : Paul Robson ([email protected])
#
# *******************************************************************************************
# *******************************************************************************************
import random
from variables import *
def calculate(op,a,b):
if op == "+":
return a + b
if op == "-":
return a - b
if op == "*":
return a * b
if op == "%":
return a % b
if op == "/":
return int(a / b)
if op == "&":
return a & b
if op == "|":
return a | b
if op == "^":
return a ^ b
assert False
if __name__ == "__main__":
print("Arithmetic/Bitwise test code.")
operators = "+,-,*,/,&,|,^".split(",")
eb = EntityBucket(-1,60,0,10,0)
#
bs = BasicSource()
bs.append(eb.setupCode())
bs.append(eb.assignCode())
for i in range(0,900):
ok = False
while not ok:
v1 = eb.pickOne()
v2 = eb.pickOne()
operator = operators[random.randint(0,len(operators)-1)]
ok = True
if abs(v1.getValue()*v2.getValue()) >= 32768*4096:
ok = False
if (operator == "/" or operator == "%") and v2.getValue() == 0:
ok = False
r = calculate(operator,v1.getValue(),v2.getValue())
bs.append("assert ({0}{1}{2}) = {3}".format(v1.getEither(),operator,v2.getEither(),r))
bs.append(eb.checkCode())
bs.save()
#
blk = BasicBlock(0x4000,0x8000)
blk.setBoot("run",False)
blk.loadProgram()
blk.exportFile("temp/basic.bin")
| 26.6875 | 93 | 0.466042 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 676 | 0.395785 |
d3f286f3f58a2b5e1d9a8943be97eb809e75f53c | 19,191 | py | Python | qa/char_analyze.py | JinkelaCrops/t2t-learning | 5d9b5a5164af763c24f1cbce9d97561e9f2b772c | [
"Apache-2.0"
]
| 5 | 2019-03-28T03:52:32.000Z | 2021-02-24T07:09:26.000Z | qa/char_analyze.py | JinkelaCrops/t2t-learning | 5d9b5a5164af763c24f1cbce9d97561e9f2b772c | [
"Apache-2.0"
]
| null | null | null | qa/char_analyze.py | JinkelaCrops/t2t-learning | 5d9b5a5164af763c24f1cbce9d97561e9f2b772c | [
"Apache-2.0"
]
| 2 | 2018-08-07T03:43:09.000Z | 2019-12-09T06:41:40.000Z | # 所有的unicode字符
from collections import Counter
import qa.regex_utils as regutil
import re
resource_path = "/media/tmxmall/a36811aa-0e87-4ba1-b14f-370134452449/data/medicine.txt"
with open(resource_path, "r", encoding="utf8") as f:
char_stream = f.read()
char_dictionary = Counter(list(char_stream))
med_unicodes = regutil.expr_converter("[[%s]]" % "".join(char_dictionary.keys()).replace("\n", "") + "#[[\\u4e00-\\u9fff]]")
format_med_unicodes = re.sub("(?<!-)(?=\\\\u)", "\n", med_unicodes)
print(format_med_unicodes)
lines = char_stream.split("\n")
unknown_char = "[^\\u0020-\\u007e\\u4e00-\\u9fff]"
def regex_filter_line(regex, lines):
filter_sentence = list(filter(lambda x: re.search(regex, x) is not None, lines))
print("%20s" % regex, len(filter_sentence))
return len(filter_sentence)
regutil.uu_enum("\\u0020-\\u007e")
regex_filter_line("[\\u0020-\\u007e]", lines)
regex_filter_line("[\\u00a0-\\u00ff]", lines)
regex_filter_line("[\\u0100-\\u01ff]", lines)
regex_filter_line("[\\u0251]", lines)
regex_filter_line("[\\u025b]", lines)
regex_filter_line("[\\u0261]", lines)
regex_filter_line("[\\u028a]", lines)
regex_filter_line("[\\u02c6-\\u02cb]", lines)
regex_filter_line("[\\u02d0]", lines)
regex_filter_line("[\\u02d8-\\u02da]", lines)
regex_filter_line("[\\u02dc]", lines)
regex_filter_line("[\\u037a]", lines)
regex_filter_line("[\\u037e]", lines)
regex_filter_line("[\\u038a]", lines)
regex_filter_line("[\\u038c]", lines)
regex_filter_line("[\\u03cb]", lines)
regex_filter_line("[\\u03d6]", lines)
regex_filter_line("[\\u0384-\\u0385]", lines)
regex_filter_line("[\\u0387-\\u0388]", lines)
regex_filter_line("[\\u038e-\\u038f]", lines)
regex_filter_line("[\\u0391-\\u03c9]", lines)
regex_filter_line("[\\u0400-\\u04ff]", lines)
regex_filter_line("[\\u0590-\\u05ff]", lines)
regex_filter_line("[\\u0652]", lines)
regex_filter_line("[\\u11bc]", lines)
regex_filter_line("[\\u1868]", lines)
regex_filter_line("[\\u1d31]", lines)
regex_filter_line("[\\u1d52]", lines)
regex_filter_line("[\\u1d5b]", lines)
regex_filter_line("[\\u1ef7]", lines)
regex_filter_line("[\\u2016-\\u206a]", lines)
regex_filter_line("[\\u2070]", lines)
regex_filter_line("[\\u2074-\\u2075]", lines)
regex_filter_line("[\\u2077-\\u2078]", lines)
regex_filter_line("[\\u2082-\\u2084]", lines)
regex_filter_line("[\\u20ac]", lines)
regex_filter_line("[\\u2103]", lines)
regex_filter_line("[\\u2105]", lines)
regex_filter_line("[\\u2109]", lines)
regex_filter_line("[\\u2116]", lines)
regex_filter_line("[\\u2122]", lines)
regex_filter_line("[\\u212b]", lines)
regex_filter_line("[\\u2160-\\u216b]", lines)
regex_filter_line("[\\u2170-\\u2179]", lines)
regex_filter_line("[\\u21d2]", lines)
regex_filter_line("[\\u2190-\\u2193]", lines)
regex_filter_line("[\\u2206]", lines)
regex_filter_line("[\\u2208]", lines)
regex_filter_line("[\\u2211-\\u2212]", lines)
regex_filter_line("[\\u2217-\\u221a]", lines)
regex_filter_line("[\\u221d-\\u2220]", lines)
regex_filter_line("[\\u2223]", lines)
regex_filter_line("[\\u2225]", lines)
regex_filter_line("[\\u2227-\\u222b]", lines)
regex_filter_line("[\\u222e]", lines)
regex_filter_line("[\\u2234]", lines)
regex_filter_line("[\\u2237]", lines)
regex_filter_line("[\\u223c-\\u223d]", lines)
regex_filter_line("[\\u2245]", lines)
regex_filter_line("[\\u224c]", lines)
regex_filter_line("[\\u2252]", lines)
regex_filter_line("[\\u2260-\\u2261]", lines)
regex_filter_line("[\\u2264-\\u2267]", lines)
regex_filter_line("[\\u226f]", lines)
regex_filter_line("[\\u2295]", lines)
regex_filter_line("[\\u2299]", lines)
regex_filter_line("[\\u22a5]", lines)
regex_filter_line("[\\u22bf]", lines)
regex_filter_line("[\\u2312]", lines)
regex_filter_line("[\\u2395]", lines)
regex_filter_line("[\\u2460-\\u2473]", lines)
regex_filter_line("[\\u2474-\\u2487]", lines)
regex_filter_line("[\\u2488-\\u249b]", lines)
regex_filter_line("[\\u2500-\\u257f]", lines)
regex_filter_line("[\\u25a0-\\u25a1]", lines)
regex_filter_line("[\\u25b2-\\u25b4]", lines)
regex_filter_line("[\\u25c6-\\u25c7]", lines)
regex_filter_line("[\\u25ca-\\u25cb]", lines)
regex_filter_line("[\\u25ce-\\u25cf]", lines)
regex_filter_line("[\\u2605-\\u2606]", lines)
regex_filter_line("[\\u2609]", lines)
regex_filter_line("[\\u2610]", lines)
regex_filter_line("[\\u2640]", lines)
regex_filter_line("[\\u2642]", lines)
regex_filter_line("[\\u2666]", lines)
regex_filter_line("[\\u266a-\\u266b]", lines)
regex_filter_line("[\\u2714]", lines)
regex_filter_line("[\\u2717]", lines)
regex_filter_line("[\\u274f]", lines)
regex_filter_line("[\\u2751]", lines)
regex_filter_line("[\\u279f]", lines)
regex_filter_line("[\\u27a2]", lines)
regex_filter_line("[\\u27a5]", lines)
regex_filter_line("[\\u2a7d]", lines)
regex_filter_line("[\\u2fd4]", lines)
regex_filter_line("[\\u3001-\\u301e]", lines)
regex_filter_line("[\\u3022-\\u3025]", lines)
regex_filter_line("[\\u3105-\\u3107]", lines)
regex_filter_line("[\\u310a]", lines)
regex_filter_line("[\\u3111]", lines)
regex_filter_line("[\\u3113]", lines)
regex_filter_line("[\\u3116-\\u3117]", lines)
regex_filter_line("[\\u311a-\\u311b]", lines)
regex_filter_line("[\\u3122]", lines)
regex_filter_line("[\\u3125]", lines)
regex_filter_line("[\\u3127-\\u3128]", lines)
regex_filter_line("[\\u3220-\\u3229]", lines)
regex_filter_line("[\\u32a3]", lines)
regex_filter_line("[\\u338e-\\u338f]", lines)
regex_filter_line("[\\u339c-\\u339d]", lines)
regex_filter_line("[\\u33a1]", lines)
regex_filter_line("[\\u33a5]", lines)
regex_filter_line("[\\u33d5]", lines)
regex_filter_line("[\\u33d1-\\u33d2]", lines)
regex_filter_line("[\\u359e]", lines)
regex_filter_line("[\\u39d1]", lines)
regex_filter_line("[\\u41f2]", lines)
regex_filter_line("[\\u4341]", lines)
regex_filter_line("[\\u4d13]", lines)
regex_filter_line("[\\u4d15]", lines)
regex_filter_line("[\\u4e00-\\u9fff]", lines)
regex_filter_line("[\\uacf3]", lines)
regex_filter_line("[\\ucd38]", lines)
regex_filter_line("[\\ue20c-\\ue2ff]", lines)
regex_filter_line("[\\uf900-\\ufaff]", lines)
regex_filter_line("[\\ufb03]", lines)
regex_filter_line("[\\ufe30-\\ufe31]", lines)
regex_filter_line("[\\ufe33]", lines)
regex_filter_line("[\\ufe38]", lines)
regex_filter_line("[\\ufe3c-\\ufe3d]", lines)
regex_filter_line("[\\ufe3f-\\ufe41]", lines)
regex_filter_line("[\\ufe4d-\\ufe4e]", lines)
regex_filter_line("[\\ufe55-\\ufe57]", lines)
regex_filter_line("[\\ufe59-\\ufe5c]", lines)
regex_filter_line("[\\ufe5f]", lines)
regex_filter_line("[\\ufe63]", lines)
regex_filter_line("[\\ufe65-\\ufe66]", lines)
regex_filter_line("[\\ufe6a-\\ufe6b]", lines)
regex_filter_line("[\\ufeff]", lines)
regex_filter_line("[\\uff01]", lines)
regex_filter_line("[\\uff08-\\uff09]", lines)
regex_filter_line("[\\uff0c]", lines)
regex_filter_line("[\\uff1a]", lines)
regex_filter_line("[\\uff1f]", lines)
regex_filter_line("[\\uff61]", lines)
regex_filter_line("[\\uff63]", lines)
regex_filter_line("[\\uff65]", lines)
regex_filter_line("[\\uff6c]", lines)
regex_filter_line("[\\uff72]", lines)
regex_filter_line("[\\uff86]", lines)
regex_filter_line("[\\uff89]", lines)
regex_filter_line("[\\uffe0-\\uffe1]", lines)
regex_filter_line("[\\uffe3]", lines)
regex_filter_line("[\\uffe5]", lines)
regex_filter_line("[\\uffed]", lines)
regex_filter_line("[\\ufffc]", lines)
"""
[\u0020-\u007e] 13056272 \\u0020-\\u007e Latin
[\u00a0-\u00ff] 258619 \\u00a0-\\u00ff Latin ++
[\u0100-\u01ff] 353 \\u0100-\\u01ff Latin ++
[\u0251] 302 \\u0251 ɑ
[\u025b] 2 \\u025b ɛ
[\u0261] 25 \\u0261 ɡ
[\u028a] 1 \\u028a ʊ
[\u02c6-\u02cb] 870 \\u02c6-\\u02cb ˆˇˈˉˊˋ
[\u02d0] 1 \\u02d0 ː
[\u02d8-\u02da] 25 \\u02d8-\\u02da ˘˙˚
[\u02dc] 10 \\u02dc ˜
[\u037a] 1 \\u037a ͺ
[\u037e] 4 \\u037e ;
[\u038a] 3 \\u038a Ί
[\u038c] 1 \\u038c Ό
[\u03cb] 3 \\u03cb ϋ
[\u03d6] 2 \\u03d6 ϖ
[\u0384-\u0385] 8 \\u0384-\\u0385 ΄΅
[\u0387-\u0388] 2 \\u0387-\\u0388 ·Έ
[\u038e-\u038f] 2 \\u038e-\\u038f ΎΏ
[\u0391-\u03c9] 567276 \\u0391-\\u03c9 希腊
[\u0400-\u04ff] 2058 \\u0400-\\u04ff 西里尔
[\u0590-\u05ff] 34 \\u0590-\\u05ff 希伯来
[\u0652] 1 \\u0652 阿拉伯
[\u11bc] 3 \\u11bc 朝鲜
[\u1868] 1 \\u1868 ᡨ 蒙古
[\u1d31] 1 \\u1d31 ᴱ
[\u1d52] 1 \\u1d52 ᵒ
[\u1d5b] 1 \\u1d5b ᵛ
[\u1ef7] 1 \\u1ef7 ỷ Latin ++
[\u2016-\u206a] 323353 \\u2016-\\u206a punc++
[\u2070] 4 \\u2070 ⁰
[\u2074-\u2075] 9 \\u2074-\\u2075 ⁴⁵
[\u2077-\u2078] 11 \\u2077-\\u2078 ⁷⁸
[\u2082-\u2084] 13 \\u2082-\\u2084 ₂₃₄
[\u20ac] 58 \\u20ac €
[\u2103] 132218 \\u2103 ℃
[\u2105] 64 \\u2105 ℅
[\u2109] 45 \\u2109 ℉
[\u2116] 559 \\u2116 №
[\u2122] 348 \\u2122 ™
[\u212b] 5 \\u212b Å
[\u2160-\u216b] 235239 \\u2160-\\u216b ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ
[\u2170-\u2179] 1557 \\u2170-\\u2179 ⅰⅱⅲⅳⅴⅵⅶⅷⅸ
[\u21d2] 3 \\u21d2 ⇒
[\u2190-\u2193] 15107 \\u2190-\\u2193 ←↑→↓
[\u2206] 5 \\u2206 ∆
[\u2208] 281 \\u2208 ∈
[\u2211-\u2212] 839 \\u2211-\\u2212 ∑−
[\u2217-\u221a] 75 \\u2217-\\u221a ∗∘∙√
[\u221d-\u2220] 861 \\u221d-\\u2220 ∝∞∟∠
[\u2223] 1 \\u2223 ∣
[\u2225] 80 \\u2225 ∥
[\u2227-\u222b] 226 \\u2227-\\u222b ∧∨∩∪∫
[\u222e] 8 \\u222e ∮
[\u2234] 46 \\u2234 ∴
[\u2237] 333 \\u2237 ∷
[\u223c-\u223d] 29 \\u223c-\\u223d ∼∽
[\u2245] 1 \\u2245 ≅
[\u224c] 33 \\u224c ≌
[\u2252] 4 \\u2252 ≒
[\u2260-\u2261] 555 \\u2260-\\u2261 ≠≡
[\u2264-\u2267] 31397 \\u2264-\\u2267 ≤≥≦≧
[\u226f] 3 \\u226f ≯
[\u2295] 4 \\u2295 ⊕
[\u2299] 17 \\u2299 ⊙
[\u22a5] 41 \\u22a5 ⊥
[\u22bf] 116 \\u22bf ⊿
[\u2312] 5 \\u2312 ⌒
[\u2395] 4 \\u2395 ⎕
[\u2460-\u2473] 48470 \\u2460-\\u2473 ①②③④⑤⑥⑦⑧⑨⑩ ⑳
[\u2474-\u2487] 1267 \\u2474-\\u2487 ⑴⑵⑶⑷⑸⑹⑺⑻⑼⑽ ⒇
[\u2488-\u249b] 107 \\u2488-\\u249b ⒈⒉⒊⒋⒌⒍⒎⒏⒐⒑ ⒛
[\u2500-\u257f] 566 \\u2500-\\u257f ─━│┃┄┅┆┇┈┉┊
[\u25a0-\u25a1] 1052 \\u25a0-\\u25a1 ■□
[\u25b2-\u25b4] 3695 \\u25b2-\\u25b4 ▲△▴
[\u25c6-\u25c7] 205 \\u25c6-\\u25c7 ◆◇
[\u25ca-\u25cb] 339 \\u25ca-\\u25cb ◊○
[\u25ce-\u25cf] 767 \\u25ce-\\u25cf ◎●
[\u2605-\u2606] 196 \\u2605-\\u2606 ★☆
[\u2609] 3 \\u2609 ☉
[\u2610] 35 \\u2610 ☐
[\u2640] 1017 \\u2640 ♀
[\u2642] 1108 \\u2642 ♂
[\u2666] 2 \\u2666 ♦
[\u266a-\u266b] 9 \\u266a-\\u266b ♪♫
[\u2714] 4 \\u2714 ✔
[\u2717] 1 \\u2717 ✗
[\u274f] 1 \\u274f ❏
[\u2751] 2 \\u2751 ❑
[\u279f] 1 \\u279f ➟
[\u27a2] 6 \\u27a2 ➢
[\u27a5] 1 \\u27a5 ➥
[\u2a7d] 3 \\u2a7d ⩽
[\u2fd4] 2 \\u2fd4 ⿔ CJK++
[\u3001-\u301e] 7028921 \\u3001-\\u301e CJK punc
[\u3022-\u3025] 8 \\u3022-\\u3025 〢〣〤〥
[\u3105-\u3107] 8 \\u3105-\\u3107 ㄅㄆ
[\u310a] 1 \\u310a ㄊ
[\u3111] 1 \\u3111 ㄑ
[\u3113] 2 \\u3113 ㄓ
[\u3116-\u3117] 6 \\u3116-\\u3117 ㄖㄗ
[\u311a-\u311b] 2 \\u311a-\\u311b ㄚㄛ
[\u3122] 1 \\u3122 ㄢ
[\u3125] 1 \\u3125 ㄥ
[\u3127-\u3128] 11 \\u3127-\\u3128 ㄧㄨ
[\u3220-\u3229] 312 \\u3220-\\u3229 ㈠㈡㈢㈣㈤㈥㈦㈧㈨
[\u32a3] 6 \\u32a3 ㊣
[\u338e-\u338f] 125 \\u338e-\\u338f ㎎㎏
[\u339c-\u339d] 75 \\u339c-\\u339d ㎜㎝
[\u33a1] 59 \\u33a1 ㎡
[\u33a5] 1 \\u33a5 ㎥
[\u33d5] 24 \\u33d5 ㏕
[\u33d1-\u33d2] 9 \\u33d1-\\u33d2 ㏑㏒
[\u359e] 6 \\u359e 㖞
[\u39d1] 3 \\u39d1 㧑
[\u41f2] 13 \\u41f2 䇲
[\u4341] 2 \\u4341 䍁
[\u4d13] 2 \\u4d13 䴓
[\u4d15] 1 \\u4d15 䴕
[\u4e00-\u9fff] 13056199 \\u4e00-\\u9fff CJK
[\uacf3] 2 \\uacf3 곳 朝鲜++
[\ucd38] 1 \\ucd38 촸 朝鲜++
[\ue20c-\ue2ff] 1305 \\ue20c-\\ue2ff ???
[\uf900-\ufaff] 136 \\uf900-\\ufaff CJK ++
[\ufb03] 1 \\ufb03 ffi
[\ufe30-\ufe31] 941 \\ufe30-\\ufe31 ︰︱
[\ufe33] 2 \\ufe33 ︳
[\ufe38] 4 \\ufe38 ︸
[\ufe3c-\ufe3d] 33 \\ufe3c-\\ufe3d ︼︽
[\ufe3f-\ufe41] 19 \\ufe3f-\\ufe41 ︿﹀﹁
[\ufe4d-\ufe4e] 7 \\ufe4d-\\ufe4e ﹍﹎
[\ufe55-\ufe57] 102 \\ufe55-\\ufe57 ﹕﹖﹗
[\ufe59-\ufe5c] 185 \\ufe59-\\ufe5c ﹙﹚﹛
[\ufe5f] 10 \\ufe5f ﹟
[\ufe63] 70 \\ufe63 ﹣
[\ufe65-\ufe66] 551 \\ufe65-\\ufe66 ﹥﹦
[\ufe6a-\ufe6b] 233 \\ufe6a-\\ufe6b ﹪﹫
[\ufeff] 4 \\ufeff arabic ++ # FE70-FEFF
[\uff01] 886 \\uff01 !
[\uff08-\uff09] 622070 \\uff08-\\uff09 ()
[\uff0c] 3445520 \\uff0c ,
[\uff1a] 471609 \\uff1a :
[\uff1f] 9822 \\uff1f ?
[\uff61] 2 \\uff61 。
[\uff63] 1 \\uff63 」
[\uff65] 8 \\uff65 ・
[\uff6c] 2 \\uff6c ャ
[\uff72] 1 \\uff72 イ
[\uff86] 1 \\uff86 ニ
[\uff89] 1 \\uff89 ノ
[\uffe0-\uffe1] 160 \\uffe0-\\uffe1 ¢£
[\uffe3] 7143 \\uffe3  ̄
[\uffe5] 57 \\uffe5 ¥
[\uffed] 9 \\uffed ■
[\ufffc] 1 \\ufffc 
"""
"""
\\u0020-\\u007e Latin
\\u00a0-\\u00ff Latin ++
\\u0100-\\u01ff Latin ++
\\u0251 ɑ
\\u025b ɛ
\\u0261 ɡ
\\u028a ʊ
\\u02c6-\\u02cb ˆˇˈˉˊˋ
\\u02d0 ː
\\u02d8-\\u02da ˘˙˚
\\u02dc ˜
\\u037a ͺ
\\u037e ;
\\u038a Ί
\\u038c Ό
\\u03cb ϋ
\\u03d6 ϖ
\\u0384-\\u0385 ΄΅
\\u0387-\\u0388 ·Έ
\\u038e-\\u038f ΎΏ
\\u0391-\\u03c9 希腊
\\u0400-\\u04ff 西里尔
\\u0590-\\u05ff 希伯来
\\u0652 阿拉伯
\\u11bc 朝鲜
\\u1868 ᡨ 蒙古
\\u1d31 ᴱ
\\u1d52 ᵒ
\\u1d5b ᵛ
\\u1ef7 ỷ Latin ++
\\u2016-\\u206a punc++
\\u2070 ⁰
\\u2074-\\u2075 ⁴⁵
\\u2077-\\u2078 ⁷⁸
\\u2082-\\u2084 ₂₃₄
\\u20ac €
\\u2103 ℃
\\u2105 ℅
\\u2109 ℉
\\u2116 №
\\u2122 ™
\\u212b Å
\\u2160-\\u216b ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ
\\u2170-\\u2179 ⅰⅱⅲⅳⅴⅵⅶⅷⅸ
\\u21d2 ⇒
\\u2190-\\u2193 ←↑→↓
\\u2206 ∆
\\u2208 ∈
\\u2211-\\u2212 ∑−
\\u2217-\\u221a ∗∘∙√
\\u221d-\\u2220 ∝∞∟∠
\\u2223 ∣
\\u2225 ∥
\\u2227-\\u222b ∧∨∩∪∫
\\u222e ∮
\\u2234 ∴
\\u2237 ∷
\\u223c-\\u223d ∼∽
\\u2245 ≅
\\u224c ≌
\\u2252 ≒
\\u2260-\\u2261 ≠≡
\\u2264-\\u2267 ≤≥≦≧
\\u226f ≯
\\u2295 ⊕
\\u2299 ⊙
\\u22a5 ⊥
\\u22bf ⊿
\\u2312 ⌒
\\u2395 ⎕
\\u2460-\\u2473 ①②③④⑤⑥⑦⑧⑨⑩ ⑳
\\u2474-\\u2487 ⑴⑵⑶⑷⑸⑹⑺⑻⑼⑽ ⒇
\\u2488-\\u249b ⒈⒉⒊⒋⒌⒍⒎⒏⒐⒑ ⒛
\\u2500-\\u257f ─━│┃┄┅┆┇┈┉┊
\\u25a0-\\u25a1 ■□
\\u25b2-\\u25b4 ▲△▴
\\u25c6-\\u25c7 ◆◇
\\u25ca-\\u25cb ◊○
\\u25ce-\\u25cf ◎●
\\u2605-\\u2606 ★☆
\\u2609 ☉
\\u2610 ☐
\\u2640 ♀
\\u2642 ♂
\\u2666 ♦
\\u266a-\\u266b ♪♫
\\u2714 ✔
\\u2717 ✗
\\u274f ❏
\\u2751 ❑
\\u279f ➟
\\u27a2 ➢
\\u27a5 ➥
\\u2a7d ⩽
\\u2fd4 ⿔ CJK++
\\u3001-\\u301e CJK punc
\\u3022-\\u3025 〢〣〤〥
\\u3105-\\u3107 ㄅㄆ
\\u310a ㄊ
\\u3111 ㄑ
\\u3113 ㄓ
\\u3116-\\u3117 ㄖㄗ
\\u311a-\\u311b ㄚㄛ
\\u3122 ㄢ
\\u3125 ㄥ
\\u3127-\\u3128 ㄧㄨ
\\u3220-\\u3229 ㈠㈡㈢㈣㈤㈥㈦㈧㈨
\\u32a3 ㊣
\\u338e-\\u338f ㎎㎏
\\u339c-\\u339d ㎜㎝
\\u33a1 ㎡
\\u33a5 ㎥
\\u33d5 ㏕
\\u33d1-\\u33d2 ㏑㏒
\\u359e 㖞
\\u39d1 㧑
\\u41f2 䇲
\\u4341 䍁
\\u4d13 䴓
\\u4d15 䴕
\\u4e00-\\u9fff CJK
\\uacf3 곳 朝鲜++
\\ucd38 촸 朝鲜++
\\ue20c-\\ue2ff ???
\\uf900-\\ufaff CJK ++
\\ufb03 ffi
\\ufe30-\\ufe31 ︰︱
\\ufe33 ︳
\\ufe38 ︸
\\ufe3c-\\ufe3d ︼︽
\\ufe3f-\\ufe41 ︿﹀﹁
\\ufe4d-\\ufe4e ﹍﹎
\\ufe55-\\ufe57 ﹕﹖﹗
\\ufe59-\\ufe5c ﹙﹚﹛
\\ufe5f ﹟
\\ufe63 ﹣
\\ufe65-\\ufe66 ﹥﹦
\\ufe6a-\\ufe6b ﹪﹫
\\ufeff arabic ++ # FE70-FEFF
\\uff01 !
\\uff08-\\uff09 ()
\\uff0c ,
\\uff1a :
\\uff1f ?
\\uff61 。
\\uff63 」
\\uff65 ・
\\uff6c ャ
\\uff72 イ
\\uff86 ニ
\\uff89 ノ
\\uffe0-\\uffe1 ¢£
\\uffe3  ̄
\\uffe5 ¥
\\uffed ■
\\ufffc 
"""
| 35.604824 | 124 | 0.472617 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 15,462 | 0.761112 |
d3f53c7bfc8c69e3589a13dbe7ec8f22516451e9 | 622 | py | Python | challenges/largest_product_array/test_largest_product.py | jayadams011/data-structures-and-algorithms | b9a49c65ca769c82b2a34d840bd1e4dd626be025 | [
"MIT"
]
| null | null | null | challenges/largest_product_array/test_largest_product.py | jayadams011/data-structures-and-algorithms | b9a49c65ca769c82b2a34d840bd1e4dd626be025 | [
"MIT"
]
| 4 | 2018-03-22T16:56:06.000Z | 2018-03-28T23:30:29.000Z | challenges/largest_product_array/test_largest_product.py | jayadams011/data-structures-and-algorithms | b9a49c65ca769c82b2a34d840bd1e4dd626be025 | [
"MIT"
]
| null | null | null | from largest_product.py import largest_product
import pytest
def test_product_returns():
"""test if return is a single product """
assert largest_product.largest([[2, 2]]) is 4
def test_returns_largest():
""" test if return is the largest of longer array """
    assert largest_product.largest([[1, 3], [6, 10], [4, 5]]) == 60
def test_empty_list():
""" test if returns msg if empty list """
assert largest_product.largest([]) == 'empty arr used'
def test_check_if_syb_has_only_1_el():
"""test for one value"""
arr = [3]
val = 0
assert largest_product.node_inside(arr, val) == 3
| 24.88 | 67 | 0.672026 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 175 | 0.28135 |
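The largest_product module exercised by the tests above is not included in this dump; the following is only a hypothetical sketch of a largest() function consistent with the three product assertions (the node_inside helper is not covered, and the real implementation in the repository may differ).

from functools import reduce
def largest(arrays):
    # Largest product over the given sub-arrays; the tests above expect the
    # string 'empty arr used' when the outer list is empty.
    if not arrays:
        return 'empty arr used'
    return max(reduce(lambda a, b: a * b, arr) for arr in arrays)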
d3f5e1c2b805610da261a12d23a2c36165256863 | 4,086 | py | Python | examples/transformer/model.py | namin/dm-haiku | 7ddb4776761a7220031c6b323fa6f797bb02f75c | [
"Apache-2.0"
]
| null | null | null | examples/transformer/model.py | namin/dm-haiku | 7ddb4776761a7220031c6b323fa6f797bb02f75c | [
"Apache-2.0"
]
| null | null | null | examples/transformer/model.py | namin/dm-haiku | 7ddb4776761a7220031c6b323fa6f797bb02f75c | [
"Apache-2.0"
]
| null | null | null | # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Transformer model components."""
from typing import Optional
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
class CausalSelfAttention(hk.MultiHeadAttention):
"""Self attention with a causal mask applied."""
def __call__(
self,
query: jnp.ndarray,
key: Optional[jnp.ndarray] = None,
value: Optional[jnp.ndarray] = None,
mask: Optional[jnp.ndarray] = None,
) -> jnp.ndarray:
key = key if key is not None else query
value = value if value is not None else query
if query.ndim != 3:
raise ValueError('Expect queries of shape [B, T, D].')
seq_len = query.shape[1]
causal_mask = np.tril(np.ones((seq_len, seq_len)))
mask = mask * causal_mask if mask is not None else causal_mask
return super().__call__(query, key, value, mask)
class DenseBlock(hk.Module):
"""A 2-layer MLP which widens then narrows the input."""
def __init__(self,
init_scale: float,
widening_factor: int = 4,
name: Optional[str] = None):
super().__init__(name=name)
self._init_scale = init_scale
self._widening_factor = widening_factor
def __call__(self, x: jnp.ndarray) -> jnp.ndarray:
hiddens = x.shape[-1]
initializer = hk.initializers.VarianceScaling(self._init_scale)
x = hk.Linear(self._widening_factor * hiddens, w_init=initializer)(x)
x = jax.nn.gelu(x)
return hk.Linear(hiddens, w_init=initializer)(x)
class Transformer(hk.Module):
"""A transformer stack."""
def __init__(self,
num_heads: int,
num_layers: int,
dropout_rate: float,
name: Optional[str] = None):
super().__init__(name=name)
self._num_layers = num_layers
self._num_heads = num_heads
self._dropout_rate = dropout_rate
def __call__(self,
h: jnp.ndarray,
mask: Optional[jnp.ndarray],
is_training: bool) -> jnp.ndarray:
"""Connects the transformer.
Args:
h: Inputs, [B, T, D].
mask: Padding mask, [B, T].
is_training: Whether we're training or not.
Returns:
Array of shape [B, T, D].
"""
init_scale = 2. / self._num_layers
dropout_rate = self._dropout_rate if is_training else 0.
if mask is not None:
mask = mask[:, None, None, :]
# Note: names chosen to approximately match those used in the GPT-2 code;
# see https://github.com/openai/gpt-2/blob/master/src/model.py.
for i in range(self._num_layers):
h_norm = layer_norm(h, name=f'h{i}_ln_1')
h_attn = CausalSelfAttention(
num_heads=self._num_heads,
key_size=32,
w_init_scale=init_scale,
name=f'h{i}_attn')(h_norm, mask=mask)
h_attn = hk.dropout(hk.next_rng_key(), dropout_rate, h_attn)
h = h + h_attn
h_norm = layer_norm(h, name=f'h{i}_ln_2')
h_dense = DenseBlock(init_scale, name=f'h{i}_mlp')(h_norm)
h_dense = hk.dropout(hk.next_rng_key(), dropout_rate, h_dense)
h = h + h_dense
h = layer_norm(h, name='ln_f')
return h
def layer_norm(x: jnp.ndarray, name: Optional[str] = None) -> jnp.ndarray:
"""Apply a unique LayerNorm to x with default settings."""
return hk.LayerNorm(axis=-1,
create_scale=True,
create_offset=True,
name=name)(x)
| 32.428571 | 80 | 0.634606 | 2,954 | 0.722956 | 0 | 0 | 0 | 0 | 0 | 0 | 1,335 | 0.326725 |
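The file above only defines Haiku modules; a minimal usage sketch (not part of the example) of wrapping them with hk.transform and running one forward pass follows. The hyperparameters and shapes are illustrative assumptions; the embedding width must equal num_heads * key_size (here 4 * 32 = 128) so the attention residual shapes line up.

import haiku as hk
import jax
import jax.numpy as jnp
def forward(h, mask, is_training):
    # Hypothetical hyperparameters, chosen only for illustration.
    return Transformer(num_heads=4, num_layers=2, dropout_rate=0.1)(h, mask, is_training)
forward_fn = hk.transform(forward)
rng = jax.random.PRNGKey(0)
h = jnp.zeros((2, 16, 128))   # [B, T, D] dummy token embeddings
mask = jnp.ones((2, 16))      # [B, T] padding mask
params = forward_fn.init(rng, h, mask, is_training=True)
out = forward_fn.apply(params, rng, h, mask, is_training=True)  # [B, T, D]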
d3f792b947cbc2077ccc06bcee6654417c6fd2ce | 7,827 | py | Python | face.py | shwang95/Intelligence-Surveillance-System | f7107096b447e929c36808c341ff91b0b5f49010 | [
"MIT"
]
| 1 | 2018-04-01T18:25:44.000Z | 2018-04-01T18:25:44.000Z | face.py | shwang95/Intelligence-Surveillance-System | f7107096b447e929c36808c341ff91b0b5f49010 | [
"MIT"
]
| null | null | null | face.py | shwang95/Intelligence-Surveillance-System | f7107096b447e929c36808c341ff91b0b5f49010 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
import boto3
import cv2
import numpy
import os
import base64
import gspread
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from httplib2 import Http
from time import localtime, strftime, time, sleep
from oauth2client.service_account import ServiceAccountCredentials
from apiclient import discovery, errors
from apiclient.discovery import build
from oauth2client import client
from oauth2client import tools
from oauth2client import file
def compare_faces(
bucket,
key,
bucket_target,
key_target,
threshold=80,
region='us-east-1'):
'''
    Compare a source face with a target image stored in S3 using AWS Rekognition
'''
rekognition = boto3.client('rekognition', region)
response = rekognition.compare_faces(
SourceImage={
'S3Object': {
'Bucket': bucket,
'Name': key,
}
},
TargetImage={
'S3Object': {
'Bucket': bucket_target,
'Name': key_target,
}
},
SimilarityThreshold=threshold,
)
return response['SourceImageFace'], response['FaceMatches']
def upload_log(text):
'''
    Upload the alert time to the Google Sheets log
'''
scope = ['https://spreadsheets.google.com/feeds']
credentials = ServiceAccountCredentials.from_json_keyfile_name(
'ProjectLog-41cafcffcf13.json', scope)
gc = gspread.authorize(credentials)
wks = gc.open('ISeeU_Log').sheet1
wks.append_row([text])
def send(service, user_id, message):
'''
Send the mime email package
'''
try:
message = (
service.users().messages().send(
userId=user_id,
body=message).execute())
print('Message Id: %s' % message['id'])
return message
except errors.HttpError as error:
print('An error occurred: %s' % error)
def create_email(sender, to, subject, message_text, pic):
'''
Create the email
Included information: Sender, Receiver, Subject, Text, Attached Image
'''
message = MIMEMultipart()
message['to'] = to
message['from'] = sender
message['Subject'] = subject
msg = MIMEText(message_text)
message.attach(msg)
fp = open(pic, 'rb')
msg = MIMEImage(fp.read(), _subtype='jpeg')
fp.close()
imagename = os.path.basename(pic)
msg.add_header('Content-Disposition', 'attachment', filename=imagename)
message.attach(msg)
return {'raw': base64.urlsafe_b64encode(message.as_string())}
def authenticate():
'''
    Use OAuth2 to obtain credentials; the scope grants full Gmail permissions.
    client_secret.json is the client secret file downloaded from Google.
    Reference: Gmail API Python quickstart
'''
SCOPES = 'https://mail.google.com'
store = file.Storage('credentials.json')
creds = store.get()
if not creds or creds.invalid:
flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)
creds = tools.run_flow(flow, store)
service = discovery.build('gmail', 'v1', http=creds.authorize(Http()))
return service
def stranger_detected(pic):
'''
    Record the current time and build a trigger code from it, which the user
    can send back to trigger the alarm
'''
nowtime = strftime("%Y-%m-%d %H:%M:%S", localtime())
trigcode = strftime("%d%H%M%S", localtime())
# Upload log to Google drive
text = 'Stranger show up at ' + nowtime
upload_log(text)
# Information of email
# pic = 'guldan.jpg' # Attached Image
sender = "[email protected]"
to = "[email protected]" # User email address
subject = "Alert from ISeeU!"
text = text + '\nReply ' + trigcode + ' to trigger the alarm.'
# Sending email to user
service = authenticate()
message = create_email(sender, to, subject, text, pic)
send(service, 'me', message)
return service, subject, trigcode
def main():
while True:
print('No face detected...')
if os.path.isfile('face.jpg'):
print('Face found!')
bucket_name = 'ec500j1-project-iseeu'
source_name = ['sh.jpg'] # User input faces
target_name = 'face.jpg' # Temporary image
s3 = boto3.client('s3')
# Upload images to s3 server
for img in source_name:
s3.upload_file(img, bucket_name, img)
s3.upload_file(target_name, bucket_name, target_name)
while True:
try:
# Check if the images are successfully uploaded
for img in source_name:
boto3.resource('s3').Object(bucket_name, img).load()
boto3.resource('s3').Object(
bucket_name, target_name).load()
except BaseException:
continue
break
sources, matches = {}, {}
for img in source_name:
try:
sources[img], matches[img] = compare_faces(
bucket_name, img, bucket_name, target_name)
except Exception as e:
# If Rekognition failure
                    print('Rekognition error: ' + str(e))
os.remove('face.jpg')
if len(matches[img]) == 0:
# Send notification email
service, target, trigcode = stranger_detected(
'face.jpg')
user_id = 'me'
flag = False # Flag for trigger alert
st = time()
while time() - st < 120: # Listen for 2 minutes
'''
Check all the email for user's reply every 30 seconds.
If the subject match, check if the trigcode match.
If the trigcode match too, return True to set off alarm.
'''
threads = service.users().threads().list(
userId=user_id).execute().get('threads', [])
for thread in threads:
tdata = service.users().threads().get(
userId=user_id, id=thread['id']).execute()
nmsgs = len(tdata['messages'])
msg = tdata['messages'][0]['payload']
subject = ''
for header in msg['headers']:
if header['name'] == 'Subject':
subject = header['value']
break
if subject == target:
if thread[u'snippet'][0:8] == trigcode:
# If user replies with trigcode
flag = True
break
if flag:
# If user replies with trigcode
break
nt = strftime('%Y-%m-%d %H:%M:%S', localtime())
print('Still listening: ' + nt)
sleep(30)
print('Alert!') # Emulated alert
else:
print('Not a stranger') # Do nothing
# Delete all images from s3 server
for img in source_name:
s3.delete_object(Bucket=bucket_name, Key=img)
s3.delete_object(Bucket=bucket_name, Key=target_name)
os.remove('face.jpg') # Delete temperary image
sleep(10)
if __name__ == '__main__':
main()
| 33.592275 | 80 | 0.536093 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,149 | 0.274562 |
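A minimal standalone sketch of calling the compare_faces helper defined in the script above, outside of main(). The bucket and key names are the ones hard-coded in the script; the similarity handling is illustrative, not part of the original.

source_face, face_matches = compare_faces(
    bucket='ec500j1-project-iseeu', key='sh.jpg',
    bucket_target='ec500j1-project-iseeu', key_target='face.jpg',
    threshold=80,
)
if face_matches:
    print('Known face, similarity %.1f%%' % face_matches[0]['Similarity'])
else:
    print('No match above the threshold - treat as a stranger')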
d3f7c63f805726000751c5b956986db551d0d877 | 8,480 | py | Python | unpythonic/syntax/autoref.py | aisha-w/unpythonic | 0f63abf6ac7efb7304b676d0e1ebce0ef4040438 | [
"BSD-2-Clause"
]
| null | null | null | unpythonic/syntax/autoref.py | aisha-w/unpythonic | 0f63abf6ac7efb7304b676d0e1ebce0ef4040438 | [
"BSD-2-Clause"
]
| null | null | null | unpythonic/syntax/autoref.py | aisha-w/unpythonic | 0f63abf6ac7efb7304b676d0e1ebce0ef4040438 | [
"BSD-2-Clause"
]
| null | null | null | # -*- coding: utf-8 -*-
"""Implicitly reference attributes of an object."""
from ast import Name, Assign, Load, Call, Lambda, With, Str, arg, \
Attribute, Subscript, Store, Del
from macropy.core.quotes import macros, q, u, name, ast_literal
from macropy.core.hquotes import macros, hq
from macropy.core.walkers import Walker
from .util import wrapwith, AutorefMarker
from .letdoutil import isdo, islet, ExpandedDoView, ExpandedLetView
from ..dynassign import dyn
from ..lazyutil import force1, mark_lazy
# with autoref(o):
# with autoref(scipy.loadmat("mydata.mat")): # evaluate once, assign to a gensym
# with autoref(scipy.loadmat("mydata.mat")) as o: # evaluate once, assign to given name
#
# We need something like::
#
# with autoref(o):
# x # --> (o.x if hasattr(o, "x") else x)
# x.a # --> (o.x.a if hasattr(o, "x") else x.a)
# x[s] # --> (o.x[s] if hasattr(o, "x") else x[s])
# o # --> o
# with autoref(p):
# x # --> (p.x if hasattr(p, "x") else (o.x if hasattr(o, "x") else x))
# x.a # --> (p.x.a if hasattr(p, "x") else (o.x.a if hasattr(o, "x") else x.a))
# x[s] # --> (p.x[s] if hasattr(p, "x") else (o.x[s] if hasattr(o, "x") else x[s]))
# o # --> (p.o if hasattr(p, "o") else o)
# o.x # --> (p.o.x if hasattr(p, "o") else o.x)
# o[s] # --> (p.o[s] if hasattr(p, "o") else o[s])
#
# One possible clean-ish implementation is::
#
# with AutorefMarker("o"): # no-op at runtime
# x # --> (lambda _ar271: _ar271[1] if _ar271[0] else x)(_autoref_resolve((o, "x")))
# x.a # --> ((lambda _ar271: _ar271[1] if _ar271[0] else x)(_autoref_resolve((o, "x")))).a
# x[s] # --> ((lambda _ar271: _ar271[1] if _ar271[0] else x)(_autoref_resolve((o, "x"))))[s]
# o # --> o (can only occur if an asname is supplied)
# with AutorefMarker("p"):
# x # --> (lambda _ar314: _ar314[1] if _ar314[0] else x)(_autoref_resolve((p, o, "x")))
# x.a # --> ((lambda _ar314: _ar314[1] if _ar314[0] else x)(_autoref_resolve((p, o, "x"))).a
# x[s] # --> ((lambda _ar314: _ar314[1] if _ar314[0] else x)(_autoref_resolve((p, o, "x")))[s]
# # when the inner autoref expands, it doesn't know about the outer one, so we will get this:
# o # --> (lambda _ar314: _ar314[1] if _ar314[0] else o)(_autoref_resolve((p, "o")))
# o.x # --> ((lambda _ar314: _ar314[1] if _ar314[0] else o)(_autoref_resolve((p, "o")))).x
# o[s] # --> ((lambda _ar314: _ar314[1] if _ar314[0] else o)(_autoref_resolve((p, "o"))))[s]
# # the outer autoref needs the marker to know to skip this (instead of looking up o.p):
# p # --> p
#
# The lambda is needed, because the lexical-variable lookup for ``x`` must occur at the use site,
# and it can only be performed by Python itself. We could modify ``_autoref_resolve`` to take
# ``locals()`` and ``globals()`` as arguments and look also in the ``builtins`` module,
# but that way we get no access to the enclosing scopes (the "E" in LEGB).
#
# Recall the blocks expand from inside out.
#
# We must leave an AST marker in place of each autoref block, so that any outer autoref block (when it expands)
# understands that within that block, any read access to the name "p" is to be left alone.
#
# In ``_autoref_resolve``, we use a single args parameter to avoid dealing with ``*args``
# when analyzing the Call node, thus avoiding much special-case code for the AST differences
# between Python 3.4 and 3.5+.
#
# In reality, we also capture-and-assign the autoref'd expr into a gensym'd variable (instead of referring
# to ``o`` and ``p`` directly), so that arbitrary expressions can be autoref'd without giving them
# a name in user code.
@mark_lazy
def _autoref_resolve(args):
*objs, s = [force1(x) for x in args]
for o in objs:
if hasattr(o, s):
return True, force1(getattr(o, s))
return False, None
def autoref(block_body, args, asname):
assert len(args) == 1, "expected exactly one argument, the expr to implicitly reference"
assert block_body, "expected at least one statement inside the 'with autoref' block"
gen_sym = dyn.gen_sym
o = asname.id if asname else gen_sym("_o") # Python itself guarantees asname to be a bare Name.
# with AutorefMarker("_o42"):
def isexpandedautorefblock(tree):
if not (type(tree) is With and len(tree.items) == 1):
return False
ctxmanager = tree.items[0].context_expr
return type(ctxmanager) is Call and \
type(ctxmanager.func) is Name and ctxmanager.func.id == "AutorefMarker" and \
len(ctxmanager.args) == 1 and type(ctxmanager.args[0]) is Str
def getreferent(tree):
return tree.items[0].context_expr.args[0].s
# (lambda _ar314: _ar314[1] if _ar314[0] else x)(_autoref_resolve((p, o, "x")))
def isautoreference(tree):
return type(tree) is Call and \
len(tree.args) == 1 and type(tree.args[0]) is Call and \
type(tree.args[0].func) is Name and tree.args[0].func.id == "_autoref_resolve" and \
type(tree.func) is Lambda and len(tree.func.args.args) == 1 and \
tree.func.args.args[0].arg.startswith("_ar")
def get_resolver_list(tree): # (p, o, "x")
return tree.args[0].args[0].elts
def add_to_resolver_list(tree, objnode):
lst = get_resolver_list(tree)
lst.insert(-1, objnode)
# x --> the autoref code above.
def makeautoreference(tree):
assert type(tree) is Name and (type(tree.ctx) is Load or not tree.ctx)
newtree = hq[(lambda __ar_: __ar_[1] if __ar_[0] else ast_literal[tree])(_autoref_resolve((name[o], u[tree.id])))]
our_lambda_argname = gen_sym("_ar")
@Walker
def renametmp(tree, **kw):
if type(tree) is Name and tree.id == "__ar_":
tree.id = our_lambda_argname
elif type(tree) is arg and tree.arg == "__ar_":
tree.arg = our_lambda_argname
return tree
return renametmp.recurse(newtree)
@Walker
def transform(tree, *, referents, set_ctx, stop, **kw):
if type(tree) in (Attribute, Subscript, Name) and type(tree.ctx) in (Store, Del):
stop()
# skip autoref lookup for let/do envs
elif islet(tree):
view = ExpandedLetView(tree)
set_ctx(referents=referents + [view.body.args.args[0].arg]) # lambda e14: ...
elif isdo(tree):
view = ExpandedDoView(tree)
set_ctx(referents=referents + [view.body[0].args.args[0].arg]) # lambda e14: ...
elif isexpandedautorefblock(tree):
set_ctx(referents=referents + [getreferent(tree)])
elif isautoreference(tree): # generated by an inner already expanded autoref block
stop()
thename = get_resolver_list(tree)[-1].s
if thename in referents:
# remove autoref lookup for an outer referent, inserted early by an inner autoref block
# (that doesn't know that any outer block exists)
tree = q[name[thename]] # (lambda ...)(_autoref_resolve((p, "o"))) --> o
else:
add_to_resolver_list(tree, q[name[o]]) # _autoref_resolve((p, "x")) --> _autoref_resolve((p, o, "x"))
elif type(tree) is Call and type(tree.func) is Name and tree.func.id == "AutorefMarker": # nested autorefs
stop()
elif type(tree) is Name and (type(tree.ctx) is Load or not tree.ctx) and tree.id not in referents:
stop()
tree = makeautoreference(tree)
# Attribute works as-is, because a.b.c --> Attribute(Attribute(a, "b"), "c"), so Name "a" gets transformed.
# Subscript similarly, a[1][2] --> Subscript(Subscript(a, 1), 2), so Name "a" gets transformed.
return tree
# skip (by name) some common references inserted by other macros
always_skip = ['letter', 'dof', 'namelambda', 'curry', 'currycall', 'lazy', 'lazyrec', 'lazycall']
newbody = [Assign(targets=[q[name[o]]], value=args[0])]
for stmt in block_body:
newbody.append(transform.recurse(stmt, referents=always_skip + [o]))
return wrapwith(item=hq[AutorefMarker(u[o])],
body=newbody,
locref=block_body[0])
| 51.393939 | 122 | 0.604481 | 0 | 0 | 0 | 0 | 2,255 | 0.26592 | 0 | 0 | 4,470 | 0.527123 |
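A small plain-Python sketch (not part of the module above) of the runtime behaviour its expansions produce: a bare name is first looked up as an attribute of the autoref'd object and only then falls back to the lexical variable.

class _Obj:
    x = 42
o = _Obj()
x = 'lexical fallback'
def _resolve(args):  # simplified stand-in for _autoref_resolve, without lazy forcing
    *objs, s = args
    for obj in objs:
        if hasattr(obj, s):
            return True, getattr(obj, s)
    return False, None
value = (lambda _ar: _ar[1] if _ar[0] else x)(_resolve((o, 'x')))
print(value)  # 42; prints 'lexical fallback' if _Obj has no attribute 'x'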
d3fa9c7a54272bce3d4b342c353619a4cf77a19a | 2,935 | py | Python | doit/exceptions.py | m4ta1l/doit | d1a1b7b3abc7641d977d3b78b580d97aea4e27ea | [
"MIT"
]
| 1,390 | 2015-01-01T21:11:47.000Z | 2022-03-31T11:35:44.000Z | doit/exceptions.py | m4ta1l/doit | d1a1b7b3abc7641d977d3b78b580d97aea4e27ea | [
"MIT"
]
| 393 | 2015-01-05T11:18:29.000Z | 2022-03-20T11:46:46.000Z | doit/exceptions.py | m4ta1l/doit | d1a1b7b3abc7641d977d3b78b580d97aea4e27ea | [
"MIT"
]
| 176 | 2015-01-07T16:58:56.000Z | 2022-03-28T12:12:11.000Z | """Handle exceptions generated from 'user' code"""
import sys
import traceback
class InvalidCommand(Exception):
"""Invalid command line argument."""
def __init__(self, *args, **kwargs):
self.not_found = kwargs.pop('not_found', None)
super(InvalidCommand, self).__init__(*args, **kwargs)
self.cmd_used = None
        self.bin_name = 'doit'  # default but might be overwritten
def __str__(self):
if self.not_found is None:
return super(InvalidCommand, self).__str__()
if self.cmd_used:
msg_task_not_found = (
'command `{cmd_used}` invalid parameter: "{not_found}".' +
' Must be a task, or a target.\n' +
'Type "{bin_name} list" to see available tasks')
return msg_task_not_found.format(**self.__dict__)
else:
msg_cmd_task_not_found = (
'Invalid parameter: "{not_found}".' +
' Must be a command, task, or a target.\n' +
'Type "{bin_name} help" to see available commands.\n' +
'Type "{bin_name} list" to see available tasks.\n')
return msg_cmd_task_not_found.format(**self.__dict__)
class InvalidDodoFile(Exception):
"""Invalid dodo file"""
pass
class InvalidTask(Exception):
"""Invalid task instance. User error on specifying the task."""
pass
class CatchedException(object):
"""This used to save info from caught exceptions
The traceback from the original exception is saved
"""
def __init__(self, msg, exception=None):
self.message = msg
self.traceback = ''
if isinstance(exception, CatchedException):
self.traceback = exception.traceback
elif exception is not None:
# TODO remove doit-code part from traceback
self.traceback = traceback.format_exception(
exception.__class__, exception, sys.exc_info()[2])
def get_msg(self):
"""return full exception description (includes traceback)"""
return "%s\n%s" % (self.message, "".join(self.traceback))
def get_name(self):
"""get Exception name"""
return self.__class__.__name__
def __repr__(self):
return "(<%s> %s)" % (self.get_name(), self.message)
def __str__(self):
return "%s\n%s" % (self.get_name(), self.get_msg())
class TaskFailed(CatchedException):
"""Task execution was not successful."""
pass
class UnmetDependency(TaskFailed):
"""Task was not executed because a dependent task failed or is ignored"""
pass
class TaskError(CatchedException):
"""Error while trying to execute task."""
pass
class SetupError(CatchedException):
"""Error while trying to execute setup object"""
pass
class DependencyError(CatchedException):
"""Error while trying to check if task is up-to-date or saving task status"""
pass
| 29.35 | 81 | 0.627257 | 2,826 | 0.962862 | 0 | 0 | 0 | 0 | 0 | 0 | 1,086 | 0.370017 |
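A minimal usage sketch (assumed, not taken from doit itself) of wrapping a caught exception in one of the classes above so the original traceback is preserved for later reporting.

try:
    1 / 0
except Exception as exc:
    err = TaskError("task 'compute' failed", exc)
print(err.get_name())  # TaskError
print(err.get_msg())   # the message followed by the original traceback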
d3fd7d52c6c44f8af7fa618793093cc47061e14f | 17,358 | py | Python | python/redmonster/tools/plot_fits.py | timahutchinson/redmonster | 73adfb20213b89e43b950eb6a7f6525f3d11cec4 | [
"CNRI-Python"
]
| 5 | 2017-03-22T21:29:40.000Z | 2022-03-24T16:02:21.000Z | python/redmonster/tools/plot_fits.py | timahutchinson/redmonster | 73adfb20213b89e43b950eb6a7f6525f3d11cec4 | [
"CNRI-Python"
]
| 4 | 2016-06-24T10:50:23.000Z | 2016-11-09T19:48:00.000Z | python/redmonster/tools/plot_fits.py | timahutchinson/redmonster | 73adfb20213b89e43b950eb6a7f6525f3d11cec4 | [
"CNRI-Python"
]
| 6 | 2016-06-21T16:52:34.000Z | 2020-03-12T05:24:14.000Z | # GUI used for quickly plotting BOSS spectra. Also allows overplotting of best-fit template as
# determined by redmonster pipeline. Sort of a redmonster version of plotspec.pro, though currently
# with fewer bells and whistles.
#
# Tim Hutchinson, University of Utah, April 2014
# Significantly updated by TH, October 2014
#
# [email protected]
from os import environ
from os.path import join, exists
try:
from tkinter import *
except ImportError:
from Tkinter import *
import numpy as n
import matplotlib
matplotlib.use('Agg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, \
NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from astropy.io import fits
from astropy.convolution import convolve, Box1DKernel
import seaborn as sns
sns.set_style('whitegrid')
from redmonster.physics.misc import poly_array
class PlotFit(Frame):
def __init__ (self):
self.root = Tk()
self.ablinelist = [
3890.2, 3933.7, 3968.5, 4102.9, 4307, 4341.7, 4862.7,
5175, 5889, 5895
]
self.ablinenames = [
r'H$\epsilon$','Ca K', 'Ca H', r'H$\delta$', 'Ca G',
r'H$\gamma$', r'H$\beta$', 'Mg I', 'Na I', 'Na I'
]
self.emlinelist = [2500]
self.emlinenames = ['test emline']
self.plate = None
self.mjd = None
#
plate = StringVar()
plate.set('7848')
mjd = StringVar()
mjd.set('56959')
#
L1 = Label(self.root, text='Plate')
L1.grid(sticky=E)
L2 = Label(self.root, text='MJD')
L2.grid(sticky=E)
L3 = Label(self.root, text='Fiber')
L3.grid(stick=E)
L5 = Label(self.root, text='z num')
L5.grid(stick=E)
self.e1 = Entry(self.root, textvariable=plate)
self.e1.bind()
self.e1.grid(row=0, column=1)
self.e2 = Entry(self.root, textvariable=mjd)
self.e2.grid(row=1, column=1)
fiber = StringVar()
fiber.set('0')
self.e3 = Entry(self.root, textvariable=fiber)
self.e3.grid(row=2, column=1)
znum = StringVar()
znum.set('1')
self.e5 = Entry(self.root, textvariable=znum)
self.e5.grid(row=3, column=1)
nextz = Button(self.root, text='+', command=self.next_z)
nextz.grid(row=3, column=4)
prevz = Button(self.root, text='-', command=self.prev_z)
prevz.grid(row=3, column=3)
self.var = BooleanVar()
self.var.set(1)
self.restframe = BooleanVar()
self.restframe.set(0)
self.ablines = BooleanVar()
self.ablines.set(0)
self.emlines = BooleanVar()
self.emlines.set(0)
c = Checkbutton(self.root, text='Overplot best-fit model',
variable=self.var)
c.grid(row=4, column=1)
restframe = Checkbutton(self.root, text='Rest-frame wavelength',
variable=self.restframe)
restframe.grid(row=5,column=1)
ablines = Checkbutton(self.root, text='Show absorption lines ',
variable=self.ablines)
ablines.grid(row=6, column=1)
emlines = Checkbutton(self.root, text='Show emission lines ',
variable=self.emlines)
emlines.grid(row=7, column=1)
#
smooth = StringVar()
smooth.set('5')
L4 = Label(self.root, text='Smooth')
L4.grid(sticky=E)
self.e4 = Entry(self.root, textvariable=smooth)
self.e4.grid(row=8, column=1)
plot = Button(self.root, text='Plot', command=self.do_plot)
plot.grid(row=9, column=1)
qbutton = Button(self.root, text='QUIT', fg='red',
command=self.root.destroy)
qbutton.grid(row=10, column=1)
nextfiber = Button(self.root, text='>', command=self.next_fiber)
nextfiber.grid(row=2, column=4)
prevfiber = Button(self.root, text='<', command=self.prev_fiber)
prevfiber.grid(row=2, column=3)
Frame.__init__(self,self.root)
self.root.mainloop()
def do_plot(self):
if self.plate != int(self.e1.get()) or self.mjd != int(self.e2.get()):
self.plate = int(self.e1.get())
self.mjd = int(self.e2.get())
self.fiber = int(self.e3.get())
self.znum = int(self.e5.get())
self.platepath = join(environ['BOSS_SPECTRO_REDUX'],
environ['RUN2D'], '%s' % self.plate,
'spPlate-%s-%s.fits' % (self.plate, self.mjd))
hdu = fits.open(self.platepath)
self.specs = hdu[0].data
self.wave = 10**(hdu[0].header['COEFF0'] +
n.arange(hdu[0].header['NAXIS1']) *
hdu[0].header['COEFF1'])
self.models = fits.open(join(environ['REDMONSTER_SPECTRO_REDUX'],
environ['RUN2D'], '%s' % self.plate,
environ['RUN1D'],
'redmonster-%s-%s.fits' %
(self.plate, self.mjd)))[2].data
self.fiberid = fits.open(join(environ['REDMONSTER_SPECTRO_REDUX'],
environ['RUN2D'], '%s' % self.plate,
environ['RUN1D'],
'redmonster-%s-%s.fits' %
(self.plate,
self.mjd)))[1].data.FIBERID
self.type1 = fits.open(join(environ['REDMONSTER_SPECTRO_REDUX'],
environ['RUN2D'], '%s' % self.plate,
environ['RUN1D'],
'redmonster-%s-%s.fits' %
(self.plate, self.mjd)))[1].data.CLASS1
self.type2 = fits.open(join(environ['REDMONSTER_SPECTRO_REDUX'],
environ['RUN2D'], '%s' % self.plate,
environ['RUN1D'],
'redmonster-%s-%s.fits' %
(self.plate, self.mjd)))[1].data.CLASS2
self.type3 = fits.open(join(environ['REDMONSTER_SPECTRO_REDUX'],
environ['RUN2D'], '%s' % self.plate,
environ['RUN1D'],
'redmonster-%s-%s.fits' %
(self.plate, self.mjd)))[1].data.CLASS3
self.type4 = fits.open(join(environ['REDMONSTER_SPECTRO_REDUX'],
environ['RUN2D'], '%s' % self.plate,
environ['RUN1D'],
'redmonster-%s-%s.fits' %
(self.plate, self.mjd)))[1].data.CLASS4
self.type5 = fits.open(join(environ['REDMONSTER_SPECTRO_REDUX'],
environ['RUN2D'], '%s' % self.plate,
environ['RUN1D'],
'redmonster-%s-%s.fits' %
(self.plate, self.mjd)))[1].data.CLASS5
self.z = n.zeros((self.fiberid.shape[0],5))
self.z[:,0] = fits.open(join(environ['REDMONSTER_SPECTRO_REDUX'],
environ['RUN2D'], '%s' % self.plate,
environ['RUN1D'],
'redmonster-%s-%s.fits' %
(self.plate, self.mjd)))[1].data.Z1
self.z[:,1] = fits.open(join(environ['REDMONSTER_SPECTRO_REDUX'],
environ['RUN2D'], '%s' % self.plate,
environ['RUN1D'],
'redmonster-%s-%s.fits' %
(self.plate, self.mjd)))[1].data.Z2
self.z[:,2] = fits.open(join(environ['REDMONSTER_SPECTRO_REDUX'],
environ['RUN2D'], '%s' % self.plate,
environ['RUN1D'],
'redmonster-%s-%s.fits' %
(self.plate, self.mjd)))[1].data.Z3
self.z[:,3] = fits.open(join(environ['REDMONSTER_SPECTRO_REDUX'],
environ['RUN2D'], '%s' % self.plate,
environ['RUN1D'],
'redmonster-%s-%s.fits' %
(self.plate, self.mjd)))[1].data.Z4
self.z[:,4] = fits.open(join(environ['REDMONSTER_SPECTRO_REDUX'],
environ['RUN2D'], '%s' % self.plate,
environ['RUN1D'],
'redmonster-%s-%s.fits' %
(self.plate, self.mjd)))[1].data.Z5
self.zwarning = fits.open(join(environ['REDMONSTER_SPECTRO_REDUX'],
environ['RUN2D'], '%s' % self.plate,
environ['RUN1D'],
'redmonster-%s-%s.fits' %
(self.plate,
self.mjd)))[1].data.ZWARNING
else:
self.fiber = int(self.e3.get())
f = Figure(figsize=(10,6), dpi=100)
a = f.add_subplot(111)
loc = n.where(self.fiberid == self.fiber)[0]
if self.znum == 1:
z = self.z[loc[0],0]
thistype = self.type1[loc[0]]
elif self.znum == 2:
z = self.z[loc[0],1]
thistype = self.type2[loc[0]]
elif self.znum == 3:
z = self.z[loc[0],2]
thistype = self.type3[loc[0]]
elif self.znum == 4:
z = self.z[loc[0],3]
thistype = self.type4[loc[0]]
elif self.znum == 5:
z = self.z[loc[0],4]
thistype = self.type5[loc[0]]
if self.var.get() == 0:
if self.restframe.get() == 0:
a.plot(self.wave, self.specs[self.fiber], color='black')
elif self.restframe.get() == 1:
a.plot(self.wave/(1+self.z[loc][0]), self.specs[self.fiber],
color='black')
elif self.var.get() == 1:
smooth = self.e4.get()
            if smooth == '':
if self.restframe.get() == 0:
a.plot(self.wave, self.specs[self.fiber], color='black')
elif self.restframe.get() == 1:
a.plot(self.wave/(1+z), self.specs[self.fiber],
color='black')
else:
if self.restframe.get() == 0:
a.plot(self.wave, convolve(self.specs[self.fiber],
Box1DKernel(int(smooth))),
color='black')
elif self.restframe.get() == 1:
a.plot(self.wave/(1+z), convolve(self.specs[self.fiber],
Box1DKernel(int(smooth))),
color='black')
# Overplot model
        if len(loc) != 0:
if self.restframe.get() == 0:
#a.plot(self.wave, self.models[loc[0]], color='black')
# This for when multiple models are in redmonster file
a.plot(self.wave, self.models[loc[0],self.znum-1],
color='cyan')
if self.ablines.get() == 1:
for i, line in enumerate(self.ablinelist):
if ((line*(1+z) > self.wave[0]) &
(line*(1+z) < self.wave[-1])):
a.axvline(line*(1+z), color='blue',
linestyle='--',
label=self.ablinenames[i])
if self.emlines.get() == 1:
for i, line in enumerate(self.emlinelist):
if (line*(1+z) > self.wave[0]) & (line*(1+z) < \
self.wave[-1]):
a.axvline(line*(1+z), color='red',
linestyle='--',
label=self.emlinenames[i])
if self.ablines.get() == 1 or self.emlines.get() == 1:
a.legend(prop={'size':10})
elif self.restframe.get() == 1:
a.plot(self.wave/(1+z), self.models[loc[0],self.znum-1],
color='cyan')
if self.ablines.get() == 1:
for i, line in enumerate(self.ablinelist):
if (line > self.wave[0]) & (line < self.wave[-1]):
a.axvline(line, color='blue', linestyle='--',
label=self.ablinenames[i])
if self.emlines.get() == 1:
for i, line in enumerate(self.emlinelist):
if (line > self.wave[0]) & (line < self.wave[-1]):
a.axvline(line, color='red', linestyle='--',
label=self.emlinenames[i])
if self.ablines.get() == 1 or self.emlines.get() == 1:
a.legend(prop={'size':10})
a.set_title('Plate %s Fiber %s: z=%s class=%s zwarning=%s' %
(self.plate, self.fiber, z, thistype,
self.zwarning[loc[0]]))
else:
print('Fiber %s is not in redmonster-%s-%s.fits' % \
(self.fiber, self.plate, self.mjd))
a.set_title('Plate %s Fiber %s' % (self.plate, self.fiber))
if self.restframe.get() == 1:
lower_data, upper_data = self.set_limits()
a.axis([self.wave[0]/(1+z)-100,self.wave[-1]/(1+z)+100,
lower_data,upper_data])
elif self.restframe.get() == 0:
lower_data, upper_data = self.set_limits()
a.axis([self.wave[0]-100,self.wave[-1]+100,lower_data,upper_data])
a.set_xlabel('Wavelength ($\AA$)')
a.set_ylabel('Flux ($10^{-17} erg\ cm^2 s^{-1} \AA^{-1}$)')
canvas = FigureCanvasTkAgg(f, master=self.root)
canvas.get_tk_widget().grid(row=0, column=5, rowspan=20)
toolbar_frame = Frame(self.root)
toolbar_frame.grid(row=20,column=5)
toolbar = NavigationToolbar2TkAgg( canvas, toolbar_frame )
canvas.show()
def next_fiber(self):
self.fiber += 1
self.e3.delete(0, END)
self.e3.insert(0, str(self.fiber))
self.do_plot()
def prev_fiber(self):
self.fiber -= 1
self.e3.delete(0, END)
self.e3.insert(0, str(self.fiber))
self.do_plot()
def next_z(self):
if (self.znum >= 1) & (self.znum < 5):
self.znum += 1
self.e5.delete(0, END)
self.e5.insert(0, str(self.znum))
self.do_plot()
else:
if self.znum < 1:
self.znum = 1
self.e5.delete(0, END)
self.e5.insert(0, str(self.znum))
self.do_plot()
elif self.znum >= 5:
self.znum = 5
self.e5.delete(0, END)
self.e5.insert(0, str(self.znum))
self.do_plot()
else:
self.znum = 1
self.e5.delete(0, END)
self.e5.insert(0, str(self.znum))
self.do_plot()
def prev_z(self):
if (self.znum > 1) & (self.znum <= 5):
self.znum -= 1
self.e5.delete(0, END)
self.e5.insert(0, str(self.znum))
self.do_plot()
else:
if self.znum <= 1:
self.znum = 1
self.e5.delete(0, END)
self.e5.insert(0, str(self.znum))
self.do_plot()
elif self.znum > 5:
self.znum = 5
self.e5.delete(0, END)
self.e5.insert(0, str(self.znum))
self.do_plot()
else:
self.znum = 1
self.e5.delete(0, END)
self.e5.insert(0, str(self.znum))
self.do_plot()
def set_limits(self, percentile=.95):
sorted_flux = n.sort( self.specs[self.fiber] )
bottom_ind = int(n.floor((1-percentile)/2. * sorted_flux.shape[0]))
        top_ind = min(sorted_flux.shape[0] - bottom_ind, sorted_flux.shape[0] - 1)
return sorted_flux[bottom_ind], sorted_flux[top_ind]
app = PlotFit()
| 47.04065 | 100 | 0.44625 | 16,489 | 0.949937 | 0 | 0 | 0 | 0 | 0 | 0 | 1,987 | 0.114472 |
d3fe4ecd726c46ffcb95a62b678fd7cd36fc2ddd | 565 | py | Python | QueueReconstruction.py | yashpatel0369/PythonDataStructures | 4839150c9eb4882e975859084d6b3787c72ce5f3 | [
"MIT"
]
| null | null | null | QueueReconstruction.py | yashpatel0369/PythonDataStructures | 4839150c9eb4882e975859084d6b3787c72ce5f3 | [
"MIT"
]
| null | null | null | QueueReconstruction.py | yashpatel0369/PythonDataStructures | 4839150c9eb4882e975859084d6b3787c72ce5f3 | [
"MIT"
]
| 1 | 2020-10-01T03:53:22.000Z | 2020-10-01T03:53:22.000Z | # An algorithm to reconstruct the queue.
# Suppose you have a random list of people standing in a queue.
# Each person is described by a pair of integers (h,k), where h is the height of the person and k is the number of people in front of this person who have a height greater than or equal to h.
from typing import List
class Solution:
def reconstructQueue(self, people: List[List[int]]) -> List[List[int]]:
people = sorted(people, key = lambda x: (-x[0], x[1]))
ans = []
for pep in people:
ans.insert(pep[1], pep)
return ans
| 43.461538 | 191 | 0.644248 | 266 | 0.470796 | 0 | 0 | 0 | 0 | 0 | 0 | 294 | 0.520354 |
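A quick check of the reconstruction above, using the input/output pair from the classic statement of this problem (not taken from this repository).

people = [[7, 0], [4, 4], [7, 1], [5, 0], [6, 1], [5, 2]]
print(Solution().reconstructQueue(people))
# expected: [[5, 0], [7, 0], [5, 2], [6, 1], [4, 4], [7, 1]]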
d3ffb66f226d9d5f6c6c34d8eb1cb0a764f6d67e | 255 | py | Python | passagens/models/classe_viagem.py | carlosrjhoe/Aplicacao_Formulario_com_Django | e16b8bc99aeb120e89c615a34261372bc81f6528 | [
"MIT"
]
| null | null | null | passagens/models/classe_viagem.py | carlosrjhoe/Aplicacao_Formulario_com_Django | e16b8bc99aeb120e89c615a34261372bc81f6528 | [
"MIT"
]
| null | null | null | passagens/models/classe_viagem.py | carlosrjhoe/Aplicacao_Formulario_com_Django | e16b8bc99aeb120e89c615a34261372bc81f6528 | [
"MIT"
]
| null | null | null | from django.db import models
from django.utils.translation import gettext_lazy as _
class ClasseViagem(models.TextChoices):
ECONOMICA = 'ECO', _('Econômica')
EXECUTIVA = 'EXE', _('Executiva')
    PRIMEIRA_CLASSE = 'PRI', _('Primeira')
| 36.428571 | 54 | 0.690196 | 171 | 0.667969 | 0 | 0 | 0 | 0 | 0 | 0 | 48 | 0.1875 |
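A minimal usage sketch (assumed, not part of this file) of wiring the choices enum above into a model field; the Passagem model is hypothetical and would live in a normal Django app.

class Passagem(models.Model):  # hypothetical model, for illustration only
    classe = models.CharField(
        max_length=3,
        choices=ClasseViagem.choices,
        default=ClasseViagem.ECONOMICA,
    )
# ClasseViagem.ECONOMICA.value == 'ECO'
# ClasseViagem.ECONOMICA.label == 'Econômica'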
d3ffbfb469a902f14133370371345f427175f9fd | 9,046 | py | Python | tests/unit/test_HashlistsByAlgLoaderThread.py | AntonKuzminRussia/hbs-cli | a4109adeb7f4fe3b1d85b29f90c3f2329a8c4153 | [
"MIT"
]
| 5 | 2016-07-13T18:21:57.000Z | 2018-03-15T21:35:30.000Z | tests/unit/test_HashlistsByAlgLoaderThread.py | AntonKuzminRussia/hbs-cli | a4109adeb7f4fe3b1d85b29f90c3f2329a8c4153 | [
"MIT"
]
| null | null | null | tests/unit/test_HashlistsByAlgLoaderThread.py | AntonKuzminRussia/hbs-cli | a4109adeb7f4fe3b1d85b29f90c3f2329a8c4153 | [
"MIT"
]
| 2 | 2016-12-04T01:06:03.000Z | 2018-09-01T17:44:14.000Z | # -*- coding: utf-8 -*-
"""
This is part of HashBruteStation software
Docs EN: http://hack4sec.pro/wiki/index.php/Hash_Brute_Station_en
Docs RU: http://hack4sec.pro/wiki/index.php/Hash_Brute_Station
License: MIT
Copyright (c) Anton Kuzmin <http://anton-kuzmin.ru> (ru) <http://anton-kuzmin.pro> (en)
Unit tests for HashlistsByAlgLoaderThread
"""
import sys
import os
import time
import pytest
sys.path.append('../../')
from libs.common import file_get_contents, md5
from classes.HashlistsByAlgLoaderThread import HashlistsByAlgLoaderThread
from CommonUnit import CommonUnit
class Test_HashlistsByAlgLoaderThread(CommonUnit):
""" Unit tests for HashlistsByAlgLoaderThread """
db = None
thrd = None
def setup(self):
""" Tests setup """
self._clean_db()
self.thrd = HashlistsByAlgLoaderThread()
self.thrd.catch_exceptions = False
def teardown(self):
""" Tests teardown """
if isinstance(self.thrd, HashlistsByAlgLoaderThread):
self.thrd.available = False
time.sleep(1)
del self.thrd
self._clean_db()
def test_get_common_hashlist_id_by_alg_get(self):
""" Test get_common_hashlist_id_by_alg_get() """
self._add_hashlist(have_salts=1, common_by_alg=3)
assert self.thrd.get_common_hashlist_id_by_alg(3) == 1
test_data = [
(
1,
{'hash': 'a', 'salt': '1', 'summ': md5('a:1')},
),
(
0,
{'hash': 'a', 'salt': '', 'summ': md5('a')},
),
]
@pytest.mark.parametrize("have_salt,_hash", test_data)
def test_get_common_hashlist_id_by_alg_create(self, have_salt, _hash):
"""
Test get_common_hashlist_id_by_alg_create()
:param have_salt: does hashlist has salt?
:param _hash: hash data row
:return:
"""
self._add_hashlist(have_salts=have_salt, common_by_alg=0)
self._add_hash(hash=_hash['hash'], salt=_hash['salt'], summ=_hash['summ'])
assert self.thrd.get_common_hashlist_id_by_alg(3) == 2
test_hashlist_data = {'id': 2, 'name': 'All-MD4', 'have_salts': have_salt, 'delimiter': self.thrd.DELIMITER,
'cracked': 0, 'uncracked': 0, 'errors': '', 'parsed': 0, 'status': 'ready',
'common_by_alg': 3}
hashlist_data = self.db.fetch_row("SELECT * FROM hashlists WHERE id = 2")
for field in test_hashlist_data:
assert hashlist_data[field] == test_hashlist_data[field]
def test_get_common_hashlist_id_by_alg_with_salt_create_one_salt_forget(self):
""" Test get_common_hashlist_id_by_alg_create() """
self._add_hashlist(have_salts=1, common_by_alg=0)
self._add_hash(hash='a', salt='b', summ='333')
self._add_hashlist(id=2, have_salts=0, common_by_alg=0)
self._add_hash(hashlist_id=2, hash='c', salt='d', summ='111')
assert self.thrd.get_common_hashlist_id_by_alg(3) == 3
test_hashlist_data = {'id': 3, 'name': 'All-MD4', 'have_salts': 1, 'delimiter': self.thrd.DELIMITER,
'cracked': 0, 'uncracked': 0, 'errors': '', 'parsed': 0, 'status': 'ready',
'common_by_alg': 3}
hashlist_data = self.db.fetch_row("SELECT * FROM hashlists WHERE id = 3")
for field in test_hashlist_data:
assert hashlist_data[field] == test_hashlist_data[field]
def test_get_current_work_hashlist(self):
""" Test get_current_work_hashlist() """
assert not self.thrd.get_current_work_hashlist()
self.db.insert("task_works", {'hashlist_id': 3, 'status': 'work', 'task_id': 1})
assert self.thrd.get_current_work_hashlist() == 3
def test_get_hashlist_status(self):
""" Test get_hashlist_status() """
self._add_hashlist(common_by_alg=1)
assert self.thrd.get_hashlist_status(1) == 'ready'
def test_is_alg_in_parse(self):
""" Test is_alg_in_parse() """
assert self.thrd.is_alg_in_parse(3) is False
self._add_hashlist(common_by_alg=1)
self.db.insert("task_works", {'hashlist_id': 1, 'status': 'waitoutparse', 'task_id': 1})
assert self.thrd.is_alg_in_parse(3) is True
assert self.thrd.is_alg_in_parse(4) is False
self._add_hashlist(id=2, alg_id=4, common_by_alg=1)
self.db.insert("task_works", {'hashlist_id': 2, 'status': 'outparsing', 'task_id': 1})
assert self.thrd.is_alg_in_parse(4) is True
def test_hashes_count_in_hashlist(self):
""" Test hashes_count_in_hashlist() """
assert self.thrd.hashes_count_in_hashlist(1) == 0
self._add_hash()
assert self.thrd.hashes_count_in_hashlist(1) == 1
def test_hashes_count_by_algs(self):
""" Test hashes_count_by_algs() """
assert self.thrd.hashes_count_by_algs() == {}
self._add_hashlist()
self._add_hash(summ='111')
self._add_hash(summ='222', hash='a', salt='b')
self._add_hashlist(id=2, alg_id=4)
self._add_hash(hashlist_id=2, summ='333')
assert self.thrd.hashes_count_by_algs() == {3: 2, 4: 1}
def test_is_alg_have_salts(self):
""" Test is_alg_have_salts() """
self._add_hashlist()
assert self.thrd.is_alg_have_salts(3) is False
self._add_hashlist(id=2, have_salts=1) # Forget salt bug
assert self.thrd.is_alg_have_salts(3) is True
def test_get_possible_hashlist_and_alg_simple(self):
""" Test get_possible_hashlist_and_alg_simple() """
self._add_hashlist()
self._add_hash(hash='a', summ='111')
self._add_hash(hash='b', summ='222')
assert self.thrd.get_possible_hashlist_and_alg() == {'hashlist_id': 2, 'alg_id': 3}
def test_get_possible_hashlist_and_alg_none_already(self):
""" Test get_possible_hashlist_and_alg_none_already() """
self._add_hashlist()
self._add_hash(hash='a', summ='111')
self._add_hash(hash='b', summ='222')
self._add_hashlist(id=2, common_by_alg=3)
self._add_hash(hashlist_id=2, hash='a', summ='111')
self._add_hash(hashlist_id=2, hash='b', summ='222')
assert self.thrd.get_possible_hashlist_and_alg() is None
def test_get_possible_hashlist_and_alg_none_in_parse(self):
""" Test get_possible_hashlist_and_alg_none_in_parse() """
self.db.insert("task_works", {'hashlist_id': 1, 'status': 'waitoutparse', 'task_id': 1})
self._add_hashlist()
self._add_hash(hash='a', summ='111')
self._add_hash(hash='b', summ='222')
assert self.thrd.get_possible_hashlist_and_alg() is None
self.db.update("task_works", {'status': 'outparsing'}, "id=1")
assert self.thrd.get_possible_hashlist_and_alg() is None
def test_get_possible_hashlist_and_alg_none_not_ready(self):
""" Test get_possible_hashlist_and_alg_none_not_ready() """
self._add_hashlist()
self._add_hash(hash='a', summ='111')
self._add_hash(hash='b', summ='222')
self._add_hashlist(id=2, status='wait', common_by_alg=3)
assert self.thrd.get_possible_hashlist_and_alg() is None
def test_get_possible_hashlist_and_alg_none_in_work(self):
""" Test get_possible_hashlist_and_alg_none_in_work() """
self._add_hashlist()
self._add_hash(hash='a', summ='111')
self._add_hash(hash='b', summ='222')
self._add_hashlist(id=2, common_by_alg=3)
self.db.insert("task_works", {'hashlist_id': 2, 'status': 'work', 'task_id': 1})
assert self.thrd.get_possible_hashlist_and_alg() is None
def test_clean_old_hashes(self):
""" Test clean_old_hashes() """
self._add_hashlist()
self._add_hash(hash='a', summ='111')
self._add_hash(hash='b', summ='222')
assert self.db.fetch_one("SELECT COUNT(*) FROM hashes WHERE hashlist_id = 1") == 2
self.thrd.clean_old_hashes(1)
assert self.db.fetch_one("SELECT COUNT(*) FROM hashes WHERE hashlist_id = 1") == 0
assert self.db.fetch_one("SELECT cracked+uncracked FROM hashlists WHERE id = 1") == 0
def test_put_all_hashes_of_alg_in_file(self):
""" Test put_all_hashes_of_alg_in_file() """
self._add_hashlist()
self._add_hash(hash='a', summ='111')
self._add_hash(summ='222')
self._add_hash(hash='b', summ='333')
path = self.thrd.put_all_hashes_of_alg_in_file(3)
assert os.path.exists(path)
assert file_get_contents(path) == 'a\nb\n'
self._add_hashlist(id=2, have_salts=1, alg_id=4)
self._add_hash(hashlist_id=2, hash='a', salt='b', summ='111')
self._add_hash(hashlist_id=2, summ='222')
self._add_hash(hashlist_id=2, hash='c', salt='d', summ='333')
path = self.thrd.put_all_hashes_of_alg_in_file(4)
assert os.path.exists(path)
assert file_get_contents(path) == 'a{0}b\nc{0}d\n'.format(self.thrd.DELIMITER)
| 38.330508 | 116 | 0.64371 | 8,458 | 0.934999 | 0 | 0 | 988 | 0.10922 | 0 | 0 | 2,406 | 0.265974 |