hexsha stringlengths 40–40 | size int64 5–2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3–248 | max_stars_repo_name stringlengths 5–125 | max_stars_repo_head_hexsha stringlengths 40–78 | max_stars_repo_licenses listlengths 1–10 | max_stars_count int64 1–191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24–24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24–24 ⌀ | max_issues_repo_path stringlengths 3–248 | max_issues_repo_name stringlengths 5–125 | max_issues_repo_head_hexsha stringlengths 40–78 | max_issues_repo_licenses listlengths 1–10 | max_issues_count int64 1–67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24–24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24–24 ⌀ | max_forks_repo_path stringlengths 3–248 | max_forks_repo_name stringlengths 5–125 | max_forks_repo_head_hexsha stringlengths 40–78 | max_forks_repo_licenses listlengths 1–10 | max_forks_count int64 1–105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24–24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24–24 ⌀ | content stringlengths 5–2.06M | avg_line_length float64 1–1.02M | max_line_length int64 3–1.03M | alphanum_fraction float64 0–1 | count_classes int64 0–1.6M | score_classes float64 0–1 | count_generators int64 0–651k | score_generators float64 0–1 | count_decorators int64 0–990k | score_decorators float64 0–1 | count_async_functions int64 0–235k | score_async_functions float64 0–1 | count_documentation int64 0–1.04M | score_documentation float64 0–1 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
27af82c734c9c172d86f1e925df82c41889d2af8 | 5,388 | py | Python | main.py | GuruOfPython/Python-Tkinter-GUI | de17e819cc6008274077d8347d722e779cb9166b | ["MIT"] | null | null | null | main.py | GuruOfPython/Python-Tkinter-GUI | de17e819cc6008274077d8347d722e779cb9166b | ["MIT"] | null | null | null | main.py | GuruOfPython/Python-Tkinter-GUI | de17e819cc6008274077d8347d722e779cb9166b | ["MIT"] | null | null | null |
# from binary_tree import *
#
# root = Node(8)
#
# root.insert(3)
# root.insert(10)
# root.insert(1)
# root.insert(6)
# root.insert(4)
# root.insert(7)
# root.insert(14)
# root.insert(13)
# node, parent = root.lookup(6)
# print(node, parent)
# root.print_tree()
#
# root.delete(10)
#
# root.print_tree()
import tkinter as tk
from tkinter import *
# import tkMessageBox as messagesbox
import tkinter.messagebox as messagebox
from tkinter import ttk
from tkinter import simpledialog
from treeview import TreeView
from random import shuffle
from naive import NaiveBST, perfect_inserter
from random import *
import random
class main_GUI(Tk):
def __init__(self, parent):
tk.Tk.__init__(self, parent)
self.parent = parent
self.resizable(0, 0)
self.geometry("1200x800")
self.setting_frame = LabelFrame(self, text="Setting")
create_btn = Button(self.setting_frame, text="Create", height=1, width=10, command=self.create)
create_btn.grid(row=0, padx=5, pady=5)
insert_btn = Button(self.setting_frame, text="Insert", height=1, width=10, command=self.insert)
insert_btn.grid(row=2, padx=5, pady=5)
# self.insert_e = Entry(self.setting_frame, height=1, width=10)
self.insert_e = Entry(self.setting_frame)
self.insert_e.grid(row=2, column=1, padx=5, pady=5)
delete_btn = Button(self.setting_frame, text="Delete", height=1, width=10, command=self.delete)
delete_btn.grid(row=4, padx=5, pady=5)
# self.delete_e = Entry(self.setting_frame, height=1, width=10)
self.delete_e = Entry(self.setting_frame)
self.delete_e.grid(row=4, column=1, padx=5, pady=5)
search_btn = Button(self.setting_frame, text="Search", height=1, width=10, command=self.search)
search_btn.grid(row=6, padx=5, pady=5)
# self.search_e = Entry(self.setting_frame, height=1, width=10)
self.search_e = Entry(self.setting_frame)
self.search_e.grid(row=6, column=1, padx=5, pady=5)
# self.setting_frame.grid(row=1, padx=5, pady=5, sticky=N+S)
self.setting_frame.pack(padx=5, pady=5, side=LEFT)
self.drawing_frame = tk.LabelFrame(self, text="Drawing")
# self.drawing_frame.grid(row=1, column=2, padx=5, pady=5, sticky=N+S)
self.drawing_frame.pack(padx=5, pady=5, fill=BOTH, expand=1)
self.tree = NaiveBST()
self.treeview = TreeView(self.drawing_frame, tree=self.tree)
def callback():
if messagebox.askokcancel("Quit", "Do you really wish to quit?"):
self.destroy()
self.treeview.end_pause = True
self.protocol("WM_DELETE_WINDOW", callback)
def create(self):
# keys = list(range(20))
# shuffle(keys)
# print(keys)
# keys = [randint(1,30) for i in range(20)]
keys = random.sample(range(1, 30), 20)
self.tree.root = None
print(keys)
for i in keys:
self.tree.insert(i)
# perfect_inserter(self.tree, sorted(keys))
self.tree.view()
def insert(self):
if self.tree.root is None:
messagebox.showerror("No Tree", "There is no tree. Please create a tree")
return
if not self.insert_e.get():
messagebox.showerror("No Value", "Please enter a node key")
return
elif not self.insert_e.get().isdigit():
messagebox.showerror("Invalid Value", "Please enter an integer value")
return
node_key = int(self.insert_e.get())
[flag, p] = self.tree.search(node_key)
if not flag:
self.tree.insert(node_key)
self.tree.view()
else:
messagebox.showerror("Invalid Value", "The key already exists. Please enter another value")
return
def delete(self):
if self.tree.root is None:
messagebox.showerror("No Tree", "There is no tree. Please create a tree")
return
if not self.delete_e.get():
messagebox.showerror("No Value", "Please enter a node key")
return
elif not self.delete_e.get().isdigit():
messagebox.showerror("Invalid Value", "Please enter an integer value")
return
node_key = int(self.delete_e.get())
[flag, p] = self.tree.search(node_key)
if flag:
self.tree.delete(node_key)
self.tree.view()
else:
messagebox.showerror("Invalid Value", "The key doesn't exists. Please enter another value")
return
def search(self):
if self.tree.root is None:
messagebox.showerror("No Tree", "There is no tree. Please create a tree")
return
if not self.search_e.get():
messagebox.showerror("No Value", "Please enter a node key")
return
elif not self.search_e.get().isdigit():
messagebox.showerror("Invalid Value", "Please enter an integer value")
return
node_key = int(self.search_e.get())
[flag, p] = self.tree.search(node_key)
if flag and p:
self.tree.view(highlight_nodes=[p])
else:
messagebox.showerror("Invalid Value", "The key can't be found")
if __name__ == '__main__':
app = main_GUI(None)
app.title("Binary Search Tree")
app.mainloop()
| 32.853659 | 103 | 0.615256 | 4,669 | 0.866555 | 0 | 0 | 0 | 0 | 0 | 0 | 1,484 | 0.275427 |
27b12ffdc16386ed1ffaa3ad7820397e93894fcc | 4,634 | py | Python | cbagent/collectors/sgimport_latency.py | sharujayaram/perfrunner | 8fe8ff42a5c74c274b569ba2c45cd43b320f48eb | ["Apache-2.0"] | null | null | null | cbagent/collectors/sgimport_latency.py | sharujayaram/perfrunner | 8fe8ff42a5c74c274b569ba2c45cd43b320f48eb | ["Apache-2.0"] | null | null | null | cbagent/collectors/sgimport_latency.py | sharujayaram/perfrunner | 8fe8ff42a5c74c274b569ba2c45cd43b320f48eb | ["Apache-2.0"] | 1 | 2019-05-20T13:44:29.000Z | 2019-05-20T13:44:29.000Z |
import requests
import json
from concurrent.futures import ProcessPoolExecutor as Executor
from concurrent.futures import ThreadPoolExecutor
from time import sleep, time
from couchbase.bucket import Bucket
from cbagent.collectors import Latency, Collector
from logger import logger
from perfrunner.helpers.misc import uhex
from spring.docgen import Document
from cbagent.metadata_client import MetadataClient
from cbagent.stores import PerfStore
from perfrunner.settings import (
ClusterSpec,
PhaseSettings,
TargetIterator,
TestConfig,
)
def new_client(host, bucket, password, timeout):
connection_string = 'couchbase://{}/{}?password={}'
connection_string = connection_string.format(host,
bucket,
password)
client = Bucket(connection_string=connection_string)
client.timeout = timeout
return client
class SGImport_latency(Collector):
COLLECTOR = "sgimport_latency"
METRICS = "sgimport_latency"
INITIAL_POLLING_INTERVAL = 0.001 # 1 ms
    TIMEOUT = 3600  # 1 hour
    MAX_SAMPLING_INTERVAL = 10  # 10 s
def __init__(self, settings,
cluster_spec: ClusterSpec,
test_config: TestConfig
):
self.cluster_spec = cluster_spec
self.test_config = test_config
self.mc = MetadataClient(settings)
self.store = PerfStore(settings.cbmonitor_host)
self.workload_setting = PhaseSettings
self.interval = self.MAX_SAMPLING_INTERVAL
self.cluster = settings.cluster
self.clients = []
self.cb_host = self.cluster_spec.servers[int(self.test_config.nodes)]
self.sg_host = next(self.cluster_spec.masters)
src_client = new_client(host=self.cb_host,
bucket='bucket-1',
password='password',
timeout=self.TIMEOUT)
self.clients.append(('bucket-1', src_client))
self.new_docs = Document(1024)
def check_longpoll_changefeed(self, host: str, key: str, last_sequence: str):
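        # Long-poll the Sync Gateway _changes feed (since last_sequence) until the
        # given key shows up, then return the time at which the matching response arrived.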
sg_db = 'db'
api = 'http://{}:4985/{}/_changes'.format(host, sg_db)
last_sequence_str = "{}".format(last_sequence)
data = {'filter': 'sync_gateway/bychannel',
'feed': 'longpoll',
"channels": "123",
"since": last_sequence_str,
"heartbeat": 3600000}
response = requests.post(url=api, data=json.dumps(data))
t1 = time()
record_found = 0
if response.status_code == 200:
for record in response.json()['results']:
if record['id'] == key:
record_found = 1
break
if record_found != 1:
self.check_longpoll_changefeed(host=host, key=key, last_sequence=last_sequence)
return t1
def insert_doc(self, src_client, key: str, doc):
src_client.upsert(key, doc)
return time()
def get_lastsequence(self, host: str):
sg_db = 'db'
api = 'http://{}:4985/{}/_changes'.format(host, sg_db)
data = {'filter': 'sync_gateway/bychannel',
'feed': 'normal',
"channels": "123",
"since": "0"
}
response = requests.post(url=api, data=json.dumps(data))
last_sequence = response.json()['last_seq']
return last_sequence
def measure(self, src_client):
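        # Time one import: write a document to Couchbase and, in parallel, wait for it to
        # appear on the Sync Gateway changes feed; the difference is the latency in ms.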
key = "sgimport_{}".format(uhex())
doc = self.new_docs.next(key)
last_sequence = self.get_lastsequence(host=self.sg_host)
executor = ThreadPoolExecutor(max_workers=2)
future1 = executor.submit(self.check_longpoll_changefeed, host=self.sg_host,
key=key,
last_sequence=last_sequence)
future2 = executor.submit(self.insert_doc, src_client=src_client, key=key, doc=doc)
t1, t0 = future1.result(), future2.result()
print('import latency t1, t0', t1, t0, (t1 - t0) * 1000)
return {'sgimport_latency': (t1 - t0) * 1000} # s -> ms
def sample(self):
for bucket, src_client in self.clients:
lags = self.measure(src_client)
self.store.append(lags,
cluster=self.cluster,
collector=self.COLLECTOR)
def update_metadata(self):
self.mc.add_cluster()
self.mc.add_metric(self.METRICS, collector=self.COLLECTOR)
| 30.486842 | 95 | 0.594087 | 3,691 | 0.796504 | 0 | 0 | 0 | 0 | 0 | 0 | 430 | 0.092792 |
27b2e2a025ad448d149dbcc0d2fb399829c3c2bf | 370 | py | Python | clean_junos_routes.py | JNPRAutomate/event_driven_automation_with_a_TIG_stack | 4e2cebdec4dc8d681d71374a7c342f016b8b649e | ["MIT"] | 4 | 2019-08-23T10:55:48.000Z | 2021-06-24T01:00:12.000Z | clean_junos_routes.py | JNPRAutomate/event_driven_automation_with_a_TIG_stack | 4e2cebdec4dc8d681d71374a7c342f016b8b649e | ["MIT"] | null | null | null | clean_junos_routes.py | JNPRAutomate/event_driven_automation_with_a_TIG_stack | 4e2cebdec4dc8d681d71374a7c342f016b8b649e | ["MIT"] | 5 | 2019-03-25T11:12:00.000Z | 2021-12-23T03:01:14.000Z |
from jnpr.junos import Device
from jnpr.junos.utils.config import Config
def clean_routing_table():
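    # Connect to the lab device, load clean_routes.conf in a private config session and commit it.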
device=Device (host='100.123.1.3', user='jcluser', password='Juniper!1')
device.open()
cfg=Config(device, mode='private')
cfg.load(path='junos_configuration/clean_routes.conf', format='text')
cfg.commit()
device.close()
clean_routing_table()
| 24.666667 | 76 | 0.716216 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 87 | 0.235135 |
27b4b4442e8234ce781c98d6ea27cb6fba57c3a9 | 5,000 | py | Python | Tools/renew-navi-npc.py | vakhet/ragnarok-navigation | df7d3ff95a9bd1c0497744113ad664a31d248de6 | ["MIT"] | 3 | 2017-12-02T16:40:32.000Z | 2020-02-11T17:44:02.000Z | Tools/renew-navi-npc.py | vakhet/ragnarok-navigation | df7d3ff95a9bd1c0497744113ad664a31d248de6 | ["MIT"] | null | null | null | Tools/renew-navi-npc.py | vakhet/ragnarok-navigation | df7d3ff95a9bd1c0497744113ad664a31d248de6 | ["MIT"] | null | null | null |
"""
Author : vakhet at gmail.com
This script gets all your NPC names from the original rAthena folder
and updates their lines in navi_npc_krpri.lub
wherever matches the map_name and coords
"""
import re
import os
import random
import sqlite3
NPC_match = r'^[\w\d_]+,\d+,\d+,\d+\tscript\t[\w\d_ -]+#*[\w\d_ -]*\t[\d,{]+$'
allfiles = []
log = open('result.log', 'w', errors='ignore')
conn = sqlite3.connect('db.sqlite')
db = conn.cursor()
intro = '''
Renew navi_npc_krpri.lub | Version 0.2 | (C) 2017 vakhet @ gmail.com
Changes:
v0.2 - *.new file now creates in same folder with original *.lub
'''
outro = '''
Check results in result.log
NEW file generated: navi_npc_krpri.new
'''
db.executescript('''
DROP TABLE IF EXISTS npc;
CREATE TABLE npc (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
map TEXT,
thing1 INTEGER,
thing2 INTEGER,
thing3 INTEGER,
name TEXT,
shadow TEXT,
x INTEGER,
y INTEGER
)
''')
def parse_npc(line):
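    # Split an rAthena NPC script line into (name, map_name, x, y, shadow),
    # where "shadow" is the hidden part of the NPC name after '#'.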
ln = line.split(',')
map_name, x, y = ln[0], int(ln[1]), int(ln[2])
fullname = ln[3].split('\t')
fullname = fullname[2]
if re.search('#', fullname):
ln = fullname.split('#')
name = ln[0]
shadow = ln[1]
# print(line,'\n',shadow,'<\n=====')
else:
name = fullname
shadow = ''
return name, map_name, x, y, shadow
def parse_navi(line):
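    # Strip the surrounding Lua braces from a navi_npc_krpri.lub entry and return
    # its fields as a tuple, unquoting strings and converting numbers to int.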
line = re.sub('^.*{\s*', '', line)
line = re.sub('\s*}.*$', '', line)
line = line.split(', ')
for i in range(len(line)):
line[i] = re.sub('"', '', line[i], count=2)
try:
line[i] = int(line[i])
except ValueError:
pass
return tuple(line)
def stage_1():
for root, dirs, files in os.walk(path_rathena):
for file in files:
if file.endswith('.txt'):
line = os.path.join(root, file)
allfiles.append(line)
def stage_2():
fh = open(path_navi+'\\navi_npc_krpri.lub', 'r', errors='ignore')
for line in fh.readlines():
navi = parse_navi(line)
if len(navi) != 8:
continue
db.execute('''INSERT INTO npc
(map, thing1, thing2, thing3, name, shadow, x, y)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)''', navi)
conn.commit()
fh.close()
def stage_3():
total, updated = 0, 0
print('Working... ', end='')
for file in allfiles:
fh = open(file, 'r', errors='ignore')
for line in fh.readlines():
print('\b'+chr(random.randint(65, 122)), end='')
if re.match(NPC_match, line) is None:
continue
npc = parse_npc(line)
total = total + 1
db.execute('''SELECT COUNT(id), id, name, map, x, y, shadow FROM npc
WHERE map=? AND x=? AND y=?''', (npc[1], npc[2], npc[3]))
sql = db.fetchone()
if sql[0] == 0 or (sql[2] == npc[0] and sql[6] == npc[4]):
continue
log.writelines('({},{},{}) {} -> {}#{}\n'.format(
sql[3], str(sql[4]), str(sql[5]), sql[2], npc[0], npc[4]))
db.execute('UPDATE npc SET name=?, shadow=? WHERE id=?',
(npc[0], npc[4], sql[1]))
conn.commit()
updated += 1
fh.close()
log.close()
print('\bOK!')
print('Found {} NPC definitions (warps not included)'.format(total))
print('Updated {} NPC names'.format(updated))
def stage_4():
file = open(path_navi+'navi_npc_krpri.new', 'w', errors='ignore')
file.writelines('Navi_Npc = {\n')
sql = db.execute('SELECT * FROM npc WHERE thing1<>0 ORDER BY map, thing1')
for row in sql:
line = '\t{ '
for i in range(1, 9):
try:
item = str(row[i])
except (ValueError, TypeError):
pass
if i in (1, 5, 6):
item = '"{}"'.format(row[i])
line += item + ', '
line = line[:-2] + ' },\n'
file.writelines(line)
file.writelines('\t{ "NULL", 0, 0, 0, "", "", 0, 0 }\n}\n\n')
file.close()
# The Beginning
print(intro)
while True:
path_rathena = input('Enter path to NPC: ')
if not os.path.exists(path_rathena):
print('Wrong path!\n\n')
continue
else:
break
while True:
path_navi = input('Enter path to navi_npc_krpri.lub: ')
if not os.path.exists(path_navi+'\\navi_npc_krpri.lub'):
print('Wrong path!\n\n')
continue
else:
break
stage_1() # scan for *.txt in \npc directory
stage_2() # build DB from navi_npc_krpri.lub
stage_3() # update NPC names in DB from *.txt
stage_4() # building navi_npc_krpri.new
print('Complete list of changes is in result.log')
print('NEW file generated: navi_npc_krpri.new')
input('\nPress any key')
| 28.571429 | 82 | 0.523 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,870 | 0.374 |
27b65665bc694cadf63afa04994bea99c68ab2c9 | 85 | py | Python | rpicarserver/backlight/__init__.py | krixian/rpi-car-server | 8058bec91462b5f2645119de898779d4bf292a84 | ["MIT"] | null | null | null | rpicarserver/backlight/__init__.py | krixian/rpi-car-server | 8058bec91462b5f2645119de898779d4bf292a84 | ["MIT"] | null | null | null | rpicarserver/backlight/__init__.py | krixian/rpi-car-server | 8058bec91462b5f2645119de898779d4bf292a84 | ["MIT"] | null | null | null |
from rpicarserver import ext
class Extension(ext.Extension):
name = "backlight"
| 17 | 31 | 0.752941 | 54 | 0.635294 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0.129412 |
27b801a71ed41ab9ae80dc219943a39cdead01b2 | 712 | py | Python | tests/components/rtsp_to_webrtc/test_diagnostics.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | ["Apache-2.0"] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | tests/components/rtsp_to_webrtc/test_diagnostics.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | ["Apache-2.0"] | 24,710 | 2016-04-13T08:27:26.000Z | 2020-03-02T12:59:13.000Z | tests/components/rtsp_to_webrtc/test_diagnostics.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | ["Apache-2.0"] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z |
"""Test nest diagnostics."""
from typing import Any
from .conftest import ComponentSetup
from tests.common import MockConfigEntry
from tests.components.diagnostics import get_diagnostics_for_config_entry
THERMOSTAT_TYPE = "sdm.devices.types.THERMOSTAT"
async def test_entry_diagnostics(
hass,
hass_client,
config_entry: MockConfigEntry,
rtsp_to_webrtc_client: Any,
setup_integration: ComponentSetup,
):
"""Test config entry diagnostics."""
await setup_integration()
assert await get_diagnostics_for_config_entry(hass, hass_client, config_entry) == {
"discovery": {"attempt": 1, "web.failure": 1, "webrtc.success": 1},
"web": {},
"webrtc": {},
}
| 25.428571 | 87 | 0.716292 | 0 | 0 | 0 | 0 | 0 | 0 | 452 | 0.634831 | 156 | 0.219101 |
27b8c7ca0cbfe891ed4189a0d771be178c3ebb62 | 556 | py | Python | Modulo_5/semana_3/pandas/pd.py | AutodidactaMx/cocid_python | 11628f465ff362807a692c79ede26bf30dd8e26a | ["MIT"] | null | null | null | Modulo_5/semana_3/pandas/pd.py | AutodidactaMx/cocid_python | 11628f465ff362807a692c79ede26bf30dd8e26a | ["MIT"] | null | null | null | Modulo_5/semana_3/pandas/pd.py | AutodidactaMx/cocid_python | 11628f465ff362807a692c79ede26bf30dd8e26a | ["MIT"] | 1 | 2022-03-04T00:57:18.000Z | 2022-03-04T00:57:18.000Z |
import pandas as pd
import numpy as np
df = pd.read_csv('poblacion.csv')
pd.options.display.float_format = '{:,.1f}'.format
df = pd.read_csv('poblacion.csv')
df['year'] = pd.Categorical(df['year'].apply(str))
idx_filtro = df['Country'].isin(['Mexico','Panama'])
df_filtro_country = df[idx_filtro]
df_filtro_country =df_filtro_country.set_index(['Country','year']).sort_index(ascending= [False,True])
print(df_filtro_country.unstack('Country'))
ids = pd.IndexSlice
print(df_filtro_country.loc[ids['Albania':'Azerbaijan','2015':'2016'],:].sort_index())
| 30.888889 | 102 | 0.733813 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 133 | 0.239209 |
27b8f98dbc5944c52c7fdf99ecb0474a2db0ffed | 3,477 | py | Python | reachweb/models.py | kamauvick/ReachOutDash | ceb7da731982bc9d1b1bb4185f34822b4dcf6526 | ["MIT"] | null | null | null | reachweb/models.py | kamauvick/ReachOutDash | ceb7da731982bc9d1b1bb4185f34822b4dcf6526 | ["MIT"] | 9 | 2020-02-12T02:44:31.000Z | 2022-03-12T00:03:57.000Z | reachweb/models.py | kamauvick/ReachOutDash | ceb7da731982bc9d1b1bb4185f34822b4dcf6526 | ["MIT"] | null | null | null |
from django.contrib.auth.models import User
from django.db import models
class Chv(models.Model):
name = models.OneToOneField(User, on_delete=models.PROTECT, related_name='profile')
age = models.IntegerField()
phonenumber = models.CharField(max_length=255)
profile_picture = models.ImageField(upload_to='chv_profiles/', blank=True, default='prof.jpg')
location = models.CharField(max_length=200)
class Meta:
db_table = 'chv'
ordering = ['-name']
def __str__(self):
return f'{self.name}'
@classmethod
def get_all_chvs(cls):
chvs = cls.objects.all()
return chvs
# @receiver(post_save, sender=User)
# def create_chv(sender, instance, created, **kwargs):
# if created:
# Chv.objects.create(name=instance)
#
# @receiver(post_save, sender=User)
# def save_chv(sender, instance, **kwargs):
# instance.profile.save()
class Patient(models.Model):
URGENCY_LEVELS = (
('red', 'High severity'),
('yellow', 'Moderate severity'),
('green', 'Low severity'),
('blue', 'Unknown severity'),
)
LOCATIONS = (
('Juja', 'Gachororo'),
('High Point', 'Sewage'),
('K-road', 'Stage'),
('Gwa-Kairu', 'Estate'),
('Ruiru', 'Kimbo'),
('Kasarani', 'Nairobi'),
)
name = models.CharField(max_length=255)
examiner = models.ForeignKey('Chv', on_delete=models.CASCADE, related_name='chv')
age = models.IntegerField()
gender = models.CharField(max_length=200)
location = models.CharField(choices=LOCATIONS, max_length=200, default='Ruiru')
time = models.DateTimeField()
symptoms = models.TextField()
urgency = models.CharField(max_length=200, choices=URGENCY_LEVELS, default='blue')
action_taken = models.TextField()
class Meta:
db_table = 'patient'
ordering = ['-name']
def __str__(self):
return f'{self.name},::: {self.location}'
@classmethod
def get_all_patients(cls):
patients = cls.objects.all()
return patients
class Emergencies(models.Model):
Emergency_TYPES = (
('Road', 'Road accidents'),
('Fire', 'Fire emergencies'),
('Water', 'Water related accidents'),
('Sickness', 'Sick people emergencies'),
)
type = models.CharField(max_length=200, choices=Emergency_TYPES, default='Sickness')
location = models.ForeignKey('Location', on_delete=models.CASCADE, related_name='locale')
reported_by = models.ForeignKey('Chv', on_delete=models.CASCADE, related_name='reporter')
class Meta:
db_table = 'emergencies'
ordering = ['type']
@classmethod
def get_all_emergencies(cls):
emergencies = cls.objects.all()
return emergencies
class Location(models.Model):
ROAD_ACCESS = (
('Great', 'The roads are well passable in all weather conditions'),
('Good', 'The roads are passable in favourable weather conditions'),
('Bad', 'The roads are not passable'),
)
name = models.CharField(max_length=200)
county = models.CharField(max_length=200)
accessibility = models.CharField(max_length=200, choices=ROAD_ACCESS)
class Meta:
db_table = 'location'
ordering = ['-name']
def __str__(self):
return f'{self.name}'
@classmethod
def get_all_locations(cls):
locations = cls.objects.all()
return locations
| 30.5 | 98 | 0.632442 | 3,392 | 0.975554 | 0 | 0 | 416 | 0.119643 | 0 | 0 | 950 | 0.273224 |
27bb547681e27f63805f0e3f2bcfba62a6d181f3 | 4,876 | py | Python | distances/symmetric_amd_distance.py | npielawski/py_alpha_amd_release | 6fb5b3cdef65ba8902daea050785dd73970002c2 | ["MIT"] | 14 | 2019-02-12T20:30:23.000Z | 2021-11-04T01:10:34.000Z | distances/symmetric_amd_distance.py | npielawski/py_alpha_amd_release | 6fb5b3cdef65ba8902daea050785dd73970002c2 | ["MIT"] | 2 | 2021-05-12T05:02:59.000Z | 2021-10-11T14:40:10.000Z | distances/symmetric_amd_distance.py | npielawski/py_alpha_amd_release | 6fb5b3cdef65ba8902daea050785dd73970002c2 | ["MIT"] | 7 | 2019-02-20T12:19:28.000Z | 2021-02-09T10:12:06.000Z |
#
# Py-Alpha-AMD Registration Framework
# Author: Johan Ofverstedt
# Reference: Fast and Robust Symmetric Image Registration Based on Distances Combining Intensity and Spatial Information
#
# Copyright 2019 Johan Ofverstedt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
#
# Symmetric Average Minimal Distances (AMD) Distance implemented as a class.
#
import numpy as np
class SymmetricAMDDistance:
def __init__(self, symmetric_measure = True, squared_measure = False):
self.ref_image_source = None
self.flo_image_source = None
self.ref_image_target = None
self.flo_image_target = None
self.sampling_fraction = 1.0
self.sampling_count = np.nan
self.symmetric_measure = symmetric_measure
self.squared_measure = squared_measure
def set_ref_image_source(self, image):
self.ref_image_source = image
def set_flo_image_source(self, image):
self.flo_image_source = image
def set_ref_image_target(self, image):
self.ref_image_target = image
def set_flo_image_target(self, image):
self.flo_image_target = image
def set_sampling_fraction(self, sampling_fraction):
self.sampling_fraction = sampling_fraction
def initialize(self):
self.sampling_count_forward = self.ref_image_source.get_sampling_fraction_count(self.sampling_fraction)
self.sampling_count_inverse = self.flo_image_source.get_sampling_fraction_count(self.sampling_fraction)
def asymmetric_value_and_derivatives(self, transform, source, target, target_cp, sampling_count):
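        # One direction of the measure: map randomly sampled source points into the target
        # image and accumulate the weighted distance value and its gradient with respect to
        # the transform parameters.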
w_acc = 0.0
value_acc = 0.0
grad_acc = np.zeros(transform.get_param_count())
sampled_points = source.random_sample(sampling_count)
for q in range(len(sampled_points)):
sampled_points_q = sampled_points[q]
if sampled_points_q.size == 0:
continue
w_q = sampled_points_q[:, -1:]
pnts_q = sampled_points_q[:, 0:-1]
tf_pnts = transform.transform(pnts_q) + target_cp
(eval_pnts, eval_w) = target.compute_spatial_grad_and_value(tf_pnts, w_q, q)
values_q = eval_pnts[:, -1:]
grads_q = eval_pnts[:, :-1]
if self.squared_measure:
grads_q = 2.0 * values_q * grads_q
values_q = np.square(values_q)
value_acc = value_acc + np.sum(values_q)
w_acc = w_acc + np.sum(eval_w)
grad_q_2 = transform.grad(pnts_q, grads_q, False)
grad_acc[:] = grad_acc[:] + grad_q_2
#print("grad_acc: " + str(grad_acc))
if w_acc < 0.000001:
w_acc = 1.0
#print("w_acc: " + str(w_acc))
#print("grad_acc: " + str(grad_acc))
w_rec = 1.0 / w_acc
value_acc = value_acc * w_rec
grad_acc[:] = grad_acc[:] * w_rec
#print("grad_acc: " + str(grad_acc))
return (value_acc, grad_acc)
def value_and_derivatives(self, transform):
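        # Full measure: the forward (ref -> flo) term, averaged with the inverse term
        # when symmetric_measure is enabled.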
ref_cp = self.ref_image_source.get_center_point()
flo_cp = self.flo_image_source.get_center_point()
(forward_value, forward_grad) = self.asymmetric_value_and_derivatives(transform, self.ref_image_source, self.flo_image_target, flo_cp, self.sampling_count_forward)
if self.symmetric_measure:
inv_transform = transform.invert()
(inverse_value, inverse_grad) = self.asymmetric_value_and_derivatives(inv_transform, self.flo_image_source, self.ref_image_target, ref_cp, self.sampling_count_inverse)
inverse_grad = transform.grad_inverse_to_forward(inverse_grad)
value = 0.5 * (forward_value + inverse_value)
grad = 0.5 * (forward_grad + inverse_grad)
else:
value = forward_value
grad = forward_grad
return (value, grad)
| 41.322034 | 179 | 0.689295 | 3,502 | 0.718212 | 0 | 0 | 0 | 0 | 0 | 0 | 1,467 | 0.300861 |
27bbbbc489c4d13faa511e2c2877df5e0ce8a2dd | 431 | py | Python | shop/migrations/0005_product_discounted_price.py | RitvikDayal/The-Stone-Shop | fed85d5ebb993fb1ce235f5e8a8ebc06a76fb956 | ["MIT"] | 2 | 2020-08-27T21:02:54.000Z | 2020-08-27T21:03:44.000Z | shop/migrations/0005_product_discounted_price.py | RitvikDayal/The-Stone-Shop | fed85d5ebb993fb1ce235f5e8a8ebc06a76fb956 | ["MIT"] | null | null | null | shop/migrations/0005_product_discounted_price.py | RitvikDayal/The-Stone-Shop | fed85d5ebb993fb1ce235f5e8a8ebc06a76fb956 | ["MIT"] | null | null | null |
# Generated by Django 3.0.8 on 2020-07-28 12:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0004_product_product_image'),
]
operations = [
migrations.AddField(
model_name='product',
name='discounted_price',
field=models.FloatField(default=None),
preserve_default=False,
),
]
| 21.55 | 50 | 0.605568 | 338 | 0.784223 | 0 | 0 | 0 | 0 | 0 | 0 | 108 | 0.25058 |
27bc982db629d22f64003fb61afd5dee8511c5de | 131 | py | Python | tudo/ex060b.py | Ramon-Erik/Exercicios-Python | 158a7f1846dd3d486aa0517fa337d46d73aab649 | ["MIT"] | 1 | 2021-07-08T00:35:57.000Z | 2021-07-08T00:35:57.000Z | tudo/ex060b.py | Ramon-Erik/Exercicios-Python | 158a7f1846dd3d486aa0517fa337d46d73aab649 | ["MIT"] | null | null | null | tudo/ex060b.py | Ramon-Erik/Exercicios-Python | 158a7f1846dd3d486aa0517fa337d46d73aab649 | ["MIT"] | null | null | null |
from math import factorial
n = int(input('Digite um número, para obter seu fatorial: '))
print('{}! é {}'.format(n, factorial(n)))
| 32.75 | 61 | 0.679389 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 57 | 0.428571 |
27bd073801f417f0a990ea8f8617bbc868baa23e | 157 | py | Python | setup.py | messa/bloom | ce975471d0fabac436bcbd3040d22c6e5a97e47c | ["MIT"] | 1 | 2021-03-14T13:54:42.000Z | 2021-03-14T13:54:42.000Z | setup.py | messa/bloom | ce975471d0fabac436bcbd3040d22c6e5a97e47c | ["MIT"] | 1 | 2021-03-15T09:02:24.000Z | 2021-03-16T07:41:46.000Z | setup.py | messa/bloom | ce975471d0fabac436bcbd3040d22c6e5a97e47c | ["MIT"] | null | null | null |
#!/usr/bin/env python3
from setuptools import setup, Extension
setup(
ext_modules=[
Extension('bloom._hashc', ['bloom/_hashcmodule.c'])
])
| 17.444444 | 59 | 0.66242 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 58 | 0.369427 |
27be070f86ae724315deda03de85e57e9b0b008d | 5,645 | py | Python | misc/util.py | winder/indexer | 18f48f026f022cdeef92dcac558d3900d6ea798d | ["MIT"] | 87 | 2020-08-20T19:14:02.000Z | 2022-03-30T21:31:59.000Z | misc/util.py | hassoon1986/indexer | 0a58e9a78ba7684c7f4cfb4fe7cb24b3d4622d9b | ["MIT"] | 615 | 2020-06-03T14:13:29.000Z | 2022-03-31T12:08:38.000Z | misc/util.py | hassoon1986/indexer | 0a58e9a78ba7684c7f4cfb4fe7cb24b3d4622d9b | ["MIT"] | 58 | 2020-06-03T21:33:48.000Z | 2022-03-26T15:39:50.000Z |
#!/usr/bin/env python3
import atexit
import logging
import os
import random
import subprocess
import sys
import time
import msgpack
logger = logging.getLogger(__name__)
def maybedecode(x):
if hasattr(x, 'decode'):
return x.decode()
return x
def mloads(x):
return msgpack.loads(x, strict_map_key=False, raw=True)
def unmsgpack(ob):
"convert dict from msgpack.loads() with byte string keys to text string keys"
if isinstance(ob, dict):
od = {}
for k,v in ob.items():
k = maybedecode(k)
okv = False
if (not okv) and (k == 'note'):
try:
v = unmsgpack(mloads(v))
okv = True
except:
pass
if (not okv) and k in ('type', 'note'):
try:
v = v.decode()
okv = True
except:
pass
if not okv:
v = unmsgpack(v)
od[k] = v
return od
if isinstance(ob, list):
return [unmsgpack(v) for v in ob]
#if isinstance(ob, bytes):
# return base64.b64encode(ob).decode()
return ob
def _getio(p, od, ed):
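    # Best-effort decode of a subprocess's captured stdout/stderr, falling back to
    # reading the pipes directly when communicate() produced nothing.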
if od is not None:
od = maybedecode(od)
elif p.stdout:
try:
od = maybedecode(p.stdout.read())
except:
            logger.error('subcommand out', exc_info=True)
if ed is not None:
ed = maybedecode(ed)
elif p.stderr:
try:
ed = maybedecode(p.stderr.read())
except:
            logger.error('subcommand err', exc_info=True)
return od, ed
def xrun(cmd, *args, **kwargs):
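    # Run a command with its output captured, logging the full stdout/stderr and
    # raising on timeout or a non-zero exit code.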
timeout = kwargs.pop('timeout', None)
kwargs['stdout'] = subprocess.PIPE
kwargs['stderr'] = subprocess.STDOUT
cmdr = ' '.join(map(repr,cmd))
try:
p = subprocess.Popen(cmd, *args, **kwargs)
except Exception as e:
logger.error('subprocess failed {}'.format(cmdr), exc_info=True)
raise
stdout_data, stderr_data = None, None
try:
if timeout:
stdout_data, stderr_data = p.communicate(timeout=timeout)
else:
stdout_data, stderr_data = p.communicate()
except subprocess.TimeoutExpired as te:
logger.error('subprocess timed out {}'.format(cmdr), exc_info=True)
stdout_data, stderr_data = _getio(p, stdout_data, stderr_data)
if stdout_data:
sys.stderr.write('output from {}:\n{}\n\n'.format(cmdr, stdout_data))
if stderr_data:
sys.stderr.write('stderr from {}:\n{}\n\n'.format(cmdr, stderr_data))
raise
except Exception as e:
logger.error('subprocess exception {}'.format(cmdr), exc_info=True)
stdout_data, stderr_data = _getio(p, stdout_data, stderr_data)
if stdout_data:
sys.stderr.write('output from {}:\n{}\n\n'.format(cmdr, stdout_data))
if stderr_data:
sys.stderr.write('stderr from {}:\n{}\n\n'.format(cmdr, stderr_data))
raise
if p.returncode != 0:
logger.error('cmd failed ({}) {}'.format(p.returncode, cmdr))
stdout_data, stderr_data = _getio(p, stdout_data, stderr_data)
if stdout_data:
sys.stderr.write('output from {}:\n{}\n\n'.format(cmdr, stdout_data))
if stderr_data:
sys.stderr.write('stderr from {}:\n{}\n\n'.format(cmdr, stderr_data))
raise Exception('error: cmd failed: {}'.format(cmdr))
if logger.isEnabledFor(logging.DEBUG):
logger.debug('cmd success: %s\n%s\n%s\n', cmdr, maybedecode(stdout_data), maybedecode(stderr_data))
def atexitrun(cmd, *args, **kwargs):
cargs = [cmd]+list(args)
atexit.register(xrun, *cargs, **kwargs)
def find_indexer(indexer_bin, exc=True):
if indexer_bin:
return indexer_bin
# manually search local build and PATH for algorand-indexer
path = ['cmd/algorand-indexer'] + os.getenv('PATH').split(':')
for pd in path:
ib = os.path.join(pd, 'algorand-indexer')
if os.path.exists(ib):
return ib
msg = 'could not find algorand-indexer. use --indexer-bin or PATH environment variable.'
if exc:
raise Exception(msg)
logger.error(msg)
return None
def ensure_test_db(connection_string, keep_temps=False):
if connection_string:
# use the passed db
return connection_string
# create a temporary database
dbname = 'e2eindex_{}_{}'.format(int(time.time()), random.randrange(1000))
xrun(['dropdb', '--if-exists', dbname], timeout=5)
xrun(['createdb', dbname], timeout=5)
if not keep_temps:
atexitrun(['dropdb', '--if-exists', dbname], timeout=5)
else:
logger.info("leaving db %r", dbname)
return 'dbname={} sslmode=disable'.format(dbname)
# whoever calls this will need to import boto and get the s3 client
def firstFromS3Prefix(s3, bucket, prefix, desired_filename, outdir=None, outpath=None):
response = s3.list_objects_v2(Bucket=bucket, Prefix=prefix, MaxKeys=10)
if (not response.get('KeyCount')) or ('Contents' not in response):
raise Exception('nothing found in s3://{}/{}'.format(bucket, prefix))
for x in response['Contents']:
path = x['Key']
_, fname = path.rsplit('/', 1)
if fname == desired_filename:
if outpath is None:
if outdir is None:
outdir = '.'
outpath = os.path.join(outdir, desired_filename)
logger.info('s3://%s/%s -> %s', bucket, x['Key'], outpath)
s3.download_file(bucket, x['Key'], outpath)
return
| 34.845679 | 107 | 0.589725 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,055 | 0.186891 |
27c0f66f70a59c9a16bcacfd772c973fa3bad2e9 | 11,093 | py | Python | coconut/_pyparsing.py | evhub/coconut | 27a4af9dc06667870f736f20c862930001b8cbb2 | ["Apache-2.0"] | 3,624 | 2015-02-22T07:06:18.000Z | 2022-03-31T03:38:00.000Z | coconut/_pyparsing.py | evhub/coconut | 27a4af9dc06667870f736f20c862930001b8cbb2 | ["Apache-2.0"] | 627 | 2015-03-31T01:18:53.000Z | 2022-03-28T07:48:31.000Z | coconut/_pyparsing.py | evhub/coconut | 27a4af9dc06667870f736f20c862930001b8cbb2 | ["Apache-2.0"] | 162 | 2016-03-02T05:22:55.000Z | 2022-03-31T23:42:55.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------------------------------------------------
# INFO:
# -----------------------------------------------------------------------------------------------------------------------
"""
Author: Evan Hubinger
License: Apache 2.0
Description: Wrapper around PyParsing that selects the best available implementation.
"""
# -----------------------------------------------------------------------------------------------------------------------
# IMPORTS:
# -----------------------------------------------------------------------------------------------------------------------
from __future__ import print_function, absolute_import, unicode_literals, division
from coconut.root import * # NOQA
import os
import sys
import traceback
import functools
import inspect
from warnings import warn
from collections import defaultdict
from coconut.constants import (
PURE_PYTHON,
PYPY,
use_fast_pyparsing_reprs,
use_packrat_parser,
packrat_cache_size,
default_whitespace_chars,
varchars,
min_versions,
pure_python_env_var,
enable_pyparsing_warnings,
use_left_recursion_if_available,
)
from coconut.util import get_clock_time # NOQA
from coconut.util import (
ver_str_to_tuple,
ver_tuple_to_str,
get_next_version,
)
# warning: do not name this file cPyparsing or pyparsing or it might collide with the following imports
try:
if PURE_PYTHON:
raise ImportError("skipping cPyparsing check due to " + pure_python_env_var + " = " + os.environ.get(pure_python_env_var, ""))
import cPyparsing as _pyparsing
from cPyparsing import * # NOQA
from cPyparsing import __version__
PYPARSING_PACKAGE = "cPyparsing"
PYPARSING_INFO = "Cython cPyparsing v" + __version__
except ImportError:
try:
import pyparsing as _pyparsing
from pyparsing import * # NOQA
from pyparsing import __version__
PYPARSING_PACKAGE = "pyparsing"
PYPARSING_INFO = "Python pyparsing v" + __version__
except ImportError:
traceback.print_exc()
__version__ = None
PYPARSING_PACKAGE = "cPyparsing"
PYPARSING_INFO = None
# -----------------------------------------------------------------------------------------------------------------------
# VERSION CHECKING:
# -----------------------------------------------------------------------------------------------------------------------
min_ver = min(min_versions["pyparsing"], min_versions["cPyparsing"][:3]) # inclusive
max_ver = get_next_version(max(min_versions["pyparsing"], min_versions["cPyparsing"][:3])) # exclusive
cur_ver = None if __version__ is None else ver_str_to_tuple(__version__)
if cur_ver is None or cur_ver < min_ver:
min_ver_str = ver_tuple_to_str(min_ver)
raise ImportError(
"Coconut requires pyparsing/cPyparsing version >= " + min_ver_str
+ ("; got " + PYPARSING_INFO if PYPARSING_INFO is not None else "")
+ " (run '{python} -m pip install --upgrade {package}' to fix)".format(python=sys.executable, package=PYPARSING_PACKAGE),
)
elif cur_ver >= max_ver:
max_ver_str = ver_tuple_to_str(max_ver)
warn(
"This version of Coconut was built for pyparsing/cPyparsing versions < " + max_ver_str
+ ("; got " + PYPARSING_INFO if PYPARSING_INFO is not None else "")
+ " (run '{python} -m pip install {package}<{max_ver}' to fix)".format(python=sys.executable, package=PYPARSING_PACKAGE, max_ver=max_ver_str),
)
# -----------------------------------------------------------------------------------------------------------------------
# SETUP:
# -----------------------------------------------------------------------------------------------------------------------
if cur_ver >= (3,):
MODERN_PYPARSING = True
_trim_arity = _pyparsing.core._trim_arity
_ParseResultsWithOffset = _pyparsing.core._ParseResultsWithOffset
else:
MODERN_PYPARSING = False
_trim_arity = _pyparsing._trim_arity
_ParseResultsWithOffset = _pyparsing._ParseResultsWithOffset
USE_COMPUTATION_GRAPH = (
not MODERN_PYPARSING # not yet supported
and not PYPY # experimentally determined
)
if enable_pyparsing_warnings:
if MODERN_PYPARSING:
_pyparsing.enable_all_warnings()
else:
_pyparsing._enable_all_warnings()
_pyparsing.__diag__.warn_name_set_on_empty_Forward = False
if MODERN_PYPARSING and use_left_recursion_if_available:
ParserElement.enable_left_recursion()
elif use_packrat_parser:
ParserElement.enablePackrat(packrat_cache_size)
ParserElement.setDefaultWhitespaceChars(default_whitespace_chars)
Keyword.setDefaultKeywordChars(varchars)
# -----------------------------------------------------------------------------------------------------------------------
# FAST REPRS:
# -----------------------------------------------------------------------------------------------------------------------
if PY2:
def fast_repr(cls):
"""A very simple, fast __repr__/__str__ implementation."""
return "<" + cls.__name__ + ">"
else:
fast_repr = object.__repr__
_old_pyparsing_reprs = []
def set_fast_pyparsing_reprs():
"""Make pyparsing much faster by preventing it from computing expensive nested string representations."""
for obj in vars(_pyparsing).values():
try:
if issubclass(obj, ParserElement):
_old_pyparsing_reprs.append((obj, (obj.__repr__, obj.__str__)))
obj.__repr__ = functools.partial(fast_repr, obj)
obj.__str__ = functools.partial(fast_repr, obj)
except TypeError:
pass
def unset_fast_pyparsing_reprs():
"""Restore pyparsing's default string representations for ease of debugging."""
for obj, (repr_method, str_method) in _old_pyparsing_reprs:
obj.__repr__ = repr_method
obj.__str__ = str_method
if use_fast_pyparsing_reprs:
set_fast_pyparsing_reprs()
# -----------------------------------------------------------------------------------------------------------------------
# PROFILING:
# -----------------------------------------------------------------------------------------------------------------------
_timing_info = [None] # in list to allow reassignment
class _timing_sentinel(object):
pass
def add_timing_to_method(cls, method_name, method):
"""Add timing collection to the given method.
It's a monstrosity, but it's only used for profiling."""
from coconut.terminal import internal_assert # hide to avoid circular import
args, varargs, keywords, defaults = inspect.getargspec(method)
internal_assert(args[:1] == ["self"], "cannot add timing to method", method_name)
if not defaults:
defaults = []
num_undefaulted_args = len(args) - len(defaults)
def_args = []
call_args = []
fix_arg_defaults = []
defaults_dict = {}
for i, arg in enumerate(args):
if i >= num_undefaulted_args:
default = defaults[i - num_undefaulted_args]
def_args.append(arg + "=_timing_sentinel")
defaults_dict[arg] = default
fix_arg_defaults.append(
"""
if {arg} is _timing_sentinel:
{arg} = _exec_dict["defaults_dict"]["{arg}"]
""".strip("\n").format(
arg=arg,
),
)
else:
def_args.append(arg)
call_args.append(arg)
if varargs:
def_args.append("*" + varargs)
call_args.append("*" + varargs)
if keywords:
def_args.append("**" + keywords)
call_args.append("**" + keywords)
new_method_name = "new_" + method_name + "_func"
_exec_dict = globals().copy()
_exec_dict.update(locals())
new_method_code = """
def {new_method_name}({def_args}):
{fix_arg_defaults}
_all_args = (lambda *args, **kwargs: args + tuple(kwargs.values()))({call_args})
_exec_dict["internal_assert"](not any(_arg is _timing_sentinel for _arg in _all_args), "error handling arguments in timed method {new_method_name}({def_args}); got", _all_args)
_start_time = _exec_dict["get_clock_time"]()
try:
return _exec_dict["method"]({call_args})
finally:
_timing_info[0][str(self)] += _exec_dict["get_clock_time"]() - _start_time
{new_method_name}._timed = True
""".format(
fix_arg_defaults="\n".join(fix_arg_defaults),
new_method_name=new_method_name,
def_args=", ".join(def_args),
call_args=", ".join(call_args),
)
exec(new_method_code, _exec_dict)
setattr(cls, method_name, _exec_dict[new_method_name])
return True
def collect_timing_info():
"""Modifies pyparsing elements to time how long they're executed for.
It's a monstrosity, but it's only used for profiling."""
from coconut.terminal import logger # hide to avoid circular imports
logger.log("adding timing to pyparsing elements:")
_timing_info[0] = defaultdict(float)
for obj in vars(_pyparsing).values():
if isinstance(obj, type) and issubclass(obj, ParserElement):
added_timing = False
for attr_name in dir(obj):
attr = getattr(obj, attr_name)
if (
callable(attr)
and not isinstance(attr, ParserElement)
and not getattr(attr, "_timed", False)
and attr_name not in (
"__getattribute__",
"__setattribute__",
"__init_subclass__",
"__subclasshook__",
"__class__",
"__setattr__",
"__getattr__",
"__new__",
"__init__",
"__str__",
"__repr__",
"__hash__",
"__eq__",
"_trim_traceback",
"_ErrorStop",
"enablePackrat",
"inlineLiteralsUsing",
"setDefaultWhitespaceChars",
"setDefaultKeywordChars",
"resetCache",
)
):
added_timing |= add_timing_to_method(obj, attr_name, attr)
if added_timing:
logger.log("\tadded timing to", obj)
def print_timing_info():
"""Print timing_info collected by collect_timing_info()."""
print(
"""
=====================================
Timing info:
(timed {num} total pyparsing objects)
=====================================
""".rstrip().format(
num=len(_timing_info[0]),
),
)
sorted_timing_info = sorted(_timing_info[0].items(), key=lambda kv: kv[1])
for method_name, total_time in sorted_timing_info:
print("{method_name}:\t{total_time}".format(method_name=method_name, total_time=total_time))
| 35.554487 | 180 | 0.554945 | 40 | 0.003606 | 0 | 0 | 0 | 0 | 0 | 0 | 4,251 | 0.383215 |
27c1c2dd0bdd326bf942be3440f758392e7db45f | 4,948 | py | Python | tests/test_explicit_hll.py | aholyoke/python-hll | 30793aeb18103600fce0f3ad0b0c9e99e8b756fe | ["MIT"] | 13 | 2019-11-19T07:38:46.000Z | 2022-02-11T13:23:25.000Z | tests/test_explicit_hll.py | aholyoke/python-hll | 30793aeb18103600fce0f3ad0b0c9e99e8b756fe | ["MIT"] | 4 | 2019-12-12T04:19:34.000Z | 2021-06-09T17:52:52.000Z | tests/test_explicit_hll.py | aholyoke/python-hll | 30793aeb18103600fce0f3ad0b0c9e99e8b756fe | ["MIT"] | 6 | 2019-11-06T21:33:25.000Z | 2022-02-21T14:43:35.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
from python_hll.hlltype import HLLType
from python_hll.hll import HLL
from python_hll.serialization import SerializationUtil
"""Unit tests for BitVector."""
def test_add_basic():
"""
Tests basic set semantics of ``HLL.add_raw()``.
"""
# Adding a single positive value to an empty set should work.
hll = new_hll(128) # arbitrary
hll.add_raw(1) # positive
assert hll.cardinality() == 1
# Adding a single negative value to an empty set should work.
hll = new_hll(128) # arbitrary
hll.add_raw(-1) # negative
assert hll.cardinality() == 1
# Adding a duplicate value to a set should be a no-op.
hll = new_hll(128) # arbitrary
hll.add_raw(1) # positive
hll.add_raw(1) # dupe
assert hll.cardinality() == 1
def test_union():
"""
Tests ``HLL.union()``.
"""
# Unioning two distinct sets should work
hll_a = new_hll(128) # arbitrary
hll_b = new_hll(128) # arbitrary
hll_a.add_raw(1)
hll_a.add_raw(2)
hll_b.add_raw(3)
hll_a.union(hll_b)
assert hll_a.cardinality() == 3
# Unioning two sets whose union doesn't exceed the cardinality cap should not promote
hll_a = new_hll(128) # arbitrary
hll_b = new_hll(128) # arbitrary
hll_a.add_raw(1)
hll_a.add_raw(2)
hll_b.add_raw(1)
hll_a.union(hll_b)
assert hll_a.cardinality() == 2
assert hll_a.get_type() == HLLType.EXPLICIT
# Unioning two sets whose union exceeds the cardinality cap should promote
hll_a = new_hll(128) # arbitrary
hll_b = new_hll(128) # arbitrary
for i in range(0, 128):
hll_a.add_raw(i)
hll_b.add_raw(i+128)
hll_a.union(hll_b)
assert hll_a.get_type() == HLLType.SPARSE
def test_clear():
"""
Tests ``HLL.clear()``
"""
hll = new_hll(128) # arbitrary
hll.add_raw(1)
hll.clear()
assert hll.cardinality() == 0
def test_to_from_bytes():
"""
Tests ``HLL.to_bytes() and ``HLL.from_bytes().
"""
schema_version = SerializationUtil.DEFAULT_SCHEMA_VERSION
type = HLLType.EXPLICIT
padding = schema_version.padding_bytes(type)
bytes_per_word = 8
# Should work on an empty set
hll = new_hll(128)
bytes = hll.to_bytes(schema_version)
assert len(bytes) == padding # no elements, just padding
in_hll = HLL.from_bytes(bytes)
assert_elements_equal(hll, in_hll)
# Should work on a partially filled set
hll = new_hll(128)
for i in range(0, 3):
hll.add_raw(i)
bytes = hll.to_bytes(schema_version)
assert len(bytes) == padding + bytes_per_word * 3
in_hll = HLL.from_bytes(bytes)
assert_elements_equal(hll, in_hll)
# Should work on a full set
explicit_threshold = 128
hll = new_hll(explicit_threshold)
for i in range(0, explicit_threshold):
hll.add_raw(27 + i)
bytes = hll.to_bytes(schema_version)
assert len(bytes) == padding + bytes_per_word * explicit_threshold
in_hll = HLL.from_bytes(bytes)
assert_elements_equal(hll, in_hll)
def test_random_values():
"""
Tests correctness against `set()`.
"""
explicit_threshold = 4096
canonical = set()
hll = new_hll(explicit_threshold)
seed = 1 # constant so results are reproducible
random.seed(seed)
max_java_long = 9223372036854775807
for i in range(0, explicit_threshold):
random_long = random.randint(1, max_java_long)
canonical.add(random_long)
hll.add_raw(random_long)
canonical_cardinality = len(canonical)
assert hll.cardinality() == canonical_cardinality
def test_promotion():
"""
Tests promotion to ``HLLType.SPARSE`` and ``HLLType.FULL``.
"""
explicit_threshold = 128
hll = HLL.create_for_testing(11, 5, explicit_threshold, 256, HLLType.EXPLICIT)
for i in range(0, explicit_threshold + 1):
hll.add_raw(i)
assert hll.get_type() == HLLType.SPARSE
hll = HLL(11, 5, 4, False, HLLType.EXPLICIT) # expthresh=4 => explicit_threshold=8
for i in range(0, 9):
hll.add_raw(i)
assert hll.get_type() == HLLType.FULL
# ------------------------------------------------------------
# assertion helpers
def assert_elements_equal(hll_a, hll_b):
"""
Asserts that values in both sets are exactly equal.
"""
assert hll_a._explicit_storage == hll_b._explicit_storage
def new_hll(explicit_threshold):
"""
Builds a ``HLLType.EXPLICIT`` ``HLL`` instance with the specified
explicit threshold.
:param explicit_threshold: explicit threshold to use for the constructed
``HLL``. This must be greater than zero.
:type explicit_threshold: int
:returns: A default-sized ``HLLType.EXPLICIT`` empty ``HLL`` instance. This
will never be ``None``.
:rtype: HLL
"""
return HLL.create_for_testing(11, 5, explicit_threshold, 256, HLLType.EXPLICIT)
| 27.337017 | 89 | 0.653597 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,669 | 0.337308 |
27c4d9ac4a8dbf1e90e1e11bbe903ce9523aee39 | 4,792 | py | Python | weixin/HelloMyDear.py | FantasyZsp/py-utils | 4ccebd298780508d58400d2d8967f59ca7c6603d | ["Apache-2.0"] | null | null | null | weixin/HelloMyDear.py | FantasyZsp/py-utils | 4ccebd298780508d58400d2d8967f59ca7c6603d | ["Apache-2.0"] | null | null | null | weixin/HelloMyDear.py | FantasyZsp/py-utils | 4ccebd298780508d58400d2d8967f59ca7c6603d | ["Apache-2.0"] | null | null | null |
from weixin.utils.WeiXinUtils import *
# 5. Main function main()
def hello(wxNames, atHours, atMinutes, cityCode):
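    # Log in to WeChat, wait until the requested hour:minute, then send the friend
    # today's date, a daily quote and a weather report for the given city code.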
names = wxNames
hours = atHours
minutes = atMinutes
number = cityCode
g = getYMD()
g1 = get_iciba_everyday_chicken_soup()
    # Weather API URL; number is the city code
    name = 'http://t.weather.sojson.com/api/weather/city/' + number
    # Pass the URL into get_sentence
g2 = get_sentence(name)
times = g2['cityInfo']
for key, name in times.items():
city = times['city']
parent = times['parent']
    # Nested dictionaries
time1 = g2['data']
for key, name in time1.items():
shidu = time1['shidu']
pm25 = time1['pm25']
quality = time1['quality']
ganmao = time1['ganmao']
time1 = g2['data']
time2 = time1.get('forecast', '不存在该键')
time2 = time2[0]
itchat.auto_login(hotReload=True)
for key, name in time2.items():
high = time2['high']
low = time2['low']
fx = time2['fx']
fl = time2['fl']
type = time2['type']
notice = time2['type']
    # Drive the WeChat bot
    users = itchat.search_friends(names)  # find the user
userName = users[0]['UserName']
while True:
t = datetime.datetime.now()
t1 = t.strftime('%Y-%m-%d %H:%M:%S')
hour = t.hour
minute = t.minute
second = t.second
print('%d:%d:%d' % (hour, minute, second))
if hour == hours and minute == minutes:
itchat.send_msg("%s" % g, toUserName=userName)
itchat.send_msg('%s' % g1, toUserName=userName)
itchat.send_msg('所在省份:%s\n'
'所在城市:%s\n'
'今日最高温度:%s\n '
'今日最低温度:%s\n'
'风向:%s\n '
'风力:%s\n'
'湿度:%s \n'
'PM2.5: %s\n'
'空气质量:%s \n'
'易感指数:%s\n'
'天气:%s - %s ' % (parent, city, high, low, fx, fl, shidu, pm25,
quality, ganmao, type, notice), toUserName=userName)
break
else:
            time.sleep(5)  # wait 5 seconds
continue
itchat.run()
time.sleep(86400)
# 5. Main function main()
if __name__ == '__main__':
# names = input("请输入你要发送人的微信名:")
# hours = int(input("请输入几点发送消息:"))
# minutes = int(input("请输入几分发送消息:"))
# number = input("输入所在城市的编号:")
# hello(names, hours, minutes, number)
names = input("请输入你要发送人的微信名:")
hours = int(input("请输入几点发送消息:"))
minutes = int(input("请输入几分发送消息:"))
number = input("输入所在城市的编号:")
print(names)
print(hours)
print(minutes)
print(number)
g = getYMD()
g1 = get_iciba_everyday_chicken_soup()
    # Weather API URL; number is the city code
    name = 'http://t.weather.sojson.com/api/weather/city/' + number
    # Pass the URL into get_sentence
g2 = get_sentence(name)
times = g2['cityInfo']
for key, name in times.items():
city = times['city']
parent = times['parent']
    # Nested dictionaries
time1 = g2['data']
for key, name in time1.items():
shidu = time1['shidu']
pm25 = time1['pm25']
quality = time1['quality']
ganmao = time1['ganmao']
time1 = g2['data']
time2 = time1.get('forecast', '不存在该键')
time2 = time2[0]
itchat.auto_login(hotReload=True)
for key, name in time2.items():
high = time2['high']
low = time2['low']
fx = time2['fx']
fl = time2['fl']
type = time2['type']
notice = time2['type']
    # Drive the WeChat bot
    users = itchat.search_friends(names)  # find the user
userName = users[0]['UserName']
while True:
t = datetime.datetime.now()
t1 = t.strftime('%Y-%m-%d %H:%M:%S')
hour = t.hour
minute = t.minute
second = t.second
print('%d:%d:%d' % (hour, minute, second))
if hour == hours and minute == minutes:
itchat.send_msg("%s" % g, toUserName=userName)
itchat.send_msg('%s' % g1, toUserName=userName)
itchat.send_msg('所在省份:%s\n'
'所在城市:%s\n'
'今日最高温度:%s\n '
'今日最低温度:%s\n'
'风向:%s\n '
'风力:%s\n'
'湿度:%s \n'
'PM2.5: %s\n'
'空气质量:%s \n'
'易感指数:%s\n'
'天气:%s - %s ' % (parent, city, high, low, fx, fl, shidu, pm25,
quality, ganmao, type, notice), toUserName=userName)
break
else:
            time.sleep(5)  # wait 5 seconds
continue
itchat.run()
time.sleep(86400)
| 31.320261 | 97 | 0.47788 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,611 | 0.302365 |
27c607ecb317226ea26c46b6beec6b1d9d516ae8 | 7,892 | py | Python | Medicine-manag-django/pharma/views.py | DanielDDHM/my-projects-py | f6c3af7f6cd61c69234d25c956027e8c7e626470 | ["MIT"] | null | null | null | Medicine-manag-django/pharma/views.py | DanielDDHM/my-projects-py | f6c3af7f6cd61c69234d25c956027e8c7e626470 | ["MIT"] | null | null | null | Medicine-manag-django/pharma/views.py | DanielDDHM/my-projects-py | f6c3af7f6cd61c69234d25c956027e8c7e626470 | ["MIT"] | null | null | null |
from .models import Dealer
from .models import Employee
from .models import Customer
from .models import Medicine
from .models import Purchase
from django.shortcuts import render
from django.db import IntegrityError
def home(request):
return render(request, 'pharma/index.html')
def dealerform(request):
dict = {'add': True, }
return render(request, 'pharma/dealer.html', dict)
def dealerforminsert(request):
try:
dealer = Dealer()
dealer.dname = request.POST['dname']
dealer.address = request.POST['address']
dealer.phn_no = request.POST['pno']
dealer.email = request.POST['email']
dealer.save()
except IntegrityError:
return render(request, "pharma/new.html")
return render(request, 'pharma/index.html')
def dealerformupdate(request, foo):
try:
dealer = Dealer.objects.get(pk=foo)
dealer.dname = request.POST['dname']
dealer.address = request.POST['address']
dealer.phn_no = request.POST['pno']
dealer.email = request.POST['email']
dealer.save()
except IntegrityError:
return render(request, "pharma/new.html")
return render(request, 'pharma/index.html')
def dealerformview(request, foo):
dealer = Dealer.objects.get(pk=foo)
dict = {'dealer': dealer}
return render(request, 'pharma/dealer.html', dict)
def dealerformdelete(request, foo):
dealer = Dealer.objects.get(pk=foo)
dealer.delete()
return render(request, 'pharma/index.html')
def dealertable(request):
dealer = Dealer.objects.all()
dict = {"dealer": dealer}
return render(request, 'pharma/dealertable.html', dict)
def empform(request):
dict = {'add': True}
return render(request, 'pharma/emp.html', dict)
def empforminsert(request):
try:
emp = Employee()
emp.e_id = request.POST['eid']
emp.fname = request.POST['fname']
emp.lname = request.POST['lname']
emp.address = request.POST['address']
emp.phn_no = request.POST['pno']
emp.email = request.POST['email']
emp.sal = request.POST['sal']
emp.save()
except IntegrityError:
return render(request, "pharma/new.html")
return render(request, 'pharma/index.html')
def empformupdate(request, foo):
try:
emp = Employee.objects.get(pk=foo)
emp.e_id = request.POST['eid']
emp.fname = request.POST['fname']
emp.lname = request.POST['lname']
emp.address = request.POST['address']
emp.phn_no = request.POST['pno']
emp.email = request.POST['email']
emp.sal = request.POST['sal']
emp.save()
except IntegrityError:
return render(request, "pharma/new.html")
return render(request, 'pharma/index.html')
def empformview(request, foo):
emp = Employee.objects.get(pk=foo)
dict = {'emp': emp}
return render(request, 'pharma/emp.html', dict)
def empformdelete(request, foo):
emp = Employee.objects.get(pk=foo)
emp.delete()
return render(request, 'pharma/index.html')
def emptable(request):
emp = Employee.objects.all()
dict = {"emp": emp}
return render(request, 'pharma/emptable.html', dict)
def custform(request):
dict = {'add': True}
return render(request, 'pharma/cust.html', dict)
def custforminsert(request):
try:
cust = Customer()
cust.fname = request.POST['fname']
cust.lname = request.POST['lname']
cust.address = request.POST['address']
cust.phn_no = request.POST['pno']
cust.email = request.POST['email']
cust.save()
except IntegrityError:
return render(request, "pharma/new.html")
return render(request, 'pharma/index.html')
def custformupdate(request, foo):
try:
cust = Customer.objects.get(pk=foo)
cust.fname = request.POST['fname']
cust.lname = request.POST['lname']
cust.address = request.POST['address']
cust.phn_no = request.POST['pno']
cust.email = request.POST['email']
cust.save()
except IntegrityError:
return render(request, "pharma/new.html")
return render(request, 'pharma/index.html')
def custformview(request, foo):
cust = Customer.objects.get(pk=foo)
dict = {'cust': cust}
return render(request, 'pharma/cust.html', dict)
def custformdelete(request, foo):
cust = Customer.objects.get(pk=foo)
cust.delete()
return render(request, 'pharma/index.html')
def custtable(request):
cust = Customer.objects.all()
dict = {"cust": cust}
return render(request, 'pharma/custtable.html', dict)
def medform(request):
dict = {'add': True}
return render(request, 'pharma/med.html', dict)
def medforminsert(request):
try:
med = Medicine()
med.m_id = request.POST['mid']
med.mname = request.POST['mname']
med.dname = request.POST['dname']
med.desc = request.POST['desc']
med.price = request.POST['price']
med.stock = request.POST['stock']
med.save()
except IntegrityError:
return render(request, "pharma/new.html")
return render(request, 'pharma/index.html')
def medformupdate(request, foo):
try:
med = Medicine.objects.get(pk=foo)
med.m_id = request.POST['mid']
med.mname = request.POST['mname']
med.dname = request.POST['dname']
med.desc = request.POST['desc']
med.price = request.POST['price']
med.stock = request.POST['stock']
med.save()
except IntegrityError:
return render(request, "pharma/new.html")
return render(request, 'pharma/index.html')
def medformview(request, foo):
med = Medicine.objects.get(pk=foo)
dict = {'med': med}
return render(request, 'pharma/med.html', dict)
def medformdelete(request, foo):
med = Medicine.objects.get(pk=foo)
med.delete()
return render(request, 'pharma/index.html')
def medtable(request):
med = Medicine.objects.all()
dict = {"med": med}
return render(request, 'pharma/medtable.html', dict)
def purchaseform(request):
dict = {'add': True}
return render(request, 'pharma/purchase.html', dict)
def purchaseforminsert(request):
try:
purchase = Purchase()
purchase.pname = request.POST['pname']
purchase.fname = request.POST['fname']
purchase.lname = request.POST['lname']
purchase.qty = request.POST['qty']
purchase.phn_no = request.POST['pno']
purchase.price = request.POST['price']
a = (int(purchase.price)) * (int(purchase.qty))
purchase.total = a
purchase.save()
except IntegrityError:
return render(request, "pharma/new.html")
return render(request, 'pharma/index.html')
def purchaseformupdate(request, foo):
try:
purchase = Purchase.objects.get(pk=foo)
purchase.pname = request.POST['pname']
purchase.fname = request.POST['fname']
purchase.lname = request.POST['lname']
purchase.qty = request.POST['qty']
purchase.phn_no = request.POST['pno']
purchase.price = request.POST['price']
a = (int(purchase.price)) * (int(purchase.qty))
purchase.total = a
purchase.save()
except IntegrityError:
return render(request, "pharma/new.html")
return render(request, 'pharma/index.html')
def purchaseformview(request, foo):
purchase = Purchase.objects.get(pk=foo)
dict = {'purchase': purchase}
return render(request, 'pharma/purchase.html', dict)
def purchaseformdelete(request, foo):
purchase = Purchase.objects.get(pk=foo)
purchase.delete()
return render(request, 'pharma/index.html')
def purchasetable(request):
purchase = Purchase.objects.all()
dict = {"purchase": purchase}
return render(request, 'pharma/purchasetable.html', dict)
| 28.490975 | 61 | 0.637988 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,244 | 0.157628 |
27c811e47a423871511471f8e6a47527924900eb | 202 | py | Python | resume/display/views.py | Varun789/Profile | 990818d233ac0279ef4d55641e1e284850bdbfb2 | [
"BSD-3-Clause"
]
| null | null | null | resume/display/views.py | Varun789/Profile | 990818d233ac0279ef4d55641e1e284850bdbfb2 | [
"BSD-3-Clause"
]
| null | null | null | resume/display/views.py | Varun789/Profile | 990818d233ac0279ef4d55641e1e284850bdbfb2 | [
"BSD-3-Clause"
]
| null | null | null | from django.shortcuts import render
from .models import Profile
# Create your views here.
def home(request):
profile=Profile.objects
return render(request,'home.html',{'profile' : profile})
| 22.444444 | 60 | 0.737624 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.222772 |
27c8bd46f163cd7e8d07a019d3a50bfc0ca3baa3 | 458 | py | Python | libStash/books/migrations/0023_auto_20210320_1241.py | Dev-Rem/libStash | a364e9997c1c91b09f5db8a004deb4df305fa8cf | [
"MIT"
]
| null | null | null | libStash/books/migrations/0023_auto_20210320_1241.py | Dev-Rem/libStash | a364e9997c1c91b09f5db8a004deb4df305fa8cf | [
"MIT"
]
| null | null | null | libStash/books/migrations/0023_auto_20210320_1241.py | Dev-Rem/libStash | a364e9997c1c91b09f5db8a004deb4df305fa8cf | [
"MIT"
]
| null | null | null | # Generated by Django 3.0.4 on 2021-03-20 12:41
import phone_field.models
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('books', '0022_auto_20210215_1053'),
]
operations = [
migrations.AlterField(
model_name='warehouse',
name='phone',
field=phone_field.models.PhoneField(blank=True, max_length=31, verbose_name='Phone number'),
),
]
| 22.9 | 104 | 0.637555 | 347 | 0.757642 | 0 | 0 | 0 | 0 | 0 | 0 | 111 | 0.242358 |
27c9faa515cbfcb516d2a78da11f8590793a0cac | 6,912 | py | Python | src/PyTorch2ONNX/PyTorch2ONNX_Run_in_ONNX_RUNTIME.py | Yulv-git/Model_Inference_Deployment | 623f9955dfb60fe7af9d17415bfec58fc4c86c1b | [
"MIT"
]
| 4 | 2022-02-05T14:16:05.000Z | 2022-03-27T13:35:06.000Z | src/PyTorch2ONNX/PyTorch2ONNX_Run_in_ONNX_RUNTIME.py | Yulv-git/Model_Inference_Deployment | 623f9955dfb60fe7af9d17415bfec58fc4c86c1b | [
"MIT"
]
| null | null | null | src/PyTorch2ONNX/PyTorch2ONNX_Run_in_ONNX_RUNTIME.py | Yulv-git/Model_Inference_Deployment | 623f9955dfb60fe7af9d17415bfec58fc4c86c1b | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# coding=utf-8
'''
Author: Shuangchi He / Yulv
Email: [email protected]
Date: 2022-01-28 14:21:09
Motto: Entities should not be multiplied unnecessarily.
LastEditors: Shuangchi He
LastEditTime: 2022-04-06 11:40:23
FilePath: /Model_Inference_Deployment/src/PyTorch2ONNX/PyTorch2ONNX_Run_in_ONNX_RUNTIME.py
Description: Init from https://pytorch.org/tutorials/advanced/super_resolution_with_onnxruntime.html
Exporting a model from PyTorch to ONNX and running it using ONNX RUNTIME.
'''
import argparse
import os
import numpy as np
from PIL import Image
import torch.nn as nn
import torch.nn.init as init
import torch.utils.model_zoo as model_zoo
import torchvision.transforms as transforms
import onnx
import torch.onnx
import onnxruntime
from utils import check_dir, torchtensor2numpy
# Super Resolution model definition in PyTorch
class SuperResolutionNet(nn.Module):
def __init__(self, upscale_factor, inplace=False):
super(SuperResolutionNet, self).__init__()
self.relu = nn.ReLU(inplace=inplace)
self.conv1 = nn.Conv2d(1, 64, (5, 5), (1, 1), (2, 2))
self.conv2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))
self.conv3 = nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1))
self.conv4 = nn.Conv2d(32, upscale_factor ** 2, (3, 3), (1, 1), (1, 1))
self.pixel_shuffle = nn.PixelShuffle(upscale_factor)
self._initialize_weights()
def forward(self, x):
x = self.relu(self.conv1(x))
x = self.relu(self.conv2(x))
x = self.relu(self.conv3(x))
x = self.pixel_shuffle(self.conv4(x))
return x
def _initialize_weights(self):
init.orthogonal_(self.conv1.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv2.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv3.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv4.weight)
def PyTorch2ONNX(torch_model, dummy_input_to_model, onnx_save_dir, check_onnx_model=True):
''' Export the model. (PyTorch2ONNX) '''
torch.onnx.export(
torch_model, # model being run.
dummy_input_to_model, # model input (or a tuple for multiple inputs).
onnx_save_dir, # where to save the model (can be a file or file-like object).
export_params=True, # store the trained parameter weights inside the model file.
opset_version=10, # the ONNX version to export the model to.
do_constant_folding=True, # whether to execute constant folding for optimization.
input_names=['input'], # the model's input names.
output_names=['output'], # the model's output names.
dynamic_axes={ # variable length axes.
'input': {0: 'batch_size'},
'output': {0: 'batch_size'}})
if check_onnx_model: # Verify the model’s structure and confirm that the model has a valid schema.
onnx_model = onnx.load(onnx_save_dir)
onnx.checker.check_model(onnx_model)
def Verify_ONNX_in_ONNX_RUNTIME(onnx_dir, dummy_input_to_model, torch_out):
''' Verify ONNX Runtime and PyTorch are computing the same value for the model. '''
# Create an inference session.
ort_session = onnxruntime.InferenceSession(onnx_dir)
# Compute ONNX Runtime output prediction.
ort_inputs = {ort_session.get_inputs()[0].name: torchtensor2numpy(dummy_input_to_model)}
ort_outs = ort_session.run(None, ort_inputs)
# Compare ONNX Runtime and PyTorch results
np.testing.assert_allclose(torchtensor2numpy(torch_out), ort_outs[0], rtol=1e-03, atol=1e-05)
print("Exported model has been tested with ONNXRuntime, and the result looks good!")
def Run_ONNX_in_ONNX_RUNTIME(onnx_dir, img_path, img_save_path):
''' Running the model on an image using ONNX Runtime. '''
# Take the tensor representing the greyscale resized image.
img = Image.open(img_path)
resize = transforms.Resize([224, 224])
img = resize(img)
img_ycbcr = img.convert('YCbCr')
img_y, img_cb, img_cr = img_ycbcr.split()
to_tensor = transforms.ToTensor()
img_y = to_tensor(img_y)
img_y.unsqueeze_(0)
# Create an inference session.
ort_session = onnxruntime.InferenceSession(onnx_dir)
# Run the ONNX model in ONNX Runtime.
ort_inputs = {ort_session.get_inputs()[0].name: torchtensor2numpy(img_y)}
ort_outs = ort_session.run(None, ort_inputs)
img_out_y = ort_outs[0]
# Get the output image.
img_out_y = Image.fromarray(np.uint8((img_out_y[0] * 255.0).clip(0, 255)[0]), mode='L')
final_img = Image.merge(
"YCbCr", [
img_out_y,
img_cb.resize(img_out_y.size, Image.BICUBIC),
img_cr.resize(img_out_y.size, Image.BICUBIC),
]).convert("RGB")
# Save the image, compare this with the output image from mobile device.
final_img.save(img_save_path)
def main(args):
# Create the super-resolution model.
torch_model = SuperResolutionNet(upscale_factor=3)
# Initialize model with the pretrained weights.
def map_location(storage, loc): return storage
if torch.cuda.is_available():
map_location = None
torch_model.load_state_dict(model_zoo.load_url(
url='https://s3.amazonaws.com/pytorch/test_data/export/superres_epoch100-44c6958e.pth', map_location=map_location))
# Set the model to inference mode.
torch_model.eval()
# Input to the model.
batch_size = 1
dummy_input_to_model = torch.randn(batch_size, 1, 224, 224, requires_grad=True)
torch_out = torch_model(dummy_input_to_model)
# Export the model. (PyTorch2ONNX)
PyTorch2ONNX(torch_model, dummy_input_to_model, args.onnx_save_dir, args.check_onnx_model)
# Verify ONNX Runtime and PyTorch are computing the same value for the model.
Verify_ONNX_in_ONNX_RUNTIME(args.onnx_save_dir, dummy_input_to_model, torch_out)
# Running the model on an image using ONNX Runtime.
Run_ONNX_in_ONNX_RUNTIME(args.onnx_save_dir, args.img_path, args.img_save_path)
if __name__ == "__main__":
parse = argparse.ArgumentParser(description='PyTorch2ONNX_Run_in_ONNX_RUNTIME')
parse.add_argument('--img_path', type=str, default='{}/data/cat.jpg'.format(os.path.dirname(os.path.abspath(__file__))))
parse.add_argument('--check_onnx_model', type=bool, default=True)
parse.add_argument('--output_dir', type=str, default='{}/output'.format(os.path.dirname(os.path.abspath(__file__))))
args = parse.parse_args()
check_dir(args.output_dir)
args.onnx_save_dir = '{}/super_resolution.onnx'.format(args.output_dir)
args.img_save_path = '{}/cat_superres_with_ort.jpg'.format(args.output_dir)
main(args)
| 41.638554 | 124 | 0.684172 | 1,053 | 0.1523 | 0 | 0 | 0 | 0 | 0 | 0 | 2,230 | 0.322534 |
27cc788dc3d49e45198c96fa1cec36fea676e304 | 2,085 | py | Python | scripts/dataset.py | MarcGroef/deeplearning | d1ef095fbe0f7e9b56017808d976efe7502e6b81 | [
"MIT"
]
| null | null | null | scripts/dataset.py | MarcGroef/deeplearning | d1ef095fbe0f7e9b56017808d976efe7502e6b81 | [
"MIT"
]
| null | null | null | scripts/dataset.py | MarcGroef/deeplearning | d1ef095fbe0f7e9b56017808d976efe7502e6b81 | [
"MIT"
]
| null | null | null | import numpy as np
import tensorflow as tf
from sklearn.model_selection import StratifiedKFold
# Set dataset seed
np.random.seed(seed=842102)
class SingletonDecorator:
def __init__(self,klass):
self.klass = klass
self.instance = None
def __call__(self,*args,**kwds):
if self.instance == None:
self.instance = self.klass(*args,**kwds)
return self.instance
@SingletonDecorator
class Dataset(object):
def __init__(self, nSplits, split_index):
print("DATASET: You should only see this message once.")
(self._trainImages, self._trainLabels), (self._testImages, self._testLabels) = tf.keras.datasets.fashion_mnist.load_data()
# Cross validation
skf = StratifiedKFold(n_splits=nSplits)
indices_by_expIdx = []
for train_index, val_index in skf.split(self._trainImages, self._trainLabels):
indices_by_expIdx.append((train_index, val_index))
def convert_to_tf(data):
# reshape data to fit shape
data = data.astype('float32') / 255
return np.expand_dims(data, axis=-1)
def get_split(type, split_index):
# Get the training or validation data+labels, by given split
train, val = indices_by_expIdx[split_index]
indices = train
if type == 'validation':
indices = val
train_data = convert_to_tf(self._trainImages[indices])
train_labels = tf.keras.utils.to_categorical(self._trainLabels[indices])
return train_data, train_labels
self.trainImages = lambda : get_split('train', split_index)[0]
self.trainLabels = lambda : get_split('train', split_index)[1]
self.valImages = lambda : get_split('validation', split_index)[0]
self.valLabels = lambda : get_split('validation', split_index)[1]
self.testImages = lambda : convert_to_tf(self._testImages)
self.testLabels = lambda : tf.keras.utils.to_categorical(self._testLabels)
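    # Usage sketch (nSplits=5 and split_index=0 are illustrative values, not project defaults):
    #   data = Dataset(5, 0)
    #   x_tr, y_tr = data.trainImages(), data.trainLabels()   # fold-0 training split
    #   x_val, y_val = data.valImages(), data.valLabels()     # fold-0 validation split
    #   x_te, y_te = data.testImages(), data.testLabels()     # full Fashion-MNIST test set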
if __name__ == "__main__":
    # nSplits and split_index are required by __init__; the values below are only illustrative
    dataset = Dataset(nSplits=5, split_index=0)
| 35.948276 | 130 | 0.659472 | 1,865 | 0.894484 | 0 | 0 | 1,621 | 0.777458 | 0 | 0 | 241 | 0.115588 |
27cf1141da0cf1cbeff01d7fcd33d6536ff17b4d | 1,962 | py | Python | src/python/utils/image.py | Lamzigit/manifold_learning | f699fe4f25dbabdbc2dc9635c4e654b59806e17d | [
"MIT"
]
| 10 | 2017-06-14T08:04:44.000Z | 2021-07-06T07:13:16.000Z | src/python/utils/image.py | Lamzigit/manifold_learning | f699fe4f25dbabdbc2dc9635c4e654b59806e17d | [
"MIT"
]
| 1 | 2020-11-18T13:08:43.000Z | 2020-11-18T13:12:39.000Z | src/python/utils/image.py | Lamzigit/manifold_learning | f699fe4f25dbabdbc2dc9635c4e654b59806e17d | [
"MIT"
]
| 3 | 2017-06-14T08:04:53.000Z | 2019-11-18T13:21:15.000Z | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 15 14:03:52 2015
@author: jemanjohnson
"""
import numpy as np
import matplotlib.pyplot as plt
import os
import scipy.io
from sklearn import preprocessing
from time import time
from sklearn.preprocessing import MinMaxScaler
# Image Reshape Function
def img_as_array(img, gt=False):
"""Takes a N*M*D image
where:
* N - number of rows
* M - number of columns
* D - dimension of data
Returns:
--------
Image as an array with dimensions -
(N*M) by D
"""
if gt == False:
img_array = img.reshape(
img.shape[0]*img.shape[1], img.shape[2])
else:
img_array = img.reshape(
img.shape[0]*img.shape[1])
return img_array
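# Example (hypothetical sizes): a 100 x 50 x 20 cube flattens to (5000, 20), and a
# 100 x 50 ground-truth map flattens to (5000,):
#   X = img_as_array(np.zeros((100, 50, 20)))        # X.shape == (5000, 20)
#   y = img_as_array(np.zeros((100, 50)), gt=True)   # y.shape == (5000,)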
# Image Normalization function
def standardize(data):
"""
Quick function to standardize my data between 0 and 1
"""
return MinMaxScaler().fit_transform(data)
# Define HSI X and y Ground Truth pairing function
def img_gt_idx(img, img_gt, printinfo=False):
"""Takes a flattened image array and
extracts the image indices that correspond
to the ground truth that we have.
"""
# Find the non-zero entries
n_samples = (img_gt>0).sum()
# Find the classification labels
classlabels = np.unique(img_gt[img_gt>0])
# Create X matrix containing the features
X = img[img_gt>0,:]
# Create y matrix containing the labels
y = img_gt[img_gt>0]
# Print out useful information
if printinfo:
print('We have {n} ground-truth samples.'.format(
n=n_samples))
print('The training data includes {n} classes: {classes}'.format(
n=classlabels.size, classes=classlabels.T))
print('Dimensions of matrix X: {sizeX}'.format(sizeX=X.shape))
print('Dimensions of matrix y: {sizey}'.format(sizey=y.shape))
return X, y
# | 25.480519 | 73 | 0.618756 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 955 | 0.486748 |
27cf8bbca13f1461fc47b01d2bfbcfa734035197 | 1,767 | py | Python | test/finsignia/gae/controllers.py | finsignia/finsignia-gae | e8cf6a3855cb2844a3a7e113f26b600bd952d371 | [
"MIT"
]
| 1 | 2016-05-08T21:47:10.000Z | 2016-05-08T21:47:10.000Z | test/finsignia/gae/controllers.py | finsignia/finsignia-gae | e8cf6a3855cb2844a3a7e113f26b600bd952d371 | [
"MIT"
]
| null | null | null | test/finsignia/gae/controllers.py | finsignia/finsignia-gae | e8cf6a3855cb2844a3a7e113f26b600bd952d371 | [
"MIT"
]
| null | null | null | """
Tests for the finsignia.gae.controllers module.
"""
import os
import sys
from finsignia.gae import loader
import unittest
class ApplicationControllerTest(unittest.TestCase):
def setUp(self):
loader.load()
from finsignia.gae import controllers
class TestController(controllers.ApplicationController):
def template_path(self):
return os.path.join(os.path.dirname(__file__), 'templates')
self.controller = TestController()
def tearDown(self):
del self.controller
loader.unload()
def testRenderWithoutData(self):
text = self.controller.render(template='testRenderWithoutData.txt')
self.assertEqual("testRenderWithoutData", text.strip())
def testRenderWithData(self):
# I just watched 'The Reader' at the movie theatre this evening...thus the following name
name = 'Hannah Schmidtz'
text = self.controller.render(template='testRenderWithData.txt', data={'name': name})
self.assertEqual("%s did a very bad thing and then a very stupid thing" % name, text.strip())
class ResourceControllerTest(unittest.TestCase):
def setUp(self):
loader.load()
self._modelClass = object()
self._templateMappings = {}
self._objectFields = []
from finsignia.gae import controllers
    outer = self  # the nested controller reads the test case's fixtures
    class TestController(controllers.ResourceController):
      def modelClass(self): return outer._modelClass
      def templateMappings(self): return outer._templateMappings
      def objectFields(self): return outer._objectFields
self._testController = TestController()
def tearDown(self):
loader.unload()
# TODO: create tests for ResourceController
def test_cases():
return [ApplicationControllerTest, ResourceControllerTest]
if '__main__' == __name__:
unittest.main()
| 29.949153 | 97 | 0.730051 | 1,508 | 0.853424 | 0 | 0 | 0 | 0 | 0 | 0 | 359 | 0.203169 |
27d2e55ac297493daba610855afc860802f2e6c9 | 2,074 | py | Python | tests/test_visualize_poll.py | UBC-MDS/tweepypoll | 62ea4ea0ab381eecf8f24bd13da0a0cdfb18eaa6 | [
"MIT"
]
| null | null | null | tests/test_visualize_poll.py | UBC-MDS/tweepypoll | 62ea4ea0ab381eecf8f24bd13da0a0cdfb18eaa6 | [
"MIT"
]
| 30 | 2022-01-14T17:10:08.000Z | 2022-02-02T21:17:05.000Z | tests/test_visualize_poll.py | UBC-MDS/tweepypoll | 62ea4ea0ab381eecf8f24bd13da0a0cdfb18eaa6 | [
"MIT"
]
| 1 | 2022-01-14T16:10:11.000Z | 2022-01-14T16:10:11.000Z | from tweepypoll.tweepypoll import visualize_poll
import pandas as pd
import altair as alt
def test_visualize_poll():
"""Test visualize_poll on a dictionary input"""
sample_poll_obj = [
{
"text": "Important research!!!",
"duration": 1440,
"date": "2022-01-22T04:01:08.000Z",
"poll options": [
{"position": 1, "label": "Cookies", "votes": 29},
{"position": 2, "label": "Cupcakes", "votes": 5},
{"position": 3, "label": "Donuts", "votes": 24},
{"position": 4, "label": "Ice Cream", "votes": 25},
],
"user": "GregShahade",
"total": 83,
}
]
test_plot = visualize_poll(sample_poll_obj)
# test settings on altair plot
assert isinstance(
test_plot[0], alt.Chart
), "The type of the output mush be a altair chart"
assert (
test_plot[0].encoding.x.shorthand == "votes"
), "The votes should be mapped to the x axis"
assert (
test_plot[0].encoding.y.shorthand == "label"
), "The label should be mapped to the y axis"
assert test_plot[0].mark == "bar", "mark should be a bar"
assert (
test_plot[0].encoding.color.title == "Options"
), "Option should be the legend title"
# check if show_user=True, correct user name is printed
assert sample_poll_obj[0]["user"] == "GregShahade", "The user name is not correct."
# check if show_date=True, correct date and time is printed
assert (
pd.Timestamp(sample_poll_obj[0]["date"]).strftime("%Y-%m-%d %H:%M:%S")
== "2022-01-22 04:01:08"
), "Date and time is not correct."
# check if show_duration=True, correct duration is printed
assert sample_poll_obj[0]["duration"] / 60 == 24.0, "Duration is not correct."
# check if calculated total votes is equal to the input dict
df = pd.DataFrame(sample_poll_obj[0]["poll options"])
assert (
df["votes"].sum() == sample_poll_obj[0]["total"]
), "Total response is not correct."
| 35.152542 | 87 | 0.590646 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 993 | 0.478785 |
27d3c4c2fd777115e15bd0efa78ebba378f85ab4 | 5,629 | py | Python | quilt/avahiservice.py | rossdylan/quilt | 463e3cfe419410b41ee6945ab96d51692d46b036 | [
"MIT"
]
| 2 | 2015-07-23T03:49:42.000Z | 2015-11-05T18:49:53.000Z | quilt/avahiservice.py | rossdylan/quilt | 463e3cfe419410b41ee6945ab96d51692d46b036 | [
"MIT"
]
| null | null | null | quilt/avahiservice.py | rossdylan/quilt | 463e3cfe419410b41ee6945ab96d51692d46b036 | [
"MIT"
]
| null | null | null | """
Avahi Network Service Scripting
"""
import Queue
import threading
import avahi, dbus, gobject
from dbus import DBusException
from dbus.mainloop.glib import DBusGMainLoop
__all__ = ["QuiltAvahiServer", "QuiltAvahiClient"]
TYPE = '_quilt._tcp'
class QuiltAvahiNode(object):
""" Quilt Avahi Connection Node, represents a
found connection, and its connection details. """
def __init__(self, domain="local", hostname="none", address="", port=""):
""" Construct Connection Node """
self.domain = domain
self.hostname = hostname
self.address = address
self.port = port
def __str__(self):
""" String representation. """
return "Quilt Avahi Connection Node:\n -domain: %s\n -hostname: %s\n -address: %s\n -port: %s\n"\
% (self.domain, self.hostname, self.address, self.port)
class QuiltAvahiClient(threading.Thread):
    """ Quilt's Avahi Service Discovery Object """
def __init__(self):
""" Construct Search Client """
threading.Thread.__init__(self)
self.Nodes = Queue.Queue()
self.loop = DBusGMainLoop()
self.bus = dbus.SystemBus(mainloop = self.loop)
self.server = dbus.Interface(self.bus.get_object(avahi.DBUS_NAME, '/'),
'org.freedesktop.Avahi.Server')
self.sbrowser = dbus.Interface(self.bus.get_object(avahi.DBUS_NAME,
self.server.ServiceBrowserNew(avahi.IF_UNSPEC,
avahi.PROTO_UNSPEC, TYPE, 'local', dbus.UInt32(0))),
avahi.DBUS_INTERFACE_SERVICE_BROWSER)
def resolve(self, *args):
"""
:param args: Arguments of the resolved service
:type args: Array of mixed string and integer arguments.
"""
# Handle Connection Pattern Here, for now just print that we found
# the service #TODO
node = QuiltAvahiNode(args[4], args[5].split('.')[0], args[7], args[8])
print node
self.Nodes.put(node)
def error(self, *args):
"""
:param args: Arguments of the error in the resolved service
:type args: Mixed strings and integers
"""
print "Error: %s" % args[0]
def search_handler(self, interface, protocol, name, stype, domain, flags):
""" Handles the finding of a service on the local network.
:param interface: Interface Name
:type interface: String
:param protocol: Protocol Type
:type protocol: String
:param name: Name of service
:type name: String
:param stype: Service Type
:type stype: String
:param domain: Domain of service
:type domain: String
:param flags: Flags of the Service
:type flags: int
"""
print "Service Found %s type %s domain %s" % (name, stype, domain)
# We can determine if the service is local, avoiding uncessary connections
if flags & avahi.LOOKUP_RESULT_LOCAL:
# TODO: Handle local service here
pass
self.server.ResolveService(interface, protocol, name, stype, domain,
avahi.PROTO_UNSPEC, dbus.UInt32(0),
reply_handler=self.resolve, error_handler=self.error)
def run(self):
""" Searches the local network for broadcasting avahi services,
handles found services in the resolved method
"""
self.sbrowser.connect_to_signal("ItemNew", self.search_handler)
gobject.MainLoop().run()
class QuiltAvahiServer(object):
""" Quilt's Avahi Server Object
NOTE: Ports 9375-9379 should be our target ports
"""
def __init__(self, name="Quilt", port=9375, stype="_quilt._tcp",
domain="", host="", text=""):
"""
Construct the avahi service
:type name: string
:param name: name of service
:type port: int
:param port: port to be run on.
:type stype: str
:param stype: service type
:type domain: str
:param domain: service domain
:type host: str
:param host: service host
:type text: --
:param text: --
"""
self.name = name
self.stype = stype
self.domain = domain
self.host = host
self.port = port
self.text = text
def publish(self):
""" Make the service discoverable on the network """
bus = dbus.SystemBus()
server = dbus.Interface(
bus.get_object(
avahi.DBUS_NAME,
avahi.DBUS_PATH_SERVER),
avahi.DBUS_INTERFACE_SERVER)
interface = dbus.Interface(
bus.get_object(avahi.DBUS_NAME,
server.EntryGroupNew()),
avahi.DBUS_INTERFACE_ENTRY_GROUP)
interface.AddService(
avahi.IF_UNSPEC,
avahi.PROTO_UNSPEC,
dbus.UInt32(0),
self.name,
self.stype,
self.domain,
self.host,
dbus.UInt16(self.port),
self.text)
interface.Commit()
self.group = interface
def unpublish(self):
""" Remove the service """
self.group.Reset()
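# Rough usage sketch (assumes a running Avahi daemon and D-Bus system bus; illustrative only):
#   service = QuiltAvahiServer(name="Quilt", port=9375)
#   service.publish()              # announce _quilt._tcp on the LAN
#   client = QuiltAvahiClient()
#   client.start()                 # browse for peers in a background thread
#   node = client.Nodes.get()      # blocks until a QuiltAvahiNode is discovered
#   service.unpublish()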
| 35.626582 | 105 | 0.551252 | 5,340 | 0.948659 | 0 | 0 | 0 | 0 | 0 | 0 | 2,460 | 0.437023 |
27d5437b9102d270c520f2be5bfb611ac3f22737 | 612 | py | Python | tests/test.py | Robert-96/altwalker-appium-example | 17bb3087d13fed62a4cb98ac0d25b7aa3b8a937d | [
"MIT"
]
| null | null | null | tests/test.py | Robert-96/altwalker-appium-example | 17bb3087d13fed62a4cb98ac0d25b7aa3b8a937d | [
"MIT"
]
| null | null | null | tests/test.py | Robert-96/altwalker-appium-example | 17bb3087d13fed62a4cb98ac0d25b7aa3b8a937d | [
"MIT"
]
| null | null | null | from appium import webdriver
from .utils import PATH
desired_caps = dict(
platformName='Android',
platformVersion='10',
automationName='uiautomator2',
deviceName='Android Emulator',
app=PATH('app/ApiDemos-debug.apk.zip')
)
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
class ModelName:
def setUpModel(self):
global driver
self.driver = driver
def vertex_A(self):
pass
def vertex_B(self):
pass
def edge_A(self):
element = self.driver.find_element_by_accessibility_id('Content')
element.click()
| 19.741935 | 73 | 0.668301 | 291 | 0.47549 | 0 | 0 | 0 | 0 | 0 | 0 | 112 | 0.183007 |
27d5f9dbcf40cc145235b4fffe9387c62c414d60 | 6,161 | py | Python | tests/pte-onPrem-test-package/scripts/create_SCFile.py | gbl1124/hfrd | 327d7c1e18704d2e31a2649b40ae1d90353ebe24 | [
"Apache-2.0"
]
| 5 | 2019-08-02T20:53:57.000Z | 2021-06-25T05:16:46.000Z | tests/pte-onPrem-test-package/scripts/create_SCFile.py | anandbanik/hfrd | 7bc1f13bfc9c7d902aec0363d27b089ef68c7eec | [
"Apache-2.0"
]
| null | null | null | tests/pte-onPrem-test-package/scripts/create_SCFile.py | anandbanik/hfrd | 7bc1f13bfc9c7d902aec0363d27b089ef68c7eec | [
"Apache-2.0"
]
| 14 | 2019-07-01T01:40:50.000Z | 2020-03-24T06:14:32.000Z | import json
import os
import argparse
HOME = os.environ['HOME']+'/results/'
parser = argparse.ArgumentParser(description="Python script generates the SCFiles using MSPIDs")
parser.add_argument("-m", "--mspids", nargs="+", required=True, help="1 or more MSPIDs")
parser.add_argument("-n", "--networkId", metavar='', required=True, help="Network ID")
args = parser.parse_args()
class SCFileCreator:
def __init__(self):
self.MSPIDs = args.mspids
self.peerInfo = {}
self.SCFileObject = {}
self.networkID = args.networkId
self.writeToOutput(self.networkID)
# Get information for each peerOrgs
def getPeerInfo(self):
# This function gets the peer information for all the peers and returns data in a dictionary format
for mspid in self.MSPIDs:
# read in connection profiles for each org
with open(os.path.join(HOME, "creds/ConnectionProfile_{}.json".format(mspid)), "r") as f:
orgCP = json.load(f)
# read in admin cert for each org
with open(os.path.join(HOME, "creds/{}admin/msp/signcerts/cert.pem".format(mspid)), "r") as f:
orgCT = "".join(f.readlines())
# read in priv key for each org
with open(os.path.join(HOME, "creds/{}admin/msp/keystore/priv.pem".format(mspid)), "r") as f:
orgPK = "".join(f.readlines())
temp = {}
temp["orgCP"] = orgCP
temp["orgCT"] = orgCT
temp["orgPK"] = orgPK
self.peerInfo[mspid] = dict(temp)
return self.peerInfo
def generateSCFile(self):
# This function builds the SCFile
self.getPeerInfo() # Calling the gatherPeerOrg function
self.SCFileObject["test-network"] = {}
print(self.MSPIDs)
# Added GOPATH as per Tanya"s request
self.SCFileObject["test-network"]["gopath"] = "GOPATH"
for mspid in self.MSPIDs:
# Need to make copy of all inner dict to a new address location without sharing the same reference as the first one
self.SCFileObject["test-network"]["orderer"] = {}
self.SCFileObject["test-network"][mspid] = {}
self.SCFileObject["test-network"][mspid]["ca"] = {}
self.SCFileObject["test-network"][mspid]["name"] = mspid
self.SCFileObject["test-network"][mspid]["mspid"] = mspid
self.SCFileObject["test-network"][mspid]["username"] = "admin"
self.SCFileObject["test-network"][mspid]["privateKeyPEM"] = ""
self.SCFileObject["test-network"][mspid]["signedCertPEM"] = ""
self.SCFileObject["test-network"][mspid]["adminPath"] = ""
# Storing certificate and private key
self.SCFileObject["test-network"][mspid]["admin_cert"] = self.peerInfo[mspid]["orgCT"]
self.SCFileObject["test-network"][mspid]["priv"] = self.peerInfo[mspid]["orgPK"]
# getting all fabric_ca in peer org
fabricCaPeerList = [fabric_ca for fabric_ca in
self.peerInfo[mspid]["orgCP"]["certificateAuthorities"].keys()]
# storing the first fabric_ca since the data is the same for each peer org
self.SCFileObject["test-network"][mspid]["ca"]["name"] = fabricCaPeerList[0]
self.SCFileObject["test-network"][mspid]["ca"]["url"] = \
self.peerInfo[mspid]["orgCP"]["certificateAuthorities"][fabricCaPeerList[0]]["url"]
self.SCFileObject["test-network"][mspid]["secret"] = \
self.peerInfo[mspid]["orgCP"]["certificateAuthorities"][fabricCaPeerList[0]]["registrar"][0]["enrollSecret"]
# getting the right peer orgs
for peer in self.peerInfo[mspid]["orgCP"]["organizations"][mspid]["peers"]:
# building peer dict
self.SCFileObject["test-network"][mspid][peer] = {}
self.SCFileObject["test-network"][mspid][peer]["server-hostname"] = None
self.SCFileObject["test-network"][mspid][peer]["tls_cacerts"] = ""
self.SCFileObject["test-network"][mspid][peer]["requests"] = \
self.peerInfo[mspid]["orgCP"]["peers"][peer]["url"]
self.SCFileObject["test-network"][mspid][peer]["events"] = self.peerInfo[mspid]["orgCP"]["peers"][peer][
"eventUrl"]
# getting data for each orderer
for fabricOrderer in self.peerInfo[mspid]["orgCP"]["orderers"]:
self.SCFileObject["test-network"]["tls_cert"] = \
self.peerInfo[mspid]["orgCP"]["orderers"][fabricOrderer]["tlsCACerts"]["pem"]
# building orderer dict
self.SCFileObject["test-network"]["orderer"][fabricOrderer] = {}
self.SCFileObject["test-network"]["orderer"][fabricOrderer]["name"] = "OrdererOrg"
self.SCFileObject["test-network"]["orderer"][fabricOrderer]["mspid"] = "OrdererOrg"
self.SCFileObject["test-network"]["orderer"][fabricOrderer]["mspPath"] = ""
self.SCFileObject["test-network"]["orderer"][fabricOrderer]["adminPath"] = ""
self.SCFileObject["test-network"]["orderer"][fabricOrderer]["comName"] = ""
self.SCFileObject["test-network"]["orderer"][fabricOrderer]["server-hostname"] = None
self.SCFileObject["test-network"]["orderer"][fabricOrderer]["tls_cacerts"] = ""
self.SCFileObject["test-network"]["orderer"][fabricOrderer]["url"] = \
self.peerInfo[mspid]["orgCP"]["orderers"][fabricOrderer]["url"]
# setting the ordererID for each mspid
self.SCFileObject["test-network"][mspid]["ordererID"] = fabricOrderer
return self.SCFileObject
def writeToOutput(self, outputFile):
# this function writes to config-net-${networkID}.json file
with open(os.path.join(HOME, "SCFiles/config-net-{}.json".format(outputFile)), "w") as f:
json.dump(self.generateSCFile(), f, indent=4, sort_keys=True)
if __name__ == "__main__":
scFileCreator = SCFileCreator() | 50.917355 | 127 | 0.603636 | 5,715 | 0.927609 | 0 | 0 | 0 | 0 | 0 | 0 | 2,265 | 0.367635 |
27d6cff2a09a34968b82d0b674f282b1d2271a34 | 9,721 | py | Python | catalogue/forms.py | lh00257/superharris | cc8794ac6a63fa157ed6d0ef75f5089253ff987d | [
"MIT"
]
| null | null | null | catalogue/forms.py | lh00257/superharris | cc8794ac6a63fa157ed6d0ef75f5089253ff987d | [
"MIT"
]
| null | null | null | catalogue/forms.py | lh00257/superharris | cc8794ac6a63fa157ed6d0ef75f5089253ff987d | [
"MIT"
]
| null | null | null | import re #Regular expression library
from django import forms
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.template import RequestContext
from django.contrib.auth.forms import AuthenticationForm
from catalogue.models import Submitted
from models import GlobularCluster as GC
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div, Submit, HTML, Button, Row, Field, Fieldset
from crispy_forms.bootstrap import AppendedText, PrependedText, FormActions
#from crispy_forms.bootstrap import InlineField
#class login_page(forms.Form):
# username = forms.CharField(label='Username', max_length=30)
#password = forms.CharField(widget=forms.PasswordInput)
#model = User
#widgets = {
# 'password': forms.PasswordInput(),
#}
class RegistrationForm(forms.Form):
username = forms.CharField(label='Username', max_length=30)
email = forms.EmailField(label='Email', max_length=50)
password1 = forms.CharField(
label='Password',
widget=forms.PasswordInput()
)
password2 = forms.CharField(
label='Password (Again)',
widget=forms.PasswordInput()
)
def clean_password(self):
if 'password1' in self.cleaned_data:
            password1 = self.cleaned_data['password1']
            password2 = self.cleaned_data['password2']
if password1 == password2:
return password2
raise forms.ValidationError('Passwords do not match.')
def clean_username(self):
username = self.cleaned_data['username']
if not re.search(r'^\w+$', username):
raise forms.ValidationError('Username can only contain alphanumeric characters and the underscore.')
try:
User.objects.get(username=username)
except ObjectDoesNotExist:
return username
raise forms.ValidationError('Username is already taken :( .')
def __init__(self, *args, **kwargs):
super(RegistrationForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_id = 'id-registrationForm'
self.helper.form_class = 'blueForms'
self.helper.form_method = 'post'
self.helper.form_action = 'register'
self.helper.form_class = 'form-horizontal'
self.fields['username'].widget.attrs['placeholder'] = u'Enter here'
self.fields['email'].widget.attrs['placeholder'] = u'Enter here'
self.fields['password1'].widget.attrs['placeholder'] = u'Enter here'
self.fields['password2'].widget.attrs['placeholder'] = u'Enter here'
self.helper.layout = Layout(
Div(
Div('username', css_class='col-xs-6'),
css_class='row-fluid'),
Div(
Div('email', css_class='col-xs-6'),
css_class='row-fluid'),
Div(
Div('password1', css_class='col-xs-6'),
css_class='row-fluid'),
Div(
Div('password2', css_class='col-xs-6'),
css_class='row-fluid'),
)
self.helper.add_input(Submit('submit', 'Submit'))
class SubmitForm(forms.Form):
gcs = GC.objects.all()
drop_down_list = [(g, g.cluster_id) for g in gcs]
drop_down_list.sort(key=lambda x:x[1])
cluster = forms.ChoiceField(label = "Cluster ID", choices = drop_down_list, required = True)
name = forms.CharField(label = "Alternative names", max_length = 50, required = False)
ra = forms.CharField(label = "Right ascension", max_length = 50, required = False)
dec = forms.CharField(label = "Declination", max_length=50, required=False)
gallon = forms.CharField(label = "Longitude", max_length=50, required=False)
gallat = forms.CharField(label = "Latitude", max_length=50, required=False)
dfs = forms.CharField(label = "Distance from the sun", max_length=50, required=False)
metallicity = forms.CharField(label = "Metallicity", max_length=50, required=False)
w_mean_met = forms.CharField(label = "Weight of mean metallicity", max_length=50, required=False)
m_v_t = forms.CharField(label = "Cluster luminosity", max_length=50, required=False)
ph_u_b = forms.CharField(label = "U-B", max_length=50, required=False)
ph_b_v = forms.CharField(label = "B-V", max_length=50, required=False)
ph_v_r = forms.CharField(label = "V-R", max_length=50, required=False)
ph_v_i = forms.CharField(label = "V-I", max_length=50, required=False)
ellipticity = forms.CharField(label = "Projected ellipticity of isophotes", max_length=50, required=False)
v_r = forms.CharField(label = "Heliocentric radial velocity", max_length=50, required=False)
sig_v = forms.CharField(label = "Velocity dispersion", max_length=50, required=False)
sig_err = forms.CharField(label = "Observational uncertainty", max_length=50, required=False)
sp_c = forms.CharField(label = "King-model central concentration", max_length=50, required=False)
sp_r_c = forms.CharField(label = "Core radius", max_length=50, required=False)
sp_r_h = forms.CharField(label = "Half-light radius", max_length=50, required=False)
sp_mu_V = forms.CharField(label = "Central surface brightness", max_length=50, required=False)
sp_rho_0 = forms.CharField(label = "Central luminosity density", max_length=50, required=False)
comment = forms.CharField(label = "Comments", max_length=50, widget=forms.Textarea, required=False)
def __init__(self, *args, **kwargs):
super(SubmitForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_id = 'id-submitForm'
self.helper.form_class = 'blueForms'
self.helper.form_method = 'post'
self.helper.form_action = 'submit'
self.helper.form_class = 'form-horizontal'
self.fields['name'].widget.attrs['placeholder'] = u'Enter here'
self.fields['ra'].widget.attrs['placeholder'] = u'Enter here [degrees]'
self.fields['dec'].widget.attrs['placeholder'] = u'Enter here [degrees]'
self.fields['gallon'].widget.attrs['placeholder'] = u'Enter here [degrees]'
self.fields['gallat'].widget.attrs['placeholder'] = u'Enter here [degrees]'
self.fields['dfs'].widget.attrs['placeholder'] = u'Enter here[kpc]'
self.fields['metallicity'].widget.attrs['placeholder'] = u'Enter here'
self.fields['w_mean_met'].widget.attrs['placeholder'] = u'Enter here'
self.fields['m_v_t'].widget.attrs['placeholder'] = u'Enter here'
self.fields['ph_u_b'].widget.attrs['placeholder'] = u'Enter here'
self.fields['ph_b_v'].widget.attrs['placeholder'] = u'Enter here'
self.fields['ph_v_r'].widget.attrs['placeholder'] = u'Enter here'
self.fields['ph_v_i'].widget.attrs['placeholder'] = u'Enter here'
self.fields['ellipticity'].widget.attrs['placeholder'] = u'Enter here'
self.fields['v_r'].widget.attrs['placeholder'] = u'Enter here [km/s]'
self.fields['sig_v'].widget.attrs['placeholder'] = u'Enter here [km/s]'
self.fields['sig_err'].widget.attrs['placeholder'] = u'Enter here [km/s]'
self.fields['sp_c'].widget.attrs['placeholder'] = u'Enter here'
self.fields['sp_r_c'].widget.attrs['placeholder'] = u'Enter here'
self.fields['sp_r_h'].widget.attrs['placeholder'] = u'Enter here'
self.fields['sp_mu_V'].widget.attrs['placeholder'] = u'Enter here'
self.fields['sp_rho_0'].widget.attrs['placeholder'] = u'Enter here'
self.fields['comment'].widget.attrs['placeholder'] = u'Enter here'
#self.helper.label_class = 'col-lg-2'
#self.helper.field_class = 'col-lg-6'
self.helper.layout = Layout(
Fieldset('Name and reference',
Div('cluster', css_class='col-xs-6'),
Div('name', css_class='col-xs-6'),
css_class='row-fluid'),
Fieldset('Observational data',
Div('ra', css_class='col-xs-6'),
Div('dec', css_class='col-xs-6'),
css_class='row-fluid'),
Div(
Div('gallon', css_class='col-xs-6'),
Div('gallat', css_class='col-xs-6',),
css_class='row-fluid'),
Div(
Div('dfs', css_class='col-xs-6'),
Div('metallicity', css_class='col-xs-6',),
css_class='row-fluid'),
Div(
Div('w_mean_met', css_class='col-xs-6'),
Div('m_v_t', css_class='col-xs-6',),
css_class='row-fluid'),
Div(
Div('ph_u_b', css_class='col-xs-6'),
Div('ph_b_v', css_class='col-xs-6',),
css_class='row-fluid'),
Div(
Div('ph_v_r', css_class='col-xs-6'),
Div('ph_v_i', css_class='col-xs-6',),
css_class='row-fluid'),
Div(
Div('ellipticity', css_class='col-xs-6'),
Div('v_r', css_class='col-xs-6',),
css_class='row-fluid'),
Div(
Div('sig_v', css_class='col-xs-6'),
Div('sig_err', css_class='col-xs-6',),
css_class='row-fluid'),
Div(
Div('sp_c', css_class='col-xs-6'),
Div('sp_r_c', css_class='col-xs-6',),
css_class='row-fluid'),
Div(
Div('sp_r_h', css_class='col-xs-6'),
Div('sp_mu_V', css_class='col-xs-6',),
css_class='row-fluid'),
Div(
Div('sp_rho_0', css_class='col-xs-6'),
css_class='row-fluid'),
Fieldset('Additional information',
Div('comment', css_class='col-xs-6'),
css_class='row-fluid'),
)
self.helper.add_input(Submit('submit', 'Submit'))
| 47.419512 | 110 | 0.632342 | 8,887 | 0.914206 | 0 | 0 | 0 | 0 | 0 | 0 | 2,926 | 0.300998 |
27d78b89ba7b997214a4c7166893ac8b3158ac3f | 38,343 | py | Python | sgan/models.py | peaceminusones/Group-GAN-GCN | ff0abf90bb830729d082d1fa46e41c749c738895 | [
"MIT"
]
| 2 | 2021-05-25T09:10:15.000Z | 2021-09-25T07:53:35.000Z | sgan/models.py | peaceminusones/Group-GAN-GCN | ff0abf90bb830729d082d1fa46e41c749c738895 | [
"MIT"
]
| null | null | null | sgan/models.py | peaceminusones/Group-GAN-GCN | ff0abf90bb830729d082d1fa46e41c749c738895 | [
"MIT"
]
| null | null | null | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def make_mlp(dim_list, activation='relu', batch_norm=True, dropout=0):
    # make_mlp builds a multi-layer fully-connected network; dim_list gives the width of each
    # layer and the remaining arguments select the activation, batch norm and dropout.
layers = []
for dim_in, dim_out in zip(dim_list[:-1], dim_list[1:]):
layers.append(nn.Linear(dim_in, dim_out))
if batch_norm:
layers.append(nn.BatchNorm1d(dim_out))
if activation == 'relu':
layers.append(nn.ReLU())
elif activation == 'leakyrelu':
layers.append(nn.LeakyReLU())
if dropout > 0:
layers.append(nn.Dropout(p=dropout))
return nn.Sequential(*layers)
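# A sketch of what make_mlp produces (the dims below are illustrative, not taken from the model):
#   make_mlp([48, 512, 8], activation='relu', batch_norm=True)
#   -> Sequential(Linear(48, 512), BatchNorm1d(512), ReLU(),
#                 Linear(512, 8), BatchNorm1d(8), ReLU())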
def get_noise(shape, noise_type):
    # get_noise draws a noise tensor of the given shape from the requested distribution
if noise_type == 'gaussian':
return torch.randn(*shape).cuda()
elif noise_type == 'uniform':
return torch.rand(*shape).sub_(0.5).mul_(2.0).cuda()
raise ValueError('Unrecognized noise type "%s"' % noise_type)
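# Example (assumes a CUDA device, since both branches call .cuda()):
#   get_noise((4, 8), 'gaussian')  # standard-normal tensor of shape [4, 8] on the GPU
#   get_noise((4, 8), 'uniform')   # values drawn uniformly from [-1, 1)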
class Encoder(nn.Module):
"""
Encoder is part of both TrajectoryGenerator and
TrajectoryDiscriminator
    The network consists of a linear embedding layer followed by an LSTM.
"""
def __init__(
self, embedding_dim=64, h_dim=64, mlp_dim=1024, num_layers=1, dropout=0.0
):
super(Encoder, self).__init__()
self.mlp_dim = 1024
self.h_dim = h_dim
self.embedding_dim = embedding_dim
self.num_layers = num_layers
# 2*16
self.spatial_embedding = nn.Linear(2, embedding_dim)
# input_size: 16
# hidden_size: 32
# num_layers: 1
self.encoder = nn.LSTM(embedding_dim, h_dim, num_layers, dropout=dropout)
def init_hidden(self, batch):
return (
torch.zeros(self.num_layers, batch, self.h_dim).cuda(),
torch.zeros(self.num_layers, batch, self.h_dim).cuda()
)
def forward(self, obs_traj):
"""
Inputs:
- obs_traj: Tensor of shape (obs_len, batch, 2)
            The raw input is, for every pedestrian in this batch of sequences, the relative
            displacement between consecutive frames; it is fed through a 2x16 linear layer
            whose input has shape [obs_len*batch, 2] and whose output has shape [obs_len*batch, 16].
Output:
- final_h: Tensor of shape (self.num_layers, batch, self.h_dim)
"""
        # Encode observed trajectory (batch = total number of pedestrians across all sequences in this mini-batch)
batch = obs_traj.size(1)
        '''2x16 linear embedding: input shape [obs_len*batch, 2], output shape [obs_len*batch, 16]'''
# shape:
# "obs_traj": [obs_len,batch,2]
# "obs_traj.contiguous().view(-1, 2)": [obs_len*batch,2]
# "obs_traj_embedding": [obs_len*batch,16]
obs_traj_embedding = self.spatial_embedding(obs_traj.reshape(-1, 2))
        # reshape to 3-D to match the LSTM input layout
# "obs_traj_embedding": [obs_len,batch,16]
obs_traj_embedding = obs_traj_embedding.view(-1, batch, self.embedding_dim)
        # initialize the LSTM hidden state (h_0, c_0)
state_tuple = self.init_hidden(batch)
        # LSTM input has shape [seq_len, batch, input_size]; h_0 and c_0 are passed in as well
        # outputs: output, (h_n, c_n)
        # output.shape: [seq_length, batch_size, hidden_size]
        # output[-1] is equal to h_n
output, state = self.encoder(obs_traj_embedding, state_tuple)
        # the final hidden state h_t is returned as final_h
final_h = state[0]
return final_h
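# Usage sketch (embedding_dim=16 and h_dim=32 follow the inline comments above; requires CUDA
# because init_hidden allocates the hidden state with .cuda()):
#   encoder = Encoder(embedding_dim=16, h_dim=32).cuda()
#   # obs_traj_rel: [obs_len, batch, 2] relative displacements
#   final_h = encoder(obs_traj_rel)   # -> [num_layers=1, batch, 32]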
class Decoder(nn.Module):
"""Decoder is part of TrajectoryGenerator"""
def __init__(
self, seq_len, embedding_dim=64, h_dim=128, mlp_dim=1024, num_layers=1,
pool_every_timestep=True, dropout=0.0, bottleneck_dim=1024,
activation='relu', batch_norm=True, pooling_type='pool_net',
neighborhood_size=2.0, grid_size=8
):
super(Decoder, self).__init__()
self.seq_len = seq_len
self.mlp_dim = mlp_dim
self.h_dim = h_dim
self.embedding_dim = embedding_dim
self.pool_every_timestep = pool_every_timestep
# mlp [2,16]
self.spatial_embedding = nn.Linear(2, embedding_dim)
# lstm
# input_size: 16
# hidden_size: 32
# num_layers: 1
self.decoder = nn.LSTM(embedding_dim, h_dim, num_layers, dropout=dropout)
# mlp [32,2]
self.hidden2pos = nn.Linear(h_dim, 2)
if pool_every_timestep:
if pooling_type == 'pool_net':
self.pool_net = PoolHiddenNet(
embedding_dim=self.embedding_dim,
h_dim=self.h_dim,
mlp_dim=mlp_dim,
bottleneck_dim=bottleneck_dim,
activation=activation,
batch_norm=batch_norm,
dropout=dropout
)
mlp_dims = [h_dim + bottleneck_dim, mlp_dim, h_dim]
self.mlp = make_mlp(
mlp_dims,
activation=activation,
batch_norm=batch_norm,
dropout=dropout
)
def forward(self, last_pos, last_pos_rel, state_tuple, seq_start_end):
"""
Inputs:
- last_pos: Tensor of shape (batch, 2)
- last_pos_rel: Tensor of shape (batch, 2)
- state_tuple: (hh, ch) each tensor of shape (num_layers, batch, h_dim)
- seq_start_end: A list of tuples which delimit sequences within batch
Output:
- pred_traj: tensor of shape (self.seq_len, batch, 2)
"""
batch = last_pos.size(0)
pred_traj_fake_rel = []
decoder_input = self.spatial_embedding(last_pos_rel)
decoder_input = decoder_input.view(1, batch, self.embedding_dim)
for _ in range(self.seq_len):
output, state_tuple = self.decoder(decoder_input, state_tuple)
rel_pos = self.hidden2pos(output.view(-1, self.h_dim))
curr_pos = rel_pos + last_pos
if self.pool_every_timestep:
decoder_h = state_tuple[0]
pool_h = self.pool_net(decoder_h, seq_start_end, curr_pos)
decoder_h = torch.cat([decoder_h.view(-1, self.h_dim), pool_h], dim=1)
decoder_h = self.mlp(decoder_h)
decoder_h = torch.unsqueeze(decoder_h, 0)
state_tuple = (decoder_h, state_tuple[1])
embedding_input = rel_pos
decoder_input = self.spatial_embedding(embedding_input)
decoder_input = decoder_input.view(1, batch, self.embedding_dim)
pred_traj_fake_rel.append(rel_pos.view(batch, -1))
last_pos = curr_pos
pred_traj_fake_rel = torch.stack(pred_traj_fake_rel, dim=0)
return pred_traj_fake_rel, state_tuple[0]
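# Usage sketch (sizes are illustrative): with pred_len=12 and N pedestrians in the batch,
#   decoder = Decoder(seq_len=12, embedding_dim=16, h_dim=32, pool_every_timestep=False).cuda()
#   pred_rel, h = decoder(last_pos, last_pos_rel, (decoder_h, decoder_c), seq_start_end)
#   # pred_rel: [12, N, 2]; adding it cumulatively to last_pos gives absolute positions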
"""
modified by zyl 2021/3/2
"""
class GraphAttentionLayer(nn.Module):
def __init__(self, in_features, out_features, dropout, alpha, concat=True):
super(GraphAttentionLayer, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.concat = concat
self.W = nn.Parameter(torch.empty(size=(in_features, out_features)))
nn.init.xavier_uniform_(self.W.data, gain=1.414)
self.a = nn.Parameter(torch.empty(size=(2*out_features, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
self.leakyrelu = nn.LeakyReLU(self.alpha)
def forward(self, h, adj):
Wh = torch.mm(h, self.W)
a_input = self._prepare_attentional_mechanism_input(Wh)
e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(2))
zero_vec = -9e15*torch.ones_like(e)
attention = torch.where(adj > 0, e, zero_vec)
attention = F.softmax(attention, dim=1)
attention = F.dropout(attention, self.dropout, training=self.training)
h_prime = torch.matmul(attention, Wh)
if self.concat:
return F.elu(h_prime)
else:
return h_prime
def _prepare_attentional_mechanism_input(self, Wh):
N = Wh.size()[0]
        # repeat each row N times in place along dim 0: [h1, h1, ..., h2, h2, ...]
        Wh_repeated_in_chunks = Wh.repeat_interleave(N, dim=0)
        # tile the whole matrix N times along dim 0: [h1, h2, ..., h1, h2, ...]
        Wh_repeated_alternating = Wh.repeat(N, 1)
        # concatenate along dim 1, producing an (N * N, 2 * out_features) matrix of all row pairs
all_combinations_matrix = torch.cat([Wh_repeated_in_chunks, Wh_repeated_alternating], dim=1)
return all_combinations_matrix.view(N, N, 2 * self.out_features)
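    # Worked example of the construction above (hypothetical rows): with N = 2 and rows h1, h2,
    # repeat_interleave yields [h1, h1, h2, h2], repeat yields [h1, h2, h1, h2], and the
    # column-wise concatenation gives the pairs (h1,h1), (h1,h2), (h2,h1), (h2,h2),
    # reshaped to (2, 2, 2 * out_features).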
class GAT(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):
super(GAT, self).__init__()
self.dropout = dropout
self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)]
for i, attention in enumerate(self.attentions):
self.add_module('attention_{}'.format(i), attention)
self.out_att = GraphAttentionLayer(nhid * nheads, nclass, dropout=dropout, alpha=alpha, concat=False)
def forward(self, x, adj):
        # dropout does not change the shape of x
x = F.dropout(x, self.dropout, training=self.training)
x = torch.cat([att(x, adj) for att in self.attentions], dim=1)
x = F.dropout(x, self.dropout, training=self.training)
x = F.elu(self.out_att(x, adj))
return F.log_softmax(x, dim=1)
class GATEncoder(nn.Module):
def __init__(self, n_units, n_heads, dropout, alpha):
super(GATEncoder, self).__init__()
self.gat_intra = GAT(40, 72, 16, dropout, alpha, n_heads)
self.gat_inter = GAT(16, 72, 16, dropout, alpha, n_heads)
self.out_embedding = nn.Linear(16*2, 24)
def normalize(self, adj, dim):
N = adj.size()
        adj2 = torch.sum(adj, dim)  # sum over each row
        norm = adj2.unsqueeze(1).float()  # add a dimension for broadcasting
        norm = norm.pow(-1)  # reciprocal of the row sums
        norm_adj = adj.mul(norm)  # scale each row element-wise
return norm_adj
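    # Example of normalize with illustrative values: for
    #   adj = [[1, 1, 0],
    #          [0, 1, 1],
    #          [1, 1, 1]]
    # normalize(adj, dim=1) divides each row by its sum, giving
    #   [[0.50, 0.50, 0.00],
    #    [0.00, 0.50, 0.50],
    #    [0.33, 0.33, 0.33]]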
def forward(self, h_states, seq_start_end, end_pos, end_group):
graph_embeded_data = []
for _, (start, end) in enumerate(seq_start_end):
start = start.item()
end = end.item()
curr_state = h_states[start:end]
curr_end_group = end_group[start:end]
num_ped = end - start
eye_mtx = torch.eye(num_ped, device=end_group.device).bool()
A_g = curr_end_group.repeat(1, num_ped)
B_g = curr_end_group.transpose(1, 0).repeat(num_ped, 1)
M_intra = (A_g == B_g) & (A_g != 0) | eye_mtx
A_intra = self.normalize(M_intra, dim=1).cuda()
curr_gat_state_intra = self.gat_intra(curr_state, A_intra)
R_intra_unique = torch.unique(M_intra, sorted=False, dim=0)
n_group = R_intra_unique.size()[0]
R_intra_unique.unsqueeze_(1)
R_intra = []
for i in range(n_group-1, -1, -1):
R_intra.append(R_intra_unique[i])
R_intra = torch.cat(R_intra, dim=0)
R_intra = self.normalize(R_intra, dim=1).cuda()
curr_gat_group_state_in = torch.matmul(R_intra, curr_gat_state_intra)
M_inter = torch.ones((n_group, n_group), device=end_group.device).bool()
A_inter = self.normalize(M_inter, dim=1).cuda()
curr_gat_group_state_out = self.gat_inter(curr_gat_group_state_in, A_inter)
curr_gat_state_inter = torch.matmul(R_intra.T, curr_gat_group_state_out)
curr_gat_state = torch.cat([curr_gat_state_intra, curr_gat_state_inter], dim=1)
curr_gat_state = self.out_embedding(curr_gat_state)
graph_embeded_data.append(curr_gat_state)
graph_embeded_data = torch.cat(graph_embeded_data, dim=0)
return graph_embeded_data
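    # Sketch of the intra-group mask built in forward (hypothetical group ids):
    #   end_group for 4 pedestrians = [[1], [1], [2], [0]]   (0 means "no group")
    #   M_intra marks pairs that share a non-zero group id, plus the diagonal:
    #     [[1, 1, 0, 0],
    #      [1, 1, 0, 0],
    #      [0, 0, 1, 0],
    #      [0, 0, 0, 1]]
    #   Its unique rows (one per group, with ungrouped pedestrians on their own) form R_intra,
    #   which pools member states into group states before the inter-group GAT.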
# class BatchMultiHeadGraphAttention(nn.Module):
# """
# graph attetion layer(GAL)
# """
# def __init__(self, n_head, f_in, f_out, attn_dropout, bias=True):
# super(BatchMultiHeadGraphAttention, self).__init__()
# self.n_head = n_head
# self.f_in = f_in
# self.f_out = f_out
# self.w = nn.Parameter(torch.Tensor(n_head, f_in, f_out))
# self.a_src = nn.Parameter(torch.Tensor(n_head, f_out, 1))
# self.a_dst = nn.Parameter(torch.Tensor(n_head, f_out, 1))
# self.leaky_relu = nn.LeakyReLU(negative_slope=0.2)
# self.softmax = nn.Softmax(dim=-1)
# self.dropout = nn.Dropout(attn_dropout)
# if bias:
# self.bias = nn.Parameter(torch.Tensor(f_out))
# nn.init.constant_(self.bias, 0)
# else:
# self.register_parameter("bias", None)
# nn.init.xavier_uniform_(self.w, gain=1.414)
# nn.init.xavier_uniform_(self.a_src, gain=1.414)
# nn.init.xavier_uniform_(self.a_dst, gain=1.414)
# def forward(self, h, adj):
# bs, n = h.size()[:2]
# h_prime = torch.matmul(h.unsqueeze(1), self.w)
# attn_src = torch.matmul(h_prime, self.a_src)
# attn_dst = torch.matmul(h_prime, self.a_dst)
# attn = attn_src.expand(-1, -1, -1, n) + attn_dst.expand(-1, -1, -1, n).permute(0, 1, 3, 2)
# attn = self.leaky_relu(attn)
# attn = self.softmax(attn)
# attn = self.dropout(attn)
# attn = torch.matmul(torch.squeeze(attn, dim=0), adj)
# attn = torch.unsqueeze(attn, 0)
# output = torch.matmul(attn, h_prime)
# if self.bias is not None:
# return output + self.bias, attn
# else:
# return output, attn
# def __repr__(self):
# return (
# self.__class__.__name__
# + " ("
# + str(self.n_head)
# + " -> "
# + str(self.f_in)
# + " -> "
# + str(self.f_out)
# + ")"
# )
# """
# modified by zyl 2021/2/6 graph attetion network
# """
# class GAT(nn.Module):
# def __init__(self, n_units, n_heads, dropout=0.2, alpha=0.2):
# super(GAT, self).__init__()
# self.n_layer = len(n_units) - 1
# self.dropout = dropout
# self.layer_stack = nn.ModuleList()
# for i in range(self.n_layer):
# f_in = n_units[i] * n_heads[i - 1] if i else n_units[i]
# self.layer_stack.append(
# BatchMultiHeadGraphAttention(
# n_heads[i], f_in=f_in, f_out=n_units[i + 1], attn_dropout=dropout
# )
# )
# self.norm_list = [
# torch.nn.InstanceNorm1d(32).cuda(),
# torch.nn.InstanceNorm1d(64).cuda(),
# ]
# def forward(self, x, adj):
# bs, n = x.size()[:2]
# for i, gat_layer in enumerate(self.layer_stack):
# # x = self.norm_list[i](x.permute(0, 2, 1)).permute(0, 2, 1)
# x, attn = gat_layer(x, adj)
# if i + 1 == self.n_layer:
# x = x.squeeze(dim=1)
# else:
# x = F.elu(x.contiguous().view(bs, n, -1))
# x = F.dropout(x, self.dropout, training=self.training)
# else:
# return x
# """
# modified by zyl 2021/2/6 graph attetion network encoder
# """
# class GATEncoder(nn.Module):
# def __init__(self, n_units, n_heads, dropout, alpha):
# super(GATEncoder, self).__init__()
# self.gat_intra = GAT([40,72,16], n_heads, dropout, alpha)
# self.gat_inter = GAT([16,72,16], n_heads, dropout, alpha)
# self.out_embedding = nn.Linear(16*2, 24)
# def normalize(self, adj, dim):
# N = adj.size()
#         adj2 = torch.sum(adj, dim) # sum over each row
#         norm = adj2.unsqueeze(1).float() # add a dimension for broadcasting
#         norm = norm.pow(-1) # reciprocal of the row sums
#         norm_adj = adj.mul(norm) # scale each row element-wise
# return norm_adj
# def forward(self, obs_traj_embedding, seq_start_end, end_pos, end_group):
# graph_embeded_data = []
# for start, end in seq_start_end.data:
# curr_seq_embedding_traj = obs_traj_embedding[:, start:end, :]
# h_states = torch.squeeze(obs_traj_embedding, dim=0)
# num_ped = end - start
# curr_end_group = end_group[start:end]
# eye_mtx = torch.eye(num_ped, device=end_group.device).bool()
# A_g = curr_end_group.repeat(1, num_ped)
# B_g = curr_end_group.transpose(1, 0).repeat(num_ped, 1)
# M_intra = (A_g == B_g) & (A_g != 0) | eye_mtx
# A_intra = self.normalize(M_intra, dim=1).cuda()
# curr_seq_graph_intra = self.gat_intra(curr_seq_embedding_traj, A_intra)
# # print("curr_seq_embedding_traj:", curr_seq_embedding_traj.size())
# # print("curr_seq_graph_intra:", curr_seq_graph_intra.size())
# R_intra_unique = torch.unique(M_intra, sorted=False, dim=0)
# n_group = R_intra_unique.size()[0]
# R_intra_unique.unsqueeze_(1)
# R_intra = []
# for i in range(n_group-1, -1, -1):
# R_intra.append(R_intra_unique[i])
# R_intra = torch.cat(R_intra, dim=0)
# R_intra = self.normalize(R_intra, dim=1).cuda()
# curr_seq_graph_state_in = torch.matmul(R_intra, torch.squeeze(curr_seq_graph_intra, dim=0))
# curr_seq_graph_state_in = torch.unsqueeze(curr_seq_graph_state_in, 0)
# M_inter = torch.ones((n_group, n_group), device=end_group.device).bool()
# A_inter = self.normalize(M_inter, dim=1).cuda()
# curr_seq_graph_out = self.gat_inter(curr_seq_graph_state_in, A_inter)
# curr_seq_graph_inter = torch.matmul(R_intra.T, torch.squeeze(curr_seq_graph_out, dim=0))
# curr_seq_graph_inter = torch.unsqueeze(curr_seq_graph_inter, 0)
# curr_gat_state = torch.cat([curr_seq_graph_intra, curr_seq_graph_inter],dim=2)
# curr_gat_state = torch.squeeze(curr_gat_state, dim=0)
# curr_gat_state = self.out_embedding(curr_gat_state)
# curr_gat_state = torch.unsqueeze(curr_gat_state, 0)
# graph_embeded_data.append(curr_gat_state)
# graph_embeded_data = torch.cat(graph_embeded_data, dim=1)
# return graph_embeded_data
class PoolHiddenNet(nn.Module):
"""Pooling module as proposed in our paper"""
def __init__(
self, embedding_dim=64, h_dim=64, mlp_dim=1024, bottleneck_dim=1024,
activation='relu', batch_norm=True, dropout=0.0
):
super(PoolHiddenNet, self).__init__()
self.mlp_dim = 1024
self.h_dim = h_dim
self.bottleneck_dim = bottleneck_dim
self.embedding_dim = embedding_dim # 16
mlp_pre_dim = embedding_dim + h_dim
mlp_pre_pool_dims = [mlp_pre_dim, 512, bottleneck_dim] # mlp_pre_pool_dims: [48,512,8]
# mlp: 2*16
self.spatial_embedding = nn.Linear(2, embedding_dim)
# mlp: 48*512*8
self.mlp_pre_pool = make_mlp(
mlp_pre_pool_dims,
activation=activation,
batch_norm=batch_norm,
dropout=dropout)
def repeat(self, tensor, num_reps):
"""
Inputs:
-tensor: 2D tensor of any shape
-num_reps: Number of times to repeat each row
Outputs:
-repeat_tensor: Repeat each row such that: R1, R1, R2, R2
"""
col_len = tensor.size(1)
tensor = tensor.unsqueeze(dim=1).repeat(1, num_reps, 1)
tensor = tensor.view(-1, col_len)
return tensor
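# Illustration (values assumed, not from the original code): for a tensor
#   x = [[1., 1.], [2., 2.]]
# x.repeat(2, 1) orders the rows R1, R2, R1, R2 while self.repeat(x, 2)
# orders them R1, R1, R2, R2; forward() below uses both orderings so that
# curr_end_pos_1 - curr_end_pos_2 enumerates every ordered (i, j) pair.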
def forward(self, h_states, seq_start_end, end_pos):
"""
Inputs:
- h_states: Tensor of shape (num_layers, batch, h_dim), i.e. final_h returned by the encoder
- seq_start_end: A list of tuples which delimit sequences within batch
- end_pos: Tensor of shape (batch, 2)
Output:
- pool_h: Tensor of shape (batch, bottleneck_dim)
"""
pool_h = []
for _, (start, end) in enumerate(seq_start_end):
start = start.item()
end = end.item()
num_ped = end - start
# print("num_ped:", num_ped)
# print("h_states:", h_states.shape)
# h_states == final_h (i.e. h_states here is the output of the LSTM)
# h_states([1,batch,32]) -> cur_hidden([N,32])
curr_hidden = h_states.view(-1, self.h_dim)[start:end]
# print("curr_hidden: ", curr_hidden.shape)
# Repeat -> H1, H2, H1, H2
# curr_hidden([N,32]) -> curr_hidden_1([N*N,32])
curr_hidden_1 = curr_hidden.repeat(num_ped, 1)
# print("curr_hidden_1: ", curr_hidden_1.shape)
# Repeat position -> P1, P2, P1, P2
curr_end_pos = end_pos[start:end]
curr_end_pos_1 = curr_end_pos.repeat(num_ped, 1)
# Repeat position -> P1, P1, P2, P2
curr_end_pos_2 = self.repeat(curr_end_pos, num_ped)
# curr_rel_pos: [N*N,2]
curr_rel_pos = curr_end_pos_1 - curr_end_pos_2
# self.spatial_embedding(mlp): 2*16
# curr_rel_embedding: [N*N,16]
curr_rel_embedding = self.spatial_embedding(curr_rel_pos)
# mlp_h_input: [N*N,48]
mlp_h_input = torch.cat([curr_rel_embedding, curr_hidden_1], dim=1)
# curr_pool_h: [N*N,8]
curr_pool_h = self.mlp_pre_pool(mlp_h_input)
# curr_pool_h: [N,8]
# print(curr_pool_h.view(num_ped, num_ped, -1)[0])
curr_pool_h = curr_pool_h.view(num_ped, num_ped, -1).max(1)[0] # [N,N,8] -->[n,8]
# print(curr_pool_h)
# print("curr_pool_h:", curr_pool_h.shape)
pool_h.append(curr_pool_h)
# pool_h: [batch,8]: a pooled tensor Pi for each person
pool_h = torch.cat(pool_h, dim=0)
# print("pool_h:", pool_h.shape)
return pool_h
class GCN(nn.Module):
"""GCN module"""
def __init__(self, input_dim=48, hidden_dim=72, out_dim=8, gcn_layers=2):
super(GCN, self).__init__()
self.X_dim = input_dim
self.hidden_dim = hidden_dim
self.out_dim = out_dim
self.gcn_layers = gcn_layers
# graph convolution layer
self.W = torch.nn.ParameterList()
for i in range(self.gcn_layers):
if i == 0:
self.W.append(nn.Parameter(torch.randn(self.X_dim, self.hidden_dim)))
elif i == self.gcn_layers-1:
self.W.append(nn.Parameter(torch.randn(self.hidden_dim, self.out_dim)))
else:
self.W.append(nn.Parameter(torch.randn(self.hidden_dim, self.hidden_dim)))
def forward(self, A, X):
next_H = H = X
for i in range(self.gcn_layers):
next_H = F.relu(torch.matmul(torch.matmul(A, H), self.W[i]))
H = next_H
feat = H
return feat
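# Illustrative shape walk-through (input sizes assumed): each layer applies
#   H_{l+1} = ReLU(A @ H_l @ W_l)
# so with the defaults input_dim=48, hidden_dim=72, out_dim=8, gcn_layers=2,
# an input X of shape [N, 48] with adjacency A of shape [N, N] becomes
# [N, 72] after the first layer and [N, 8] after the second.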
class GCNModule(nn.Module):
"""group information aggregation with GCN layer"""
def __init__(
self, input_dim=40, hidden_dim=72, out_dim=16, gcn_layers=2, final_dim=24
):
super(GCNModule, self).__init__()
# GCN_intra: 40*72*16
self.gcn_intra = GCN(
input_dim=input_dim,
hidden_dim=hidden_dim,
out_dim=out_dim,
gcn_layers=gcn_layers)
# GCN_inter: 16*72*16
self.gcn_inter = GCN(
input_dim=16,
hidden_dim=hidden_dim,
out_dim=out_dim,
gcn_layers=gcn_layers)
# mlp:16*8
self.out_embedding = nn.Linear(out_dim*2, final_dim)
def normalize(self, adj, dim):
N = adj.size()
adj2 = torch.sum(adj, dim)  # sum over each row
norm = adj2.unsqueeze(1).float()  # add a dimension to the tensor
norm = norm.pow(-1)  # take the reciprocal of each row sum
norm_adj = adj.mul(norm)  # element-wise multiply
return norm_adj
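# Worked example (values assumed): for the boolean adjacency
#   adj = [[1, 1, 0],
#          [1, 1, 0],
#          [0, 0, 1]]
# normalize(adj, dim=1) divides each row by its row sum and returns
#   [[0.5, 0.5, 0.0],
#    [0.5, 0.5, 0.0],
#    [0.0, 0.0, 1.0]]
# i.e. the row-normalized adjacency D^-1 A used for mean aggregation.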
def repeat(self, tensor, num_reps):
"""
Inputs:
-tensor: 2D tensor of any shape
-num_reps: Number of times to repeat each row
Outputs:
-repeat_tensor: Repeat each row such that: R1, R1, R2, R2
"""
col_len = tensor.size(1)
tensor = tensor.unsqueeze(dim=1).repeat(1, num_reps, 1)
tensor = tensor.view(-1, col_len)
return tensor
def forward(self, h_states, seq_start_end, end_pos, end_group):
"""
Inputs:
- h_states: Tensor of shape (batch, h_dim), i.e. the output of the encoder + pooling net
- seq_start_end: A list of tuples which delimit sequences within batch
- end_pos: Tensor of shape (batch, 2)
- end_group: group labels at the last time step (t_obs); shape: (batch, 1)
Output:
- gcn_aggre: Tensor of shape (batch, bottleneck_dim)
"""
gcn_aggre = []
for _, (start, end) in enumerate(seq_start_end):
start = start.item()
end = end.item()
num_ped = end - start # num_ped: number of pedestrians in the scene
# curr_state: [N,40]
curr_state = h_states[start:end]
# get the modulated adjacency matrix arrays
# Generate masks from the group labels
# labels can only be used to distinguish groups at a timestep.
# var: end_group; def: group labels at the last time step (t_obs); shape: (batch, 1)
# clip one observation-prediction window out of multiple windows.
curr_end_group = end_group[start:end]
# get the coherency adjacency, dimension: (N, N)
# coherency mask is shared by all pedestrians in the scene
eye_mtx = torch.eye(num_ped, device=end_group.device).bool()
A_g = curr_end_group.repeat(1, num_ped)
B_g = curr_end_group.transpose(1, 0).repeat(num_ped, 1)
# M_intra: [N,N]
M_intra = (A_g == B_g) & (A_g != 0) | eye_mtx
# get the modulated normalized adjacency matrix arrays
# normalized M_intra: [N,N]
A_intra = self.normalize(M_intra, dim=1).cuda()
"""gcn_intra"""
# curr_gcn_state_intra: [N,16] (GCN:[40,72,16])
curr_gcn_state_intra = self.gcn_intra(A_intra, curr_state)
"""GPool =================================================================="""
# M_intra: [N,N]
# R_intra_unique: [M,N]
R_intra_unique = torch.unique(M_intra, sorted=False, dim=0)
# number of groups
n_group = R_intra_unique.size()[0]
R_intra_unique.unsqueeze_(1)  # add a dimension
# reverse the rows of R_intra_unique (flip from bottom to top)
R_intra = []
for i in range(n_group-1, -1, -1):
R_intra.append(R_intra_unique[i])
R_intra = torch.cat(R_intra, dim=0)
# row-normalize
R_intra = self.normalize(R_intra, dim=1).cuda()
# extract the group-level part: [M,N]*[N,16]
# curr_gcn_group_state: [M,16]
curr_gcn_group_state_in = torch.matmul(R_intra, curr_gcn_state_intra)
"""=========================================================================="""
"""gcn_inter"""
# M_inter: [M,M]
M_inter = torch.ones((n_group, n_group), device=end_group.device).bool()
# normalize
A_inter = self.normalize(M_inter, dim=1).cuda()
# M_inter_norm: [M,M]
# curr_gcn_group_state_in: [M,16] (GCN:[16,72,16])
# curr_gcn_group_state_out: [M,16]
curr_gcn_group_state_out = self.gcn_inter(A_inter, curr_gcn_group_state_in)
"""GUnpool================================================================="""
# [N,M]*[M,16]
# curr_gcn_state_inter: [N,16]
curr_gcn_state_inter = torch.matmul(R_intra.T, curr_gcn_group_state_out)
"""========================================================================="""
# curr_gcn_state: [N,32]
curr_gcn_state = torch.cat([curr_gcn_state_intra, curr_gcn_state_inter], dim=1)
# curr_gcn_state: [N,24]
curr_gcn_state = self.out_embedding(curr_gcn_state)
gcn_aggre.append(curr_gcn_state)
# gcn_aggre: [batch,24]:
gcn_aggre = torch.cat(gcn_aggre, dim=0)
return gcn_aggre
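# Sketch of the group pooling used above (shapes taken from the comments):
# R_intra is an [M, N] row-normalized membership matrix with one row per
# group, so GPool, torch.matmul(R_intra, H) with H of shape [N, 16], averages
# each group's member features into [M, 16] group features, and GUnpool,
# torch.matmul(R_intra.T, H_group), maps each group feature back to its
# member pedestrians weighted by the same normalized membership entries.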
class TrajectoryGenerator(nn.Module):
def __init__(
self, obs_len, pred_len, embedding_dim=64, encoder_h_dim=64,
decoder_h_dim=128, mlp_dim=1024, num_layers=1, noise_dim=(0, ),
noise_type='gaussian', noise_mix_type='ped', pooling_type=None,
pool_every_timestep=True, dropout=0.0, bottleneck_dim=1024,
activation='relu', batch_norm=True, neighborhood_size=2.0, grid_size=8,
n_units=[32,16,32], n_heads=4, dropout1=0, alpha=0.2,
):
super(TrajectoryGenerator, self).__init__()
if pooling_type and pooling_type.lower() == 'none':
pooling_type = None
self.obs_len = obs_len
self.pred_len = pred_len
self.mlp_dim = mlp_dim
self.encoder_h_dim = encoder_h_dim
self.decoder_h_dim = decoder_h_dim
self.embedding_dim = embedding_dim
self.noise_dim = noise_dim
self.num_layers = num_layers
self.noise_type = noise_type
self.noise_mix_type = noise_mix_type
self.pooling_type = pooling_type
self.noise_first_dim = 0
self.pool_every_timestep = pool_every_timestep
self.bottleneck_dim = 1024
self.encoder = Encoder(
embedding_dim=embedding_dim,
h_dim=encoder_h_dim,
mlp_dim=mlp_dim,
num_layers=num_layers,
dropout=dropout
)
self.decoder = Decoder(
pred_len,
embedding_dim=embedding_dim,
h_dim=decoder_h_dim,
mlp_dim=mlp_dim,
num_layers=num_layers,
pool_every_timestep=pool_every_timestep,
dropout=dropout,
bottleneck_dim=bottleneck_dim,
activation=activation,
batch_norm=batch_norm,
pooling_type=pooling_type,
grid_size=grid_size,
neighborhood_size=neighborhood_size
)
if pooling_type == 'pool_net':
self.pool_net = PoolHiddenNet(
embedding_dim=self.embedding_dim,
h_dim=encoder_h_dim,
mlp_dim=mlp_dim,
bottleneck_dim=bottleneck_dim,
activation=activation,
batch_norm=batch_norm
)
if self.noise_dim is None:
self.noise_dim = None
elif self.noise_dim[0] == 0:
self.noise_dim = None
else:
self.noise_first_dim = noise_dim[0]
# gatencoder
self.gatencoder = GATEncoder(
n_units=n_units, n_heads=n_heads, dropout=dropout1, alpha=alpha
)
# Decoder Hidden
if pooling_type:
input_dim = encoder_h_dim + bottleneck_dim
else:
input_dim = encoder_h_dim
# if self.mlp_decoder_needed():
# mlp_decoder_context_dims = [input_dim, mlp_dim, decoder_h_dim - self.noise_first_dim]
# self.mlp_decoder_context = make_mlp(
# mlp_decoder_context_dims,
# activation=activation,
# batch_norm=batch_norm,
# dropout=dropout
# )
self.gcn_module = GCNModule(
input_dim=input_dim,
hidden_dim=72,
out_dim=16,
gcn_layers=2,
final_dim=decoder_h_dim - self.noise_first_dim
)
def add_noise(self, _input, seq_start_end, user_noise=None):
"""
Inputs:
- _input: Tensor of shape (_, decoder_h_dim - noise_first_dim)
- seq_start_end: A list of tuples which delimit sequences within batch.
- user_noise: Generally used for inference when you want to see
relation between different types of noise and outputs.
Outputs:
- decoder_h: Tensor of shape (_, decoder_h_dim)
"""
if not self.noise_dim:
return _input
if self.noise_mix_type == 'global':
noise_shape = (seq_start_end.size(0), ) + self.noise_dim
else:
noise_shape = (_input.size(0), ) + self.noise_dim
if user_noise is not None:
z_decoder = user_noise
else:
z_decoder = get_noise(noise_shape, self.noise_type)
if self.noise_mix_type == 'global':
_list = []
for idx, (start, end) in enumerate(seq_start_end):
start = start.item()
end = end.item()
_vec = z_decoder[idx].view(1, -1)
_to_cat = _vec.repeat(end - start, 1)
_list.append(torch.cat([_input[start:end], _to_cat], dim=1))
decoder_h = torch.cat(_list, dim=0)
return decoder_h
decoder_h = torch.cat([_input, z_decoder], dim=1)
return decoder_h
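# Illustrative note (sizes assumed): with noise_dim=(8,), noise_mix_type
# 'global' draws one z of length 8 per sequence in seq_start_end and repeats
# it for every pedestrian of that sequence, while 'ped' draws an independent
# z per pedestrian; either way z is concatenated to _input, so decoder_h
# ends up with shape (batch, decoder_h_dim).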
def mlp_decoder_needed(self):
if (
self.noise_dim or self.pooling_type or
self.encoder_h_dim != self.decoder_h_dim
):
return True
else:
return False
# modified by zyl 2021/1/12
def forward(self, obs_traj, obs_traj_rel, seq_start_end, obs_traj_g, user_noise=None):
"""
Inputs:
- obs_traj: Tensor of shape (obs_len, batch, 2)
- obs_traj_rel: Tensor of shape (obs_len, batch, 2)
- seq_start_end: A list of tuples which delimit sequences within batch.
- user_noise: Generally used for inference when you want to see
relation between different types of noise and outputs.
Output:
- pred_traj_rel: Tensor of shape (self.pred_len, batch, 2)
"""
batch = obs_traj_rel.size(1)
# Encode seq
final_encoder_h = self.encoder(obs_traj_rel)
# Pool States
if self.pooling_type:
end_pos = obs_traj[-1, :, :]
pool_h = self.pool_net(final_encoder_h, seq_start_end, end_pos)
# Construct input hidden states for decoder
# final_encoder_h: [batch, 32]
# pool_h: [batch, 8]
# mlp_decoder_context_input: [batch, 40]
mlp_decoder_context_input = torch.cat([final_encoder_h.view(-1, self.encoder_h_dim), pool_h], dim=1)
else:
mlp_decoder_context_input = final_encoder_h.view(-1, self.encoder_h_dim)
# end_pos = obs_traj[-1, :, :]
# end_group = obs_traj_g[-1, :, :]
# mlp_decoder_context_input = torch.unsqueeze(mlp_decoder_context_input, 0)
# mlp_decoder_context_input = self.gatencoder(mlp_decoder_context_input, seq_start_end, end_pos, end_group)
# mlp_decoder_context_input = torch.squeeze(mlp_decoder_context_input, dim=0)
# Add Noise
if self.mlp_decoder_needed():
# # noise_input = self.mlp_decoder_context(mlp_decoder_context_input)
# end_pos = obs_traj[-1, :, :]
# # modified by zyl 2021/1/12 9:56
# end_group = obs_traj_g[-1, :, :]
# noise_input = self.gcn_module(mlp_decoder_context_input, seq_start_end, end_pos, end_group)
end_pos = obs_traj[-1, :, :]
end_group = obs_traj_g[-1, :, :]
noise_input = self.gatencoder(mlp_decoder_context_input, seq_start_end, end_pos, end_group)
else:
noise_input = mlp_decoder_context_input
decoder_h = self.add_noise(noise_input, seq_start_end, user_noise=user_noise)
decoder_h = torch.unsqueeze(decoder_h, 0)
decoder_c = torch.zeros(self.num_layers, batch, self.decoder_h_dim).cuda()
state_tuple = (decoder_h, decoder_c)
last_pos = obs_traj[-1]
last_pos_rel = obs_traj_rel[-1]
# Predict Trajectory
decoder_out = self.decoder(
last_pos,
last_pos_rel,
state_tuple,
seq_start_end,
)
pred_traj_fake_rel, final_decoder_h = decoder_out
return pred_traj_fake_rel
class TrajectoryDiscriminator(nn.Module):
def __init__(
self, obs_len, pred_len, embedding_dim=64, h_dim=64, mlp_dim=1024,
num_layers=1, activation='relu', batch_norm=True, dropout=0.0,
d_type='local'
):
super(TrajectoryDiscriminator, self).__init__()
self.obs_len = obs_len
self.pred_len = pred_len
self.seq_len = obs_len + pred_len
# self.mlp_dim = mlp_dim
self.h_dim = h_dim
self.d_type = d_type
self.encoder = Encoder(
embedding_dim=embedding_dim, # 16
h_dim=h_dim, # 48
mlp_dim=mlp_dim, # 64
num_layers=num_layers,
dropout=dropout
)
if d_type == 'global':
mlp_pool_dims = [h_dim + embedding_dim, mlp_dim, h_dim]
self.pool_net = PoolHiddenNet(
embedding_dim=embedding_dim,
h_dim=h_dim,
mlp_dim=mlp_pool_dims,
bottleneck_dim=h_dim,
activation=activation,
batch_norm=batch_norm
)
real_classifier_dims = [h_dim, mlp_dim, 1]
self.real_classifier = make_mlp(
real_classifier_dims,
activation=activation,
batch_norm=batch_norm,
dropout=dropout
)
def forward(self, traj, traj_rel, seq_start_end=None):
"""
Inputs:
- traj: Tensor of shape (obs_len + pred_len, batch, 2)
- traj_rel: Tensor of shape (obs_len + pred_len, batch, 2)
- seq_start_end: A list of tuples which delimit sequences within batch
Output:
- scores: Tensor of shape (batch,) with real/fake scores
"""
final_h = self.encoder(traj_rel)
# Note: In case of 'global' option we are using start_pos as opposed to
# end_pos. The intuition being that the hidden state holds the whole
# trajectory, so the relative position at the start, when combined with
# the trajectory information, should help in discriminative behavior.
if self.d_type == 'local':
classifier_input = final_h.squeeze()
else:
classifier_input = self.pool_net(final_h.squeeze(), seq_start_end, traj[0])
scores = self.real_classifier(classifier_input)
return scores
| 38.652218 | 126 | 0.586235 | 31,389 | 0.802582 | 0 | 0 | 0 | 0 | 0 | 0 | 15,178 | 0.388085 |
27d7ec475999a81872908a78c697615fa5aa0984 | 4,258 | py | Python | netflow/mkipfixtypes/ipfixtypes.py | kohler/click-packages | cec70da7cf460548ef08f1ddad6924db29d5c0c5 | [
"MIT"
]
| 13 | 2015-02-26T23:12:09.000Z | 2021-04-18T04:37:12.000Z | netflow/mkipfixtypes/ipfixtypes.py | kohoumas/click-packages | 6bb5c4ba286e5dbc74efd1708921d530425691f6 | [
"MIT"
]
| null | null | null | netflow/mkipfixtypes/ipfixtypes.py | kohoumas/click-packages | 6bb5c4ba286e5dbc74efd1708921d530425691f6 | [
"MIT"
]
| 7 | 2015-08-25T09:29:41.000Z | 2021-04-18T04:37:13.000Z | #!/usr/bin/python
#
# Generates ipfixtypes.hh from IPFIX spec and schema
#
# Copyright (c) 2006 Mazu Networks, Inc.
#
# $Id: ipfixtypes.py,v 1.1 2006/05/12 16:43:44 eddietwo Exp $
#
import xml.dom.minidom
import sys
import time
class IPFIXField:
"""
Represents a <field> element in the IPFIX specification. Access
attributes with getattr(), e.g.,
field.name or getattr(field, 'name')
field.dataType or getattr(field, 'dataType')
"""
def __init__(self, node):
self.node = node
def __getattr__(self, name):
return self.node.getAttribute(name)
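# Example usage (illustrative; the attribute values depend on the spec file):
#   field = IPFIXField(node)
#   field.name       # e.g. "octetDeltaCount"
#   field.dataType   # e.g. "unsigned64"
#   field.fieldId    # e.g. "1"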
class IPFIXSpecification:
"""
Represents all <field> elements in the IPFIX specification.
"""
def __init__(self, file = None):
dom = xml.dom.minidom.parse(file)
self.fields = []
for fieldDefinitions in dom.getElementsByTagName('fieldDefinitions'):
self.fields += [IPFIXField(field) for field in fieldDefinitions.getElementsByTagName('field')]
self.types = []
for simpleType in dom.getElementsByTagName('simpleType'):
if simpleType.getAttribute('name') == "dataType":
for enumeration in simpleType.getElementsByTagName('enumeration'):
self.types.append(enumeration.getAttribute('value'))
def fieldDefinitions(self):
"""
Returns all fields declared in the <fieldDefinitions>
section of the specification.
"""
return self.fields
def dataTypes(self):
"""
Returns all dataTypes declared in the <schema> section of the
specification.
"""
return self.types
def main():
if len(sys.argv) < 2:
print "Usage: %s [OPTION]... [FILE]..." % sys.argv[0]
sys.exit(0)
dataTypes = {}
fieldTypes = {}
for file in sys.argv[1:]:
spec = IPFIXSpecification(file)
for field in spec.fieldDefinitions():
if dataTypes.has_key(field.dataType):
dataTypes[field.dataType].append(field.name)
else:
dataTypes[field.dataType] = [field.name]
fieldTypes[int(field.fieldId)] = field.name
for dataType in spec.dataTypes():
if not dataTypes.has_key(dataType):
dataTypes[dataType] = []
# IPFIX_unsigned8,
data_types = ["IPFIX_%s" % dataType for dataType in dataTypes]
data_types = ",\n ".join(data_types)
# IPFIX_octetDeltaCount = 1,
field_types = fieldTypes.items()
field_types.sort()
field_types = ["IPFIX_%s = %d" % (name, fieldId) for fieldId, name in field_types]
field_types = ",\n ".join(field_types)
# case IPFIX_octetDeltaCount:
# case IPFIX_packetDeltaCount:
# ...
# return IPFIX_unsigned64;
ipfix_datatypes = []
for dataType, names in dataTypes.iteritems():
if names:
ipfix_datatypes += ["case IPFIX_%s:" % name for name in names]
ipfix_datatypes.append(" return IPFIX_%s;" % dataType)
ipfix_datatypes = "\n ".join(ipfix_datatypes)
# case IPFIX_octetDeltaCount: return "octetDeltaCount";
ipfix_names = ["case IPFIX_%s: return \"%s\";" % \
(name, name) for name in fieldTypes.values()]
ipfix_names = "\n ".join(ipfix_names)
# else if (strcmp(name, "octetDeltaCount") == 0) { return IPFIX_octetDeltaCount; }
ipfix_types = ["else if (strcmp(name, \"%s\") == 0) { return IPFIX_%s; }" % \
(name, name) for name in fieldTypes.values()]
ipfix_types = "\n ".join(ipfix_types)
date = time.asctime()
print """
// DO NOT EDIT. Generated at %(date)s.
#ifndef IPFIXTYPES_HH
#define IPFIXTYPES_HH
CLICK_DECLS
enum IPFIX_dataType {
IPFIX_unknown = 0,
%(data_types)s
};
enum IPFIX_fieldType {
%(field_types)s
};
static inline IPFIX_dataType
ipfix_datatype(uint16_t type) {
switch (type) {
%(ipfix_datatypes)s
}
return IPFIX_unknown;
}
static inline const char *
ipfix_name(uint16_t type) {
switch (type) {
%(ipfix_names)s
}
return "unknown";
}
static inline uint16_t
ipfix_type(const char *name) {
if (0) { }
%(ipfix_types)s
else { return 0; }
}
CLICK_ENDDECLS
#endif
""".strip() % locals()
if __name__ == '__main__':
main()
| 26.447205 | 106 | 0.625881 | 1,423 | 0.334194 | 0 | 0 | 0 | 0 | 0 | 0 | 1,846 | 0.433537 |
27d8e4bb5627f304929e5b7f3fa1b41d586d410e | 694 | py | Python | core_lib/web_helpers/constants_media_type.py | shubham-surya/core-lib | 543db80706746a937e5ed16bd50f2de8d58b32e4 | [
"MIT"
]
| null | null | null | core_lib/web_helpers/constants_media_type.py | shubham-surya/core-lib | 543db80706746a937e5ed16bd50f2de8d58b32e4 | [
"MIT"
]
| 9 | 2021-03-11T02:29:17.000Z | 2022-03-22T19:01:18.000Z | core_lib/web_helpers/constants_media_type.py | shubham-surya/core-lib | 543db80706746a937e5ed16bd50f2de8d58b32e4 | [
"MIT"
]
| 2 | 2022-01-27T11:19:00.000Z | 2022-02-11T11:33:09.000Z | import enum
class MediaType(enum.Enum):
MEDIA_TYPE_WILDCARD = "*"
WILDCARD = "*/*"
APPLICATION_XML = "application/xml"
APPLICATION_ATOM_XML = "application/atom+xml"
APPLICATION_XHTML_XML = "application/xhtml+xml"
APPLICATION_SVG_XML = "application/svg+xml"
APPLICATION_JSON = "application/json"
APPLICATION_FORM_URLENCODED = "application/x-www-form-urlencoded"
MULTIPART_FORM_DATA = "multipart/form-data"
APPLICATION_OCTET_STREAM = "application/octet-stream"
TEXT_PLAIN = "text/plain"
TEXT_XML = "text/xml"
TEXT_HTML = "text/html"
SERVER_SENT_EVENTS = "text/event-stream"
APPLICATION_JSON_PATCH_JSON = "application/json-patch+json"
| 34.7 | 69 | 0.729107 | 679 | 0.978386 | 0 | 0 | 0 | 0 | 0 | 0 | 272 | 0.391931 |
27da1fb06b835a7c7c1c2845d17975f0ff1c9b74 | 2,940 | py | Python | pylons-emlo/emlo/workspace/indexing/src/conversionhelper.py | culturesofknowledge/emlo-server | 8a88ca98a5211086195793e4bed5960550638936 | [
"MIT"
]
| null | null | null | pylons-emlo/emlo/workspace/indexing/src/conversionhelper.py | culturesofknowledge/emlo-server | 8a88ca98a5211086195793e4bed5960550638936 | [
"MIT"
]
| null | null | null | pylons-emlo/emlo/workspace/indexing/src/conversionhelper.py | culturesofknowledge/emlo-server | 8a88ca98a5211086195793e4bed5960550638936 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
'''
Created on 24 Aug 2010
@author: Matthew Wilcoxson
functions convert from one value to another in the form:
def conversion(value):
#do something
return new_value
'''
import time
def convert_to_rdf_date(value):
date_check = value
# rdf uses format '1651-12-31T00:00:00Z' or '1651-12-31T00:00:00.999Z'
# Recognises dates in the following formats:
# * 'YYYY-M-D' to 'YYYY-MM-DD'
# * 'YYYY-MM-DD HH:MM:SS'
# * 'YYYY-MM-DD HH:MM:SS.M' to 'YYYY-MM-DD HH:MM:SS.MMMMMMM'
d = None
date_length = len( date_check )
if 8 <= date_length <= 10 :
d = time.strptime( date_check, '%Y-%m-%d')
elif date_length == 19 :
d = time.strptime( date_check, '%Y-%m-%d %H:%M:%S')
elif 20 <= date_length <= 26 :
d = time.strptime( date_check[:23], '%Y-%m-%d %H:%M:%S.%f')
if d == None :
raise SyntaxError( "Value '" + value +"' can not be converted to a date")
# Annoyingly time.strftime does not cope with years less than 1900, so I'm forced to use this:
new_value = "%(year)d-%(month)02d-%(day)02dT%(hour)02d:%(minute)02d:%(second)02dZ" % \
{ 'year':d.tm_year, 'month':d.tm_mon, 'day':d.tm_mday, 'hour':d.tm_hour, 'minute':d.tm_min, 'second':d.tm_sec }
return new_value
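# Examples (illustrative):
#   convert_to_rdf_date('1651-12-31') -> '1651-12-31T00:00:00Z'
#   convert_to_rdf_date('1651-12-31 23:59:59') -> '1651-12-31T23:59:59Z'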
def convert_to_solr_date(value):
# Just use rdf one!
return convert_to_rdf_date(value)
def convert_to_rdf_boolean( value ):
value = value.lower()
if value == '1' or value == 'y' or value == 'true' :
new_value = 'true'
elif value == '0' or value == 'n' or value == 'false' :
new_value = 'false'
else:
raise SyntaxError( "Value '" + value + "' can not be converted to a boolean")
return new_value
def convert_to_solr_boolean(value):
# Just use rdf one!
return convert_to_rdf_boolean(value)
def convert_people_gender( value ):
valuelow = value.lower()
if valuelow == 'male' or valuelow == 'm' or valuelow == 'man' or valuelow == 'men':
new_value = "male"
elif valuelow == 'female' or valuelow == 'f' or valuelow == 'woman' or valuelow == 'women':
new_value = "female"
else:
raise SyntaxError( "Value '" + value + "' can not be converted to a gender" )
return new_value
def convert_to_local_url( value ) :
value = value.replace( 'http://sers018.sers.ox.ac.uk/history/cofk/union.php?iwork_id=', '/profile?iwork_id=' )
value = value.replace( 'http://sers018.sers.ox.ac.uk/history/cofk/selden_end.php?iwork_id=', '/profile?iwork_id=' )
return value
def convert_manifestation_type( value ):
if value == 'Scribal copy' :
return "Manuscript copy"
return value
def convert_manifestation_opened( value ):
if value == 'o' :
return "Opened"
elif value == 'p' :
return "Partially Opened"
elif value == 'u' :
return "Unopened"
return "Unknown:"+value | 30 | 119 | 0.611905 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,200 | 0.408163 |
27de194719485c100a81b84fd59429f4b32b78e0 | 992 | py | Python | modules/wxpy-index/wxpy_index/version.py | john04047210/mira_wepy_server | 385b8561e63f9164102e60681e2704c55fec0577 | [
"MIT"
]
| 1 | 2018-05-22T11:25:59.000Z | 2018-05-22T11:25:59.000Z | modules/wxpy-index/wxpy_index/version.py | john04047210/mira_wepy_server | 385b8561e63f9164102e60681e2704c55fec0577 | [
"MIT"
]
| null | null | null | modules/wxpy-index/wxpy_index/version.py | john04047210/mira_wepy_server | 385b8561e63f9164102e60681e2704c55fec0577 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2018 QiaoPeng.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
"""Version information for Wxpy-Index.
This file is imported by ``wxpy_index.__init__``,
and parsed by ``setup.py``.
"""
from __future__ import absolute_import, print_function
__version__ = '0.1.0.dev20180000'
| 33.066667 | 72 | 0.746976 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 899 | 0.90625 |
27dfaf52615924607a73e76ca9bec8a17c8c3880 | 11,305 | py | Python | estimate.py | DS3Lab/feebee | eb210d07a7f9956ca2d0681ccf446330c8427a8b | [
"Apache-2.0"
]
| 1 | 2022-03-24T06:15:37.000Z | 2022-03-24T06:15:37.000Z | estimate.py | DS3Lab/feebee | eb210d07a7f9956ca2d0681ccf446330c8427a8b | [
"Apache-2.0"
]
| null | null | null | estimate.py | DS3Lab/feebee | eb210d07a7f9956ca2d0681ccf446330c8427a8b | [
"Apache-2.0"
]
| 1 | 2021-12-20T12:11:55.000Z | 2021-12-20T12:11:55.000Z | from absl import app
from absl import flags
from absl import logging
import csv
import importlib
import numpy as np
import os.path as path
import random
from sklearn.model_selection import train_test_split
import time
from transformations.reader.matrix import test_argument_and_file, load_and_log
import transformations.label_noise as label_noise
import methods.knn as knn
import methods.knn_extrapolate as knn_extrapolate
import methods.ghp as ghp
import methods.kde as kde
import methods.onenn as onenn
import methods.lr_model as lr_model
FLAGS = flags.FLAGS
flags.DEFINE_string("path", ".", "Path to the matrices directory")
flags.DEFINE_string("features_train", None, "Name of the train features numpy matrix exported file (npy)")
flags.DEFINE_string("features_test", None, "Name of the test features numpy matrix exported file (npy)")
flags.DEFINE_string("labels_train", None, "Name of the train labels numpy matrix exported file (npy)")
flags.DEFINE_string("labels_test", None, "Name of the test labels numpy matrix exported file (npy)")
flags.DEFINE_list("noise_levels", [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], "Run at different noise levels")
flags.DEFINE_integer("noise_runs", 5, "Number of runs for different noise levels")
flags.DEFINE_string("output_file", None, "File to write the output in CSV format (including headers)")
flags.DEFINE_bool("output_overwrite", True, "Writes (if True) or appends (if False) to the specified output file if any")
flags.DEFINE_enum("method", None, ["knn", "knn_loo", "knn_extrapolate", "ghp", "kde_knn_loo", "kde", "onenn", "lr_model"], "Method to estimate the bayes error (results in either 1 value or a lower and upper bound)")
def _get_csv_row(variant, run, samples, noise, results, time):
return {'method': FLAGS.method,
'variant': variant,
'run': run,
'samples': samples,
'noise': noise,
'results': results,
'time': time}
def _write_result(rows):
writeheader = False
if FLAGS.output_overwrite or not path.exists(FLAGS.output_file):
writeheader = True
with open(FLAGS.output_file, mode='w+' if FLAGS.output_overwrite else 'a+') as f:
fieldnames = ['method', 'variant', 'run', 'samples', 'noise', 'results', 'time']
writer = csv.DictWriter(f, fieldnames=fieldnames)
if writeheader:
writer.writeheader()
for r in rows:
writer.writerow(r)
def estimate_from_split_matrices(eval_fn):
test_argument_and_file(FLAGS.path, "features_train")
test_argument_and_file(FLAGS.path, "features_test")
test_argument_and_file(FLAGS.path, "labels_train")
test_argument_and_file(FLAGS.path, "labels_test")
train_features, dim_train, samples_train = load_and_log(FLAGS.path, "features_train")
test_features, dim_test, samples_test = load_and_log(FLAGS.path, "features_test")
if dim_test != dim_train:
raise AttributeError("Train and test features do not have the same dimension!")
train_labels, dim, samples_train_labels = load_and_log(FLAGS.path, "labels_train")
if dim != 1:
raise AttributeError("Train labels file does not point to a vector!")
if samples_train_labels != samples_train:
raise AttributeError("Train features and labels files does not have the same amount of samples!")
test_labels, _, samples_test_labels = load_and_log(FLAGS.path, "labels_test")
if dim != 1:
raise AttributeError("Test labels file does not point to a vector!")
if samples_test_labels != samples_test:
raise AttributeError("Test features and labels files does not have the same amount of samples!")
logging.log(logging.DEBUG, "Start full estimation with method '{}'".format(FLAGS.method))
start = time.time()
result_full = eval_fn(train_features, test_features, train_labels, test_labels)
end = time.time()
logging.log(logging.DEBUG, "Method '{}' executed in {} seconds".format(FLAGS.method, end - start))
logging.log(logging.INFO, "Full train and test set: {}".format(result_full))
if FLAGS.noise_levels and FLAGS.noise_runs > 0:
result_rows = []
for run in range(FLAGS.noise_runs):
if FLAGS.output_file:
rows = [_get_csv_row(k, run, samples_train, 0.0, v, (end - start) / float(len(result_full))) for k, v in result_full.items()]
result_rows.extend(rows)
logging.log(logging.DEBUG, "Start noisy run {} out of {}".format(run+1, FLAGS.noise_runs))
run_start = time.time()
for noise_level in [float(x) for x in FLAGS.noise_levels]:
if noise_level > 1.0 or noise_level <= 0.0:
raise AttributeError("Noise level {} has to be bigger than 0 and not larger than 1!".format(noise_level))
logging.log(logging.DEBUG, "Start noise level {} for run {} out of {}".format(noise_level, run+1, FLAGS.noise_runs))
noise_start = time.time()
# flip labels test and train
flipped_train_labels = label_noise.random_flip(train_labels, samples_train, noise_level, copy=True)
flipped_test_labels = label_noise.random_flip(test_labels, samples_test, noise_level, copy=True)
# run method
logging.log(logging.DEBUG, "Start full estimation with method '{}', noise level {}, run {}/{}".format(FLAGS.method, noise_level, run+1, FLAGS.noise_runs))
start = time.time()
result = eval_fn(train_features, test_features, flipped_train_labels, flipped_test_labels)
end = time.time()
logging.log(logging.DEBUG, "Method '{}' executed in {} seconds".format(FLAGS.method, end - start))
logging.log(logging.INFO, "Run {}/{} - noise level {}: {}".format(run+1, FLAGS.noise_runs, noise_level, result))
if FLAGS.output_file:
rows = [_get_csv_row(k, run, samples_train, noise_level, v, (end - start) / float(len(result))) for k, v in result.items()]
result_rows.extend(rows)
noise_end = time.time()
logging.log(logging.DEBUG, "Noise level {} for run {}/{} executed in {} seconds".format(noise_level, run+1, FLAGS.noise_runs, noise_end - noise_start))
run_end = time.time()
logging.log(logging.DEBUG, "Run {}/{} executed in {} seconds".format(run+1, FLAGS.noise_runs, run_end - run_start))
if FLAGS.output_file:
_write_result(result_rows)
elif FLAGS.output_file:
rows = [_get_csv_row(k, 0, samples_train, 0.0, v, (end - start) / float(len(result_full))) for k, v in result_full.items()]
_write_result(rows)
def estimate_from_single_matrix(eval_fn):
test_argument_and_file(FLAGS.path, "features_train")
test_argument_and_file(FLAGS.path, "labels_train")
train_features, dim_train, samples_train = load_and_log(FLAGS.path, "features_train")
train_labels, dim, samples_train_labels = load_and_log(FLAGS.path, "labels_train")
if dim != 1:
raise AttributeError("Train labels file does not point to a vector!")
if samples_train_labels != samples_train:
raise AttributeError("Train features and labels files does not have the same amount of samples!")
logging.log(logging.DEBUG, "Start full estimation with method '{}'".format(FLAGS.method))
start = time.time()
result_full = eval_fn(train_features, train_labels)
end = time.time()
logging.log(logging.DEBUG, "Method '{}' executed in {} seconds".format(FLAGS.method, end - start))
logging.log(logging.INFO, "Full train set: {}".format(result_full))
if FLAGS.noise_levels and FLAGS.noise_runs > 0:
result_rows = []
for run in range(FLAGS.noise_runs):
if FLAGS.output_file:
rows = [_get_csv_row(k, run, samples_train, 0.0, v, (end - start) / float(len(result_full))) for k, v in result_full.items()]
result_rows.extend(rows)
logging.log(logging.DEBUG, "Start noisy run {} out of {}".format(run+1, FLAGS.noise_runs))
run_start = time.time()
for noise_level in [float(x) for x in FLAGS.noise_levels]:
if noise_level > 1.0 or noise_level <= 0.0:
raise AttributeError("Noise level {} has to be bigger than 0 and not larger than 1!".format(noise_level))
logging.log(logging.DEBUG, "Start noise level {} for run {} out of {}".format(noise_level, run+1, FLAGS.noise_runs))
noise_start = time.time()
# flip labels train
flipped_train_labels = label_noise.random_flip(train_labels, samples_train, noise_level, copy=True)
# run method
logging.log(logging.DEBUG, "Start full estimation with method '{}', noise level {}, run {}/{}".format(FLAGS.method, noise_level, run+1, FLAGS.noise_runs))
start = time.time()
result = eval_fn(train_features, flipped_train_labels)
end = time.time()
logging.log(logging.DEBUG, "Method '{}' executed in {} seconds".format(FLAGS.method, end - start))
logging.log(logging.INFO, "Run {}/{} - noise level {}: {}".format(run+1, FLAGS.noise_runs, noise_level, result))
if FLAGS.output_file:
rows = [_get_csv_row(k, run, samples_train, noise_level, v, (end - start) / float(len(result))) for k, v in result.items()]
result_rows.extend(rows)
noise_end = time.time()
logging.log(logging.DEBUG, "Noise level {} for run {}/{} executed in {} seconds".format(noise_level, run+1, FLAGS.noise_runs, noise_end - noise_start))
run_end = time.time()
logging.log(logging.DEBUG, "Run {}/{} executed in {} seconds".format(run+1, FLAGS.noise_runs, run_end - run_start))
if FLAGS.output_file:
_write_result(result_rows)
elif FLAGS.output_file:
rows = [_get_csv_row(k, 0, samples_train, 0.0, v, (end - start) / float(len(result_full))) for k, v in result_full.items()]
_write_result(rows)
def main(argv):
if FLAGS.method is None:
raise app.UsageError("You have to specify the method!")
if FLAGS.method == "knn":
estimate_from_split_matrices(knn.eval_from_matrices)
elif FLAGS.method == "knn_extrapolate":
estimate_from_split_matrices(knn_extrapolate.eval_from_matrices)
elif FLAGS.method == "lr_model":
estimate_from_split_matrices(lr_model.eval_from_matrices)
elif FLAGS.method == "knn_loo":
estimate_from_single_matrix(knn.eval_from_matrix_loo)
elif FLAGS.method == "ghp":
estimate_from_single_matrix(ghp.eval_from_matrix)
elif FLAGS.method == "kde_knn_loo":
estimate_from_single_matrix(kde.eval_from_matrix_knn_loo)
elif FLAGS.method == "onenn":
estimate_from_single_matrix(onenn.eval_from_matrix_onenn)
elif FLAGS.method == "kde":
estimate_from_single_matrix(kde.eval_from_matrix_kde)
else:
raise NotImplementedError("Method module for 'matrices' not yet implemented!")
if __name__ == "__main__":
app.run(main)
| 51.153846 | 215 | 0.665369 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,638 | 0.233348 |
27dfb13b1540ca2ae940981337f040231ef6dd46 | 2,610 | py | Python | allmodels_image.py | GustavZ/Tensorflow-Object-Detection | 3aab434b20e510d3953b4265dd73a1c7c315067d | [
"MIT"
]
| 187 | 2017-12-26T17:41:09.000Z | 2019-03-06T04:44:25.000Z | allmodels_image.py | a554142589/realtime_object_detection | d2bd7e58df9af1848e473fa7627aa2433192903d | [
"MIT"
]
| 38 | 2018-02-01T17:05:01.000Z | 2019-02-15T21:58:25.000Z | allmodels_image.py | a554142589/realtime_object_detection | d2bd7e58df9af1848e473fa7627aa2433192903d | [
"MIT"
]
| 65 | 2018-01-19T06:03:44.000Z | 2019-03-06T04:58:31.000Z | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 10 09:45:23 2018
@author: www.github.com/GustavZ
"""
import os
import sys
import numpy as np
from rod.config import Config
from rod.helper import get_model_list, check_if_optimized_model
from rod.model import ObjectDetectionModel, DeepLabModel
ROOT_DIR = os.getcwd()
#MODELS_DIR = os.path.join(ROOT_DIR,'models')
MODELS_DIR = '/home/gustav/workspace/eetfm_automation/nmsspeed_test'
INPUT_TYPE = 'image'
def create_test_config(type,model_name, optimized=False, single_class=False):
class TestConfig(Config):
OD_MODEL_PATH=MODELS_DIR+'/'+model_name+'/{}'
DL_MODEL_PATH=MODELS_DIR+'/'+model_name+'/{}'
OD_MODEL_NAME=model_name
DL_MODEL_NAME=model_name
VISUALIZE=False
SPLIT_MODEL = False
WRITE_TIMELINE = True
LIMIT_IMAGES = 11
if optimized:
USE_OPTIMIZED=True
else:
USE_OPTIMIZED=False
if single_class:
NUM_CLASSES=1
else:
NUM_CLASSES=90
def __init__(self):
super(TestConfig, self).__init__(type)
return TestConfig()
# Read sequentail Models or Gather all Models from models/
config = Config('od')
if config.SEQ_MODELS:
model_names = config.SEQ_MODELS
else:
model_names = get_model_list(MODELS_DIR)
# Sequential testing
for model_name in model_names:
print("> testing model: {}".format(model_name))
# conditionals
optimized=False
single_class=False
# Test Model
if 'hands' in model_name or 'person' in model_name:
single_class=True
if 'deeplab' in model_name:
config = create_test_config('dl',model_name,optimized,single_class)
model = DeepLabModel(config).prepare_model(INPUT_TYPE)
else:
config = create_test_config('od',model_name,optimized,single_class)
model = ObjectDetectionModel(config).prepare_model(INPUT_TYPE)
# Check if there is an optimized graph
model_dir = os.path.join(os.getcwd(),'models',model_name)
optimized = check_if_optimized_model(model_dir)
# Again for the optimized graph
if optimized:
if 'deeplab' in model_name:
config = create_test_config('dl',model_name,optimized,single_class)
model = DeepLabModel(config).prepare_model(INPUT_TYPE)
else:
config = create_test_config('od',model_name,optimized,single_class)
model = ObjectDetectionModel(config).prepare_model(INPUT_TYPE)
model.run()
| 32.222222 | 79 | 0.668966 | 649 | 0.248659 | 0 | 0 | 0 | 0 | 0 | 0 | 499 | 0.191188 |
27e04f3e71ee9ae2490b13c55437303fba48ca2d | 5,953 | py | Python | train.py | Jing-lun/GPR_3D_Model_Reconstruction | 24259bdbdf5e993e286e556ee1bae720892a16b9 | [
"Unlicense"
]
| 1 | 2021-09-30T10:22:54.000Z | 2021-09-30T10:22:54.000Z | train.py | Jing-lun/GPR_3D_Model_Reconstruction | 24259bdbdf5e993e286e556ee1bae720892a16b9 | [
"Unlicense"
]
| 1 | 2021-07-23T13:10:58.000Z | 2021-07-23T13:10:58.000Z | train.py | Jing-lun/GPR_3D_Model_Reconstruction | 24259bdbdf5e993e286e556ee1bae720892a16b9 | [
"Unlicense"
]
| null | null | null | # Copyright 2021, Robotics Lab, City College of New York
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Originating Author: Jinglun Feng, ([email protected])
import argparse
import logging
import os
import sys
import numpy as np
import torch
import torch.nn as nn
from torch import optim
from tqdm import tqdm
from torch.autograd import Variable
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader, random_split
from torchvision.utils import save_image
from model import UNet3D
from utils.data_loader import BasicDataset
from utils.utils import PointLoss
from eval import eval_net
def train_net(net,
epochs,
batch_size,
lr,
device,
save_cp = True):
dset = BasicDataset(args.input, args.gt)
n_train = int(len(dset) * 0.85)
n_val = len(dset) - n_train
train, val = random_split(dset, [n_train, n_val])
dset_train = DataLoader(train, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True)
dset_valid = DataLoader(val, batch_size=batch_size, shuffle=False, num_workers=8, pin_memory=True)
writer = SummaryWriter(comment=f'BS_{2}')
logging.info(f'''Starting training:
Epochs: {epochs}
Batch size: {batch_size}
Learning rate: {lr}
Training size: {n_train}
Validation size: {n_val}
Device: {device.type}
''')
optimizer = optim.Adam(net.parameters(), lr=lr)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=40, gamma=0.2)
L1_loss = nn.L1Loss()
L1_loss.to(device)
global_step = 0
for epoch in range(epochs):
net.train()
epoch_loss = 0
with tqdm(total=n_train, desc=f'Epoch {epoch+1}/{epochs}', unit='mat') as pbar:
for batch in dset_train:
mats = batch['mat_input']
pcds = batch['mat_gt']
mats = mats.to(device=device, dtype=torch.float32)
pcds = pcds.to(device=device, dtype=torch.float32)
test = pcds*6 + 1
optimizer.zero_grad()
mats_pred = net(mats)
new_predict = test * mats_pred
new_ground_truth = 7*pcds
loss = L1_loss(new_predict, new_ground_truth)
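# Weighting sketch (assuming pcds is a binary occupancy grid): where the
# ground-truth voxel is 1, test equals 7 and the target is 7, so occupied
# voxels contribute 7 * |pred - 1| to the L1 loss; where it is 0, test
# equals 1 and the target is 0, so empty voxels contribute |pred|. The
# loss is therefore biased toward the sparse occupied voxels.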
epoch_loss += loss.item()
writer.add_scalar('Loss/train', loss.item(), global_step)
pbar.set_postfix(**{'loss (batch)': loss.item()})
loss.backward()
optimizer.step()
pbar.update(mats.shape[0])
global_step += 1
val_score = eval_net(net, dset_valid, device, n_val)
logging.info(f'Validation L1 Distance: {val_score}')
writer.add_scalar('Loss/test', val_score, global_step)
scheduler.step()
if epoch % 20 == 0:
torch.save(net.state_dict(),
'check_points/' + f'CP_epoch{epoch + 1}.pth')
logging.info(f'Checkpoint {epoch + 1} saved !')
writer.close()
def args_setting():
parser = argparse.ArgumentParser(description='Train the net on gpr data',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-e', '--epochs', metavar='E', type=int, default=101,
help='Number of epochs', dest='epochs')
parser.add_argument('-b', '--batch-size', metavar='B', type=int, nargs='?', default=4,
help='Batch size', dest='batchsize')
parser.add_argument('-l', '--learning-rate', metavar='LR', type=float, nargs='?', default=0.00001,
help='Learning rate', dest='lr')
parser.add_argument('-f', '--load', dest='load', type=str, default='check_points/good_627/CP_epoch101.pth',
help='Load model from a .pth file')
parser.add_argument('-i', '--input', default='../resnet_range/',
type=str, metavar='PATH', help='path to input dataset', dest='input')
parser.add_argument('-g', '--ground-truth', default='../new_mat_gt/',
type=str, metavar='PATH', help='path to gt dataset', dest='gt')
parser.add_argument('-c', '--checkpoint', default='check_point/',
type=str, metavar='PATH', help='path to gt dataset', dest='cp')
return parser.parse_args()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
args = args_setting()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
logging.info(f'Let\'s use {torch.cuda.device_count()} GPUs!')
net = UNet3D(residual='conv')
net = torch.nn.DataParallel(net)
if args.load != '':
net.load_state_dict(
torch.load(args.load, map_location=device)
)
logging.info(f'Model loaded from {args.load}')
logging.info(f'Network Structure:\n'
f'\t{net}\n')
net.to(device=device)
try:
train_net(net=net,
epochs=args.epochs,
batch_size=args.batchsize,
lr=args.lr,
device=device)
except KeyboardInterrupt:
torch.save(net.state_dict(), 'INTERRUPTED.pth')
logging.info('Saved interrupt')
try:
sys.exit(0)
except SystemExit:
os._exit(0)
| 36.975155 | 111 | 0.607425 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,744 | 0.292962 |
27e1b8a412e18403318f1bb9f3adb67ae8c94d10 | 805 | py | Python | Python/1-Fundamentals/Week 2/workshop2/banking_pkg/account.py | armirh/Nucamp-SQL-Devops-Training | 6c2dc5793c732bfb4c4d365acbb346a95fbf4bf2 | [
"MIT"
]
| 2 | 2022-01-19T02:33:11.000Z | 2022-01-19T02:33:13.000Z | Python/1-Fundamentals/Week 2/workshop2/banking_pkg/account.py | armirh/Nucamp-SQL-Devops-Training | 6c2dc5793c732bfb4c4d365acbb346a95fbf4bf2 | [
"MIT"
]
| null | null | null | Python/1-Fundamentals/Week 2/workshop2/banking_pkg/account.py | armirh/Nucamp-SQL-Devops-Training | 6c2dc5793c732bfb4c4d365acbb346a95fbf4bf2 | [
"MIT"
]
| null | null | null | import sys
def show_balance(balance):
print(" Has logged in!")
print(" The current balance is " + str(balance))
def deposit(balance):
print("Your current balance is: " + str(balance))
while True:
deposit_amount = float(input("Enter deposit amount: "))
balance = balance + deposit_amount
print("The new balance is: " + str(balance))
return balance
def withdraw(balance):
print("Current balance is: " + str(balance))
while True:
withdraw_amount = float(input("Enter the withdrawl amount: "))
balance = float()
balance = balance - withdraw_amount
print("New balance is: " + str(balance))
return balance
def logout():
print("Goodbye!! ")
sys.exit()
| 23.676471 | 70 | 0.607453 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 200 | 0.248447 |
27e21695cafe73f0d93a69b097c0c530103f4723 | 2,098 | py | Python | scrapers/who.py | kollivier/sushi-chef-who-covid-advice | cb50d5fdfe992767eff50bc33e323a682752d0b2 | [
"MIT"
]
| null | null | null | scrapers/who.py | kollivier/sushi-chef-who-covid-advice | cb50d5fdfe992767eff50bc33e323a682752d0b2 | [
"MIT"
]
| null | null | null | scrapers/who.py | kollivier/sushi-chef-who-covid-advice | cb50d5fdfe992767eff50bc33e323a682752d0b2 | [
"MIT"
]
| null | null | null | from bs4 import BeautifulSoup
from ricecooker.classes import nodes
class WHOPageScraperBase:
def __init__(self, url, file_on_disk):
self.url = url
self.file_on_disk = file_on_disk
def node_for_text_section(self, content):
# print("content = {}".format(content.prettify()))
pass
def node_for_video(self, content):
pass
def node_for_rows(self, rows):
for row in rows:
pass #print("row = {}".format(row.prettify()))
def get_ricecooker_node(self):
"""
Convert the data at the URL to a ricecooker/Kolibri-compatible representation.
:return: A ricecooker.TreeNode-derived Node object.
"""
raise NotImplementedError("Not implemented!")
class WHOCovidAdvicePageScraper(WHOPageScraperBase):
def get_ricecooker_node(self):
soup = BeautifulSoup(open(self.file_on_disk).read())
print("opening {}".format(self.file_on_disk))
# We'll add the title later when we iterate through the sections
topic_node = nodes.TopicNode(source_id=self.url, title='')
sections = soup.find_all('div', attrs={'class': 'section-heading'})
for section in sections:
# This is the top-level header, meaning it's the page title
title = section.text.strip()
if section.find('h1'):
print("Page title = {}".format(title))
topic_node.title = title
continue
print("Section = {}".format(title))
content = section.find_next_sibling()
if "content-block" in content.attrs['class']:
self.node_for_text_section(content)
elif "row" in content.attrs['class']:
# the section rows are siblings in the tree.
rows = [content]
next = content.find_next_sibling()
while "row" in next.attrs['class']:
rows.append(next)
next = next.find_next_sibling()
self.node_for_rows(rows)
return topic_node
| 32.78125 | 86 | 0.597712 | 2,024 | 0.964728 | 0 | 0 | 0 | 0 | 0 | 0 | 562 | 0.267874 |
27e33b028e6c906a2e346f640e4d67536b199914 | 23,817 | py | Python | dxtbx/tests/model/experiment/test_experiment_list.py | jbeilstenedmands/cctbx_project | c228fb15ab10377f664c39553d866281358195aa | [
"BSD-3-Clause-LBNL"
]
| null | null | null | dxtbx/tests/model/experiment/test_experiment_list.py | jbeilstenedmands/cctbx_project | c228fb15ab10377f664c39553d866281358195aa | [
"BSD-3-Clause-LBNL"
]
| null | null | null | dxtbx/tests/model/experiment/test_experiment_list.py | jbeilstenedmands/cctbx_project | c228fb15ab10377f664c39553d866281358195aa | [
"BSD-3-Clause-LBNL"
]
| null | null | null | from __future__ import absolute_import, division, print_function
import six.moves.cPickle as pickle
from glob import glob
import os
import pytest
from dxtbx.model import Experiment, ExperimentList
from dxtbx.model.experiment_list import ExperimentListFactory, \
ExperimentListDumper, ExperimentListDict
def test_experiment_contains():
from dxtbx.model import Beam, Detector, Goniometer, Scan
from dxtbx.model import Crystal
# Create a load of models
b1 = Beam()
d1 = Detector()
g1 = Goniometer()
s1 = Scan()
c1 = Crystal((1, 0, 0), (0, 1, 0), (0, 0, 1), space_group_symbol="P1")
# Create an experiment
e = Experiment(
beam=b1, detector=d1, goniometer=g1,
scan=s1, crystal=c1, imageset=None)
# Check experiment contains model
assert b1 in e
assert d1 in e
assert g1 in e
assert s1 in e
assert c1 in e
# Create a load of models that look the same but aren't
b2 = Beam()
d2 = Detector()
g2 = Goniometer()
s2 = Scan()
c2 = Crystal((1, 0, 0), (0, 1, 0), (0, 0, 1), space_group_symbol="P1")
# Check experiment doesn't contain model
assert b2 not in e
assert d2 not in e
assert g2 not in e
assert s2 not in e
assert c2 not in e
def test_experiment_equality():
from dxtbx.model import Beam, Detector, Goniometer, Scan
from dxtbx.model import Crystal
# Create a load of models
b1 = Beam()
d1 = Detector()
g1 = Goniometer()
s1 = Scan()
c1 = Crystal((1, 0, 0), (0, 1, 0), (0, 0, 1), space_group_symbol="P1")
# Create a load of models that look the same but aren't
b2 = Beam()
d2 = Detector()
g2 = Goniometer()
s2 = Scan()
c2 = Crystal((1, 0, 0), (0, 1, 0), (0, 0, 1), space_group_symbol="P1")
# Create an experiment
e1 = Experiment(
beam=b1, detector=d1, goniometer=g1,
scan=s1, crystal=c1, imageset=None)
# Create an experiment
e2 = Experiment(
beam=b1, detector=d1, goniometer=g1,
scan=s1, crystal=c1, imageset=None)
# Create an experiment
e3 = Experiment(
beam=b2, detector=d2, goniometer=g2,
scan=s2, crystal=c2, imageset=None)
# Check e1 equals e2 but not e3
assert e1 == e2
assert e1 != e3
assert e2 != e3
def test_experiment_consistent(dials_regression):
from dxtbx.imageset import ImageSetFactory
from dxtbx.model import Scan
# Create a sweep
sweep_filenames = os.path.join(dials_regression, 'centroid_test_data', 'centroid*.cbf')
sweep = ImageSetFactory.new(sorted(glob(sweep_filenames)))[0]
# Create experiment with sweep and good scan
e = Experiment(imageset=sweep, scan=sweep.get_scan())
assert e.is_consistent()
# Create experiment with sweep and defective scan
scan = sweep.get_scan()
scan.set_image_range((1, 1))
e = Experiment(imageset=sweep, scan=scan)
#assert not e.is_consistent()) # FIXME
## Create experiment with imageset and good scan
#assert e.is_consistent()
## Create experiment with imageset and non-still scan
#assert not e.is_consistent()
## Create experiment with imageset and scan with more than 1 image
#assert not e.is_consistent()
## Create experiment with imageset and defective scan
#assert not e.is_consistent()
def test_experimentlist_contains(experiment_list):
from dxtbx.model import Beam, Detector, Goniometer, Scan
# Check all the models are found
for e in experiment_list:
assert e.beam in experiment_list
assert e.detector in experiment_list
assert e.goniometer in experiment_list
assert e.scan in experiment_list
# Create some more models
b = Beam()
d = Detector()
g = Goniometer()
s = Scan()
# Check that models not in are not found
assert b not in experiment_list
assert d not in experiment_list
assert g not in experiment_list
assert s not in experiment_list
# def test_experimentlist_index(experiment_list):
# # Check the indices of existing experiments
# assert experiment_list.index(experiment_list[0]) is 0
# assert experiment_list.index(experiment_list[1]) is 1
# assert experiment_list.index(experiment_list[2]) is 2
# assert experiment_list.index(experiment_list[3]) is 1
# assert experiment_list.index(experiment_list[4]) is 0
# # Check index of non existing experiment
# try:
# experiment_list.index(Experiment())
# assert False
# except ValueError:
# pass
def test_experimentlist_replace(experiment_list):
# Get the models
b = [e.beam for e in experiment_list]
d = [e.detector for e in experiment_list]
g = [e.goniometer for e in experiment_list]
s = [e.scan for e in experiment_list]
# Replace some models
experiment_list.replace(b[0], b[1])
assert experiment_list[0].beam is b[1]
assert experiment_list[4].beam is b[1]
# Replace again
experiment_list[0].beam = b[0]
experiment_list[4].beam = b[4]
def test_experimentlist_indices(experiment_list):
from dxtbx.model import Beam, Detector, Goniometer, Scan
# Get the models
b = [e.beam for e in experiment_list]
d = [e.detector for e in experiment_list]
g = [e.goniometer for e in experiment_list]
s = [e.scan for e in experiment_list]
# Check indices of beams
assert list(experiment_list.indices(b[0])) == [0, 4]
assert list(experiment_list.indices(b[1])) == [1, 3]
assert list(experiment_list.indices(b[2])) == [2]
assert list(experiment_list.indices(b[3])) == [1, 3]
assert list(experiment_list.indices(b[4])) == [0, 4]
# Check indices of detectors
assert list(experiment_list.indices(d[0])) == [0, 4]
assert list(experiment_list.indices(d[1])) == [1, 3]
assert list(experiment_list.indices(d[2])) == [2]
assert list(experiment_list.indices(d[3])) == [1, 3]
assert list(experiment_list.indices(d[4])) == [0, 4]
# Check indices of goniometer
assert list(experiment_list.indices(g[0])) == [0, 4]
assert list(experiment_list.indices(g[1])) == [1, 3]
assert list(experiment_list.indices(g[2])) == [2]
assert list(experiment_list.indices(g[3])) == [1, 3]
assert list(experiment_list.indices(g[4])) == [0, 4]
# Check indices of scans
assert list(experiment_list.indices(s[0])) == [0, 4]
assert list(experiment_list.indices(s[1])) == [1, 3]
assert list(experiment_list.indices(s[2])) == [2]
assert list(experiment_list.indices(s[3])) == [1, 3]
assert list(experiment_list.indices(s[4])) == [0, 4]
# Check some models not in the list
assert len(experiment_list.indices(Beam())) == 0
assert len(experiment_list.indices(Detector())) == 0
assert len(experiment_list.indices(Goniometer())) == 0
assert len(experiment_list.indices(Scan())) == 0
def test_experimentlist_models(experiment_list):
# Get all the unique models
b = experiment_list.beams()
d = experiment_list.detectors()
g = experiment_list.goniometers()
s = experiment_list.scans()
# Check we have the expected number
assert len(b) == 3
assert len(d) == 3
assert len(g) == 3
assert len(s) == 3
# Check we have the expected order
assert b[0] == experiment_list[0].beam
assert b[1] == experiment_list[1].beam
assert b[2] == experiment_list[2].beam
assert d[0] == experiment_list[0].detector
assert d[1] == experiment_list[1].detector
assert d[2] == experiment_list[2].detector
assert g[0] == experiment_list[0].goniometer
assert g[0] == experiment_list[0].goniometer
assert g[1] == experiment_list[1].goniometer
assert s[2] == experiment_list[2].scan
assert s[1] == experiment_list[1].scan
assert s[2] == experiment_list[2].scan
def test_experimentlist_to_dict(experiment_list):
# Convert the list to a dictionary
obj = experiment_list.to_dict()
# Check this is the right object
assert obj['__id__'] == 'ExperimentList'
# Check length of items
assert len(obj['experiment']) == 5
assert len(obj['beam']) == 3
assert len(obj['detector']) == 3
assert len(obj['goniometer']) == 3
assert len(obj['scan']) == 3
# The expected models
b = [0, 1, 2, 1, 0]
d = [0, 1, 2, 1, 0]
g = [0, 1, 2, 1, 0]
s = [0, 1, 2, 1, 0]
# Check all the experiments
for i, eobj in enumerate(obj['experiment']):
assert eobj['__id__'] == 'Experiment'
assert eobj['beam'] == b[i]
assert eobj['detector'] == d[i]
assert eobj['goniometer'] == g[i]
assert eobj['scan'] == s[i]
def test_experimentlist_where(experiment_list):
for beam in experiment_list.beams():
assert beam is not None
for i in experiment_list.where(beam=beam):
assert experiment_list[i].beam is beam
for goniometer in experiment_list.goniometers():
assert goniometer is not None
for i in experiment_list.where(goniometer=goniometer):
assert experiment_list[i].goniometer is goniometer
for scan in experiment_list.scans():
assert scan is not None
for i in experiment_list.where(scan=scan):
assert experiment_list[i].scan is scan
for detector in experiment_list.detectors():
assert detector is not None
for i in experiment_list.where(detector=detector):
assert experiment_list[i].detector is detector
@pytest.fixture
def experiment_list():
from dxtbx.model import Beam, Detector, Goniometer, Scan
# Initialise a list of experiments
experiments = ExperimentList()
# Create a few beams
b1 = Beam()
b2 = Beam()
b3 = Beam()
# Create a few detectors
d1 = Detector()
d2 = Detector()
d3 = Detector()
# Create a few goniometers
g1 = Goniometer()
g2 = Goniometer()
g3 = Goniometer()
# Create a few scans
s1 = Scan()
s2 = Scan()
s3 = Scan()
# Create a list of models
b = [b1, b2, b3, b2, b1]
d = [d1, d2, d3, d2, d1]
g = [g1, g2, g3, g2, g1]
s = [s1, s2, s3, s2, s1]
ident = ["sausage", "eggs", "bacon", "toast", "beans"]
# Populate with various experiments
for i in range(5):
experiments.append(Experiment(
beam=b[i],
detector=d[i],
goniometer=g[i],
scan=s[i],
identifier=ident[i]))
# Return the list of experiments
return experiments
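# Note on the fixture layout: models are deliberately shared between
# experiments (b = [b1, b2, b3, b2, b1], and likewise for detectors,
# goniometers and scans). That is why the indices tests above expect [0, 4]
# for the first model, [1, 3] for the second and [2] for the third, and why
# beams()/detectors()/goniometers()/scans() deduplicate to three unique
# models each.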
def test_experimentlist_factory_from_json(dials_regression):
os.environ['DIALS_REGRESSION'] = dials_regression
# Get all the filenames
filename1 = os.path.join(dials_regression, 'experiment_test_data', 'experiment_1.json')
filename2 = os.path.join(dials_regression, 'experiment_test_data', 'experiment_2.json')
filename3 = os.path.join(dials_regression, 'experiment_test_data', 'experiment_3.json')
filename4 = os.path.join(dials_regression, 'experiment_test_data', 'experiment_4.json')
# Read all the experiment lists in
el1 = ExperimentListFactory.from_json_file(filename1)
#el2 = ExperimentListFactory.from_json_file(filename2)
el3 = ExperimentListFactory.from_json_file(filename3)
el4 = ExperimentListFactory.from_json_file(filename4)
# All the experiment lists should be the same length
assert len(el1) == 1
#assert len(el1) == len(el2)
assert len(el1) == len(el3)
assert len(el1) == len(el4)
# Check all the models are the same
for e in zip(el1, el3, el4):
e1 = e[0]
assert e1.imageset is not None
assert e1.beam is not None
assert e1.detector is not None
assert e1.goniometer is not None
assert e1.scan is not None
assert e1.crystal is not None
for ee in e[1:]:
assert e1.imageset == ee.imageset
assert e1.beam == ee.beam
assert e1.detector == ee.detector
assert e1.goniometer == ee.goniometer
assert e1.scan == ee.scan
assert e1.crystal == ee.crystal
def test_experimentlist_factory_from_pickle(dials_regression):
os.environ['DIALS_REGRESSION'] = dials_regression
# Get all the filenames
filename1 = os.path.join(dials_regression, 'experiment_test_data', 'experiment_1.json')
# Read all the experiment lists in
el1 = ExperimentListFactory.from_json_file(filename1)
# Pickle then load again
el2 = pickle.loads(pickle.dumps(el1))
# All the experiment lists should be the same length
assert len(el1) == 1
assert len(el1) == len(el2)
# Check all the models are the same
for e1, e2 in zip(el1, el2):
assert e1.imageset and e1.imageset == e2.imageset
assert e1.beam and e1.beam == e2.beam
assert e1.detector and e1.detector == e2.detector
assert e1.goniometer and e1.goniometer == e2.goniometer
assert e1.scan and e1.scan == e2.scan
assert e1.crystal and e1.crystal == e2.crystal
def test_experimentlist_factory_from_args(dials_regression):
pytest.importorskip('dials')
os.environ['DIALS_REGRESSION'] = dials_regression
# Get all the filenames
filenames = [
os.path.join(dials_regression, 'experiment_test_data', 'experiment_1.json'),
#os.path.join(dials_regression, 'experiment_test_data', 'experiment_2.json'),
os.path.join(dials_regression, 'experiment_test_data', 'experiment_3.json'),
os.path.join(dials_regression, 'experiment_test_data', 'experiment_4.json')]
# Get the experiments from a list of filenames
experiments = ExperimentListFactory.from_args(filenames, verbose=True)
    # Have 3 experiments (experiment_2.json is excluded above)
assert len(experiments) == 3
for i in range(3):
assert experiments[i].imageset is not None
assert experiments[i].beam is not None
assert experiments[i].detector is not None
assert experiments[i].goniometer is not None
assert experiments[i].scan is not None
def test_experimentlist_factory_from_imageset():
from dxtbx.model import Beam, Detector, Goniometer, Scan
from dxtbx.model import Crystal
from dxtbx.format.Format import Format
imageset = Format.get_imageset(["filename.cbf"], as_imageset=True)
imageset.set_beam(Beam(), 0)
imageset.set_detector(Detector(), 0)
crystal = Crystal(
(1, 0, 0), (0, 1, 0), (0, 0, 1), space_group_symbol="P1")
experiments = ExperimentListFactory.from_imageset_and_crystal(
imageset, crystal)
assert len(experiments) == 1
assert experiments[0].imageset is not None
assert experiments[0].beam is not None
assert experiments[0].detector is not None
assert experiments[0].crystal is not None
def test_experimentlist_factory_from_sweep():
from dxtbx.model import Beam, Detector, Goniometer, Scan
from dxtbx.model import Crystal
from dxtbx.format.Format import Format
filenames = ["filename_%01d.cbf" % (i+1) for i in range(0, 2)]
imageset = Format.get_imageset(
filenames,
beam = Beam(),
detector = Detector(),
goniometer = Goniometer(),
scan = Scan((1,2), (0,1)),
as_sweep=True)
crystal = Crystal((1, 0, 0), (0, 1, 0), (0, 0, 1), space_group_symbol="P1")
experiments = ExperimentListFactory.from_imageset_and_crystal(
imageset, crystal)
assert len(experiments) == 1
assert experiments[0].imageset is not None
assert experiments[0].beam is not None
assert experiments[0].detector is not None
assert experiments[0].goniometer is not None
assert experiments[0].scan is not None
assert experiments[0].crystal is not None
def test_experimentlist_factory_from_datablock():
from dxtbx.model import Beam, Detector, Goniometer, Scan
from dxtbx.datablock import DataBlockFactory
from dxtbx.model import Crystal
from dxtbx.format.Format import Format
filenames = ["filename_%01d.cbf" % (i+1) for i in range(0, 2)]
imageset = Format.get_imageset(
filenames,
beam = Beam(),
detector = Detector(),
goniometer = Goniometer(),
scan = Scan((1,2), (0,1)),
as_sweep=True)
crystal = Crystal((1, 0, 0), (0, 1, 0), (0, 0, 1), space_group_symbol="P1")
datablock = DataBlockFactory.from_imageset(imageset)
experiments = ExperimentListFactory.from_datablock_and_crystal(
datablock, crystal)
assert len(experiments) == 1
assert experiments[0].imageset is not None
assert experiments[0].beam is not None
assert experiments[0].detector is not None
assert experiments[0].goniometer is not None
assert experiments[0].scan is not None
assert experiments[0].crystal is not None
def test_experimentlist_dumper_dump_formats(dials_regression, tmpdir):
tmpdir.chdir()
os.environ['DIALS_REGRESSION'] = dials_regression
# Get all the filenames
filename1 = os.path.join(dials_regression, 'experiment_test_data', 'experiment_1.json')
# Read all the experiment lists in
elist1 = ExperimentListFactory.from_json_file(filename1)
# Create the experiment list dumper
dump = ExperimentListDumper(elist1)
# Dump as JSON file and reload
filename = 'temp1.json'
dump.as_json(filename)
elist2 = ExperimentListFactory.from_json_file(filename)
check(elist1, elist2)
# Dump as split JSON file and reload
filename = 'temp2.json'
dump.as_json(filename, split=True)
elist2 = ExperimentListFactory.from_json_file(filename)
check(elist1, elist2)
# Dump as pickle and reload
filename = 'temp.pickle'
dump.as_pickle(filename)
elist2 = ExperimentListFactory.from_pickle_file(filename)
check(elist1, elist2)
def test_experimentlist_dumper_dump_scan_varying(dials_regression, tmpdir):
tmpdir.chdir()
os.environ['DIALS_REGRESSION'] = dials_regression
# Get all the filenames
filename1 = os.path.join(dials_regression, 'experiment_test_data', 'experiment_1.json')
# Read the experiment list in
elist1 = ExperimentListFactory.from_json_file(filename1)
# Make trivial scan-varying models
crystal = elist1[0].crystal
beam = elist1[0].beam
goniometer = elist1[0].goniometer
crystal.set_A_at_scan_points([crystal.get_A()] * 5)
from scitbx.array_family import flex
cov_B = flex.double([1e-5]*9*9)
crystal.set_B_covariance(cov_B)
cov_B.reshape(flex.grid(1, 9, 9))
cov_B_array = flex.double(flex.grid(5, 9, 9))
for i in range(5):
cov_B_array[i:(i+1), :, :] = cov_B
crystal.set_B_covariance_at_scan_points(cov_B_array)
beam.set_s0_at_scan_points([beam.get_s0()] * 5)
goniometer.set_setting_rotation_at_scan_points([goniometer.get_setting_rotation()] * 5)
# Create the experiment list dumper
dump = ExperimentListDumper(elist1)
# Dump as JSON file and reload
filename = 'temp.json'
dump.as_json(filename)
elist2 = ExperimentListFactory.from_json_file(filename)
check(elist1, elist2)
def test_experimentlist_dumper_dump_empty_sweep(tmpdir):
tmpdir.chdir()
from dxtbx.model import Beam, Detector, Goniometer, Scan
from dxtbx.model import Crystal
from dxtbx.format.Format import Format
filenames = ["filename_%01d.cbf" % (i+1) for i in range(0, 2)]
imageset = Format.get_imageset(
filenames,
beam = Beam((1, 0, 0)),
detector = Detector(),
goniometer = Goniometer(),
scan = Scan((1,2), (0.0, 1.0)),
as_sweep=True)
crystal = Crystal((1, 0, 0), (0, 1, 0), (0, 0, 1), space_group_symbol="P1")
experiments = ExperimentListFactory.from_imageset_and_crystal(
imageset, crystal)
dump = ExperimentListDumper(experiments)
filename = 'temp.json'
dump.as_json(filename)
experiments2 = ExperimentListFactory.from_json_file(filename,
check_format=False)
check(experiments, experiments2)
def test_experimentlist_dumper_dump_with_lookup(dials_regression, tmpdir):
tmpdir.chdir()
from dxtbx.model import Beam, Detector, Goniometer, Scan
from dxtbx.model import Crystal
filename = os.path.join(dials_regression, "centroid_test_data",
"experiments_with_lookup.json")
experiments = ExperimentListFactory.from_json_file(
filename, check_format=True)
imageset = experiments[0].imageset
assert not imageset.external_lookup.mask.data.empty()
assert not imageset.external_lookup.gain.data.empty()
assert not imageset.external_lookup.pedestal.data.empty()
assert imageset.external_lookup.mask.filename is not None
assert imageset.external_lookup.gain.filename is not None
assert imageset.external_lookup.pedestal.filename is not None
assert imageset.external_lookup.mask.data.tile(0).data().all_eq(True)
assert imageset.external_lookup.gain.data.tile(0).data().all_eq(1)
assert imageset.external_lookup.pedestal.data.tile(0).data().all_eq(0)
dump = ExperimentListDumper(experiments)
filename = 'temp.json'
dump.as_json(filename)
experiments = ExperimentListFactory.from_json_file(
filename,
check_format=True)
imageset = experiments[0].imageset
assert not imageset.external_lookup.mask.data.empty()
assert not imageset.external_lookup.gain.data.empty()
assert not imageset.external_lookup.pedestal.data.empty()
assert imageset.external_lookup.mask.filename is not None
assert imageset.external_lookup.gain.filename is not None
assert imageset.external_lookup.pedestal.filename is not None
assert imageset.external_lookup.mask.data.tile(0).data().all_eq(True)
assert imageset.external_lookup.gain.data.tile(0).data().all_eq(1)
assert imageset.external_lookup.pedestal.data.tile(0).data().all_eq(0)
def test_experimentlist_dumper_dump_with_bad_lookup(dials_regression, tmpdir):
tmpdir.chdir()
from dxtbx.model import Beam, Detector, Goniometer, Scan
from dxtbx.model import Crystal
filename = os.path.join(dials_regression, "centroid_test_data",
"experiments_with_bad_lookup.json")
experiments = ExperimentListFactory.from_json_file(
filename, check_format=False)
imageset = experiments[0].imageset
assert imageset.external_lookup.mask.data.empty()
assert imageset.external_lookup.gain.data.empty()
assert imageset.external_lookup.pedestal.data.empty()
assert imageset.external_lookup.mask.filename is not None
assert imageset.external_lookup.gain.filename is not None
assert imageset.external_lookup.pedestal.filename is not None
dump = ExperimentListDumper(experiments)
filename = 'temp.json'
dump.as_json(filename)
experiments = ExperimentListFactory.from_json_file(
filename, check_format=False)
imageset = experiments[0].imageset
assert imageset.external_lookup.mask.data.empty()
assert imageset.external_lookup.gain.data.empty()
assert imageset.external_lookup.pedestal.data.empty()
assert imageset.external_lookup.mask.filename is not None
assert imageset.external_lookup.gain.filename is not None
assert imageset.external_lookup.pedestal.filename is not None
def test_experimentlist_with_identifiers():
from dxtbx.model import Beam, Detector, Goniometer, Scan
# Initialise a list of experiments
experiments = ExperimentList()
experiments.append(Experiment(
beam=Beam(s0=(0,0,-1)),
detector=Detector(),
identifier="bacon"))
experiments.append(Experiment(
beam=Beam(s0=(0,0,-1)),
detector=Detector(),
identifier="sausage"))
with pytest.raises(Exception):
experiments.append(Experiment(
beam=Beam(),
detector=Detector(),
identifier="bacon"))
d = experiments.to_dict()
e2 = ExperimentListDict(d).decode()
assert experiments[0].identifier == e2[0].identifier
assert experiments[1].identifier == e2[1].identifier
assert tuple(experiments.identifiers()) == ("bacon", "sausage")
experiments[0].identifier = "spam"
assert tuple(experiments.identifiers()) == ("spam", "sausage")
experiments.append(Experiment(identifier="bacon"))
experiments.select_on_experiment_identifiers(["spam", "bacon"])
assert list(experiments.identifiers()) == ["spam", "bacon"]
experiments.append(Experiment(identifier="ham"))
experiments.append(Experiment(identifier="jam"))
experiments.remove_on_experiment_identifiers(["spam", "jam"])
assert list(experiments.identifiers()) == ["bacon", "ham"]
def check(el1, el2):
# All the experiment lists should be the same length
assert len(el1) == 1
assert len(el1) == len(el2)
# Check all the models are the same
for e1, e2 in zip(el1, el2):
assert e1.imageset and e1.imageset == e2.imageset
assert e1.beam and e1.beam == e2.beam
assert e1.detector is not None and e1.detector == e2.detector
assert e1.goniometer and e1.goniometer == e2.goniometer
assert e1.scan and e1.scan == e2.scan
assert e1.crystal and e1.crystal == e2.crystal
assert e1.identifier == e2.identifier
| 32.715659 | 89 | 0.719528 | 0 | 0 | 0 | 0 | 924 | 0.038796 | 0 | 0 | 4,255 | 0.178654 |
27e6185cd1321c58ae5c06b94cfd558705c422cd | 365 | py | Python | Divergence analysis/splitreference.py | MarniTausen/CloverAnalysisPipeline | ae169b46c7be40cdf0d97101480be12df87fc58e | [
"Unlicense"
]
| 4 | 2018-03-26T08:54:50.000Z | 2021-07-28T13:34:07.000Z | Divergence analysis/splitreference.py | MarniTausen/CloverAnalysisPipeline | ae169b46c7be40cdf0d97101480be12df87fc58e | [
"Unlicense"
]
| null | null | null | Divergence analysis/splitreference.py | MarniTausen/CloverAnalysisPipeline | ae169b46c7be40cdf0d97101480be12df87fc58e | [
"Unlicense"
]
| 4 | 2017-10-26T12:59:39.000Z | 2021-07-12T08:40:56.000Z | from sys import argv
def make_new_reference_files(filename, sub1, sub2, divider=">chr9"):
genomes = open(filename).read().split(divider)
f = open(sub1, "w")
f.write(genomes[0])
f.close()
f = open(sub2, "w")
f.write(">chr9"+genomes[1])
f.close()
if __name__=="__main__":
make_new_reference_files(argv[1], argv[2], argv[3], argv[4])
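# Example invocation (file names below are placeholders): the script expects
# four arguments - input FASTA, first output, second output and the divider
# line to split on:
#   python splitreference.py reference.fasta part1.fasta part2.fasta ">chr9"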
| 26.071429 | 68 | 0.635616 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.082192 |
27e766575366ccd5d46f9e7a446bbcc0f07d388e | 1,323 | py | Python | __pg/appids.py | briandorsey/cloud-playground | 6e8ee5fcc6bb4e96bd10dcdf9eda451870c3cc1e | [
"Apache-2.0"
]
| 5 | 2017-03-02T15:57:44.000Z | 2020-02-14T05:17:28.000Z | __pg/appids.py | briandorsey/cloud-playground | 6e8ee5fcc6bb4e96bd10dcdf9eda451870c3cc1e | [
"Apache-2.0"
]
| null | null | null | __pg/appids.py | briandorsey/cloud-playground | 6e8ee5fcc6bb4e96bd10dcdf9eda451870c3cc1e | [
"Apache-2.0"
]
| 3 | 2017-05-20T11:23:07.000Z | 2022-01-13T12:00:57.000Z | """Module which defines collaborating app ids.
This module is used by:
settings.py
scripts/deploy.sh
"""
import os
# List of (playground appid, mimic app id, playground app id alias)
_APP_ID_TUPLES = [
# production environment
('try-appengine', 'shared-playground', 'cloud-playground'),
# development environment
('fredsa-bliss', 'fredsa-hr', None),
('dansanderson-bliss', 'dansanderson-mimic', None),
]
def _GetTupleFor(app_id):
for app_ids in _APP_ID_TUPLES:
if app_id in app_ids:
return app_ids
return (app_id, app_id, None)
# Our app id
_APP_ID = os.environ['APPLICATION_ID'].split('~')[-1]
# support regular 'appspot.com' app ids only
assert ':' not in _APP_ID, ('{} app ids are unsupported'
.format(_APP_ID.split(':')[0]))
app_ids = _GetTupleFor(_APP_ID)
# The application where the playground IDE runs
PLAYGROUND_APP_ID = app_ids[0]
# The application where user code runs
MIMIC_APP_ID = app_ids[1]
# The application alias where the playground IDE runs
PLAYGROUND_APP_ID_ALIAS = app_ids[2]
# Whether we're using two collaborating app ids
TWO_COLLABORATING_APP_IDS = PLAYGROUND_APP_ID != MIMIC_APP_ID
def PrintAppIds():
"""Prints a new line delimited list of known app ids."""
print '\n'.join(set((PLAYGROUND_APP_ID, MIMIC_APP_ID)))
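# Resolution sketch (values taken from _APP_ID_TUPLES above): for a known app
# id the lookup pins its partner and alias, e.g.
#   _GetTupleFor('try-appengine') -> ('try-appengine', 'shared-playground', 'cloud-playground')
# while any unknown app id falls back to (app_id, app_id, None), i.e. the
# playground and mimic roles are served by one and the same application.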
| 25.442308 | 67 | 0.708995 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 696 | 0.526077 |
27e7f431903fe9377416892525c526c246e0ed24 | 21,183 | py | Python | _states/novav21.py | NDPF/salt-formula-nova | 265d9e6c2cbd41d564ee389b210441d9f7378433 | [
"Apache-2.0"
]
| 4 | 2017-04-27T14:27:04.000Z | 2017-11-04T18:23:09.000Z | _states/novav21.py | NDPF/salt-formula-nova | 265d9e6c2cbd41d564ee389b210441d9f7378433 | [
"Apache-2.0"
]
| 22 | 2017-02-01T09:04:52.000Z | 2019-05-10T09:04:01.000Z | _states/novav21.py | NDPF/salt-formula-nova | 265d9e6c2cbd41d564ee389b210441d9f7378433 | [
"Apache-2.0"
]
| 35 | 2017-02-05T23:11:16.000Z | 2019-04-04T17:21:36.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from six.moves import zip_longest
import time
import salt
from salt.exceptions import CommandExecutionError
LOG = logging.getLogger(__name__)
KEYSTONE_LOADED = False
def __virtual__():
"""Only load if the nova module is in __salt__"""
if 'keystonev3.project_get_details' in __salt__:
global KEYSTONE_LOADED
KEYSTONE_LOADED = True
return 'novav21'
class SaltModuleCallException(Exception):
def __init__(self, result_dict, *args, **kwargs):
super(SaltModuleCallException, self).__init__(*args, **kwargs)
self.result_dict = result_dict
def _get_failure_function_mapping():
return {
'create': _create_failed,
'update': _update_failed,
'find': _find_failed,
'delete': _delete_failed,
}
def _call_nova_salt_module(call_string, name, module_name='novav21'):
def inner(*args, **kwargs):
func = __salt__['%s.%s' % (module_name, call_string)]
result = func(*args, **kwargs)
if not result['result']:
ret = _get_failure_function_mapping()[func._action_type](
name, func._resource_human_readable_name)
ret['comment'] += '\nStatus code: %s\n%s' % (result['status_code'],
result['comment'])
raise SaltModuleCallException(ret)
return result['body'].get(func._body_response_key)
return inner
def _error_handler(fun):
@six.wraps(fun)
def inner(*args, **kwargs):
try:
return fun(*args, **kwargs)
except SaltModuleCallException as e:
return e.result_dict
return inner
@_error_handler
def flavor_present(name, cloud_name, vcpus=1, ram=256, disk=0, flavor_id=None,
extra_specs=None):
"""Ensures that the flavor exists"""
extra_specs = extra_specs or {}
# There is no way to query flavors by name
flavors = _call_nova_salt_module('flavor_list', name)(
detail=True, cloud_name=cloud_name)
flavor = [flavor for flavor in flavors if flavor['name'] == name]
# Flavor names are unique, there is either 1 or 0 with requested name
if flavor:
flavor = flavor[0]
current_extra_specs = _call_nova_salt_module(
'flavor_get_extra_specs', name)(
flavor['id'], cloud_name=cloud_name)
to_delete = set(current_extra_specs) - set(extra_specs)
to_add = set(extra_specs) - set(current_extra_specs)
for spec in to_delete:
_call_nova_salt_module('flavor_delete_extra_spec', name)(
flavor['id'], spec, cloud_name=cloud_name)
_call_nova_salt_module('flavor_add_extra_specs', name)(
flavor['id'], cloud_name=cloud_name, **extra_specs)
if to_delete or to_add:
ret = _updated(name, 'Flavor', extra_specs)
else:
ret = _no_change(name, 'Flavor')
else:
flavor = _call_nova_salt_module('flavor_create', name)(
name, vcpus, ram, disk, id=flavor_id, cloud_name=cloud_name)
_call_nova_salt_module('flavor_add_extra_specs', name)(
flavor['id'], cloud_name=cloud_name, **extra_specs)
flavor['extra_specs'] = extra_specs
ret = _created(name, 'Flavor', flavor)
return ret
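# Illustrative SLS usage for flavor_present (the flavor name, cloud_name and
# extra_specs below are placeholders, not values defined by this module):
#
#   m1.tiny:
#     novav21.flavor_present:
#       - cloud_name: admin_identity
#       - vcpus: 1
#       - ram: 512
#       - disk: 1
#       - extra_specs:
#           hw:cpu_policy: dedicated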
@_error_handler
def flavor_absent(name, cloud_name):
"""Ensure flavor is absent"""
# There is no way to query flavors by name
flavors = _call_nova_salt_module('flavor_list', name)(
detail=True, cloud_name=cloud_name)
flavor = [flavor for flavor in flavors if flavor['name'] == name]
# Flavor names are unique, there is either 1 or 0 with requested name
if flavor:
_call_nova_salt_module('flavor_delete', name)(
flavor[0]['id'], cloud_name=cloud_name)
return _deleted(name, 'Flavor')
return _non_existent(name, 'Flavor')
def _get_keystone_project_id_by_name(project_name, cloud_name):
if not KEYSTONE_LOADED:
LOG.error("Keystone module not found, can not look up project ID "
"by name")
return None
project = __salt__['keystonev3.project_get_details'](
project_name, cloud_name=cloud_name)
if not project:
return None
return project['project']['id']
@_error_handler
def quota_present(name, cloud_name, **kwargs):
"""Ensures that the nova quota exists
:param name: project name to ensure quota for.
"""
project_name = name
project_id = _get_keystone_project_id_by_name(project_name, cloud_name)
changes = {}
if not project_id:
ret = _update_failed(project_name, 'Project quota')
ret['comment'] += ('\nCould not retrieve keystone project %s' %
project_name)
return ret
quota = _call_nova_salt_module('quota_list', project_name)(
project_id, cloud_name=cloud_name)
for key, value in kwargs.items():
if quota.get(key) != value:
changes[key] = value
if changes:
_call_nova_salt_module('quota_update', project_name)(
project_id, cloud_name=cloud_name, **changes)
return _updated(project_name, 'Project quota', changes)
else:
return _no_change(project_name, 'Project quota')
@_error_handler
def quota_absent(name, cloud_name):
"""Ensures that the nova quota set to default
:param name: project name to reset quota for.
"""
project_name = name
project_id = _get_keystone_project_id_by_name(project_name, cloud_name)
if not project_id:
ret = _delete_failed(project_name, 'Project quota')
ret['comment'] += ('\nCould not retrieve keystone project %s' %
project_name)
return ret
_call_nova_salt_module('quota_delete', name)(
project_id, cloud_name=cloud_name)
return _deleted(name, 'Project quota')
@_error_handler
def aggregate_present(name, cloud_name, availability_zone_name=None,
hosts=None, metadata=None):
"""Ensures that the nova aggregate exists"""
aggregates = _call_nova_salt_module('aggregate_list', name)(
cloud_name=cloud_name)
aggregate_exists = [agg for agg in aggregates
if agg['name'] == name]
metadata = metadata or {}
hosts = hosts or []
if availability_zone_name:
metadata.update(availability_zone=availability_zone_name)
if not aggregate_exists:
aggregate = _call_nova_salt_module('aggregate_create', name)(
name, availability_zone_name, cloud_name=cloud_name)
if metadata:
            _call_nova_salt_module('aggregate_set_metadata', name)(
                name, cloud_name=cloud_name, **metadata)
aggregate['metadata'] = metadata
for host in hosts or []:
_call_nova_salt_module('aggregate_add_host', name)(
name, host, cloud_name=cloud_name)
aggregate['hosts'] = hosts
return _created(name, 'Host aggregate', aggregate)
else:
aggregate = aggregate_exists[0]
changes = {}
existing_meta = set(aggregate['metadata'].items())
requested_meta = set(metadata.items())
if existing_meta - requested_meta or requested_meta - existing_meta:
_call_nova_salt_module('aggregate_set_metadata', name)(
name, cloud_name=cloud_name, **metadata)
changes['metadata'] = metadata
hosts_to_add = set(hosts) - set(aggregate['hosts'])
hosts_to_remove = set(aggregate['hosts']) - set(hosts)
if hosts_to_remove or hosts_to_add:
for host in hosts_to_add:
_call_nova_salt_module('aggregate_add_host', name)(
name, host, cloud_name=cloud_name)
for host in hosts_to_remove:
_call_nova_salt_module('aggregate_remove_host', name)(
name, host, cloud_name=cloud_name)
changes['hosts'] = hosts
if changes:
return _updated(name, 'Host aggregate', changes)
else:
return _no_change(name, 'Host aggregate')
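# Illustrative SLS usage for aggregate_present (aggregate name, hosts and
# metadata below are placeholders):
#
#   az1-aggregate:
#     novav21.aggregate_present:
#       - cloud_name: admin_identity
#       - availability_zone_name: az1
#       - hosts:
#         - cmp001
#         - cmp002
#       - metadata:
#           pinned: 'true'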
@_error_handler
def aggregate_absent(name, cloud_name):
"""Ensure aggregate is absent"""
existing_aggregates = _call_nova_salt_module('aggregate_list', name)(
cloud_name=cloud_name)
matching_aggs = [agg for agg in existing_aggregates
if agg['name'] == name]
if matching_aggs:
_call_nova_salt_module('aggregate_delete', name)(
name, cloud_name=cloud_name)
return _deleted(name, 'Host Aggregate')
return _non_existent(name, 'Host Aggregate')
@_error_handler
def keypair_present(name, cloud_name, public_key_file=None, public_key=None):
"""Ensures that the Nova key-pair exists"""
existing_keypairs = _call_nova_salt_module('keypair_list', name)(
cloud_name=cloud_name)
matching_kps = [kp for kp in existing_keypairs
if kp['keypair']['name'] == name]
if public_key_file and not public_key:
with salt.utils.fopen(public_key_file, 'r') as f:
public_key = f.read()
if not public_key:
ret = _create_failed(name, 'Keypair')
ret['comment'] += '\nPlease specify public key for keypair creation.'
return ret
if matching_kps:
# Keypair names are unique, there is either 1 or 0 with requested name
kp = matching_kps[0]['keypair']
if kp['public_key'] != public_key:
_call_nova_salt_module('keypair_delete', name)(
name, cloud_name=cloud_name)
else:
return _no_change(name, 'Keypair')
res = _call_nova_salt_module('keypair_create', name)(
name, cloud_name=cloud_name, public_key=public_key)
return _created(name, 'Keypair', res)
@_error_handler
def keypair_absent(name, cloud_name):
"""Ensure keypair is absent"""
existing_keypairs = _call_nova_salt_module('keypair_list', name)(
cloud_name=cloud_name)
matching_kps = [kp for kp in existing_keypairs
if kp['keypair']['name'] == name]
if matching_kps:
_call_nova_salt_module('keypair_delete', name)(
name, cloud_name=cloud_name)
return _deleted(name, 'Keypair')
return _non_existent(name, 'Keypair')
def cell_present(name='cell1', transport_url='none:///', db_engine='mysql',
db_name='nova_upgrade', db_user='nova', db_password=None,
db_address='0.0.0.0'):
"""Ensure nova cell is present
For newly created cells this state also runs discover_hosts and
map_instances."""
cell_info = __salt__['cmd.shell'](
"nova-manage cell_v2 list_cells --verbose | "
"awk '/%s/ {print $4,$6,$8}'" % name).split()
db_connection = (
'%(db_engine)s+pymysql://%(db_user)s:%(db_password)s@'
'%(db_address)s/%(db_name)s?charset=utf8' % {
'db_engine': db_engine, 'db_user': db_user,
'db_password': db_password, 'db_address': db_address,
'db_name': db_name})
args = {'transport_url': transport_url, 'db_connection': db_connection}
# There should be at least 1 component printed to cell_info
if len(cell_info) >= 1:
cell_info = dict(zip_longest(
('cell_uuid', 'existing_transport_url', 'existing_db_connection'),
cell_info))
cell_uuid, existing_transport_url, existing_db_connection = cell_info
command_string = ''
if existing_transport_url != transport_url:
command_string = (
'%s --transport-url %%(transport_url)s' % command_string)
if existing_db_connection != db_connection:
command_string = (
'%s --database_connection %%(db_connection)s' % command_string)
if not command_string:
return _no_change(name, 'Nova cell')
try:
__salt__['cmd.shell'](
('nova-manage cell_v2 update_cell --cell_uuid %s %s' % (
cell_uuid, command_string)) % args)
LOG.warning("Updating the transport_url or database_connection "
"fields on a running system will NOT result in all "
"nodes immediately using the new values. Use caution "
"when changing these values.")
ret = _updated(name, 'Nova cell', args)
except Exception as e:
ret = _update_failed(name, 'Nova cell')
ret['comment'] += '\nException: %s' % e
return ret
args.update(name=name)
try:
cell_uuid = __salt__['cmd.shell'](
'nova-manage cell_v2 create_cell --name %(name)s '
'--transport-url %(transport_url)s '
'--database_connection %(db_connection)s --verbose' % args)
__salt__['cmd.shell']('nova-manage cell_v2 discover_hosts '
'--cell_uuid %s --verbose' % cell_uuid)
__salt__['cmd.shell']('nova-manage cell_v2 map_instances '
'--cell_uuid %s' % cell_uuid)
ret = _created(name, 'Nova cell', args)
except Exception as e:
ret = _create_failed(name, 'Nova cell')
ret['comment'] += '\nException: %s' % e
return ret
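# Illustrative SLS usage for cell_present (connection values are placeholders;
# the state assembles the database_connection string from the db_* arguments):
#
#   cell1:
#     novav21.cell_present:
#       - transport_url: rabbit://openstack:password@10.0.0.10//
#       - db_name: nova_cell1
#       - db_user: nova
#       - db_password: password
#       - db_address: 10.0.0.20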
def cell_absent(name, force=False):
"""Ensure cell is absent"""
cell_uuid = __salt__['cmd.shell'](
"nova-manage cell_v2 list_cells | awk '/%s/ {print $4}'" % name)
if not cell_uuid:
return _non_existent(name, 'Nova cell')
try:
__salt__['cmd.shell'](
'nova-manage cell_v2 delete_cell --cell_uuid %s %s' % (
cell_uuid, '--force' if force else ''))
ret = _deleted(name, 'Nova cell')
except Exception as e:
ret = _delete_failed(name, 'Nova cell')
ret['comment'] += '\nException: %s' % e
return ret
def _db_version_update(db, version, human_readable_resource_name):
existing_version = __salt__['cmd.shell'](
'nova-manage %s version 2>/dev/null' % db)
try:
existing_version = int(existing_version)
version = int(version)
except Exception as e:
ret = _update_failed(existing_version,
human_readable_resource_name)
ret['comment'] += ('\nCan not convert existing or requested version '
'to integer, exception: %s' % e)
LOG.error(ret['comment'])
return ret
if existing_version < version:
try:
__salt__['cmd.shell'](
'nova-manage %s sync --version %s' % (db, version))
ret = _updated(existing_version, human_readable_resource_name,
{db: '%s sync --version %s' % (db, version)})
except Exception as e:
ret = _update_failed(existing_version,
human_readable_resource_name)
ret['comment'] += '\nException: %s' % e
return ret
return _no_change(existing_version, human_readable_resource_name)
def api_db_version_present(name=None, version="20"):
"""Ensures that specific api_db version is present"""
return _db_version_update('api_db', version, 'Nova API database version')
def db_version_present(name=None, version="334"):
"""Ensures that specific db version is present"""
return _db_version_update('db', version, 'Nova database version')
def online_data_migrations_present(name=None, api_db_version="20",
db_version="334"):
"""Runs online_data_migrations if databases are of specific versions"""
ret = {'name': 'online_data_migrations', 'changes': {}, 'result': False,
'comment': 'Current nova api_db version != {0} or nova db version '
'!= {1}.'.format(api_db_version, db_version)}
cur_api_db_version = __salt__['cmd.shell'](
'nova-manage api_db version 2>/dev/null')
cur_db_version = __salt__['cmd.shell'](
'nova-manage db version 2>/dev/null')
try:
cur_api_db_version = int(cur_api_db_version)
cur_db_version = int(cur_db_version)
api_db_version = int(api_db_version)
db_version = int(db_version)
except Exception as e:
LOG.error(ret['comment'])
ret['comment'] = ('\nCan not convert existing or requested database '
'versions to integer, exception: %s' % e)
return ret
if cur_api_db_version == api_db_version and cur_db_version == db_version:
try:
__salt__['cmd.shell']('nova-manage db online_data_migrations')
ret['result'] = True
ret['comment'] = ('nova-manage db online_data_migrations was '
'executed successfuly')
ret['changes']['online_data_migrations'] = (
'online_data_migrations run on nova api_db version {0} and '
'nova db version {1}'.format(api_db_version, db_version))
except Exception as e:
ret['comment'] = (
'Failed to execute online_data_migrations on nova api_db '
'version %s and nova db version %s, exception: %s' % (
api_db_version, db_version, e))
return ret
@_error_handler
def service_enabled(name, cloud_name, binary="nova-compute"):
"""Ensures that the service is enabled on the host
:param name: name of a host where service is running
:param service: name of the service have to be run
"""
changes = {}
services = _call_nova_salt_module('services_list', name)(
name, service=binary, cloud_name=cloud_name)
enabled_service = [s for s in services if s['binary'] == binary
and s['status'] == 'enabled' and s['host'] == name]
if len(enabled_service) > 0:
ret = _no_change(name, 'Compute services')
else:
changes = _call_nova_salt_module('services_update', name)(
name, binary, 'enable', cloud_name=cloud_name)
ret = _updated(name, 'Compute services', changes)
return ret
@_error_handler
def service_disabled(name, cloud_name, binary="nova-compute", disabled_reason=None):
"""Ensures that the service is disabled on the host
:param name: name of a host where service is running
:param service: name of the service have to be disabled
"""
changes = {}
kwargs = {}
if disabled_reason is not None:
kwargs['disabled_reason'] = disabled_reason
services = _call_nova_salt_module('services_list', name)(
name, service=binary, cloud_name=cloud_name)
disabled_service = [s for s in services if s['binary'] == binary
and s['status'] == 'disabled' and s['host'] == name]
if len(disabled_service) > 0:
ret = _no_change(name, 'Compute services')
else:
changes = _call_nova_salt_module('services_update', name)(
name, binary, 'disable', cloud_name=cloud_name, **kwargs)
ret = _updated(name, 'Compute services', changes)
return ret
def _find_failed(name, resource):
return {
'name': name, 'changes': {}, 'result': False,
'comment': 'Failed to find {0}s with name {1}'.format(resource, name)}
def _created(name, resource, changes):
return {
'name': name, 'changes': changes, 'result': True,
'comment': '{0} {1} created'.format(resource, name)}
def _create_failed(name, resource):
return {
'name': name, 'changes': {}, 'result': False,
'comment': '{0} {1} creation failed'.format(resource, name)}
def _no_change(name, resource):
return {
'name': name, 'changes': {}, 'result': True,
'comment': '{0} {1} already is in the desired state'.format(
resource, name)}
def _updated(name, resource, changes):
return {
'name': name, 'changes': changes, 'result': True,
'comment': '{0} {1} was updated'.format(resource, name)}
def _update_failed(name, resource):
return {
'name': name, 'changes': {}, 'result': False,
'comment': '{0} {1} update failed'.format(resource, name)}
def _deleted(name, resource):
return {
'name': name, 'changes': {}, 'result': True,
'comment': '{0} {1} deleted'.format(resource, name)}
def _delete_failed(name, resource):
return {
'name': name, 'changes': {}, 'result': False,
'comment': '{0} {1} deletion failed'.format(resource, name)}
def _non_existent(name, resource):
return {
'name': name, 'changes': {}, 'result': True,
'comment': '{0} {1} does not exist'.format(resource, name)}
| 39.155268 | 84 | 0.626729 | 206 | 0.009725 | 0 | 0 | 10,182 | 0.480668 | 0 | 0 | 6,418 | 0.302979 |
27ea8174cb81713dd5c70d96704d5a2c63cec32e | 325 | py | Python | server/dev.py | Khanable/Photography-Portfolio-Website | 5019e8316e078dcb672888dd847fdd6b732443a9 | [
"MIT"
]
| null | null | null | server/dev.py | Khanable/Photography-Portfolio-Website | 5019e8316e078dcb672888dd847fdd6b732443a9 | [
"MIT"
]
| null | null | null | server/dev.py | Khanable/Photography-Portfolio-Website | 5019e8316e078dcb672888dd847fdd6b732443a9 | [
"MIT"
]
| null | null | null | from sys import modules
from importlib import import_module
modules['server'] = import_module('src')
from werkzeug.serving import run_simple
from server.app import App
from server.mode import Mode
if __name__=='__main__':
app = App(mode=Mode.Development)
run_simple('localhost', 8000, app, use_reloader=True)
| 27.083333 | 55 | 0.76 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.104615 |
27ed7774eba9356593529c7a047bb6eafaebca6b | 6,891 | py | Python | src/pyff/fetch.py | rhoerbe/pyFF | 85933ed9cc9f720c9432d5e4c3114895cefd3579 | [
"BSD-2-Clause-FreeBSD"
]
| null | null | null | src/pyff/fetch.py | rhoerbe/pyFF | 85933ed9cc9f720c9432d5e4c3114895cefd3579 | [
"BSD-2-Clause-FreeBSD"
]
| null | null | null | src/pyff/fetch.py | rhoerbe/pyFF | 85933ed9cc9f720c9432d5e4c3114895cefd3579 | [
"BSD-2-Clause-FreeBSD"
]
| null | null | null | """
An abstraction layer for metadata fetchers. Supports both syncronous and asyncronous fetchers with cache.
"""
from .logs import get_log
import os
import requests
from .constants import config
from datetime import datetime
from collections import deque
import six
from concurrent import futures
import traceback
from .parse import parse_resource
from itertools import chain
from .exceptions import ResourceException
from .utils import url_get
from copy import deepcopy, copy
if six.PY2:
from UserDict import DictMixin as ResourceManagerBase
elif six.PY3:
from collections import MutableMapping as ResourceManagerBase
requests.packages.urllib3.disable_warnings()
log = get_log(__name__)
class ResourceManager(ResourceManagerBase):
def __init__(self):
self._resources = dict()
self.shutdown = False
def __setitem__(self, key, value):
if not isinstance(value, Resource):
raise ValueError("I can only store Resources")
self._resources[key] = value
def __getitem__(self, key):
return self._resources[key]
def __delitem__(self, key):
if key in self:
del self._resources[key]
def keys(self):
return list(self._resources.keys())
def values(self):
return list(self._resources.values())
def walk(self, url=None):
if url is not None:
return self[url].walk()
else:
i = [r.walk() for r in list(self.values())]
return chain(*i)
def add(self, r):
if not isinstance(r, Resource):
raise ValueError("I can only store Resources")
self[r.name] = r
def __contains__(self, item):
return item in self._resources
def __len__(self):
return len(list(self.values()))
def __iter__(self):
return self.walk()
def reload(self, url=None, fail_on_error=False, store=None):
# type: (object, basestring) -> None
if url is not None:
resources = deque([self[url]])
else:
resources = deque(list(self.values()))
with futures.ThreadPoolExecutor(max_workers=config.worker_pool_size) as executor:
while resources:
tasks = dict((executor.submit(r.fetch, store=store), r) for r in resources)
new_resources = deque()
for future in futures.as_completed(tasks):
r = tasks[future]
try:
res = future.result()
if res is not None:
for nr in res:
new_resources.append(nr)
except Exception as ex:
log.debug(traceback.format_exc())
log.error(ex)
if fail_on_error:
raise ex
resources = new_resources
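# Minimal usage sketch for ResourceManager (the URL and options below are
# placeholders):
#
#   rm = ResourceManager()
#   rm.add(Resource("https://example.org/metadata.xml"))
#   rm.reload(store=None)
#   for r in rm.walk():
#       print(r.name, r.is_valid())
#
# reload() walks the resource tree breadth-first: each pass fetches the
# current layer in a thread pool and queues whatever children fetch() returns
# for the next pass.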
class Resource(object):
def __init__(self, url, **kwargs):
self.url = url
self.opts = kwargs
self.t = None
self.type = "text/plain"
self.expire_time = None
self.last_seen = None
self._infos = deque(maxlen=config.info_buffer_size)
self.children = deque()
def _null(t):
return t
self.opts.setdefault('cleanup', [])
self.opts.setdefault('via', [])
self.opts.setdefault('fail_on_error', False)
self.opts.setdefault('as', None)
self.opts.setdefault('verify', None)
self.opts.setdefault('filter_invalid', True)
self.opts.setdefault('validate', True)
if "://" not in self.url:
if os.path.isfile(self.url):
self.url = "file://{}".format(os.path.abspath(self.url))
@property
def post(self):
return self.opts['via']
def add_via(self, callback):
self.opts['via'].append(callback)
@property
def cleanup(self):
return self.opts['cleanup']
def __str__(self):
return "Resource {} expires at {} using ".format(self.url, self.expire_time) + \
",".join(["{}={}".format(k, v) for k, v in list(self.opts.items())])
def walk(self):
yield self
for c in self.children:
for cn in c.walk():
yield cn
def is_expired(self):
now = datetime.now()
return self.expire_time is not None and self.expire_time < now
def is_valid(self):
return self.t is not None and not self.is_expired()
def add_info(self, info):
self._infos.append(info)
def add_child(self, url, **kwargs):
opts = deepcopy(self.opts)
del opts['as']
opts.update(kwargs)
r = Resource(url, **opts)
self.children.append(r)
return r
@property
def name(self):
if 'as' in self.opts:
return self.opts['as']
else:
return self.url
@property
def info(self):
if self._infos is None or not self._infos:
return dict()
else:
return self._infos[-1]
def fetch(self, store=None):
info = dict()
info['Resource'] = self.url
self.add_info(info)
data = None
if os.path.isdir(self.url):
data = self.url
info['Directory'] = self.url
elif '://' in self.url:
r = url_get(self.url)
info['HTTP Response Headers'] = r.headers
log.debug("got status_code={:d}, encoding={} from_cache={} from {}".
format(r.status_code, r.encoding, getattr(r, "from_cache", False), self.url))
info['Status Code'] = str(r.status_code)
info['Reason'] = r.reason
if r.ok:
data = r.text
else:
raise ResourceException("Got status={:d} while fetching {}".format(r.status_code, self.url))
else:
raise ResourceException("Unknown resource type {}".format(self.url))
parse_info = parse_resource(self, data)
if parse_info is not None and isinstance(parse_info, dict):
info.update(parse_info)
if self.t is not None:
self.last_seen = datetime.now()
if self.post and isinstance(self.post, list):
for cb in self.post:
if self.t is not None:
self.t = cb(self.t, **self.opts)
if self.is_expired():
info['Expired'] = True
raise ResourceException("Resource at {} expired on {}".format(self.url, self.expire_time))
else:
info['Expired'] = False
for (eid, error) in list(info['Validation Errors'].items()):
log.error(error)
if store is not None:
store.update(self.t, tid=self.name)
return self.children
| 29.702586 | 108 | 0.565085 | 6,181 | 0.896967 | 123 | 0.017849 | 420 | 0.060949 | 0 | 0 | 644 | 0.093455 |
27effbc79d2bf6543199f4b75da0205988092da4 | 498 | py | Python | pychonet/HomeSolarPower.py | mochipon/pychonet | 65ba4189f9a66b6e698646854542cdd506369813 | [
"MIT"
]
| null | null | null | pychonet/HomeSolarPower.py | mochipon/pychonet | 65ba4189f9a66b6e698646854542cdd506369813 | [
"MIT"
]
| null | null | null | pychonet/HomeSolarPower.py | mochipon/pychonet | 65ba4189f9a66b6e698646854542cdd506369813 | [
"MIT"
]
| null | null | null | from pychonet.EchonetInstance import EchonetInstance
class HomeSolarPower(EchonetInstance):
def __init__(self, netif, instance = 0x1):
        self.eojgc = 0x02  # ECHONET class group: housing/facility-related devices
        self.eojcc = 0x79  # ECHONET class: household solar power generation
        EchonetInstance.__init__(self, self.eojgc, self.eojcc, instance, netif)
    def getMeasuredInstantPower(self):
        # EPC 0xE0 - measured instantaneous amount of electricity generated
        return int.from_bytes(self.getSingleMessageResponse(0xE0), 'big')
    def getMeasuredCumulPower(self):
        # EPC 0xE1 - measured cumulative amount of electricity generated
        return int.from_bytes(self.getSingleMessageResponse(0xE1), 'big')
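# Usage sketch (the address below is a placeholder; constructor semantics come
# from EchonetInstance, which is not shown here):
#
#   solar = HomeSolarPower("192.168.1.10")
#   instant_watts = solar.getMeasuredInstantPower()
#   cumulative = solar.getMeasuredCumulPower()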
| 35.571429 | 79 | 0.728916 | 443 | 0.889558 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.02008 |
27f5b22f4011155a67ce267a26bf5d2d27c8298e | 6,955 | py | Python | adlmagics/adlmagics/adlmagics_main.py | Azure/Azure-Data-Service-Notebook | 6bd28587c9fa0a7c1f9113f638b790b1773c5585 | [
"MIT"
]
| 6 | 2018-06-06T08:37:53.000Z | 2020-06-01T13:13:13.000Z | adlmagics/adlmagics/adlmagics_main.py | Azure/Azure-Data-Service-Notebook | 6bd28587c9fa0a7c1f9113f638b790b1773c5585 | [
"MIT"
]
| 30 | 2018-06-08T02:47:18.000Z | 2018-07-25T07:07:07.000Z | adlmagics/adlmagics/adlmagics_main.py | Azure/Azure-Data-Service-Notebook | 6bd28587c9fa0a7c1f9113f638b790b1773c5585 | [
"MIT"
]
| 5 | 2018-06-06T08:37:55.000Z | 2021-01-07T09:15:15.000Z | from IPython.core.magic import Magics, magics_class, line_cell_magic
from sys import stdout
from os import linesep
from os.path import join, expanduser
from adlmagics.version import adlmagics_version
from adlmagics.converters.dataframe_converter import DataFrameConverter
from adlmagics.utils.json_file_persister import JsonFilePersister
from adlmagics.utils.ipshell_result_receiver import IPShellResultReceiver
from adlmagics.presenters.presenter_base import PresenterBase
from adlmagics.presenters.text_presenter import TextPresenter
from adlmagics.presenters.adla_job_presenter import AdlaJobPresenter
from adlmagics.presenters.adla_jobs_presenter import AdlaJobsPresenter
from adlmagics.presenters.adls_files_presenter import AdlsFilesPresenter
from adlmagics.presenters.adls_folders_presenter import AdlsFoldersPresenter
from adlmagics.services.azure_token_service import AzureTokenService
from adlmagics.services.adla_service_sdk_impl import AdlaServiceSdkImpl
from adlmagics.services.adls_service_sdk_impl import AdlsServiceSdkImpl
from adlmagics.services.session_service import SessionService
from adlmagics.services.presenter_factory import PresenterFactory
from adlmagics.magics.session.session_magic_base import SessionMagicBase
from adlmagics.magics.session.session_viewing_magic import SessionViewingMagic
from adlmagics.magics.session.session_item_setting_magic import SessionItemSettingMagic
from adlmagics.magics.azure.azure_magic_base import AzureMagicBase
from adlmagics.magics.azure.azure_login_magic import AzureLoginMagic
from adlmagics.magics.azure.azure_logout_magic import AzureLogoutMagic
from adlmagics.magics.adla.adla_magic_base import AdlaMagicBase
from adlmagics.magics.adla.adla_accounts_listing_magic import AdlaAccountsListingMagic
from adlmagics.magics.adla.adla_job_viewing_magic import AdlaJobViewingMagic
from adlmagics.magics.adla.adla_job_submission_magic import AdlaJobSubmissionMagic
from adlmagics.magics.adla.adla_jobs_listing_magic import AdlaJobsListingMagic
from adlmagics.magics.adls.adls_magic_base import AdlsMagicBase
from adlmagics.magics.adls.adls_accounts_listing_magic import AdlsAccountsListingMagic
from adlmagics.magics.adls.adls_folders_listing_magic import AdlsFoldersListingMagic
from adlmagics.magics.adls.adls_files_listing_magic import AdlsFilesListingMagic
from adlmagics.magics.adls.adls_file_sampling_magic import AdlsFileSamplingMagic
@magics_class
class AdlMagics(Magics):
def __init__(self, shell, data = None):
super(AdlMagics, self).__init__(shell)
self.__session_service = SessionService(JsonFilePersister(join(expanduser("~"), "adlmagics.session"), "utf-8"))
self.__presenter_factory = PresenterFactory()
self.__register_presenter(TextPresenter)
self.__register_presenter(AdlaJobPresenter)
self.__register_presenter(AdlaJobsPresenter)
self.__register_presenter(AdlsFilesPresenter)
self.__register_presenter(AdlsFoldersPresenter)
self.__token_service = AzureTokenService(self.__presenter_factory)
self.__adla_service = AdlaServiceSdkImpl(self.__token_service)
self.__adls_service = AdlsServiceSdkImpl(self.__token_service)
self.__initialize_magics()
self.__write_line("%s %s initialized" % (AdlMagics.__name__, adlmagics_version))
@line_cell_magic
def adl(self, line, cell = ""):
cmd = line.strip()
arg_string = ""
try:
cmd_end_index = cmd.index(" ")
cmd = cmd[0:cmd_end_index].strip().lower()
arg_string = line[cmd_end_index:].strip()
except:
pass
if (cmd not in self.__magics):
raise ValueError("Unsupported command '%s'" % cmd)
magic = self.__magics[cmd]
return magic.execute(arg_string, cell)
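    # Dispatch sketch: the first whitespace-delimited token of the line is
    # lower-cased and looked up in self.__magics (keyed by each magic's
    # cmd_name); the rest of the line becomes arg_string and the cell body is
    # forwarded unchanged. The command name below is a placeholder:
    #
    #   %adl <cmd_name> --some-arg value          # line magic form
    #   %%adl <cmd_name> --some-arg value         # cell magic form; the cell
    #   <script body>                             # body is passed as `cell`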
def __register_presenter(self, presenter_class):
if (not issubclass(presenter_class, PresenterBase)):
raise TypeError("%s not a presenter class." % (presenter_class.__name__))
presenter = presenter_class()
self.__presenter_factory.register_presenter(presenter)
def __initialize_magics(self):
df_converter = DataFrameConverter()
self.__magics = dict()
self.__register_session_magic(SessionViewingMagic)
self.__register_session_magic(SessionItemSettingMagic)
self.__register_azure_magic(AzureLoginMagic)
self.__register_azure_magic(AzureLogoutMagic)
self.__register_adla_magic(AdlaAccountsListingMagic, df_converter)
self.__register_adla_magic(AdlaJobViewingMagic, df_converter)
self.__register_adla_magic(AdlaJobsListingMagic, df_converter)
adla_job_submission_magic = AdlaJobSubmissionMagic(self.__session_service, self.__presenter_factory, df_converter, IPShellResultReceiver(), self.__adla_service)
self.__magics[adla_job_submission_magic.cmd_name.lower()] = adla_job_submission_magic
self.__register_adls_magic(AdlsAccountsListingMagic, df_converter)
self.__register_adls_magic(AdlsFoldersListingMagic, df_converter)
self.__register_adls_magic(AdlsFilesListingMagic, df_converter)
self.__register_adls_magic(AdlsFileSamplingMagic, df_converter)
def __register_session_magic(self, session_magic_class):
if (not issubclass(session_magic_class, SessionMagicBase)):
raise TypeError("%s not a session magic class." % (session_magic_class.__name__))
session_magic = session_magic_class(self.__session_service, self.__presenter_factory)
self.__magics[session_magic.cmd_name.lower()] = session_magic
def __register_azure_magic(self, azure_magic_class):
if (not issubclass(azure_magic_class, AzureMagicBase)):
raise TypeError("%s not a azure magic class." % (azure_magic_class.__name__))
azure_magic = azure_magic_class(self.__session_service, self.__presenter_factory, self.__token_service)
self.__magics[azure_magic.cmd_name.lower()] = azure_magic
def __register_adla_magic(self, adla_magic_class, result_converter):
if (not issubclass(adla_magic_class, AdlaMagicBase)):
raise TypeError("%s not a adla magic class." % (adla_magic_class.__name__))
adla_magic = adla_magic_class(self.__session_service, self.__presenter_factory, result_converter, self.__adla_service)
self.__magics[adla_magic.cmd_name.lower()] = adla_magic
def __register_adls_magic(self, adls_magic_class, result_converter):
if (not issubclass(adls_magic_class, AdlsMagicBase)):
raise TypeError("%s not a adls magic class." % (adls_magic_class.__name__))
adls_magic = adls_magic_class(self.__session_service, self.__presenter_factory, result_converter, self.__adls_service)
self.__magics[adls_magic.cmd_name.lower()] = adls_magic
def __write_line(self, text):
stdout.write(text + linesep) | 48.298611 | 168 | 0.78404 | 4,529 | 0.651186 | 0 | 0 | 4,543 | 0.653199 | 0 | 0 | 224 | 0.032207 |
27f6676280bfbc46f5ea3961bee24ccfef845e05 | 10,137 | py | Python | metadata_service/api/dashboard.py | iiAnderson/metaflow-service | b42391e5ee2187a93259b944c515522d76b1314e | [
"Apache-2.0"
]
| null | null | null | metadata_service/api/dashboard.py | iiAnderson/metaflow-service | b42391e5ee2187a93259b944c515522d76b1314e | [
"Apache-2.0"
]
| null | null | null | metadata_service/api/dashboard.py | iiAnderson/metaflow-service | b42391e5ee2187a93259b944c515522d76b1314e | [
"Apache-2.0"
]
| null | null | null | import asyncio
import json
from datetime import datetime, timedelta
from aiohttp import web
from .utils import read_body, get_week_times, get_formatted_time
from ..data.models import RunRow
from ..data.postgres_async_db import AsyncPostgresDB
import logging
class DashboardAPI(object):
_run_table = None
lock = asyncio.Lock()
def __init__(self, app, cors):
cors.add(app.router.add_route("GET", "/dashboard/flows", self.get_flows))
cors.add(app.router.add_route("GET", "/dashboard/flows/{flow_id}/count", self.count_runs))
cors.add(app.router.add_route("GET", "/dashboard/flows/{flow_id}/recent", self.get_recent_run))
cors.add(app.router.add_route("GET", "/dashboard/flows/{flow_id}/last", self.get_last_n_runs))
cors.add(app.router.add_route("GET", "/dashboard/flows/{flow_id}/{timestamp}", self.get_runs_since))
self._run_async_table = AsyncPostgresDB.get_instance().run_table_postgres
self._flow_async_table = AsyncPostgresDB.get_instance().flow_table_postgres
self._rich_run_async_table = AsyncPostgresDB.get_instance().rich_run_table_postgres
async def get_flows(self, request):
"""
---
        description: Get the most recent run summary for every flow
        tags:
        - Run
produces:
- text/plain
responses:
"200":
                description: successful operation. Return the latest run of each flow
"404":
description: specified run not found
"405":
description: invalid HTTP Method
"""
flow_response = await self._flow_async_table.get_all_flows()
data = []
for flow in flow_response.body:
flow_id = flow['flow_id']
run_response = await self._run_async_table.get_all_runs(flow_id)
last_run = run_response.body[-1]
rich_run_response = await self._rich_run_async_table.get_rich_run(flow_id, last_run['run_number'])
rich_last_run = rich_run_response.body
data.append({
"success": rich_last_run['success'],
"finished": rich_last_run['finished'],
"finished_at": rich_last_run['finished_at'],
"created_at": last_run['ts_epoch'],
"run_id": last_run['run_number'],
"flow": flow_id,
"user": last_run['user_name']
})
return web.Response(
status=rich_run_response.response_code, body=json.dumps(data)
)
async def count_runs(self, request):
"""
---
        description: Count runs per day over the last week for the specified flow (or all flows)
tags:
- Run
parameters:
- name: "flow_id"
in: "path"
description: "flow_id"
required: true
type: "string"
produces:
- text/plain
responses:
"200":
description: Returned all runs of specified flow
"405":
description: invalid HTTP Method
"""
flow_name = request.match_info.get("flow_id")
if flow_name == "all":
flow_response = await self._flow_async_table.get_all_flows()
flows = [x['flow_id'] for x in flow_response.body]
else:
flows = [flow_name]
counts = get_week_times()
time_bound = (datetime.now() - timedelta(days=7)).timestamp()
for flow_id in flows:
run_response = await self._rich_run_async_table.get_rich_run_since(flow_id, time_bound)
for run in run_response.body:
logging.error(run)
datetime_created = datetime.fromtimestamp(run['ts_epoch']/1000)
counts[get_formatted_time(datetime_created)] = counts[get_formatted_time(datetime_created)] + 1
        return_data = []
for key, value in counts.items():
return_data.append({"time": key, "count": value})
return web.Response(status=run_response.response_code, body=json.dumps(return_data))
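    # Response shape sketch: one entry per day bucket produced by
    # get_week_times(), e.g. [{"time": <bucket label>, "count": 3}, ...].
    # The exact label format comes from get_formatted_time(), which is not
    # shown in this module.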
async def get_runs_since(self, request):
"""
---
        description: Get all runs created since the given timestamp
tags:
- Run
parameters:
- name: "flow_id"
in: "path"
description: "flow_id"
required: true
type: "string"
produces:
- text/plain
responses:
"200":
description: Returned all runs of specified flow
"405":
description: invalid HTTP Method
"""
timestamp = request.match_info.get("timestamp")
flow_name = request.match_info.get("flow_id")
if flow_name == "all":
flow_response = await self._flow_async_table.get_all_flows()
flows = [x['flow_id'] for x in flow_response.body]
else:
flows = [flow_name]
data = []
for flow_id in flows:
run_response = await self._rich_run_async_table.get_rich_run_since(flow_id, timestamp)
rich_runs = run_response.body
for rich_run_data in rich_runs:
logging.error(flow_id + " " + str(rich_run_data['run_number']))
run_response = await self._run_async_table.get_run(flow_id, rich_run_data['run_number'])
run_data = run_response.body
data.append({
"success": rich_run_data['success'],
"finished": rich_run_data['finished'],
"finished_at": rich_run_data['finished_at'],
"created_at": run_data['ts_epoch'],
"run_id": run_data['run_number'],
"flow": flow_id,
"user": run_data['user_name']
})
return web.Response(status=run_response.response_code, body=json.dumps(data))
async def get_run_data(self, request):
"""
---
        description: Get a single run of the specified flow by run number
tags:
- Run
parameters:
- name: "flow_id"
in: "path"
description: "flow_id"
required: true
type: "string"
produces:
- text/plain
responses:
"200":
description: Returned all runs of specified flow
"405":
description: invalid HTTP Method
"""
flow_id = request.match_info.get("flow_id")
run_number = request.match_info.get("run_number")
run_response = await self._rich_run_async_table.get_rich_run(flow_id, run_number)
rich_run_data = run_response.body
run_response = await self._run_async_table.get_run(flow_id, run_number)
run_data = run_response.body
data = {
"success": rich_run_data['success'],
"finished": rich_run_data['finished'],
"finished_at": rich_run_data['finished_at'],
"created_at": run_data['ts_epoch'],
"run_id": run_data['run_number'],
"flow": flow_id,
"user": run_data['user_name']
}
return web.Response(status=run_response.response_code, body=json.dumps(data))
async def get_recent_run(self, request):
"""
---
        description: Get the most recent run of a flow
tags:
- Run
parameters:
- name: "flow_id"
in: "path"
description: "flow_id"
required: true
type: "string"
produces:
- text/plain
responses:
"200":
              description: Returned the most recent run of the specified flow
"405":
description: invalid HTTP Method
"""
flow_id = request.match_info.get("flow_id")
run_response = await self._run_async_table.get_all_runs(flow_id)
run_data = run_response.body
recent_run = run_data[-1]
run_response = await self._rich_run_async_table.get_rich_run(flow_id, recent_run['run_number'])
rich_run_data = run_response.body
data = {
"success": rich_run_data['success'],
"finished": rich_run_data['finished'],
"finished_at": rich_run_data['finished_at'],
"created_at": recent_run['ts_epoch'],
"run_id": recent_run['run_number'],
"flow": flow_id,
"user": recent_run['user_name']
}
return web.Response(status=run_response.response_code, body=json.dumps(data))
async def get_last_n_runs(self, request):
"""
---
        description: Get the last five runs of a flow
tags:
- Run
parameters:
- name: "flow_id"
in: "path"
description: "flow_id"
required: true
type: "string"
produces:
- text/plain
responses:
"200":
              description: Returned the last five runs of the specified flow
"405":
description: invalid HTTP Method
"""
flow_id = request.match_info.get("flow_id")
run_response = await self._run_async_table.get_all_runs(flow_id)
run_data = run_response.body
n_recent_runs = run_data[-5:]
data = []
for recent_run in n_recent_runs:
run_response = await self._rich_run_async_table.get_rich_run(flow_id, recent_run['run_number'])
rich_run_data = run_response.body
data.append({
"success": rich_run_data['success'],
"finished": rich_run_data['finished'],
"finished_at": rich_run_data['finished_at'],
"created_at": recent_run['ts_epoch'],
"run_id": recent_run['run_number'],
"flow": flow_id,
"user": recent_run['user_name']
})
return web.Response(status=run_response.response_code, body=json.dumps(data))
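# Hedged sketch (not part of the original service): the coroutines above are
# ordinary aiohttp handlers, so route registration would look roughly like the
# commented snippet below. The `service` instance name and URL layout are
# assumptions; only the request.match_info keys used above are reflected.
#
#   app = web.Application()
#   app.router.add_get("/flows/{flow_id}/runs/count", service.count_runs)
#   app.router.add_get("/flows/{flow_id}/runs/since/{timestamp}", service.get_runs_since)
#   app.router.add_get("/flows/{flow_id}/runs/{run_number}", service.get_run_data)
#   web.run_app(app)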
| 32.805825 | 111 | 0.56792 | 9,876 | 0.974253 | 0 | 0 | 0 | 0 | 8,973 | 0.885173 | 3,940 | 0.388675 |
27f693df0e7ea237223f8c2bc9de9a57a4f98dac | 838 | py | Python | tests/test_report.py | whalebot-helmsman/pykt-64 | ee5e0413cd850876d3abc438480fffea4f7b7517 | [
"BSD-3-Clause"
]
| null | null | null | tests/test_report.py | whalebot-helmsman/pykt-64 | ee5e0413cd850876d3abc438480fffea4f7b7517 | [
"BSD-3-Clause"
]
| null | null | null | tests/test_report.py | whalebot-helmsman/pykt-64 | ee5e0413cd850876d3abc438480fffea4f7b7517 | [
"BSD-3-Clause"
]
| null | null | null | # -*- coding: utf-8 -*-
from setup_teardown import start_db, stop_db
from nose.tools import *
from pykt import KyotoTycoon, KTException
@raises(IOError)
def test_err_report():
db = KyotoTycoon()
db.report()
@with_setup(setup=start_db,teardown=stop_db)
def test_report():
db = KyotoTycoon()
db = db.open()
ret = db.report()
ok_(ret)
ok_(isinstance(ret, dict))
db.close()
@with_setup(setup=start_db,teardown=stop_db)
def test_report_with_db():
db = KyotoTycoon("test")
db = db.open()
ret = db.report()
ok_(ret)
ok_(isinstance(ret, dict))
db.close()
@with_setup(setup=start_db,teardown=stop_db)
def test_report_loop():
db = KyotoTycoon()
db = db.open()
for i in xrange(100):
ret = db.report()
ok_(ret)
ok_(isinstance(ret, dict))
db.close()
| 21.487179 | 44 | 0.643198 | 0 | 0 | 0 | 0 | 693 | 0.826969 | 0 | 0 | 29 | 0.034606 |
27f6d38ee1079239114141527da38c16b3c99951 | 1,024 | py | Python | src/Screen.py | D3r3k23/CaveRun | 27f7b3c518f8646bc506f5d3b774ef6e62faef96 | [
"MIT"
]
| 1 | 2022-02-10T04:42:04.000Z | 2022-02-10T04:42:04.000Z | src/Screen.py | D3r3k23/CaveRun | 27f7b3c518f8646bc506f5d3b774ef6e62faef96 | [
"MIT"
]
| null | null | null | src/Screen.py | D3r3k23/CaveRun | 27f7b3c518f8646bc506f5d3b774ef6e62faef96 | [
"MIT"
]
| 1 | 2022-01-11T17:11:44.000Z | 2022-01-11T17:11:44.000Z |
import Resources
import Colors
import pygame
screen = None
def init(width, height):
global screen
screen = pygame.display.set_mode((width, height))
def width():
return screen.get_width()
def height():
return screen.get_height()
def res():
return (screen.get_width(), screen.get_height())
def rect():
return screen.get_rect()
def clear():
screen.fill(Colors.BLACK)
def draw_to_screen(img, rect=(0, 0)):
screen.blit(img, rect)
def display():
pygame.display.update()
clear()
# Base class for drawable objects
# Created from image and coordinates, stores image and rect
class Drawable:
def __init__(self, img, x, y, center=False): # x, y: center or (left, top) coordinates
width = img.get_width()
height = img.get_height()
origin = (x - (width // 2), y - (height // 2)) if center else (x, y)
self.img = img
self.rect = pygame.Rect(origin, (width, height))
def draw(self):
draw_to_screen(self.img, self.rect)
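# Hedged usage sketch (not part of the original game): drives the helpers and
# Drawable defined above in a minimal loop. The solid red Surface stands in
# for a real asset; the Resources/Colors modules come from the surrounding project.
if __name__ == "__main__":
    pygame.init()
    init(640, 480)
    img = pygame.Surface((32, 32))
    img.fill((200, 50, 50))
    box = Drawable(img, width() // 2, height() // 2, center=True)
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
        box.draw()
        display()
    pygame.quit()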
| 21.333333 | 90 | 0.640625 | 405 | 0.395508 | 0 | 0 | 0 | 0 | 0 | 0 | 133 | 0.129883 |
27f931503927cf87b2047c06d44bfc6dbb23b7c2 | 5,416 | py | Python | manga_db/extractor/toonily.py | nilfoer/mangadb | 860d7de310002735631ea26810b4df5b6bc08d7b | [
"MIT"
]
| 3 | 2021-01-14T16:22:41.000Z | 2022-02-21T03:31:22.000Z | manga_db/extractor/toonily.py | nilfoer/mangadb | 860d7de310002735631ea26810b4df5b6bc08d7b | [
"MIT"
]
| 13 | 2021-01-14T10:34:19.000Z | 2021-05-20T08:47:54.000Z | manga_db/extractor/toonily.py | nilfoer/mangadb | 860d7de310002735631ea26810b4df5b6bc08d7b | [
"MIT"
]
| 1 | 2022-02-24T03:10:04.000Z | 2022-02-24T03:10:04.000Z | import re
import datetime
import bs4
from typing import Dict, Tuple, Optional, TYPE_CHECKING, ClassVar, Pattern, cast, Match, Any
from .base import BaseMangaExtractor, MangaExtractorData
from ..constants import STATUS_IDS, CENSOR_IDS
if TYPE_CHECKING:
from ..ext_info import ExternalInfo
class ToonilyExtractor(BaseMangaExtractor):
site_name: ClassVar[str] = "Toonily"
site_id: ClassVar[int] = 5
URL_PATTERN_RE: ClassVar[Pattern] = re.compile(
r"(?:https?://)?toonily\.com/webtoon/([-A-Za-z0-9]+)")
BASE_URL = "https://toonily.com"
MANGA_URL = "https://toonily.com/webtoon/{id_onpage}"
def __init__(self, url: str):
super().__init__(url)
self.id_onpage: str = self.book_id_from_url(url)
self.cover_url: Optional[str] = None
self.export_data: Optional[MangaExtractorData] = None
@classmethod
def match(cls, url: str) -> bool:
"""
Returns True on URLs the extractor is compatible with
"""
return bool(cls.URL_PATTERN_RE.match(url))
def extract(self) -> Optional[MangaExtractorData]:
if self.export_data is None:
html = self.get_html(self.url)
if html is None:
return None
data_dict = self._extract_info(html)
self.export_data = MangaExtractorData(
pages=0,
# seem to only be in english
language='English',
collection=[],
groups=[],
parody=[],
character=[],
url=self.url,
id_onpage=self.id_onpage,
imported_from=ToonilyExtractor.site_id,
uploader=None,
upload_date=datetime.date.min,
**data_dict)
return self.export_data
def _extract_info(self, html: str) -> Dict[str, Any]:
res: Dict[str, Any] = {}
soup = bs4.BeautifulSoup(html, "html.parser")
cover_url = soup.select_one("div.summary_image img")
self.cover_url = cover_url.attrs['data-src']
res['title_eng'] = soup.select_one("div.post-title h1").text.strip()
book_data = soup.select_one("div.summary_content")
label_to_idx = {x.get_text().strip(): i for i, x in enumerate(book_data.select("div.summary-heading"))}
content = book_data.select("div.summary-content")
# assumes order stays the same
rating_idx = label_to_idx["Rating"]
res['rating'] = float(content[rating_idx].select_one("#averagerate").text.strip())
res['ratings'] = int(content[rating_idx].select_one("#countrate").text.strip())
# sep is ','
alt_title_idx = label_to_idx["Alt Name(s)"]
alt_titles = [s.strip() for s in content[alt_title_idx].text.split(",")]
if alt_titles[0] == 'N/A':
res['title_foreign'] = None
else:
# @Incomplete take first non-latin title; alnum() supports unicode and thus returns
# true for """"alphanumeric"""" japanese symbols !?!?
non_latin = [s for s in alt_titles if ord(s[0]) > 128]
if non_latin:
res['title_foreign'] = non_latin[0]
else:
res['title_foreign'] = alt_titles[0]
authors = [s.text.strip() for s in content[label_to_idx["Author(s)"]].select("a")]
artists = [s.text.strip() for s in content[label_to_idx["Artist(s)"]].select("a")]
res['artist'] = [n for n in authors if n not in artists] + artists
tags = [a.text.strip() for a in book_data.select('div.genres-content a')]
res['tag'] = tags
res['nsfw'] = 'Mature' in tags
uncensored = 'Uncensored' in tags
res['censor_id'] = (
CENSOR_IDS['Uncensored'] if uncensored else CENSOR_IDS['Censored'])
# type
res['category'] = [content[label_to_idx["Type"]].text.strip()]
# OnGoing or Completed
status_str = content[label_to_idx["Status"]].text.strip().capitalize()
res['status_id'] = STATUS_IDS['Hiatus'] if status_str == 'On Hiatus' else STATUS_IDS[status_str]
# e.g.: 128 Users bookmarked this
# e.g.: 128K Users bookmarked this
favorites_str = book_data.select_one("div.add-bookmark span").text.split()[0].strip().lower()
if 'k' in favorites_str:
res['favorites'] = int(float(favorites_str[:-1]) * 1000)
else:
res['favorites'] = int(favorites_str)
summary = soup.select_one("div.description-summary div.summary__content").text.strip()
# @CleanUp
res['note'] = f"{'Summary: ' if not uncensored else ''}{summary}"
return res
def get_cover(self) -> Optional[str]:
if self.export_data is None:
self.extract()
return self.cover_url
@classmethod
def book_id_from_url(cls, url: str) -> str:
# guaranteed match since we only get passed matching urls
match = cast(Match, cls.URL_PATTERN_RE.match(url))
return match.group(1)
@classmethod
def url_from_ext_info(cls, ext_info: 'ExternalInfo') -> str:
return cls.MANGA_URL.format(id_onpage=ext_info.id_onpage)
@classmethod
def read_url_from_ext_info(cls, ext_info: 'ExternalInfo') -> str:
# @CleanUp just uses first chapter
return f"{cls.url_from_ext_info(ext_info)}/chapter-1"
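# Hedged usage sketch (not part of the original module): the URL is a made-up
# example and extract() needs network access to toonily.com to succeed.
if __name__ == "__main__":
    url = "https://toonily.com/webtoon/some-webtoon"
    if ToonilyExtractor.match(url):
        extractor = ToonilyExtractor(url)
        print(extractor.extract())
        print(extractor.get_cover())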
| 37.351724 | 111 | 0.605613 | 5,117 | 0.944793 | 0 | 0 | 732 | 0.135155 | 0 | 0 | 1,269 | 0.234306 |
27fb6ab9dc39790c3dcbcf43be391bd869cc5d49 | 10,965 | py | Python | blindbackup/providers/blindfs.py | nagylzs/blindbackup | fa0c7a6ef42bb5aefec99eff69a3227c8695fdd9 | [
"Apache-2.0"
]
| 1 | 2020-01-26T05:46:14.000Z | 2020-01-26T05:46:14.000Z | blindbackup/providers/blindfs.py | nagylzs/blindbackup | fa0c7a6ef42bb5aefec99eff69a3227c8695fdd9 | [
"Apache-2.0"
]
| null | null | null | blindbackup/providers/blindfs.py | nagylzs/blindbackup | fa0c7a6ef42bb5aefec99eff69a3227c8695fdd9 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python3
import os.path
import threading
from .. import cryptfile
from ..util import *
from ..client import create_client
from ..syncdir import FsProvider, FsListener
class BlindFsListener(threading.Thread, FsListener):
def is_stopping(self):
        return self.stop_requested.is_set()
def __init__(self, client, sender, relpath, onchange):
self.client = client
self.sender = sender
self.relpath = relpath
self.onchange = onchange
self.stop_requested = threading.Event()
self.stopped = threading.Event()
self.uid = None
threading.Thread.__init__(self)
FsListener.__init__(self) # This will create a dummy uid but we will overwrite it later in run().
def request_stop(self):
"""Request a stop on the listening thread."""
self.stop_requested.set()
def is_stopped(self):
"""Tells if the listening thread has stopped."""
return self.stopped.is_set()
def run(self):
self.stopped.clear()
self.stop_requested.clear()
self.uid = self.client("listenchanges", root=self.relpath)
while not self.stop_requested.is_set():
changes = self.client("pollchanges", uid=self.uid)
if changes:
for eventPath, eventType, eventUid in changes:
self.onchange(self.sender, eventPath, eventType, eventUid)
self.stopped.set()
def get_uid(self):
"""Get unique identifier for the listener.
This can be used to send notification messages that are not to be sent back to this listener."""
return self.uid
class BlindFsProvider(FsProvider):
"""FsProvider that is provided by a backup server.
@param client: A Client instance
@param root: The root parameter must be a list of path elements.
It represents the relative path on the server that will be
snychronized.
"""
@classmethod
def get_name(cls):
return "blindfs"
def __init__(self, path: str, can_create: bool, settings: dict, client=None, root=None):
if root is None:
# Normal construction
if client is None:
self.client = create_client(settings)
else:
self.client = client
if path:
root = path.split("/")
else:
root = []
if root and not root[0]:
raise Exception("BlindFsProvider: root cannot be [''], it must be []. Hint: use :// instead of :///")
            if not self.client.directory_exists(path):
                if can_create:
                    self.client("mkdir", relpath=path)
# else:
# parser.error("Remote path does not exist: %s" % loc)
else:
# cloned
assert client
assert path is None
assert root
self.client = client
self.root = root
self.settings = settings
self._is_case_sensitive = None
self.tmp_dir = settings.get("tmp_dir", None)
super().__init__()
def clone(self):
res = BlindFsProvider(None, False, self.settings, self.client, self.root)
res.uid = self.get_uid()
return res
def drill(self, relpath):
"""Change root of the FsProvider to a new subdir.
@param relpath: a list of path items
Should only use it on a clone."""
assert (isinstance(relpath, list))
self.root = self.root + relpath
def get_event_relpath(self, event_path):
"""Convert the full path of an event into a path relative to this provider.
@return: a list of path items"""
myroot = "/".join(self.root)
assert (event_path.startswith(myroot))
return event_path[len(myroot) + 1:].split("/")
def _remotepath(self, relpath):
return self.root + relpath
def iscasesensitive(self):
if self._is_case_sensitive is None:
self._is_case_sensitive = self.client("iscasesensitive")
return self._is_case_sensitive
def listdir(self, relpath):
# print("listdir",relpath,self._remotepath(relpath))
return self.client("listdir", relpath=self._remotepath(relpath))
def getinfo(self, items, encrypted):
root = "/".join(self.root)
# map object cannot be serialized, need to convert items to a list.
return self.client(
"getinfo", root=root, items=list(items), encrypted=encrypted)
def sendchanges(self, delet, dcopy, fcopy):
# Delete unwanted first
for dpath in delet:
yield (self.DELETE, "/".join(dpath))
# Then create new directories
infos = self.getinfo(dcopy, bool(self.decryptionkey))
for idx, dpath in enumerate(dcopy):
# use getinfo here, but need to have some buffering?
atime, mtime, fsize = infos[idx]
yield (
self.DIRECTORY,
"/".join(dpath),
atime, mtime)
subdnames, subfnames = self.listdir(dpath)
for change in self.sendchanges(
[],
self._prefixed(dpath, subdnames),
self._prefixed(dpath, subfnames)):
yield change
# Finally send file data
# TODO: make this much more efficient. Do not want to create one request per file, especially if files are small.
infos = self.getinfo(fcopy, bool(self.decryptionkey))
for idx, relpath in enumerate(fcopy):
atime, mtime, fsize = infos[idx]
file_data = self.client.recv_backup(
"/".join(self._remotepath(relpath)))
localpath = create_tmp_file_for(self.tmp_dir)
fout = open(localpath, "wb+")
try:
fout.write(file_data)
fout.close()
yield (
self.FILE, "/".join(relpath),
atime, mtime, fsize, localpath, self.RECEIVER)
finally:
if os.path.isfile(localpath):
os.unlink(localpath)
def receivechanges(self, sender):
# Unfortunately, we have to make our own schedule here.
# Small files should be sent at once to minimize the number
# of requests on the server.
# TODO: store changes in a tmp file because there can be many.
root = "/".join(self.root)
delet, dcopy, fcopy = [], [], []
files, encfiles = [], []
ownedfiles = []
cnt, totalsize = 0, 0
try:
while True:
change = next(sender)
op, *args = change
if op == self.DELETE:
# (self.DELETE, converted_path)
change = (self.DELETE, "/".join(
self.recrypt_path_items(change[1].split("/")))
)
delet.append(change)
cnt += 1
elif op == self.DIRECTORY:
# (self.DIRECTORY,converted_path,atime,mtime)
change = list(change)
change[1] = "/".join(self.recrypt_path_items(change[1].split("/")))
dcopy.append(tuple(change))
cnt += 1
elif op == self.FILE:
# (self.FILE,converted_path,atime,mtime,fsize,fpath,owner)
selpath, atime, mtime, fsize, fpath, owner = args
selpath = "/".join(
self.recrypt_path_items(selpath.split("/")))
if owner == self.RECEIVER:
ownedfiles.append(fpath)
# Hide original full path from the server.
# The owner parameter is meaningless on the server side
# (server cannot own a file on the client side) so it is
# omited.
change = (self.FILE, selpath, atime, mtime, fsize, "")
fcopy.append(change)
cnt += 1
totalsize += args[3]
if self.encryptionkey and self.decryptionkey:
encpath = create_tmp_file_for(fpath)
cryptfile.recrypt_file(
cryptfile.hashkey(self.decryptionkey),
cryptfile.hashkey(self.encryptionkey),
fpath, encpath)
encfiles.append(encpath)
files.append([selpath, encpath])
elif self.encryptionkey:
encpath = create_tmp_file_for(fpath)
cryptfile.encrypt_file(
self.encryptionkey, fpath, encpath)
encfiles.append(encpath)
files.append([selpath, encpath])
elif self.decryptionkey:
encpath = create_tmp_file_for(fpath)
cryptfile.decrypt_file(
self.decryptionkey, fpath, encpath)
encfiles.append(encpath)
files.append([selpath, encpath])
else:
files.append([selpath, fpath])
else:
raise Exception("Protocol error")
if cnt > 1000 or totalsize > 1024 * 1024:
self.client(
"receivechanges",
root=root, uid=self.get_uid(),
delet=delet, dcopy=dcopy, fcopy=fcopy,
files=files
)
for encpath in encfiles:
os.unlink(encpath)
encfiles.clear()
for ownedpath in ownedfiles:
os.unlink(ownedpath)
ownedfiles.clear()
                    delet.clear()
                    dcopy.clear()
                    fcopy.clear()
                    files.clear()
                    # Reset the batch counters so the next batch accumulates
                    # again instead of flushing on every subsequent change.
                    cnt, totalsize = 0, 0
except StopIteration:
pass
if cnt:
self.client(
"receivechanges",
root=root, uid=self.get_uid(),
delet=delet, dcopy=dcopy, fcopy=fcopy,
files=files
)
for encpath in encfiles:
os.unlink(encpath)
encfiles.clear()
for ownedpath in ownedfiles:
os.unlink(ownedpath)
ownedfiles.clear()
def listenchanges(self, onchange) -> FsListener:
"""Listen for changes in the filesystem."""
# Note: listenchanges always uses relative paths on the sedrver.
# So instead of self.root, we pass "" here!
listener = BlindFsListener(self.client, self, "", onchange)
listener.start()
return listener
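# Hedged usage sketch (not part of the original module): providers are normally
# paired by the sync driver in ..syncdir; the commented lines below only make
# the sendchanges/receivechanges generator protocol above concrete. The
# `local_provider` object and the change lists are assumptions.
#
#   remote = BlindFsProvider("backups/photos", True, settings)
#   changes = local_provider.sendchanges(delet=[], dcopy=[], fcopy=[["notes.txt"]])
#   remote.receivechanges(changes)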
| 38.882979 | 121 | 0.529503 | 10,777 | 0.982855 | 1,625 | 0.148199 | 60 | 0.005472 | 0 | 0 | 2,220 | 0.202462 |
27fd6c6f828a7e94f81f249d959e7e48fffdae85 | 3,587 | py | Python | examples/computer_vision/harris.py | parag-hub/arrayfire-python | 65040c10833506f212f13e5bcc0e49cb20645e6e | [
"BSD-3-Clause"
]
| 420 | 2015-07-30T00:02:21.000Z | 2022-03-28T16:52:28.000Z | examples/computer_vision/harris.py | parag-hub/arrayfire-python | 65040c10833506f212f13e5bcc0e49cb20645e6e | [
"BSD-3-Clause"
]
| 198 | 2015-07-29T17:17:36.000Z | 2022-01-20T18:31:28.000Z | examples/computer_vision/harris.py | parag-hub/arrayfire-python | 65040c10833506f212f13e5bcc0e49cb20645e6e | [
"BSD-3-Clause"
]
| 75 | 2015-07-29T15:17:54.000Z | 2022-02-24T06:50:23.000Z | #!/usr/bin/env python
#######################################################
# Copyright (c) 2018, ArrayFire
# All rights reserved.
#
# This file is distributed under 3-clause BSD license.
# The complete license agreement can be obtained at:
# http://arrayfire.com/licenses/BSD-3-Clause
########################################################
from time import time
import arrayfire as af
import os
import sys
def draw_corners(img, x, y, draw_len):
# Draw vertical line of (draw_len * 2 + 1) pixels centered on the corner
# Set only the first channel to 1 (green lines)
xmin = max(0, x - draw_len)
xmax = min(img.dims()[1], x + draw_len)
img[y, xmin : xmax, 0] = 0.0
img[y, xmin : xmax, 1] = 1.0
img[y, xmin : xmax, 2] = 0.0
# Draw vertical line of (draw_len * 2 + 1) pixels centered on the corner
# Set only the first channel to 1 (green lines)
ymin = max(0, y - draw_len)
ymax = min(img.dims()[0], y + draw_len)
img[ymin : ymax, x, 0] = 0.0
img[ymin : ymax, x, 1] = 1.0
img[ymin : ymax, x, 2] = 0.0
return img
def harris_demo(console):
root_path = os.path.dirname(os.path.abspath(__file__))
file_path = root_path
if console:
file_path += "/../../assets/examples/images/square.png"
else:
file_path += "/../../assets/examples/images/man.jpg"
img_color = af.load_image(file_path, True);
img = af.color_space(img_color, af.CSPACE.GRAY, af.CSPACE.RGB)
img_color /= 255.0
ix, iy = af.gradient(img)
ixx = ix * ix
ixy = ix * iy
iyy = iy * iy
# Compute a Gaussian kernel with standard deviation of 1.0 and length of 5 pixels
# These values can be changed to use a smaller or larger window
gauss_filt = af.gaussian_kernel(5, 5, 1.0, 1.0)
# Filter second order derivatives
ixx = af.convolve(ixx, gauss_filt)
ixy = af.convolve(ixy, gauss_filt)
iyy = af.convolve(iyy, gauss_filt)
# Calculate trace
itr = ixx + iyy
# Calculate determinant
idet = ixx * iyy - ixy * ixy
# Calculate Harris response
response = idet - 0.04 * (itr * itr)
# Get maximum response for each 3x3 neighborhood
mask = af.constant(1, 3, 3)
max_resp = af.dilate(response, mask)
# Discard responses that are not greater than threshold
corners = response > 1e5
corners = corners * response
# Discard responses that are not equal to maximum neighborhood response,
# scale them to original value
corners = (corners == max_resp) * corners
# Copy device array to python list on host
corners_list = corners.to_list()
draw_len = 3
good_corners = 0
for x in range(img_color.dims()[1]):
for y in range(img_color.dims()[0]):
if corners_list[x][y] > 1e5:
img_color = draw_corners(img_color, x, y, draw_len)
good_corners += 1
print("Corners found: {}".format(good_corners))
if not console:
# Previews color image with green crosshairs
wnd = af.Window(512, 512, "Harris Feature Detector")
while not wnd.close():
wnd.image(img_color)
else:
idx = af.where(corners)
corners_x = idx / float(corners.dims()[0])
corners_y = idx % float(corners.dims()[0])
print(corners_x)
print(corners_y)
if __name__ == "__main__":
if (len(sys.argv) > 1):
af.set_device(int(sys.argv[1]))
console = (sys.argv[2] == '-') if len(sys.argv) > 2 else False
af.info()
print("** ArrayFire Harris Corner Detector Demo **\n")
harris_demo(console)
| 28.927419 | 85 | 0.606078 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,296 | 0.361305 |
7e0022ad51ef52a75fd8fa97ecb5ea7bdfaf493d | 4,376 | py | Python | tests/generate_data.py | ngounou92/py-glm | 83081444e2cbba4d94f9e6b85b6be23e0ff600b8 | [
"BSD-3-Clause"
]
| 127 | 2017-09-01T13:54:43.000Z | 2022-03-12T11:43:32.000Z | tests/generate_data.py | cscherrer/py-glm | d719d29fb5cc71c2cb5e728db36c6230a69292d8 | [
"BSD-3-Clause"
]
| 8 | 2017-09-01T14:00:55.000Z | 2020-11-09T14:42:50.000Z | tests/generate_data.py | cscherrer/py-glm | d719d29fb5cc71c2cb5e728db36c6230a69292d8 | [
"BSD-3-Clause"
]
| 35 | 2017-09-01T19:23:04.000Z | 2022-03-22T13:45:10.000Z | import numpy as np
from scipy.linalg import sqrtm
from sklearn.preprocessing import StandardScaler
def make_linear_regression(n_samples=10000,
n_uncorr_features=10, n_corr_features=10,
n_drop_features=4,
include_intercept=True,
coef_range=(-1, 1),
resid_sd=0.25):
X = make_correlated_data(
n_samples, n_uncorr_features, n_corr_features, include_intercept)
parameters = make_regression_coeffs(
X, n_drop_features=n_drop_features, coef_range=coef_range)
y = make_linear_regression_y(X, parameters, resid_sd)
return (X, y, parameters)
def make_logistic_regression(n_samples=10000,
n_uncorr_features=10, n_corr_features=10,
n_drop_features=4,
include_intercept=True,
coef_range=(-1, 1)):
X = make_correlated_data(
n_samples, n_uncorr_features, n_corr_features, include_intercept)
parameters = make_regression_coeffs(
X, n_drop_features=n_drop_features, coef_range=coef_range)
y = make_logistic_regression_y(X, parameters)
return (X, y, parameters)
def make_poisson_regression(n_samples=10000,
n_uncorr_features=10, n_corr_features=10,
n_drop_features=4,
include_intercept=True,
coef_range=(-1, 1)):
X = make_correlated_data(
n_samples, n_uncorr_features, n_corr_features, include_intercept)
parameters = make_regression_coeffs(
X, n_drop_features=n_drop_features, coef_range=coef_range)
y = make_poisson_regression_y(X, parameters)
return (X, y, parameters)
def make_gamma_regression(n_samples=10000,
n_uncorr_features=10, n_corr_features=10,
n_drop_features=4,
include_intercept=True,
coef_range=(-1, 1)):
X = make_correlated_data(
n_samples, n_uncorr_features, n_corr_features, include_intercept)
parameters = make_regression_coeffs(
X, n_drop_features=n_drop_features, coef_range=coef_range)
y = make_gamma_regression_y(X, parameters)
return (X, y, parameters)
def make_uncorrelated_data(n_samples=10000, n_features=25):
X = np.random.normal(size=(n_samples, n_features))
return X
def make_correlated_data(n_samples=10000,
n_uncorr_features=10, n_corr_features=15,
include_intercept=True):
X_uncorr = make_uncorrelated_data(n_samples, n_uncorr_features)
X_corr_base = make_uncorrelated_data(n_samples, n_corr_features)
cov_matrix = make_covariance_matrix(n_corr_features)
X_corr = StandardScaler().fit_transform(np.dot(X_corr_base, cov_matrix))
X = np.column_stack((X_uncorr, X_corr))
if include_intercept:
intercept = np.ones(n_samples).reshape(-1, 1)
return np.column_stack((intercept, X))
return X
def make_covariance_matrix(n_features=15):
A = np.random.normal(size=(n_features, n_features))
A_sq = np.dot(A.T, A)
return sqrtm(A_sq)
def make_regression_coeffs(X, n_drop_features=None, coef_range=(-1, 1)):
n_features = X.shape[1]
parameters = np.random.uniform(coef_range[0], coef_range[1], size=n_features)
if n_drop_features is not None:
drop_idxs = np.random.choice(
list(range(len(parameters))), size=n_drop_features, replace=False)
parameters[drop_idxs] = 0.0
return parameters
def make_linear_regression_y(X, parameters, resid_sd=0.25):
y_systematic = np.dot(X, parameters)
y = y_systematic + np.random.normal(scale=resid_sd, size=X.shape[0])
return y
def make_logistic_regression_y(X, parameters):
y_systematic = np.dot(X, parameters)
p = 1 / (1 + np.exp(-y_systematic))
return np.random.binomial(1, p=p, size=X.shape[0])
def make_poisson_regression_y(X, parameters):
y_systematic = np.dot(X, parameters)
mu = np.exp(y_systematic)
return np.random.poisson(lam=mu, size=X.shape[0])
def make_gamma_regression_y(X, parameters):
y_systematic = np.dot(X, parameters)
mu = np.exp(y_systematic)
return np.random.exponential(scale=mu, size=X.shape[0])
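# Hedged usage sketch (not part of the original helpers): builds a small
# synthetic regression problem with the generators above; the sizes are arbitrary.
if __name__ == "__main__":
    X, y, parameters = make_linear_regression(
        n_samples=200, n_uncorr_features=3, n_corr_features=4,
        n_drop_features=2)
    print(X.shape, y.shape, parameters)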
| 41.67619 | 81 | 0.658364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7e0285965f79d7e3cf86a7275a5d19452f38b750 | 1,735 | py | Python | scripts/http-server.py | jrbenito/SonoffDIY-tasmotizer | 1fe9eb9b3b5630102feaf941bd02173d916e81a5 | [
"MIT"
]
| null | null | null | scripts/http-server.py | jrbenito/SonoffDIY-tasmotizer | 1fe9eb9b3b5630102feaf941bd02173d916e81a5 | [
"MIT"
]
| 3 | 2020-03-30T14:07:54.000Z | 2020-03-30T22:59:29.000Z | scripts/http-server.py | jrbenito/SonoffDIY-tasmotizer | 1fe9eb9b3b5630102feaf941bd02173d916e81a5 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
# encoding: utf-8
"""
fake-registration-server.py
Created by nano on 2018-11-22.
Copyright (c) 2018 VTRUST. All rights reserved.
"""
import tornado.web
import tornado.locks
from tornado.options import define, options, parse_command_line
define("port", default=80, help="run on the given port", type=int)
define("addr", default="192.168.254.1", help="run on the given ip", type=str)
define("debug", default=True, help="run in debug mode")
import os
import signal
def exit_cleanly(signal, frame):
print("Received SIGINT, exiting...")
exit(0)
signal.signal(signal.SIGINT, exit_cleanly)
from base64 import b64encode
import hashlib
import hmac
import binascii
from time import time
timestamp = lambda : int(time())
class FilesHandler(tornado.web.StaticFileHandler):
def parse_url_path(self, url_path):
if not url_path or url_path.endswith('/'):
url_path = url_path + str('index.html')
return url_path
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("You are connected to vtrust-flash")
def main():
parse_command_line()
app = tornado.web.Application(
[
(r"/", MainHandler),
('/files/(.*)', FilesHandler, {'path': str('../files/')}),
(r".*", tornado.web.RedirectHandler, {"url": "http://" + options.addr + "/", "permanent": False}),
],
debug=options.debug,
)
try:
app.listen(options.port, options.addr)
print("Listening on " + options.addr + ":" + str(options.port))
tornado.ioloop.IOLoop.current().start()
except OSError as err:
print("Could not start server on port " + str(options.port))
if err.errno == 98: # EADDRINUSE
print("Close the process on this port and try again")
else:
print(err)
if __name__ == "__main__":
main()
| 25.144928 | 101 | 0.702594 | 305 | 0.175793 | 0 | 0 | 0 | 0 | 0 | 0 | 515 | 0.29683 |
7e03585ae9ededa10d0e3ad01e0e054a8d2b1e4e | 1,998 | py | Python | tests/summary/test_binning_config.py | rob-tay/fast-carpenter | a8b128ba00b9a6808b2f0de40cefa2a360466897 | [
"Apache-2.0"
]
| null | null | null | tests/summary/test_binning_config.py | rob-tay/fast-carpenter | a8b128ba00b9a6808b2f0de40cefa2a360466897 | [
"Apache-2.0"
]
| null | null | null | tests/summary/test_binning_config.py | rob-tay/fast-carpenter | a8b128ba00b9a6808b2f0de40cefa2a360466897 | [
"Apache-2.0"
]
| null | null | null | import numpy as np
import fast_carpenter.summary.binning_config as mgr
from . import dummy_binning_descriptions as binning
def test_create_one_region():
cfg = {"_" + k: v for k, v in binning.bins_nmuon.items()}
_in, _out, _bins, _index = mgr.create_one_dimension("test_create_one_region", **cfg)
assert _in == "NMuon"
assert _out == "nmuon"
assert _index is None
assert _bins is None
def test_create_one_dimension_aT():
cfg = {"_" + k: v for k, v in binning.bins_met_px.items()}
_in, _out, _bins, _index = mgr.create_one_dimension("test_create_one_dimension_aT", **cfg)
assert _in == "MET_px"
assert _out == "met_px"
assert _index is None
assert isinstance(_bins, np.ndarray)
assert np.all(_bins[1:-1] == np.linspace(0, 100, 11))
assert _bins[0] == float("-inf")
assert _bins[-1] == float("inf")
def test_create_one_dimension_HT():
cfg = {"_" + k: v for k, v in binning.bins_py.items()}
_in, _out, _bins, _index = mgr.create_one_dimension("test_create_one_dimension_HT", **cfg)
assert _in == "Jet_Py"
assert _out == "py_leadJet"
assert _index == 0
assert isinstance(_bins, np.ndarray)
assert np.all(_bins[1:-1] == [0, 20, 100])
assert _bins[0] == float("-inf")
assert _bins[-1] == float("inf")
def test_create_binning_list():
ins, outs, bins = mgr.create_binning_list("test_create_binning_list", [binning.bins_nmuon, binning.bins_met_px])
assert ins == ["NMuon", "MET_px"]
assert outs == ["nmuon", "met_px"]
assert len(bins) == 2
assert bins[0] is None
def test_create_weights_list():
name = "test_create_weights_list"
weights = mgr.create_weights(name, binning.weight_list)
assert len(weights) == 1
assert weights["EventWeight"] == "EventWeight"
def test_create_weights_dict():
name = "test_create_weights_dict"
weights = mgr.create_weights(name, binning.weight_dict)
assert len(weights) == 1
assert weights["weighted"] == "EventWeight"
| 33.864407 | 116 | 0.68018 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 322 | 0.161161 |
7e037b73adcc0dc266a78c21e1147b45fea5e505 | 671 | py | Python | create_ITIs.py | daanvanes/mloc_exp | bf6fb94b933f1cb78c60c38f80f03c78e9da3686 | [
"MIT"
]
| 1 | 2019-03-20T15:12:07.000Z | 2019-03-20T15:12:07.000Z | create_ITIs.py | daanvanes/mloc_exp | bf6fb94b933f1cb78c60c38f80f03c78e9da3686 | [
"MIT"
]
| null | null | null | create_ITIs.py | daanvanes/mloc_exp | bf6fb94b933f1cb78c60c38f80f03c78e9da3686 | [
"MIT"
]
| null | null | null | from __future__ import division
from constants import *
import numpy as np
import os
precueITIs = np.random.exponential(standard_parameters['mean_iti_precue'], standard_parameters['n_targets']) + standard_parameters['min_iti_precue']
np.save('ITIs/precueITIs.npy',precueITIs)
postcueITIs = np.random.exponential(standard_parameters['mean_iti_postcue'], standard_parameters['n_targets']) + standard_parameters['min_iti_postcue']
np.save('ITIs/postcueITIs.npy',postcueITIs)
spITIs = np.round(np.random.exponential(standard_parameters['mean_iti_sp'], standard_parameters['n_targets']) + standard_parameters['min_iti_sp']).astype('int32')
np.save('ITIs/spITIs.npy',spITIs) | 51.615385 | 162 | 0.81073 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 193 | 0.28763 |
7e040cb0e0c724ec734deffc7ed5a19d9e7e9d15 | 99 | py | Python | venv/lib/python3.7/site-packages/kdlearn/myfunctions.py | FillOverFlow/kdlearn | 1e57895cb10ca903a33e2774986661b9b64d4071 | [
"MIT"
]
| 1 | 2021-01-19T03:35:20.000Z | 2021-01-19T03:35:20.000Z | venv/lib/python3.7/site-packages/kdlearn/myfunctions.py | FillOverFlow/kdlearn | 1e57895cb10ca903a33e2774986661b9b64d4071 | [
"MIT"
]
| null | null | null | venv/lib/python3.7/site-packages/kdlearn/myfunctions.py | FillOverFlow/kdlearn | 1e57895cb10ca903a33e2774986661b9b64d4071 | [
"MIT"
]
| null | null | null | '''
PUT FUNCTION HERE !!
Author Davinci
'''
def helloworld(name):
return "KDlearn :" + name
| 9.9 | 29 | 0.626263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 55 | 0.555556 |
7e0480f047709048b68affbe1e229fbea8aaa94b | 4,122 | py | Python | Set_ADT/linearset.py | jaeheeLee17/DS_and_Algorithms_summary | 917500dd768eae8cfbb02cf2838d494cb720f1c0 | [
"MIT"
]
| null | null | null | Set_ADT/linearset.py | jaeheeLee17/DS_and_Algorithms_summary | 917500dd768eae8cfbb02cf2838d494cb720f1c0 | [
"MIT"
]
| null | null | null | Set_ADT/linearset.py | jaeheeLee17/DS_and_Algorithms_summary | 917500dd768eae8cfbb02cf2838d494cb720f1c0 | [
"MIT"
]
| null | null | null | # Implementation of the Set ADT container using a Python list.
class Set:
# Creates an empty set instance.
def __init__(self):
self._theElements = list()
# Returns the number of items in the set
def __len__(self):
return len(self._theElements)
# Determines if an element is in the set.
def __contains__(self, element):
return element in self._theElements
# Adds a new unique element to the set.
def add(self, element):
if element not in self:
self._theElements.append(element)
# Removes an element from the set.
def remove(self, element):
assert element in self, "The element must be in the set."
        self._theElements.remove(element)
# Determines if two sets are equal
def __eq__(self, setB):
if len(self) != len(setB):
return False
else:
# return self.isSubsetOf(setB)
for i in range(len(self)):
if self._theElements[i] != setB._theElements[i]:
return False
return True
# Determines if this set is a subset of setB
def isSubsetOf(self, setB):
for element in self:
if element not in setB:
return False
return True
# Creates a new set from the union of this set and setB
def union(self, setB):
'''
newSet = Set()
newSet._theElements.extend(self._theElements)
for element in setB:
if element not in self:
newSet._theElements.append(element)
return newSet
'''
newSet = Set()
a, b = 0, 0
# Merge the two lists together until one is empty.
while a < len(self) and b < len(setB):
valueA = self._theElements[a]
valueB = self._theElements[b]
if valueA < valueB:
newSet._theElements.append(valueA)
a += 1
elif valueA > valueB:
newSet._theElements.append(valueB)
b += 1
else: # Only one of the two duplicates are appended.
newSet._theElements.append(valueA)
a += 1
b += 1
# If listA contains more items, append them to newList
while a < len(self):
newSet._theElements.append(self._theElements[a])
a += 1
# Or if listB contains more items, append them to newList
while b < len(setB):
newSet._theElements.append(setB._theElements[b])
b += 1
return newSet
# TODO: Creates a new set from the intersection: self set and setB.
def intersect(self, setB):
newSet = Set()
for element in setB:
if element in self:
newSet._theElements.append(element)
return newSet
# TODO: Creates a new set from the difference: self set and setB.
def difference(self, setB):
newSet = Set()
newSet._theElements.extend(self._theElements)
for element in setB:
if element in self:
newSet._theElements.remove(element)
return newSet
# Returns an iterator for traversing the list of items.
def __iter__(self):
return _SetIterator(self._theElements)
# Finds the position of the element within the ordered list..
def _findPosition(self, element):
start = 0
end = len(self) - 1
while start <= end:
mid = (start + end) // 2
            if self._theElements[mid] == element:
                return mid
            elif element < self._theElements[mid]:
end = mid - 1
else:
start = mid + 1
return start
# An iterator for the Set ADT.
class _SetIterator:
def __init__(self, theElements):
self._SetRef = theElements
self._curidx = 0
def __iter__(self):
return self
def __next__(self):
if self._curidx < len(self._SetRef):
entry = self._SetRef[self._curidx]
self._curidx += 1
return entry
else:
raise StopIteration
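# Hedged usage sketch (not part of the original exercise): exercises the basic
# Set ADT operations above. Elements are added in sorted order because union()
# merges the two backing lists positionally.
if __name__ == "__main__":
    a = Set()
    b = Set()
    for value in (1, 3, 5, 7):
        a.add(value)
    for value in (3, 4, 5):
        b.add(value)
    print(len(a), 5 in a)
    print([x for x in a.union(b)])
    print([x for x in a.intersect(b)])
    print([x for x in a.difference(b)])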
| 31.707692 | 71 | 0.563561 | 4,025 | 0.976468 | 0 | 0 | 0 | 0 | 0 | 0 | 1,160 | 0.281417 |
7e0968601bb493a7e6ab7c62ca33e94de63a37f6 | 123 | py | Python | src/apps/buttons/apps.py | GoddessEyes/info_tbot | c7c5c818dc0c0c72aa15e6e4a85e7e28b4a7660d | [
"MIT"
]
| null | null | null | src/apps/buttons/apps.py | GoddessEyes/info_tbot | c7c5c818dc0c0c72aa15e6e4a85e7e28b4a7660d | [
"MIT"
]
| 4 | 2021-03-19T02:42:10.000Z | 2021-09-22T19:08:09.000Z | src/apps/buttons/apps.py | GoddessEyes/info_tbot | c7c5c818dc0c0c72aa15e6e4a85e7e28b4a7660d | [
"MIT"
]
| null | null | null | from django.apps import AppConfig
class ButtonsConfig(AppConfig):
name = 'apps.buttons'
verbose_name = 'Клавиши'
| 17.571429 | 33 | 0.731707 | 93 | 0.715385 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.230769 |
7e09cc367a70ac9496d060fdad8e3eb6e83f2472 | 141 | py | Python | code/tenka1_2019_c_01.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
]
| 3 | 2019-08-16T16:55:48.000Z | 2021-04-11T10:21:40.000Z | code/tenka1_2019_c_01.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
]
| null | null | null | code/tenka1_2019_c_01.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
]
| null | null | null | input()
S=input()
dot=S.count(".")
ans=dot
count=0
for s in S:
if s=="#":count+=1
else:dot-=1
ans=(min(ans,count+dot))
print(ans) | 14.1 | 28 | 0.574468 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.042553 |
7e0a3148033e56abb61f66e7e257ace62456c980 | 2,932 | py | Python | app/billing/views.py | flaviogf/finance | 86a74e1eea6b19d7fe8c311eb77394a267e26432 | [
"MIT"
]
| null | null | null | app/billing/views.py | flaviogf/finance | 86a74e1eea6b19d7fe8c311eb77394a267e26432 | [
"MIT"
]
| null | null | null | app/billing/views.py | flaviogf/finance | 86a74e1eea6b19d7fe8c311eb77394a267e26432 | [
"MIT"
]
| null | null | null | from flask import (Blueprint, abort, flash, redirect, render_template, request,
url_for)
from flask_login import current_user, login_required
from app import db
from app.billing.forms import CreateBillingForm
from app.models import Billing
from sqlalchemy import desc
billing = Blueprint('billing', __name__)
@billing.route('/billing/create', methods=['GET', 'POST'])
@login_required
def create():
form = CreateBillingForm()
if form.validate_on_submit():
billing = Billing(title=form.title.data,
description=form.description.data,
value=form.value.data,
work_date=form.work_date.data,
user_id=current_user.get_id())
db.session.add(billing)
db.session.commit()
return redirect(url_for('billing.pagination'))
return render_template('create_billing.html', title='Create Billing', form=form)
@billing.route('/billing')
@login_required
def pagination():
page = request.args.get('page', 1, type=int)
billings = (Billing.query
.filter_by(user_id=current_user.get_id())
.order_by(desc(Billing.id))
.paginate(page=page, per_page=5))
return render_template('pagination_billing.html', title='Search Billing', billings=billings)
@billing.route('/billing/<int:id>', methods=['GET', 'POST'])
@login_required
def update(id):
billing = Billing.query.get_or_404(id)
form = CreateBillingForm()
if form.validate_on_submit():
billing.title = form.title.data
billing.description = form.description.data
billing.value = form.value.data
billing.work_date = form.work_date.data
db.session.commit()
flash('Billing updated with successfully.')
return redirect(url_for('billing.update', id=id))
elif request.method == 'GET':
form.title.data = billing.title
form.description.data = billing.description
form.value.data = billing.value
form.work_date.data = billing.work_date
return render_template('create_billing.html', title='Update Billing', form=form)
@billing.route('/billing/<int:id>/confirm-receive')
@login_required
def confirm_receive(id):
billing = Billing.query.get_or_404(id)
if current_user.get_id() != billing.user_id:
abort(403)
billing.confirm_receive()
db.session.commit()
page = request.args.get('page', 1, type=int)
return redirect(url_for('billing.pagination', page=page))
@billing.route('/billing/<int:id>/cancel-receive')
@login_required
def cancel_receive(id):
billing = Billing.query.get_or_404(id)
if current_user.get_id() != billing.user_id:
abort(403)
billing.cancel_receive()
db.session.commit()
page = request.args.get('page', 1, type=int)
return redirect(url_for('billing.pagination', page=page))
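# Hedged usage note (not part of the original app): the `billing` blueprint
# above still has to be registered on the Flask application, typically
#
#   app.register_blueprint(billing)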
| 27.660377 | 96 | 0.664734 | 0 | 0 | 0 | 0 | 2,587 | 0.882333 | 0 | 0 | 396 | 0.135061 |
7e0b8779363fd91f6026918cffc7f561df56bcf8 | 9,120 | py | Python | flickipedia/mashup.py | rfaulkner/Flickipedia | 1b53f30be4027901748a09c411d568c7148f4e4b | [
"BSD-2-Clause"
]
| 1 | 2016-03-11T09:40:19.000Z | 2016-03-11T09:40:19.000Z | flickipedia/mashup.py | rfaulkner/Flickipedia | 1b53f30be4027901748a09c411d568c7148f4e4b | [
"BSD-2-Clause"
]
| 1 | 2015-02-27T02:23:19.000Z | 2015-02-27T02:23:19.000Z | flickipedia/mashup.py | rfaulkner/Flickipedia | 1b53f30be4027901748a09c411d568c7148f4e4b | [
"BSD-2-Clause"
]
| null | null | null | """
Author: Ryan Faulkner
Date: October 19th, 2014
Container for mashup logic.
"""
import json
import random
from sqlalchemy.orm.exc import UnmappedInstanceError
from flickipedia.redisio import DataIORedis
from flickipedia.model.articles import ArticleModel, ArticleContentModel
from flickipedia.config import log, settings
from flickipedia.model.likes import LikeModel
from flickipedia.model.exclude import ExcludeModel
from flickipedia.model.photos import PhotoModel
from flickipedia.parse import parse_strip_elements, parse_convert_links, \
handle_photo_integrate, format_title_link, add_formatting_generic
def get_article_count():
"""
Fetch total article count
:return: int; total count of articles
"""
DataIORedis().connect()
# Fetch article count from redis (query from DB if not present)
# Refresh according to config for rate
article_count = DataIORedis().read(settings.ARTICLE_COUNT_KEY)
if not article_count \
or random.randint(1, settings.ARTICLE_COUNT_REFRESH_RATE) == 1 \
or article_count < settings.MYSQL_MAX_ROWS:
with ArticleModel() as am:
article_count = am.get_article_count()
DataIORedis().write(settings.ARTICLE_COUNT_KEY, article_count)
return int(article_count)
def get_max_article_id():
"""
Fetch the maximum article ID
:return: int; maximum id from article meta
"""
max_aid = DataIORedis().read(settings.MAX_ARTICLE_ID_KEY)
if not max_aid \
or random.randint(1, settings.ARTICLE_MAXID_REFRESH_RATE) == 1:
with ArticleModel() as am:
max_aid = am.get_max_id()
DataIORedis().write(settings.MAX_ARTICLE_ID_KEY, max_aid)
return max_aid
def get_article_stored_body(article):
"""
Fetch corresponding article object
:param article: str; article name
:return: json, Article; stored page content, corresponding
article model object
"""
with ArticleModel() as am:
article_obj = am.get_article_by_name(article)
try:
with ArticleContentModel() as acm:
body = acm.get_article_content(article_obj._id).markup
except Exception as e:
log.info('Article markup not found: "%s"' % e.message)
body = ''
return body
def get_wiki_content(article):
"""
Retrieve the wiki content from the mediawiki API
:param article: str; article name
:return: Wikipedia; mediawiki api response object
"""
pass
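# Hedged sketch (an assumption, not the project's actual code): callers such as
# prep_page_content() below expect an object exposing .html() and .title, which
# matches the `wikipedia` package's WikipediaPage, so one possible body is:
#
#   import wikipedia
#   def get_wiki_content(article):
#       try:
#           return wikipedia.page(article)
#       except wikipedia.exceptions.WikipediaException:
#           return None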
def get_flickr_photos(flickr_json):
"""
Retrience Flickr photo content from Flickr API
:param article: str; article name
:return: list; list of Flickr photo json
"""
photos = []
for i in xrange(settings.NUM_PHOTOS_TO_FETCH):
try:
photos.append(
{
'owner': flickr_json['photos']['photo'][i]['owner'],
'photo_id': flickr_json['photos']['photo'][i]['id'],
'farm': flickr_json['photos']['photo'][i]['farm'],
'server': flickr_json['photos']['photo'][i]['server'],
'title': flickr_json['photos']['photo'][i]['title'],
'secret': flickr_json['photos']['photo'][i]['secret'],
},
)
except (IndexError, KeyError) as e:
log.error('No more photos to process for: - "%s"' % (e.message))
log.debug('Photo info: %s' % (str(photos)))
return photos
def manage_article_storage(max_article_id, article_count):
"""
Handle the storage of new articles
    :param max_article_id: int; maximum article id
:param article_count: int; total count of articles
:return: bool; success
"""
if article_count >= settings.MYSQL_MAX_ROWS:
if max_article_id:
# TODO - CHANGE THIS be careful, could iterate many times
article_removed = False
attempts = 0
while not article_removed \
or attempts > settings.MAX_RETRIES_FOR_REMOVE:
attempts += 1
article_id = random.randint(0, int(max_article_id))
with ArticleModel() as am:
log.info('Removing article id: ' + str(article_id))
try:
am.delete_article(article_id)
article_removed = True
except UnmappedInstanceError:
continue
else:
log.error('Could not determine a max article id.')
return True
def handle_article_insert(article, wiki_page_id):
"""
Handle insertion of article meta data
    :param article: str; article name
    :param wiki_page_id: int; wikipedia page id
:return: int, bool; success
"""
with ArticleModel() as am:
if am.insert_article(article, wiki_page_id):
article_obj = am.get_article_by_name(article)
article_id = article_obj._id
success = True
else:
log.error('Couldn\'t insert article: "%s"' % article)
article_id = -1
success = False
return article_id, success
def handle_article_content_insert(article_id, page_content, is_new_article):
"""
Handle the insertion of article content
:param article_id: int; article id
:param page_content: json; page content
:param is_new_article: bool; a new article?
:return: bool; success
"""
with ArticleContentModel() as acm:
if is_new_article:
acm.insert_article(article_id, json.dumps(page_content))
else:
acm.update_article(article_id, json.dumps(page_content))
def prep_page_content(article_id, article, wiki, photos, user_obj):
"""
Prepare the formatted article content
:param article_id: int; article id
:param article: str; article name
    :param wiki: wikipedia; mediawiki api response object
:param photos: list; list of photo json
:param user_obj: User; user object for request
:return: dict; formatted page response passed to jinja template
"""
html = parse_strip_elements(wiki.html())
html = parse_convert_links(html)
html = add_formatting_generic(html)
photo_ids = process_photos(article_id, photos, user_obj)
html = handle_photo_integrate(photos, html, article)
page_content = {
'title': format_title_link(wiki.title, article),
'content': html,
'section_img_class': settings.SECTION_IMG_CLASS,
'num_photos': len(photos),
'article_id': article_id,
'user_id': user_obj.get_id(),
'photo_ids': photo_ids
}
return page_content
def update_last_access(article_id):
"""
Update article last access
:param article_id: int; article id
:return: bool; success
"""
pass
def order_photos_by_rank(article_id, photos):
""" Reorders photos by score """
# Compute scores
for i in xrange(len(photos)):
# Get Exclusions & Endorsements
with ExcludeModel() as em:
exclusions = em.get_excludes_article_photo(article_id,
photos[i]['photo_id'])
with LikeModel() as lm:
endorsements = lm.get_likes_article_photo(article_id,
photos[i]['photo_id'])
photos[i]['score'] = len(endorsements) - len(exclusions)
# lambda method for sorting by score descending
f = lambda x, y: cmp(-x['score'], -y['score'])
return sorted(photos, f)
def process_photos(article_id, photos, user_obj):
"""
Handles linking photo results with the model and returns a list of
Flickr photo ids to pass to templating
:param article_id: int; article id
:param photos: list of photos
:param user_obj: User; user object for request
:return: List of Flickr photo ids
"""
photo_ids = []
for photo in photos:
# Ensure that each photo is modeled
with PhotoModel() as pm:
photo_obj = pm.get_photo(photo['photo_id'], article_id)
if not photo_obj:
log.info('Processing photo: "%s"' % str(photo))
if pm.insert_photo(photo['photo_id'], article_id):
photo_obj = pm.get_photo(
photo['photo_id'], article_id)
if not photo_obj:
log.error('DB Error: Could not retrieve or '
'insert: "%s"' % str(photo))
continue
else:
log.error('Couldn\'t insert photo: "%s"' % (
photo['photo_id']))
photo['id'] = photo_obj._id
photo['votes'] = photo_obj.votes
# Retrieve like data
with LikeModel() as lm:
if lm.get_like(article_id, photo_obj._id,
user_obj.get_id()):
photo['like'] = True
else:
photo['like'] = False
photo_ids.append(photo['photo_id'])
return photo_ids | 35.076923 | 78 | 0.607346 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,160 | 0.346491 |
7e0be21835c15a9296a6ae0c119d0388d9169b45 | 240 | py | Python | docs/examples/slider_dimmer.py | SatoshiIwasada/BlueDot | e93bc242593d3a3cbfd0ff97f98fcffb0fcd961a | [
"MIT"
]
| 112 | 2017-03-27T17:23:17.000Z | 2022-03-13T09:51:43.000Z | docs/examples/slider_dimmer.py | SatoshiIwasada/BlueDot | e93bc242593d3a3cbfd0ff97f98fcffb0fcd961a | [
"MIT"
]
| 109 | 2017-03-29T11:19:54.000Z | 2022-02-03T14:18:15.000Z | docs/examples/slider_dimmer.py | SatoshiIwasada/BlueDot | e93bc242593d3a3cbfd0ff97f98fcffb0fcd961a | [
"MIT"
]
| 40 | 2017-03-30T23:23:27.000Z | 2022-01-21T17:09:11.000Z | from bluedot import BlueDot
from gpiozero import PWMLED
from signal import pause
def set_brightness(pos):
brightness = (pos.y + 1) / 2
led.value = brightness
led = PWMLED(27)
bd = BlueDot()
bd.when_moved = set_brightness
pause()
| 17.142857 | 32 | 0.725 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7e0e399837934c037868f72f1f2ece1fe8884d6e | 328 | py | Python | blog/urls.py | Halo-Developers/Halo-Learn | 4c8f9e395c0145df39fa3333fefa23d02a370688 | [
"MIT"
]
| 1 | 2021-09-23T16:02:51.000Z | 2021-09-23T16:02:51.000Z | blog/urls.py | kuyesu/Halo-Learn | abd60d45c191297daedd20b3b308a30a78cba9c7 | [
"MIT"
]
| null | null | null | blog/urls.py | kuyesu/Halo-Learn | abd60d45c191297daedd20b3b308a30a78cba9c7 | [
"MIT"
]
| 2 | 2021-09-20T09:50:45.000Z | 2022-02-20T06:42:42.000Z | from django.urls import path
from . import views
app_name = 'blog'
urlpatterns = [
path('', views.post_list, name='post_list'),
path('<slug:post>/',views.post_detail,name="post_detail"),
path('comment/reply/', views.reply_page, name="reply"),
path('tag/<slug:tag_slug>/',views.post_list, name='post_tag'),
]
| 27.333333 | 68 | 0.67378 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.307927 |
7e11a4d77fb343f77a786063eff503d5200a6c2d | 1,313 | py | Python | 126-Word_Ladder_II.py | QuenLo/leecode | ce861103949510dc54fd5cb336bd992c40748de2 | [
"MIT"
]
| 6 | 2018-06-13T06:48:42.000Z | 2020-11-25T10:48:13.000Z | 126-Word_Ladder_II.py | QuenLo/leecode | ce861103949510dc54fd5cb336bd992c40748de2 | [
"MIT"
]
| null | null | null | 126-Word_Ladder_II.py | QuenLo/leecode | ce861103949510dc54fd5cb336bd992c40748de2 | [
"MIT"
]
| null | null | null | class Solution:
def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:
tree, words, len_w = collections.defaultdict(set), set( wordList ), len(beginWord)
if endWord not in words:
return []
found, q, nextq = False, {beginWord}, set()
while q and not found:
words -= set(q)
for x in q:
# a -> z
for char in string.ascii_lowercase:
for i in range(len_w):
test = x[:i] + char + x[i+1:]
if test == endWord:
found = True
tree[x].add(test)
elif test in words:
nextq.add(test)
tree[x].add(test)
q, nextq = nextq, set()
def back(x):
if x == endWord:
return [[x]]
else:
ans = []
for test in tree[x]:
for y in back(test):
ans.append( [x] + y )
return ans
# [[x]] if x == endWord else [[x] + rest for y in tree[x] for rest in bt(y)]
return back(beginWord)
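# Hedged usage sketch (not part of the original LeetCode submission), assuming
# the BFS + backtracking above is complete:
if __name__ == "__main__":
    words = ["hot", "dot", "dog", "lot", "log", "cog"]
    print(Solution().findLadders("hit", "cog", words))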
| 35.486486 | 96 | 0.393755 | 1,312 | 0.999238 | 0 | 0 | 0 | 0 | 0 | 0 | 84 | 0.063976 |
fd665b1231aab43a664a3eab839a54a833e10f79 | 3,144 | py | Python | web/env/lib/python3.6/site-packages/test/file/test_includer.py | Conbrown100/webfortune | 779026d064498d36ddeba07e06cc744fb335ceb6 | [
"Apache-2.0"
]
| 8 | 2015-07-30T16:19:18.000Z | 2021-08-10T21:00:47.000Z | web/env/lib/python3.6/site-packages/test/file/test_includer.py | Conbrown100/webfortune | 779026d064498d36ddeba07e06cc744fb335ceb6 | [
"Apache-2.0"
]
| 3 | 2015-01-09T13:53:55.000Z | 2017-06-05T17:39:46.000Z | web/env/lib/python3.6/site-packages/test/file/test_includer.py | Conbrown100/webfortune | 779026d064498d36ddeba07e06cc744fb335ceb6 | [
"Apache-2.0"
]
| 6 | 2015-01-09T13:47:15.000Z | 2020-12-25T14:09:41.000Z | import os
from tempfile import TemporaryDirectory
import codecs
import logging
from grizzled.file.includer import *
from grizzled.os import working_directory
from grizzled.text import strip_margin
import pytest
@pytest.fixture
def log():
return logging.getLogger('test')
def test_simple(log):
outer = '''|First non-blank line.
|Second non-blank line.
|%include "inner.txt"
|Last line.
|'''
inner = '''|Inner line 1
|Inner line 2
|'''
expected = strip_margin(
'''|First non-blank line.
|Second non-blank line.
|Inner line 1
|Inner line 2
|Last line.
|'''
)
with TemporaryDirectory() as dir:
outer_path = os.path.join(dir, "outer.txt")
all = (
(outer, outer_path),
(inner, os.path.join(dir, "inner.txt")),
)
for text, path in all:
log.debug(f'writing "{path}"')
with codecs.open(path, mode='w', encoding='utf-8') as f:
f.write(strip_margin(text))
inc = Includer(outer_path)
lines = [line for line in inc]
res = ''.join(lines)
assert res == expected
def test_nested(log):
outer = '''|First non-blank line.
|Second non-blank line.
|%include "nested1.txt"
|Last line.
|'''
nested1 = '''|Nested 1 line 1
|%include "nested2.txt"
|Nested 1 line 3
|'''
nested2 = '''|Nested 2 line 1
|Nested 2 line 2
|'''
expected = strip_margin(
'''|First non-blank line.
|Second non-blank line.
|Nested 1 line 1
|Nested 2 line 1
|Nested 2 line 2
|Nested 1 line 3
|Last line.
|'''
)
with TemporaryDirectory() as dir:
outer_path = os.path.join(dir, "outer.txt")
all = (
(outer, outer_path),
(nested1, os.path.join(dir, "nested1.txt")),
(nested2, os.path.join(dir, "nested2.txt")),
)
for text, path in all:
with codecs.open(path, mode='w', encoding='utf-8') as f:
f.write(strip_margin(text))
inc = Includer(outer_path)
lines = [line for line in inc]
res = ''.join(lines)
assert res == expected
def test_overflow(log):
outer = '''|First non-blank line.
|Second non-blank line.
|%include "outer.txt"
|Last line.
|'''
with TemporaryDirectory() as dir:
outer_path = os.path.join(dir, "outer.txt")
with codecs.open(outer_path, mode='w', encoding='utf-8') as f:
f.write(strip_margin(outer))
try:
Includer(outer_path, max_nest_level=10)
assert False, "Expected max-nesting exception"
except MaxNestingExceededError as e:
print(e)
def _log_text_file(log, prefix: str, text: str) -> None:
log.debug(f'{prefix}:\n---\n{text}\n---')
| 29.383178 | 70 | 0.515585 | 0 | 0 | 0 | 0 | 63 | 0.020038 | 0 | 0 | 1,253 | 0.398537 |
fd684e3bf1de0c4b9c2f1d5a15a6a2d42e862075 | 286 | py | Python | output/models/nist_data/list_pkg/unsigned_short/schema_instance/nistschema_sv_iv_list_unsigned_short_min_length_2_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
]
| 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/nist_data/list_pkg/unsigned_short/schema_instance/nistschema_sv_iv_list_unsigned_short_min_length_2_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
]
| 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/nist_data/list_pkg/unsigned_short/schema_instance/nistschema_sv_iv_list_unsigned_short_min_length_2_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
]
| null | null | null | from output.models.nist_data.list_pkg.unsigned_short.schema_instance.nistschema_sv_iv_list_unsigned_short_min_length_2_xsd.nistschema_sv_iv_list_unsigned_short_min_length_2 import NistschemaSvIvListUnsignedShortMinLength2
__all__ = [
"NistschemaSvIvListUnsignedShortMinLength2",
]
| 47.666667 | 221 | 0.905594 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 43 | 0.15035 |
fd69e06856c7f3a481475985f97cf69bf7d1965f | 127 | py | Python | satchmo/projects/skeleton/localsite/urls.py | predatell/satchmo | 6ced1f845aadec240c7e433c3cbf4caca96e0d92 | [
"BSD-3-Clause"
]
| 1 | 2019-10-08T16:19:59.000Z | 2019-10-08T16:19:59.000Z | satchmo/projects/skeleton/localsite/urls.py | predatell/satchmo | 6ced1f845aadec240c7e433c3cbf4caca96e0d92 | [
"BSD-3-Clause"
]
| null | null | null | satchmo/projects/skeleton/localsite/urls.py | predatell/satchmo | 6ced1f845aadec240c7e433c3cbf4caca96e0d92 | [
"BSD-3-Clause"
]
| null | null | null | from django.conf.urls import url
from simple.localsite.views import example
urlpatterns = [
url(r'example/', example),
]
| 15.875 | 42 | 0.732283 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0.086614 |
fd6a627b6084b5a56d9fe3161a2d00c62052ed2a | 8,850 | py | Python | tbconnect/tests/test_views.py | praekeltfoundation/healthcheck | 3f8b3722ea41c2d81c706e0f9a3473ba2cb2f2ba | [
"BSD-3-Clause"
]
| null | null | null | tbconnect/tests/test_views.py | praekeltfoundation/healthcheck | 3f8b3722ea41c2d81c706e0f9a3473ba2cb2f2ba | [
"BSD-3-Clause"
]
| 23 | 2020-07-16T15:40:35.000Z | 2021-12-13T13:59:30.000Z | tbconnect/tests/test_views.py | praekeltfoundation/healthcheck | 3f8b3722ea41c2d81c706e0f9a3473ba2cb2f2ba | [
"BSD-3-Clause"
]
| 1 | 2021-02-24T04:58:40.000Z | 2021-02-24T04:58:40.000Z | from django.test import TestCase
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from tbconnect.models import TBCheck, TBTest
from userprofile.models import HealthCheckUserProfile
from userprofile.tests.test_views import BaseEventTestCase
from tbconnect.serializers import TBCheckSerializer
class TBCheckViewSetTests(APITestCase, BaseEventTestCase):
url = reverse("tbcheck-list")
def test_data_validation(self):
"""
The supplied data must be validated, and any errors returned
"""
user = get_user_model().objects.create_user("test")
user.user_permissions.add(Permission.objects.get(codename="add_tbcheck"))
self.client.force_authenticate(user)
response = self.client.post(self.url)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_successful_request(self):
"""
Should create a new TBCheck object in the database
"""
user = get_user_model().objects.create_user("test")
user.user_permissions.add(Permission.objects.get(codename="add_tbcheck"))
self.client.force_authenticate(user)
response = self.client.post(
self.url,
{
"msisdn": "27856454612",
"source": "USSD",
"province": "ZA-WC",
"city": "Cape Town",
"age": TBCheck.AGE_18T40,
"gender": TBCheck.GENDER_FEMALE,
"cough": True,
"fever": True,
"sweat": False,
"weight": True,
"exposure": "yes",
"tracing": True,
"risk": TBCheck.RISK_LOW,
"location": "+40.20361+40.20361",
"follow_up_optin": True,
"language": "eng",
},
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
[tbcheck] = TBCheck.objects.all()
self.assertEqual(tbcheck.msisdn, "27856454612")
self.assertEqual(tbcheck.source, "USSD")
self.assertEqual(tbcheck.province, "ZA-WC")
self.assertEqual(tbcheck.city, "Cape Town")
self.assertEqual(tbcheck.age, TBCheck.AGE_18T40)
self.assertEqual(tbcheck.gender, TBCheck.GENDER_FEMALE)
self.assertTrue(tbcheck.cough)
self.assertTrue(tbcheck.fever)
self.assertFalse(tbcheck.sweat)
self.assertTrue(tbcheck.weight)
self.assertEqual(tbcheck.exposure, "yes")
self.assertTrue(tbcheck.tracing)
self.assertEqual(tbcheck.risk, TBCheck.RISK_LOW)
self.assertEqual(tbcheck.location, "+40.20361+40.20361")
self.assertTrue(tbcheck.follow_up_optin)
self.assertEqual(tbcheck.language, "eng")
def test_location_validation(self):
"""
        Should return a validation error when neither location nor city_location is supplied
"""
user = get_user_model().objects.create_user("test")
user.user_permissions.add(Permission.objects.get(codename="add_tbcheck"))
self.client.force_authenticate(user)
response = self.client.post(
self.url,
{
"msisdn": "27856454612",
"source": "USSD",
"province": "ZA-WC",
"city": "Cape Town",
"age": TBCheck.AGE_18T40,
"gender": TBCheck.GENDER_FEMALE,
"cough": True,
"fever": True,
"sweat": False,
"weight": True,
"exposure": "yes",
"tracing": True,
"risk": TBCheck.RISK_LOW,
},
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json(),
{"non_field_errors": ["location and city_location are both None"]},
)
def test_creates_user_profile(self):
"""
The user profile should be created when the TB Check is saved
"""
user = get_user_model().objects.create_user("test")
user.user_permissions.add(Permission.objects.get(codename="add_tbcheck"))
self.client.force_authenticate(user)
response = self.client.post(
self.url,
{
"msisdn": "+27856454612",
"source": "USSD",
"province": "ZA-WC",
"city": "Cape Town",
"age": TBCheck.AGE_18T40,
"gender": TBCheck.GENDER_FEMALE,
"cough": True,
"fever": True,
"sweat": False,
"weight": True,
"exposure": "yes",
"tracing": True,
"risk": TBCheck.RISK_LOW,
"location": "+40.20361+40.20361",
},
format="json",
)
profile = HealthCheckUserProfile.objects.get(msisdn="+27856454612")
self.assertEqual(profile.province, "ZA-WC")
self.assertEqual(profile.city, "Cape Town")
self.assertEqual(profile.age, TBCheck.AGE_18T40)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
class TBTestViewSetTests(APITestCase, BaseEventTestCase):
url = reverse("tbtest-list")
def test_data_validation(self):
"""
The supplied data must be validated, and any errors returned
"""
user = get_user_model().objects.create_user("test")
user.user_permissions.add(Permission.objects.get(codename="add_tbtest"))
self.client.force_authenticate(user)
response = self.client.post(self.url)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_successful_create_request(self):
"""
Should create a new TBTest object in the database
"""
user = get_user_model().objects.create_user("test")
user.user_permissions.add(Permission.objects.get(codename="add_tbtest"))
self.client.force_authenticate(user)
response = self.client.post(
self.url,
{
"msisdn": "27856454612",
"source": "WhatsApp",
"result": TBTest.RESULT_PENDING,
},
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
[tbtest] = TBTest.objects.all()
self.assertEqual(tbtest.msisdn, "27856454612")
self.assertEqual(tbtest.source, "WhatsApp")
self.assertEqual(tbtest.result, TBTest.RESULT_PENDING)
def test_successful_update_request(self):
"""
        Should update an existing TBTest object in the database
"""
tbtest = TBTest.objects.create(
**{
"msisdn": "27856454612",
"source": "WhatsApp",
"result": TBTest.RESULT_PENDING,
}
)
user = get_user_model().objects.create_user("test")
user.user_permissions.add(Permission.objects.get(codename="change_tbtest"))
self.client.force_authenticate(user)
update_url = reverse("tbtest-detail", args=(tbtest.id,))
response = self.client.patch(update_url, {"result": TBTest.RESULT_POSITIVE})
self.assertEqual(response.status_code, status.HTTP_200_OK)
tbtest.refresh_from_db()
self.assertEqual(tbtest.msisdn, "27856454612")
self.assertEqual(tbtest.source, "WhatsApp")
self.assertEqual(tbtest.result, TBTest.RESULT_POSITIVE)
class TBCheckSerializerTests(TestCase):
def test_valid_tbcheck(self):
"""
        If age is <18, the location and city_location validation is skipped
"""
data = {
"msisdn": "+2349039756628",
"source": "WhatsApp",
"province": "ZA-GT",
"city": "<not collected>",
"age": "<18",
"gender": "male",
"cough": "True",
"fever": "False",
"sweat": "False",
"weight": "False",
"exposure": "no",
"tracing": "False",
"risk": "low",
}
serializer = TBCheckSerializer(data=data)
self.assertTrue(serializer.is_valid())
self.assertEqual(
dict(serializer.validated_data),
{
"age": "<18",
"city": "<not collected>",
"cough": True,
"exposure": "no",
"fever": False,
"gender": "male",
"msisdn": "+2349039756628",
"province": "ZA-GT",
"risk": "low",
"source": "WhatsApp",
"sweat": False,
"tracing": False,
"weight": False,
},
)
| 37.184874 | 84 | 0.568701 | 8,390 | 0.948023 | 0 | 0 | 0 | 0 | 0 | 0 | 1,988 | 0.224633 |
fd6abf4d61e22150256649650adbe262b09e0720 | 1,350 | py | Python | code/runibm1.py | jrod2699/CS159-NLP-Final-Project- | 76eea6149ab01d5e72232874398458ec9f35227f | [
"MIT"
]
| null | null | null | code/runibm1.py | jrod2699/CS159-NLP-Final-Project- | 76eea6149ab01d5e72232874398458ec9f35227f | [
"MIT"
]
| null | null | null | code/runibm1.py | jrod2699/CS159-NLP-Final-Project- | 76eea6149ab01d5e72232874398458ec9f35227f | [
"MIT"
]
| null | null | null | import nltk
import random
from preprocess import compile_corpus
from nltk.translate import IBMModel1, AlignedSent, Alignment
def run(filename, iterations):
# global variables utilized in the assessment of the IBM Model
global ibm1
global corpus
# construct and modify corpus by adding the system alignments to every sentence pair
corpus = compile_corpus(filename)
ibm1 = IBMModel1(corpus, iterations)
# produce random sentences for testing purposes
get_rand_sent()
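# Illustrative sketch (not part of the original script): compile_corpus() is
# assumed to return a list of nltk AlignedSent pairs, which is what IBMModel1
# consumes above. A tiny stand-in corpus would look like this:
def _example_bitext():
    toy = [
        AlignedSent(["das", "haus", "ist", "klein"], ["the", "house", "is", "small"]),
        AlignedSent(["das", "haus"], ["the", "house"]),
    ]
    model = IBMModel1(toy, 5)
    # after training, each pair is assumed to carry a system alignment, which is
    # what get_rand_sent() prints alongside the words/mots
    return toy[0].alignment, model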
def get_rand_sent():
'''
Redirect the standard output of the program -- i.e. the random sentences --
and transfer it over to the appropriate file. From there we will take a
look at the sentence pair and include the hand alignment (gold standard)
to proceed with evaluating the IBM model.
'''
i = 0
while i < 20:
        index = random.randint(0, len(corpus) - 1)
try:
# only print out "valid" sentence pairs
# valid = sentence pairs with system-created alignments
print(" ".join(corpus[index].mots), "\t", " ".join(corpus[index].words), "\t", corpus[index].alignment)
i += 1
except:
pass
def main():
    # change the file based on the language being tested
run("data/languages/vie-eng.txt", 5)
if __name__ == "__main__":
main()
| 31.395349 | 115 | 0.665926 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 683 | 0.505926 |
fd6ba7ba1979062899ef77a2f2ebee1332127153 | 374 | py | Python | makenew_python_app/server/boot.py | makenew/python-app | 5f3c6669efe6e80d356d39afb712d72bf0e69916 | [
"MIT"
]
| 2 | 2021-01-10T05:54:37.000Z | 2021-01-12T01:24:38.000Z | makenew_python_app/server/boot.py | makenew/python-app | 5f3c6669efe6e80d356d39afb712d72bf0e69916 | [
"MIT"
]
| null | null | null | makenew_python_app/server/boot.py | makenew/python-app | 5f3c6669efe6e80d356d39afb712d72bf0e69916 | [
"MIT"
]
| null | null | null | from os import environ, path
from .server import Server
from .config import configure
def boot(create_dependencies):
config_path = environ.get("PYAPP_CONFIG_PATH") or path.realpath(
path.join(path.dirname(__file__), "..", "..", "config")
)
server = Server(create_dependencies, config_path)
server.update_config_factory(configure)
server.run()
| 26.714286 | 68 | 0.716578 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.093583 |
fd6c34c3adb6f440619a388c9b66cf0b7a99a5e9 | 1,027 | py | Python | checker/backends/pagure.py | 1dot75cm/repo-checker | 1ca191efbeaa9f44876546ee59487e8d515cd735 | [
"MIT"
]
| 4 | 2016-01-10T15:58:48.000Z | 2019-08-10T23:12:31.000Z | checker/backends/pagure.py | 1dot75cm/repo-checker | 1ca191efbeaa9f44876546ee59487e8d515cd735 | [
"MIT"
]
| 1 | 2021-03-31T18:46:14.000Z | 2021-03-31T18:46:14.000Z | checker/backends/pagure.py | 1dot75cm/repo-checker | 1ca191efbeaa9f44876546ee59487e8d515cd735 | [
"MIT"
]
| 5 | 2016-03-22T00:58:33.000Z | 2017-09-14T12:43:54.000Z | # -*- coding: utf-8 -*-
from checker.backends import BaseBackend
from checker import logger
log = logger.getLogger(__name__)
class PagureBackend(BaseBackend):
"""for projects hosted on pagure.io"""
name = 'Pagure'
domain = 'pagure.io'
example = 'https://pagure.io/pagure'
def __init__(self, url):
super(PagureBackend, self).__init__()
self._url = url
self._rule_type = "xpath"
def get_urls(self, branch=None):
return ['https://releases.pagure.org/%s/' % self._url.split('/')[-1],
'%s/commits/%s' % (self._url, branch)]
def get_rules(self):
log.debug('use %s backend rule for %s package.' %
(self.name, self._url.split('/')[-1]))
return [("//td[3][contains(text(), '-')]/text()", ""),
("//h5/a//span/@title", "//div[1]/h5/a/@href")]
@classmethod
def isrelease(cls, url):
if cls.domain in url and 'commits' in url:
return False
else:
return True
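# Minimal usage sketch (added for illustration, not part of the original module).
# The checker framework is assumed to construct a backend from a project URL and
# then scrape the returned URLs with the matching xpath rules.
def _example_usage():
    backend = PagureBackend('https://pagure.io/pagure')
    urls = backend.get_urls(branch='master')    # releases dir + commits page
    rules = backend.get_rules()                 # presumably one (version, link) rule pair per url
    is_rel = PagureBackend.isrelease('https://pagure.io/pagure/commits/master')  # False
    return urls, rules, is_rel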
| 27.756757 | 77 | 0.559883 | 897 | 0.873418 | 0 | 0 | 155 | 0.150925 | 0 | 0 | 296 | 0.288218 |
fd6d8440e80ddfffc2b7c87e874259a2676fb497 | 2,184 | py | Python | vial/plugins/grep/plugin.py | solarnz/vial | 080dd204c6fac49c9541cd179e7842de7cb6f8ee | [
"MIT"
]
| 5 | 2015-06-27T09:36:26.000Z | 2018-05-05T02:43:43.000Z | vial/plugins/grep/plugin.py | solarnz/vial | 080dd204c6fac49c9541cd179e7842de7cb6f8ee | [
"MIT"
]
| 4 | 2018-06-07T15:19:33.000Z | 2020-02-10T12:15:11.000Z | vial/plugins/grep/plugin.py | solarnz/vial | 080dd204c6fac49c9541cd179e7842de7cb6f8ee | [
"MIT"
]
| 2 | 2019-08-30T07:27:05.000Z | 2020-02-12T08:03:24.000Z | import os
import re
from time import time
from vial import vfunc, vim
from vial.fsearch import get_files
from vial.utils import get_projects, redraw
MAX_FILESIZE = 10 * 1024 * 1024
def grep(query):
matcher = re.compile(re.escape(query))
t = time() - 1
result = []
for r in get_projects():
for name, path, root, top, fullpath in get_files(r):
if time() - t >= 1:
redraw()
print fullpath
t = time()
try:
if os.stat(fullpath).st_size > MAX_FILESIZE:
continue
with open(fullpath) as f:
source = f.read()
matches = matcher.finditer(source)
lines = source.splitlines()
except OSError:
continue
for m in matches:
start = m.start()
line = source.count('\n', 0, start) + 1
offset = start - source.rfind('\n', 0, start)
text = lines[line - 1]
if len(text) > 100:
offstart = max(0, offset - 30)
text = text[offstart:offstart+60] + '...'
if offstart:
text = '...' + text
result.append({
'bufnr': '',
'filename': fullpath,
'pattern': '',
'valid': 1,
'nr': -1,
'lnum': line,
'vcol': 0,
'col': offset,
'text': text.replace('\x00', ' '),
'type': ''
})
vfunc.setqflist(result)
if result:
vim.command('cw')
redraw()
print '{} matches found'.format(len(result))
def grepop(type):
old = vfunc.getreg('"')
if type == 'v':
vim.command('normal! `<v`>y')
elif type == 'char':
vim.command('normal! `[v`]y')
else:
return
query = vfunc.getreg('"')
if query.strip():
grep(query)
else:
redraw()
print 'Search for nothing?'
vfunc.setreg('"', old)
| 24.818182 | 61 | 0.431777 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 192 | 0.087912 |
fd6ea7420f474f3252a16e6bcdeebb2e566cf6e9 | 3,619 | py | Python | tests/test_models.py | DynamicGravitySystems/DGP | 5c0b566b846eb25f1e5ede64b2caaaa6a3352a29 | [
"Apache-2.0"
]
| 7 | 2017-08-15T21:51:40.000Z | 2020-10-28T00:40:23.000Z | tests/test_models.py | DynamicGravitySystems/DGP | 5c0b566b846eb25f1e5ede64b2caaaa6a3352a29 | [
"Apache-2.0"
]
| 63 | 2017-08-11T15:12:03.000Z | 2020-05-23T19:03:46.000Z | tests/test_models.py | cbertinato/DGP | 5bb8a30895365eccdd452970c45e248903fca8af | [
"Apache-2.0"
]
| 4 | 2018-03-29T21:30:26.000Z | 2020-10-27T20:15:23.000Z | # -*- coding: utf-8 -*-
"""
Unit tests for new Project/Flight data classes, including JSON
serialization/de-serialization
"""
import time
from datetime import datetime
from typing import Tuple
from uuid import uuid4
from pathlib import Path
import pytest
import pandas as pd
from dgp.core import DataType
from dgp.core.models.project import AirborneProject
from dgp.core.hdf5_manager import HDF5Manager
from dgp.core.models.datafile import DataFile
from dgp.core.models.dataset import DataSet
from dgp.core.models import flight
from dgp.core.models.meter import Gravimeter
@pytest.fixture()
def make_flight():
def _factory() -> Tuple[str, flight.Flight]:
name = str(uuid4().hex)[:12]
return name, flight.Flight(name)
return _factory
def test_flight_actions(make_flight):
# TODO: Test adding/setting gravimeter
flt = flight.Flight('test_flight')
assert 'test_flight' == flt.name
f1_name, f1 = make_flight() # type: flight.Flight
f2_name, f2 = make_flight() # type: flight.Flight
assert f1_name == f1.name
assert f2_name == f2.name
assert not f1.uid == f2.uid
assert '<Flight %s :: %s>' % (f1_name, f1.uid) == repr(f1)
def test_project_path(project: AirborneProject, tmpdir):
assert isinstance(project.path, Path)
new_path = Path(tmpdir).joinpath("new_prj_path")
project.path = new_path
assert new_path == project.path
def test_project_add_child(project: AirborneProject):
with pytest.raises(TypeError):
project.add_child(None)
def test_project_get_child(make_flight):
prj = AirborneProject(name="Project-2", path=Path('.'))
f1_name, f1 = make_flight()
f2_name, f2 = make_flight()
f3_name, f3 = make_flight()
prj.add_child(f1)
prj.add_child(f2)
prj.add_child(f3)
assert f1 == prj.get_child(f1.uid)
assert f3 == prj.get_child(f3.uid)
assert not f2 == prj.get_child(f1.uid)
with pytest.raises(IndexError):
fx = prj.get_child(str(uuid4().hex))
def test_project_remove_child(make_flight):
prj = AirborneProject(name="Project-3", path=Path('.'))
f1_name, f1 = make_flight()
f2_name, f2 = make_flight()
f3_name, f3 = make_flight()
prj.add_child(f1)
prj.add_child(f2)
assert 2 == len(prj.flights)
assert f1 in prj.flights
assert f2 in prj.flights
assert f3 not in prj.flights
assert not prj.remove_child(f3.uid)
assert prj.remove_child(f1.uid)
assert f1 not in prj.flights
assert 1 == len(prj.flights)
def test_gravimeter():
meter = Gravimeter("AT1A-13")
assert "AT1A" == meter.type
assert "AT1A-13" == meter.name
assert meter.config is None
config = meter.read_config(Path("tests/at1m.ini"))
assert isinstance(config, dict)
with pytest.raises(FileNotFoundError):
config = meter.read_config(Path("tests/at1a-fake.ini"))
assert {} == meter.read_config(Path("tests/sample_gravity.csv"))
def test_dataset(tmpdir):
path = Path(tmpdir).joinpath("test.hdf5")
df_grav = DataFile(DataType.GRAVITY, datetime.utcnow(), Path('gravity.dat'))
df_traj = DataFile(DataType.TRAJECTORY, datetime.utcnow(), Path('gps.dat'))
dataset = DataSet(df_grav, df_traj)
assert df_grav == dataset.gravity
assert df_traj == dataset.trajectory
frame_grav = pd.DataFrame([0, 1, 2])
frame_traj = pd.DataFrame([7, 8, 9])
HDF5Manager.save_data(frame_grav, df_grav, path)
HDF5Manager.save_data(frame_traj, df_traj, path)
expected_concat: pd.DataFrame = pd.concat([frame_grav, frame_traj])
# assert expected_concat.equals(dataset.dataframe)
| 27.210526 | 80 | 0.698812 | 0 | 0 | 0 | 0 | 184 | 0.050843 | 0 | 0 | 461 | 0.127383 |
fd6f10a9e5cd95371737b186d651e8e464b2660c | 389 | py | Python | examples/urls.py | sayanjap/DynamicForms | 071707de36d109fe3a17ae5df239240ea5ba707f | [
"BSD-3-Clause"
]
| 42 | 2018-01-18T14:50:05.000Z | 2022-03-24T18:34:19.000Z | examples/urls.py | sayanjap/DynamicForms | 071707de36d109fe3a17ae5df239240ea5ba707f | [
"BSD-3-Clause"
]
| 14 | 2018-12-05T21:39:23.000Z | 2022-02-27T06:43:48.000Z | examples/urls.py | sayanjap/DynamicForms | 071707de36d109fe3a17ae5df239240ea5ba707f | [
"BSD-3-Clause"
]
| 5 | 2018-01-18T16:32:20.000Z | 2021-06-07T10:15:18.000Z | from django.conf.urls import include, url
from rest_framework.documentation import include_docs_urls
from examples.rest import router
from .views import index
urlpatterns = [
url(r'^$', index, name='index'),
url(r'^', include(router.urls)),
url(r'^dynamicforms/', include('dynamicforms.urls')),
url(r'^api-docs/', include_docs_urls(title='Example API documentation')),
]
| 29.923077 | 77 | 0.722365 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 92 | 0.236504 |
fd6f1c1a3069baecfcb5b723cf12a8c76710a022 | 1,312 | py | Python | tests/contract/test_concept.py | Informasjonsforvaltning/fdk-harvester-bff | 21f5d41bbe2506d9c23f0e670e6dee1bfe9f0742 | [
"Apache-2.0"
]
| null | null | null | tests/contract/test_concept.py | Informasjonsforvaltning/fdk-harvester-bff | 21f5d41bbe2506d9c23f0e670e6dee1bfe9f0742 | [
"Apache-2.0"
]
| 20 | 2020-09-23T10:04:48.000Z | 2022-03-14T07:47:45.000Z | tests/contract/test_concept.py | Informasjonsforvaltning/fdk-harvester-bff | 21f5d41bbe2506d9c23f0e670e6dee1bfe9f0742 | [
"Apache-2.0"
]
| null | null | null | """Test cases for concepts."""
from typing import Any
import pytest
import requests
@pytest.mark.contract
def test_get_concept_with_id(http_service: Any) -> None:
test_id = "a683bc63-2961-46af-9956-8a4a3f991cc6"
url = f"{http_service}/concepts/{test_id}"
result = requests.get(url=url, headers={"accept": "application/json"})
assert result.headers["Cache-Control"] == "max-age=86400"
parsed_result = result.json()
assert parsed_result["id"] == "a683bc63-2961-46af-9956-8a4a3f991cc6"
assert (
parsed_result["identifier"]
== "http://begrepskatalogen/begrep/88804c36-ff43-11e6-9d97-005056825ca0"
)
assert parsed_result["prefLabel"] == {"nb": "norsk etternavn"}
assert parsed_result["altLabel"] == [{"nb": "etternavn"}]
assert parsed_result["definition"]["text"] == {
"nb": "navn som i rekkefølge er etter fornavn og eventuelt mellomnavn som skal være i henhold til Lov om personnavn"
}
assert parsed_result["definition"]["remark"] == {
"nb": "Kan være bygget opp av to etternavn satt sammen med bindestrek - såkalt dobbelt etternavn. For at et navn skal anses som navn etter navneloven, må det i utgangspunktet være uttrykt med bokstavene i det norske alfabetet med de diakritiske tegn som støttes av folkeregisteret"
}
| 42.322581 | 289 | 0.70503 | 0 | 0 | 0 | 0 | 1,231 | 0.933283 | 0 | 0 | 779 | 0.590599 |
fd6fc2f8e9fb0cf4963f53e0dd218bc472fd9daa | 4,572 | py | Python | passmanBackend/vault_backend/models.py | sharanvarma0/passman-backend | d210fcc43886bd9be40ceaba3411209799cb8476 | [
"BSD-3-Clause"
]
| null | null | null | passmanBackend/vault_backend/models.py | sharanvarma0/passman-backend | d210fcc43886bd9be40ceaba3411209799cb8476 | [
"BSD-3-Clause"
]
| null | null | null | passmanBackend/vault_backend/models.py | sharanvarma0/passman-backend | d210fcc43886bd9be40ceaba3411209799cb8476 | [
"BSD-3-Clause"
]
| null | null | null | '''
Mostly these are internal imports related to django and rest_framework.
The os and io imports are for creating files, paths and parsing bytes objects respectively
'''
from django.db import models
from django.contrib.auth.models import User
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
from vault_backend.extra_functions import *
import os
import io
'''
The Vault model represents the basic password vault in passman. This model will store the directory path, filename and vault_name specified. This is linked to the User model for only displaying vaults belonging
to the authenticated user. The Vault model is later referenced in different places for creating and updating records stored in it.
'''
class Vault(models.Model):
number = models.IntegerField(primary_key=True)
username = models.ForeignKey(User, on_delete=models.CASCADE)
vault_name = models.CharField(max_length=200, unique=True)
directory = models.CharField(max_length=200, default='/home/sharan/.vaults')
filename = models.CharField(max_length=200, default="vault")
    # Create a new vault file in the specified directory, used later to store the encrypted passwords.
def create_vault(self):
default_directory = self.directory
default_filename = self.filename
if not os.path.exists(default_directory):
os.mkdir(default_directory)
file = open(default_directory + '/' + default_filename, 'w').close()
def check_data(self, term, data):
if term in data:
return True
return False
# adding passwords to the vault file after encrypting them
def add_data(self, sitename, password):
try:
user = self.username
key = generate_key(user)
default_directory = self.directory
default_filename = self.filename
arr_of_passwords = self.get_data()
print(arr_of_passwords)
if arr_of_passwords == '':
arr_of_passwords = [] # passwords stored as a JSON array for easy future retrieval and storage
write_descriptor = open(default_directory + '/' + default_filename, 'w')
write_data = {'site_name': sitename, 'password': password}
if self.check_data(write_data, arr_of_passwords):
return 2
arr_of_passwords.append(write_data)
write_data = JSONRenderer().render(arr_of_passwords)
encrypted_data = encrypt_data(key, write_data) # this encrypt_data function is defined in extra_functions module. It takes some data and encrypts it using cryptography.fernet (refer cryptography.fernet module).
write_descriptor.write(encrypted_data)
write_descriptor.close()
return 0
except:
if (write_descriptor):
write_descriptor.close()
return 1
# read data from the vault file and decrypt them before dispatch
def get_data(self):
try:
user = self.username
key = generate_key(user)
default_directory = self.directory
default_filename = self.filename
read_descriptor = open(default_directory + '/' + default_filename, 'r')
data = read_descriptor.read()
if data == '':
read_descriptor.close()
return data
read_data = io.BytesIO(decrypt_data(key, data)) # the decrypt_data function is defined in extra_functions module. It decrypts data given by generating a fernet key from PBKDF2 using user creds.
json_read_data = JSONParser().parse(read_data)
read_descriptor.close()
return json_read_data
except:
read_descriptor.close()
return 1
    # Delete-record functionality for the vault. Not tested yet; might be implemented in the future.
''' def delete_data(self, sitename, password):
try:
delete_data = {'site_name':sitename, 'password':password}
data = self.get_data()
if self.check_data(delete_data, data):
data.remove(delete_data)
if data:
for dictionary_data in data:
self.add_data(dictionary_data['site_name'], dictionary_data['password'])
return 0
else:
self.create_vault()
return 0
except ValueError:
return 'No Such Value'
'''
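# Illustrative sketch only (not part of the original app): typical flow for the
# Vault model above. Field values are made up, and generate_key()/encrypt_data()
# from extra_functions are assumed to behave as described in the comments of
# add_data()/get_data(). `user` is expected to be an authenticated User instance.
def _example_vault_flow(user):
    vault = Vault(number=1, username=user, vault_name="personal",
                  directory="/tmp/.vaults", filename="vault1")
    vault.create_vault()                                # creates the empty vault file
    status = vault.add_data("example.com", "s3cret")    # 0 ok, 1 error, 2 duplicate
    return status, vault.get_data()                     # decrypted records ('' if empty)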
| 41.563636 | 227 | 0.649606 | 3,090 | 0.675853 | 0 | 0 | 0 | 0 | 0 | 0 | 1,890 | 0.413386 |
fd71a2a6d5b1e71ced9722bf68301238887fd3c8 | 95,557 | py | Python | DexParse.py | liumengdeqq/DexParse | 769899e26f01700c690ed82c48790d1000efb5f1 | [
"Apache-2.0"
]
| 16 | 2015-11-19T01:51:52.000Z | 2020-03-10T06:24:28.000Z | DexParse.py | CvvT/DexParse | 80c3f4a27e7163536f98584c5e7f7ec35a9451b8 | [
"Apache-2.0"
]
| null | null | null | DexParse.py | CvvT/DexParse | 80c3f4a27e7163536f98584c5e7f7ec35a9451b8 | [
"Apache-2.0"
]
| 22 | 2015-09-15T02:20:48.000Z | 2021-06-24T02:55:09.000Z | #! /usr/bin/python
# coding=utf-8
import struct
import os
import hashlib
import Instruction
Access_Flag = {'public': 1, 'private': 2, 'protected': 4, 'static': 8, 'final': 0x10,
'synchronized': 0x20, 'volatile': 0x40, 'bridge': 0x40, 'transient': 0x80,
'varargs': 0x80, 'native': 0x100, 'interface': 0x200, 'abstract': 0x400,
'strictfp': 0x800, 'synthetic': 0x1000, 'annotation': 0x2000, 'enum': 0x4000,
'constructor': 0x10000, 'declared_synchronized': 0x20000}
TypeDescriptor = {'void': 'V', 'boolean': 'Z', 'byte': 'B', 'short': 'S', 'char': 'C',
'int': 'I', 'long': 'J', 'float': 'F', 'double': 'D', 'boolean[]': '[Z',
'byte[]': '[B', 'short[]': '[S', 'char[]': '[C', 'int[]': 'I',
'long[]': '[J', 'float[]': '[F', 'double[]': 'D'}
ShortyDescriptor = {'void': 'V', 'boolean': 'Z', 'byte': 'B', 'short': 'S', 'char': 'C',
'int': 'I', 'long': 'J', 'float': 'F', 'double': 'D'}
ACSII = {'1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, '0': 0,
'a': 10, 'b': 11, 'c': 12, 'd': 13, 'e': 14, 'f': 15}
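# Helper added for illustration (not in the original file): expand a numeric
# access_flags value, as stored in class_def/field/method items, into the names
# listed in Access_Flag. Aliased bits (volatile/bridge, transient/varargs) are
# both reported because the table maps them to the same value.
def _access_flag_names(flags):
    return sorted(name for name, bit in Access_Flag.items() if flags & bit)
# e.g. _access_flag_names(0x19) -> ['final', 'public', 'static']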
def checksum(f, len):
a = 1
b = 0
f.seek(12)
print("file size is :", len)
for i in range(12, len):
onebyte = struct.unpack("B", f.read(1))[0]
a = (a + onebyte) % 65521
b = (b + a) % 65521
return b << 16 | a
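# Usage sketch (added for illustration): the DEX checksum field is Adler-32 over
# everything after the first 12 bytes (magic + checksum), so the value computed
# by checksum() can be compared against the header field of the same file.
def verify_checksum(path):
    with open(path, "rb") as f:
        header = DexHeader(f)            # DexHeader is defined further below
        return checksum(f, os.path.getsize(path)) == header.checksum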
def get_file_sha1(f):
f.seek(32) # skip magic, checksum, sha
sha = hashlib.sha1()
while True:
data = f.read(1024)
if not data:
break
sha.update(data)
return sha.hexdigest()
def rightshift(value, n):
mask = 0x80000000
check = value & mask
if check != mask:
return value >> n
else:
submask = mask
for loop in range(0, n):
submask = (submask | (mask >> loop))
strdata = struct.pack("I", submask | (value >> n))
ret = struct.unpack("i", strdata)[0]
return ret
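# rightshift() emulates an arithmetic (sign-extending) shift on 32-bit values,
# which the signed LEB128 decoder below relies on. Small self-check sketch:
def _rightshift_examples():
    # 0xFFFFFFFE is -2 as a signed 32-bit value, so the sign bit is propagated
    return rightshift(0x0000000C, 2), rightshift(0xFFFFFFFE, 1)   # -> (3, -1)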
def readunsignedleb128(file):
res = struct.unpack("B", file.read(1))[0]
if res > 0x7f:
cur = struct.unpack("B", file.read(1))[0]
res = (res & 0x7f) | ((cur & 0x7f) << 7)
if cur > 0x7f:
cur = struct.unpack("B", file.read(1))[0]
res |= (cur & 0x7f) << 14
if cur > 0x7f:
cur = struct.unpack("B", file.read(1))[0]
res |= (cur & 0x7f) << 21
if cur > 0x7f:
cur = struct.unpack("B", file.read(1))[0]
res |= cur << 28
if res == 44370793110:
print(file.tell())
return res
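# Round-trip sketch (added for illustration): 300 encodes as the two ULEB128
# bytes 0xAC 0x02; writeunsignedleb128() below produces them, and this reader
# recovers the value from any file-like object such as io.BytesIO.
def _uleb128_roundtrip(value=300):
    import io
    buf = io.BytesIO()
    writeunsignedleb128(value, buf)
    buf.seek(0)
    return readunsignedleb128(buf)    # -> 300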
def readsignedleb128(file):
res = struct.unpack("B", file.read(1))[0]
if res <= 0x7f:
res = rightshift((res << 25), 25)
else:
cur = struct.unpack("B", file.read(1))[0]
res = (res & 0x7f) | ((cur & 0x7f) << 7)
if cur <= 0x7f:
res = rightshift((res << 18), 18)
else:
cur = struct.unpack("B", file.read(1))[0]
res |= (cur & 0x7f) << 14
if cur <= 0x7f:
res = rightshift((res << 11), 11)
else:
cur = struct.unpack("B", file.read(1))[0]
res |= (cur & 0x7f) << 21
if cur <= 0x7f:
res = rightshift((res << 4), 4)
else:
cur = struct.unpack("B", file.read(1))[0]
res |= cur << 28
return res
def writesignedleb128(num, file):
if num >= 0:
writeunsignedleb128(num, file)
else:
mask = 0x80000000
for i in range(0, 32):
tmp = num & mask
mask >>= 1
if tmp == 0:
break
loop = 32 - i + 1
while loop > 7:
cur = num & 0x7f | 0x80
num >>= 7
file.write(struct.pack("B", cur))
loop -= 7
cur = num & 0x7f
file.write(struct.pack("B", cur))
def signedleb128forlen(num):
if num >= 0:
return unsignedleb128forlen(num)
else:
mask = 0x80000000
for i in range(0, 32):
tmp = num & mask
mask >>= 1
if tmp == 0:
break
loop = 32 - i + 1
if loop % 7 == 0:
return loop / 7
else:
return loop / 7 + 1
def writeunsignedleb128(num, file):
if num <= 0x7f:
file.write(struct.pack("B", num))
else:
cur = num & 0x7F | 0x80
file.write(struct.pack("B", cur))
num >>= 7
if num <= 0x7f:
file.write(struct.pack("B", num))
else:
cur = num & 0x7f | 0x80
file.write(struct.pack("B", cur))
num >>= 7
if num <= 0x7f:
file.write(struct.pack("B", num))
else:
cur = num & 0x7f | 0x80
file.write(struct.pack("B", cur))
num >>= 7
if num <= 0x7f:
file.write(struct.pack("B", num))
else:
cur = num & 0x7f | 0x80
file.write(struct.pack("B", cur))
num >>= 7
file.write(struct.pack("B", num))
def unsignedleb128forlen(num):
len = 1
temp = num
while num > 0x7f:
len += 1
num >>= 7
if len > 5:
print("error for unsignedleb128forlen", temp)
os._exit(num)
return len
def writeunsignedleb128p1alignshort(num, file):
num += 1
if num <= 0x7f:
if file.tell() % 2 == 1:
file.write(struct.pack("B", num))
else:
# print(hex(num))
file.write(struct.pack("B", num | 0x80))
file.write(struct.pack("B", 0))
else:
cur = num & 0x7F | 0x80
file.write(struct.pack("B", cur))
num >>= 7
if num <= 0x7f:
if file.tell() % 2 == 1:
file.write(struct.pack("B", num))
else:
file.write(struct.pack("B", num | 0x80))
file.write(struct.pack("B", 0))
else:
cur = num & 0x7f | 0x80
file.write(struct.pack("B", cur))
num >>= 7
if num <= 0x7f:
if file.tell() % 2 == 1:
file.write(struct.pack("B", num))
else:
file.write(struct.pack("B", num | 0x80))
file.write(struct.pack("B", 0))
else:
cur = num & 0x7f | 0x80
file.write(struct.pack("B", cur))
num >>= 7
if num <= 0x7f:
if file.tell() % 2 == 1:
file.write(struct.pack("B", num))
else:
file.write(struct.pack("B", num | 0x80))
file.write(struct.pack("B", 0))
else:
cur = num & 0x7f | 0x80
file.write(struct.pack("B", cur))
num >>= 7
if file.tell() % 2 == 1:
file.write(struct.pack("B", num))
else:
file.write(struct.pack("B", num | 0x80))
file.write(struct.pack("B", 0))
def readunsignedleb128p1(file):
res = readunsignedleb128(file)
return res - 1
def writeunsignedleb128p1(num, file):
writeunsignedleb128(num+1, file)
def unsignedleb128p1forlen(num):
return unsignedleb128forlen(num+1)
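# uleb128p1 stores value + 1, so the NO_INDEX sentinel -1 becomes a single 0x00
# byte. Round-trip sketch with the helpers above (added for illustration):
def _uleb128p1_roundtrip():
    import io
    buf = io.BytesIO()
    writeunsignedleb128p1(-1, buf)      # encodes as one zero byte
    buf.seek(0)
    return readunsignedleb128p1(buf)    # -> -1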
def getutf8str(file):
string = []
while 1:
onebyte = struct.unpack("B", file.read(1))[0]
if onebyte == 0:
break
string.append(onebyte)
return bytearray(string).decode("utf-8")
def getstr(bytes):
return bytearray(bytes).decode("utf-8")
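# Example (added for illustration): getutf8str() consumes bytes up to the NUL
# terminator of a string_data item. Strictly speaking DEX stores MUTF-8, but the
# plain UTF-8 decode used above is fine for the common ASCII/BMP cases.
def _read_nul_terminated_example():
    import io
    buf = io.BytesIO(b"Hello\x00trailing bytes")
    return getutf8str(buf)              # -> "Hello"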
class EncodedArray:
def __init__(self, file):
self.size = readunsignedleb128(file)
self.values = []
for i in range(0, self.size):
self.values.append(EncodedValue(file))
def copytofile(self, file):
writeunsignedleb128(self.size, file)
for i in range(0, self.size):
self.values[i].copytofile(file)
def makeoffset(self, off):
off += unsignedleb128forlen(self.size)
for i in range(0, self.size):
off = self.values[i].makeoffset(off)
return off
def printf(self):
print("encoded array size", self.size)
class EncodedValue:
def __init__(self, file):
self.onebyte = struct.unpack("B", file.read(1))[0]
self.type = self.onebyte & 0x1F
self.arg = (self.onebyte >> 5) & 0x7
self.value = []
if self.type == 0x00:
# print 'here 0x00 VALUE_BYTE in class : ' + str(curClass_idx)
if self.arg != 0:
print ("[-] Ca ,get error in VALUE_BYTE")
os._exit(1)
self.value.append(struct.unpack("B", file.read(1))[0])
elif self.type == 0x02:
# print 'here 0x02 VALUE_SHORT in class : ' + str(curClass_idx)
if self.arg >= 2:
print ("[-] Ca ,get error in VALUE_SHORT at class : ")
os._exit(1)
for i in range(0, self.arg+1):
self.value.append(struct.unpack("B", file.read(1))[0])
elif self.type == 0x03:
# print 'here 0x03 VALUE_CHAR in class : ' + str(curClass_idx)
for i in range(0, self.arg+1):
self.value.append(struct.unpack("B", file.read(1))[0])
elif self.type == 0x04:
# print 'here 0x04 VALUE_INT in class : ' + str(curClass_idx)
if self.arg >= 4:
print ("[-] Ca ,get error in VALUE_INT at class : ")
os._exit(1)
for i in range(0, self.arg+1):
self.value.append(struct.unpack("B", file.read(1))[0])
elif self.type == 0x06:
# print 'here 0x06 VALUE_LONG in class : ' + str(curClass_idx)
if self.arg >= 8:
print ("[-] Ca ,get error in VALUE_LONG at class : ")
os._exit(1)
for i in range(0, self.arg+1):
self.value.append(struct.unpack("B", file.read(1))[0])
elif self.type == 0x10:
# print 'here 0x10 VALUE_FLOAT in class : ' + str(curClass_idx)
if self.arg >= 4:
print ("[-] Ca ,get error in VALUE_FLOAT at class : ")
os._exit(1)
for i in range(0, self.arg+1):
self.value.append(struct.unpack("B", file.read(1))[0])
elif self.type == 0x11:
# print 'here 0x11 VALUE_DOUBLE in class : ' + str(curClass_idx)
if self.arg >= 8:
print ("[-] Ca ,get error in VALUE_DOUBLE at class : ")
os._exit(1)
for i in range(0, self.arg+1):
self.value.append(struct.unpack("B", file.read(1))[0])
elif self.type == 0x17:
# print 'here 0x17 VALUE_STRING in class : ' + str(curClass_idx)
if self.arg >= 4:
print ("[-] Ca ,get error in VALUE_STRING at class : ")
os._exit(1)
for i in range(0, self.arg+1):
self.value.append(struct.unpack("B", file.read(1))[0])
elif self.type == 0x18:
# print 'here 0x18 VALUE_TYPE in class : ' + str(curClass_idx)
if self.arg >= 4:
print ("[-] Ca ,get error in VALUE_TYPE at class : ")
os._exit(1)
for i in range(0, self.arg+1):
self.value.append(struct.unpack("B", file.read(1))[0])
elif self.type == 0x19:
# print 'here 0x19 VALUE_FIELD in class : ' + str(curClass_idx)
if self.arg >= 4:
print ("[-] Ca ,get error in VALUE_FIELD at class : ")
os._exit(1)
for i in range(0, self.arg+1):
self.value.append(struct.unpack("B", file.read(1))[0])
elif self.type == 0x1a:
# print 'here 0x1a VALUE_METHOD in class : ' + str(curClass_idx)
if self.arg >= 4:
print ("[-] Ca ,get error in VALUE_METHOD at class : ")
os._exit(1)
for i in range(0, self.arg+1):
self.value.append(struct.unpack("B", file.read(1))[0])
elif self.type == 0x1b:
# print 'here 0x1b VALUE_ENUM in class : ' + str(curClass_idx)
if self.arg >= 4:
print ("[-] Ca ,get error in VALUE_ENUM at class : ")
os._exit(1)
for i in range(0, self.arg+1):
self.value.append(struct.unpack("B", file.read(1))[0])
elif self.type == 0x1c:
# print 'here 0x1c VALUE_ARRAY in class : ' + str(curClass_idx)
if self.arg != 0x00:
print ("[-] Ca ,get error in VALUE_ARRAY")
os._exit(1)
self.value.append(EncodedArray(file))
elif self.type == 0x1d:
# print 'here 0x1d VALUE_ANNOTATION in class : ' + str(curClass_idx)
if self.arg != 0:
                os._exit(1)
self.value.append(EncodedAnnotation(file))
# if case(0x1e):
# print 'here 0x1e VALUE_NULL in class : ' + str(curClass_idx)
# break
# if case(0x1f):
# print 'here 0x1f VALUE_BOOLEAN in class : ' + str(curClass_idx)
# break
def copytofile(self, file):
file.write(struct.pack("B", self.onebyte))
if self.type <= 0x1b:
for i in range(0, self.arg+1):
file.write(struct.pack("B", self.value[i]))
elif self.type == 0x1c:
self.value[0].copytofile(file)
elif self.type == 0x1d:
self.value[0].copytofile(file)
def makeoffset(self, off):
off += 1
if self.type <= 0x1b:
off += self.arg+1
elif self.type == 0x1c:
off = self.value[0].makeoffset(off)
elif self.type == 0x1d:
off = self.value[0].makeoffset(off)
return off
def printf(self):
print("encoded value :", self.type, self.arg)
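# Decoding sketch (added for illustration): the leading byte of an encoded_value
# packs value_arg in its top 3 bits and value_type in the low 5 bits; 0x04 means
# VALUE_INT with value_arg = 0, i.e. a single payload byte follows.
def _encoded_value_example():
    import io
    ev = EncodedValue(io.BytesIO(b"\x04\x2a"))
    return ev.type, ev.arg, ev.value    # -> (4, 0, [42])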
# ----------------------------------------------------------------------------------------
class AnnotationElement:
def __init__(self, file):
self.name_idx = readunsignedleb128(file)
self.value = EncodedValue(file)
def copytofile(self, file):
writeunsignedleb128(self.name_idx, file)
self.value.copytofile(file)
def makeoffset(self, off):
off += unsignedleb128forlen(self.name_idx)
off = self.value.makeoffset(off)
return off
class EncodedAnnotation:
def __init__(self, file):
self.type_idx = readunsignedleb128(file)
self.size = readunsignedleb128(file)
self.elements = [] # annotation_element[size]
for i in range(0, self.size):
self.elements.append(AnnotationElement(file))
def copytofile(self, file):
writeunsignedleb128(self.type_idx, file)
writeunsignedleb128(self.size, file)
for i in range(0, self.size):
self.elements[i].copytofile(file)
def makeoffset(self, off):
off += unsignedleb128forlen(self.type_idx)
off += unsignedleb128forlen(self.size)
for i in range(0, self.size):
off = self.elements[i].makeoffset(off)
return off
class DexHeader:
def __init__(self, file, mode=0):
if mode == 0:
self.start = file.tell()
self.magic = []
self.magic.append(chr(struct.unpack("B", file.read(1))[0]))
self.magic.append(chr(struct.unpack("B", file.read(1))[0]))
self.magic.append(chr(struct.unpack("B", file.read(1))[0]))
self.magic.append(chr(struct.unpack("B", file.read(1))[0]))
self.version = []
self.version.append(chr(struct.unpack("B", file.read(1))[0]))
self.version.append(chr(struct.unpack("B", file.read(1))[0]))
self.version.append(chr(struct.unpack("B", file.read(1))[0]))
self.version.append(chr(struct.unpack("B", file.read(1))[0]))
self.checksum = struct.unpack("I", file.read(4))[0]
self.signature = file.read(20)
self.file_size = struct.unpack("I", file.read(4))[0]
self.header_size = struct.unpack("I", file.read(4))[0]
self.endian_tag = hex(struct.unpack("I", file.read(4))[0])
self.link_size = struct.unpack("I", file.read(4))[0]
self.link_off = struct.unpack("I", file.read(4))[0]
self.map_off = struct.unpack("I", file.read(4))[0]
self.string_ids_size = struct.unpack("I", file.read(4))[0]
self.string_ids_off = struct.unpack("I", file.read(4))[0]
self.type_ids_size = struct.unpack("I", file.read(4))[0]
self.type_ids_off = struct.unpack("I", file.read(4))[0]
self.proto_ids_size = struct.unpack("I", file.read(4))[0]
self.proto_ids_off = struct.unpack("I", file.read(4))[0]
self.field_ids_size = struct.unpack("I", file.read(4))[0]
self.field_ids_off = struct.unpack("I", file.read(4))[0]
self.method_ids_size = struct.unpack("I", file.read(4))[0]
self.method_ids_off = struct.unpack("I", file.read(4))[0]
self.class_defs_size = struct.unpack("I", file.read(4))[0]
self.class_defs_off = struct.unpack("I", file.read(4))[0]
self.data_size = struct.unpack("I", file.read(4))[0]
self.data_off = struct.unpack("I", file.read(4))[0]
self.len = file.tell() - self.start
def create(self, dexfile):
self.magic = []
self.magic.append('d')
self.magic.append('e')
self.magic.append('x')
        self.magic.append(chr(0x0A))  # store as a char so copytofile() can call ord() on it
self.version = []
self.version.append('0')
self.version.append('3')
self.version.append('5')
        self.version.append(chr(0))
        self.checksum = 1234                 # placeholder value
        self.signature = b"\x00" * 20        # placeholder for the 20-byte SHA-1 field
        self.file_size = 1234                # placeholder value
        self.header_size = 112
        self.endian_tag = hex(0x12345678)    # copytofile() expects the hex-string form
self.link_size = 0
self.link_off = 0
# self.map_off = dexfile.dexmaplist
def copytofile(self, file):
file.seek(self.start, 0)
file.write(struct.pack("B", ord(self.magic[0])))
file.write(struct.pack("B", ord(self.magic[1])))
file.write(struct.pack("B", ord(self.magic[2])))
file.write(struct.pack("B", ord(self.magic[3])))
file.write(struct.pack("B", ord(self.version[0])))
file.write(struct.pack("B", ord(self.version[1])))
file.write(struct.pack("B", ord(self.version[2])))
file.write(struct.pack("B", ord(self.version[3])))
file.write(struct.pack("I", self.checksum))
file.write(self.signature)
file.write(struct.pack("I", self.file_size))
file.write(struct.pack("I", self.header_size))
file.write(struct.pack("I", int(self.endian_tag, 16)))
file.write(struct.pack("I", self.link_size))
file.write(struct.pack("I", self.link_off))
file.write(struct.pack("I", self.map_off))
file.write(struct.pack("I", self.string_ids_size))
file.write(struct.pack("I", self.string_ids_off))
file.write(struct.pack("I", self.type_ids_size))
file.write(struct.pack("I", self.type_ids_off))
file.write(struct.pack("I", self.proto_ids_size))
file.write(struct.pack("I", self.proto_ids_off))
file.write(struct.pack("I", self.field_ids_size))
file.write(struct.pack("I", self.field_ids_off))
file.write(struct.pack("I", self.method_ids_size))
file.write(struct.pack("I", self.method_ids_off))
file.write(struct.pack("I", self.class_defs_size))
file.write(struct.pack("I", self.class_defs_off))
file.write(struct.pack("I", self.data_size))
file.write(struct.pack("I", self.data_off))
def makeoffset(self, dexmaplist):
self.string_ids_size = dexmaplist[1].size
self.string_ids_off = dexmaplist[1].offset
self.type_ids_size = dexmaplist[2].size
self.type_ids_off = dexmaplist[2].offset
self.proto_ids_size = dexmaplist[3].size
self.proto_ids_off = dexmaplist[3].offset
self.field_ids_size = dexmaplist[4].size
self.field_ids_off = dexmaplist[4].offset
self.method_ids_size = dexmaplist[5].size
self.method_ids_off = dexmaplist[5].offset
self.class_defs_size = dexmaplist[6].size
self.class_defs_off = dexmaplist[6].offset
self.data_off = dexmaplist[0x1000].offset
self.data_size = 0
self.map_off = dexmaplist[0x1000].offset
self.file_size = 0
def printf(self):
print ("DEX FILE HEADER:")
print ("magic: ", self.magic)
print ("version: ", self.version)
print ("checksum: ", self.checksum)
print ("signature: ", self.signature)
print ("file_size: ", self.file_size)
print ("header_size: ", self.header_size)
print ("endian_tag: ", self.endian_tag)
print ("link_size: ", self.link_size)
print ("link_off: ", self.link_off)
print ("map_off: ", self.map_off)
print ("string_ids_size: ", self.string_ids_size)
print ("string_ids_off: ", self.string_ids_off)
print ("type_ids_size: ", self.type_ids_size)
print ("type_ids_off: ", self.type_ids_off)
print ("proto_ids_size: ", self.proto_ids_size)
print ("proto_ids_off: ", self.proto_ids_off)
print ("field_ids_size: ", self.field_ids_size)
print ("field_ids_off: ", self.field_ids_off)
print ("method_ids_size: ", self.method_ids_size)
print ("method_ids_off: ", self.method_ids_off)
print ("class_defs_size: ", self.class_defs_size)
print ("class_defs_off: ", self.class_defs_off)
print ("data_size: ", self.data_size)
print ("data_off: ", self.data_off)
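# Usage sketch (added for illustration): parse and dump only the header of a
# dex file; the path below is a placeholder.
def _dump_header(dex_path="classes.dex"):
    with open(dex_path, "rb") as f:
        header = DexHeader(f)    # mode=0 reads the 112-byte header from offset 0
        header.printf()
        return header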
class DexStringID:
def __init__(self, file, mode=1):
if mode == 1:
self.stringDataoff = struct.unpack("I", file.read(4))[0] # in file
file.seek(self.stringDataoff, 0)
self.size = readunsignedleb128(file)
self.str = getutf8str(file)
self.ref = None
else:
self.stringDataoff = 0
self.size = 0
self.str = ""
self.ref = None
def addstrID(self, str):
self.ref = str
self.str = getstr(str.str)
def copytofile(self, file):
# self.stringDataoff = self.ref.start
file.write(struct.pack("I", self.ref.start))
def getreference(self, dexmaplist):
self.ref = dexmaplist[0x2002].getreference(self.stringDataoff)
def printf(self):
print ("size: ", self.size, " str: ", self.str, "dataof: ", self.stringDataoff)
class DexTypeID:
def __init__(self, file, str_table, mode=1):
if mode == 1:
self.descriptorIdx = struct.unpack("I", file.read(4))[0] # in file
self.str = str_table[self.descriptorIdx].str
else:
self.descriptorIdx = 0
self.str = ""
def addtype(self, index, string):
self.descriptorIdx = index
self.str = string
def copytofile(self, file):
file.write(struct.pack("I", self.descriptorIdx))
def printf(self):
print ("type id: ", self.str)
class DexProtoId:
def __init__(self, file, str_table, type_table, mode=1):
if mode == 1:
self.shortyIdx = struct.unpack("I", file.read(4))[0] # in file
self.returnTypeIdx = struct.unpack("I", file.read(4))[0] # in file
self.parametersOff = struct.unpack("I", file.read(4))[0] # in file
self.name = str_table[self.shortyIdx].str
self.returnstr = type_table[self.returnTypeIdx].str
self.ref = None
else:
self.shortyIdx = 0
self.returnTypeIdx = 0
self.parametersOff = 0
self.ref = None
def addproto(self, idx, typeidx, reference):
self.shortyIdx = idx
self.returnTypeIdx = typeidx
self.ref = reference
def copytofile(self, file):
file.write(struct.pack("I", self.shortyIdx))
file.write(struct.pack("I", self.returnTypeIdx))
if self.ref is not None:
file.write(struct.pack("I", self.ref.start))
else:
file.write(struct.pack("I", 0))
def getreference(self, dexmaplist):
self.ref = dexmaplist[0x1001].getreference(self.parametersOff)
def printf(self):
print ("return Type:", self.returnstr)
print ("methodname:", self.name)
if self.ref is not None:
self.ref.printf()
class DexFieldId:
def __init__(self, file, str_table, type_table, mode=1):
if mode == 1:
self.classIdx = struct.unpack("H", file.read(2))[0] # in file
self.typeIdx = struct.unpack("H", file.read(2))[0] # in file
self.nameIdx = struct.unpack("I", file.read(4))[0] # in file
self.classstr = type_table[self.classIdx].str
self.typestr = type_table[self.typeIdx].str
self.name = str_table[self.nameIdx].str
def addfield(self, classidx, typeidx, nameidx):
self.classIdx = classidx
self.typeIdx = typeidx
self.nameIdx = nameidx
def copytofile(self, file):
file.write(struct.pack("H", self.classIdx))
file.write(struct.pack("H", self.typeIdx))
file.write(struct.pack("I", self.nameIdx))
def printf(self):
print ("classstr:", self.classstr)
print ("typestr:", self.typestr)
print ("name:", self.name)
print ()
class DexMethodId:
def __init__(self, file, str_table, type_table, proto_table, mode=1):
if mode == 1:
self.classIdx = struct.unpack("H", file.read(2))[0] # in file
self.protoIdx = struct.unpack("H", file.read(2))[0] # in file
self.nameIdx = struct.unpack("I", file.read(4))[0] # in file
self.classstr = type_table[self.classIdx].str
self.name = str_table[self.nameIdx].str
else:
self.classIdx = 0
self.protoIdx = 0
self.nameIdx = 0
def addmethod(self, class_idx, proto_idx, name_idx):
self.classIdx = class_idx
self.protoIdx = proto_idx
self.nameIdx = name_idx
def copytofile(self, file):
file.write(struct.pack("H", self.classIdx))
file.write(struct.pack("H", self.protoIdx))
file.write(struct.pack("I", self.nameIdx))
def printf(self):
print ("classstr:", self.classstr)
print ("name:", self.name)
print ()
class DexClassDef:
def __init__(self, file, str_table, type_table, mode=1):
if mode == 1:
self.classIdx = struct.unpack("I", file.read(4))[0] # in file
self.accessFlags = struct.unpack("I", file.read(4))[0] # in file
self.superclassIdx = struct.unpack("I", file.read(4))[0] # in file
self.interfacesOff = struct.unpack("I", file.read(4))[0] # in file
self.sourceFileIdx = struct.unpack("I", file.read(4))[0] # in file
self.annotationsOff = struct.unpack("I", file.read(4))[0] # in file
self.classDataOff = struct.unpack("I", file.read(4))[0] # in file
self.staticValuesOff = struct.unpack("I", file.read(4))[0] # in file
self.classstr = type_table[self.classIdx].str
self.superclassstr = type_table[self.superclassIdx].str
if self.sourceFileIdx == 0xFFFFFFFF:
self.sourceFilestr = "NO_INDEX"
else:
self.sourceFilestr = str_table[self.sourceFileIdx].str
else:
self.classIdx = 0
self.accessFlags = 0
self.superclassIdx = 0
self.interfacesOff = 0
self.sourceFileIdx = 0
self.annotationsOff = 0
self.classDataOff = 0
self.staticValuesOff = 0
self.interfacesRef = None
self.annotationsRef = None
self.classDataRef = None
self.staticValuesRef = None
def addclassdef(self, classidx, access, superclass, source):
self.classIdx = classidx
self.accessFlags = access
self.superclassIdx = superclass
self.sourceFileIdx = source
def addclassdefref(self, interref, annoref, classref, staticref):
self.interfacesRef = interref
self.annotationsRef = annoref
self.classDataRef = classref
self.staticValuesRef = staticref
    # get class data reference by its name, e.g. Lcom/cc/test/MainActivity;
def getclassdefref(self, str):
if self.classstr == str and self.classDataOff > 0:
return self.classDataRef
return None
def copytofile(self, file):
file.write(struct.pack("I", self.classIdx))
file.write(struct.pack("I", self.accessFlags))
file.write(struct.pack("I", self.superclassIdx))
if self.interfacesRef is not None:
file.write(struct.pack("I", self.interfacesRef.start))
# print(self.interfacesRef.start)
else:
file.write(struct.pack("I", 0))
file.write(struct.pack("I", self.sourceFileIdx))
if self.annotationsRef is not None:
file.write(struct.pack("I", self.annotationsRef.start))
# print(self.annotationsRef.start)
else:
file.write(struct.pack("I", 0))
if self.classDataRef is not None:
file.write(struct.pack("I", self.classDataRef.start))
else:
file.write(struct.pack("I", 0))
if self.staticValuesRef is not None:
file.write(struct.pack("I", self.staticValuesRef.start))
else:
file.write(struct.pack("I", 0))
def getreference(self, dexmaplist):
self.interfacesRef = dexmaplist[0x1001].getreference(self.interfacesOff)
if 0x2006 in dexmaplist.keys():
self.annotationsRef = dexmaplist[0x2006].getreference(self.annotationsOff)
self.classDataRef = dexmaplist[0x2000].getreference(self.classDataOff)
if 0x2005 in dexmaplist.keys():
self.staticValuesRef = dexmaplist[0x2005].getreference(self.staticValuesOff)
def printf(self):
print ("classtype:", self.classIdx, self.classstr)
print("access flag:", self.accessFlags)
print ("superclasstype:", self.superclassIdx, self.superclassstr)
print ("iterface off", self.interfacesOff)
print("source file index", self.sourceFilestr)
print("annotations off", self.annotationsOff)
print("class data off", self.classDataOff)
print("static values off", self.staticValuesOff)
if self.interfacesRef is not None:
self.interfacesRef.printf()
if self.annotationsRef is not None:
self.annotationsRef.printf()
if self.classDataRef is not None:
self.classDataRef.printf()
if self.staticValuesRef is not None:
self.staticValuesRef.printf()
class StringData:
def __init__(self, file, mode = 1):
if mode == 1:
self.start = file.tell()
self.len = 0
self.size = readunsignedleb128(file) # in file
self.str = [] # getutf8str(file) # in file
while 1:
onebyte = struct.unpack("B", file.read(1))[0]
if onebyte == 0:
break
self.str.append(onebyte)
else:
self.start = 0
self.len = 0
self.size = 0
self.str = []
def addstr(self, str):
self.size = len(str)
self.str = bytearray(str)
def copytofile(self, file):
writeunsignedleb128(self.size, file)
for i in range(0, len(self.str)):
file.write(struct.pack("B", self.str[i]))
file.write(struct.pack("B", 0))
def makeoffset(self, off):
self.start = off
self.len = len(self.str) + unsignedleb128forlen(self.size)
return off + self.len + 1 # 1 byte for '\0'
def modify(self, str):
self.size = len(str)
self.str = bytearray(str)
def printf(self):
print (getstr(self.str))
class TypeItem: # alignment: 4 bytes
def __init__(self, file, type_table, mode=1):
if mode == 1:
self.start = file.tell()
self.size = struct.unpack("I", file.read(4))[0] # in file
self.list = []
self.str = []
self.len = 0
for i in range(0, self.size):
self.list.append(struct.unpack("H", file.read(2))[0]) # in file
self.str.append(type_table[self.list[i]].str)
if self.size % 2 == 1:
struct.unpack("H", file.read(2)) # for alignment
else:
self.start = 0
self.size = 0
self.list = None
self.str = None
self.len = 0
def addtypeItem(self, type_list, str_list):
self.size = len(type_list)
self.list = type_list
self.str = str_list
def copytofile(self, file):
file.write(struct.pack("I", self.size))
for i in range(0, self.size):
file.write(struct.pack("H", self.list[i]))
if self.size % 2 == 1:
file.write(struct.pack("H", 0))
def equal(self, param_list, length):
if length != self.size:
return False
for i in range(0, self.size):
if param_list[i] != self.str[i]:
return False
return True
def makeoffset(self, off):
align = off % 4
if align != 0:
off += (4 - align)
self.len = 4 + 2 * self.size
self.start = off
return off + self.len
def printf(self):
for i in range(0, self.size):
print (self.list[i], self.str[i])
# alignment: 4bytes
class AnnotationsetItem:
def __init__(self, file):
self.start = file.tell()
self.len = 0
self.size = struct.unpack("I", file.read(4))[0] # in file
self.entries = [] # annotation_off, offset of annotation_item
self.ref = []
for i in range(0, self.size):
self.entries.append(struct.unpack("I", file.read(4))[0])
def copytofile(self, file):
file.write(struct.pack("I", self.size))
for i in range(0, self.size):
file.write(struct.pack("I", self.ref[i].start))
def makeoffset(self, off):
align = off % 4
if align != 0:
off += (4 - align)
self.start = off
self.len = 4 + 4 * self.size
return off + self.len
def getreference(self, dexmaplist):
for i in range(0, self.size):
self.ref.append(dexmaplist[0x2004].getreference(self.entries[i]))
def printf(self):
print ("size: ", self.size)
# alignment: 4bytes
class AnnotationsetrefList:
def __init__(self, file):
self.start = file.tell()
self.size = struct.unpack("I", file.read(4))[0] # in file
self.list = [] # annotaions_off, offset of annotation_set_item
self.ref = []
self.len = 0
for i in range(0, self.size):
self.list.append(struct.unpack("I", file.read(4))[0])
def copytofile(self, file):
file.write(struct.pack("I", self.size))
for i in range(0, self.size):
if self.ref[i] is not None:
file.write(struct.pack("I", self.ref[i].start))
else:
file.write(struct.pack("I", 0))
def makeoffset(self, off):
align = off % 4
if align != 0:
off += (4 - align)
self.start = off
self.len = 4 + 4 * self.size
return off + self.len
def getreference(self, dexmaplist):
for i in range(0, self.size):
self.ref.append(dexmaplist[0x1003].getreference(self.list[i]))
def printf(self):
print ("size: ", self.size)
class Encodedfield:
def __init__(self, file, mode=1):
if mode == 1:
self.start = file.tell()
self.len = 0
self.field_idx_diff = readunsignedleb128(file)
self.access_flags = readunsignedleb128(file)
else:
self.len = 0
self.field_idx_diff = 0
self.access_flags = 1
self.field_idx = 0 # need to set later
def __lt__(self, other): # for sort
return self.field_idx_diff < other.field_idx_diff
def addfield(self, idx, flag):
self.field_idx = idx
self.access_flags = int(flag)
def copytofile(self, file):
writeunsignedleb128(self.field_idx_diff, file)
writeunsignedleb128(self.access_flags, file)
def makeoffset(self, off):
self.start = off
self.len += unsignedleb128forlen(self.field_idx_diff)
self.len += unsignedleb128forlen(self.access_flags)
return off + self.len
def printf(self):
print ("diff: ", self.field_idx_diff)
print ("access: ", self.access_flags)
class Encodedmethod:
def __init__(self, file, mode=1):
if mode == 1:
self.start = file.tell()
self.len = 0
self.method_idx_diff = readunsignedleb128(file)
self.access_flags = readunsignedleb128(file)
self.code_off = readunsignedleb128(file)
self.coderef = None
else:
self.len = 0
self.method_idx_diff = 0
self.access_flags = 0
self.coderef = 0
self.method_idx = 0 # need to set later
        self.modified = 0  # when set, code_off will be written out as zero
def addmethod(self, method_idx, access, ref):
self.method_idx = method_idx
self.access_flags = int(access)
self.coderef = ref
def copytofile(self, file):
writeunsignedleb128(self.method_idx_diff, file)
writeunsignedleb128(self.access_flags, file)
if self.modified == 1:
writeunsignedleb128(0, file)
elif self.coderef is not None:
writeunsignedleb128(self.coderef.start, file)
else:
writeunsignedleb128(0, file)
def makeoffset(self, off):
self.start = off
self.len += unsignedleb128forlen(self.method_idx_diff)
self.len += unsignedleb128forlen(self.access_flags)
if self.modified == 1:
self.len += unsignedleb128forlen(0)
elif self.coderef is not None:
self.len += unsignedleb128forlen(self.coderef.start)
else:
self.len += unsignedleb128forlen(0)
return off + self.len
def getreference(self, dexmaplist):
self.coderef = dexmaplist[0x2001].getreference(self.code_off)
def printf(self):
print ("method_idx_diff: ", self.method_idx_diff)
print("method idx:", self.method_idx)
print ("access: ", self.access_flags)
print ("code off: ", self.code_off)
# alignment:none
class ClassdataItem:
def __init__(self, file, mode=1):
if mode == 1:
self.start = file.tell()
self.len = 0
self.static_field_size = readunsignedleb128(file)
self.instance_fields_size = readunsignedleb128(file)
self.direct_methods_size = readunsignedleb128(file)
self.virtual_methods_size = readunsignedleb128(file)
self.static_fields = []
self.instance_fields = []
self.direct_methods = []
self.virtual_methods = []
for i in range(0, self.static_field_size):
self.static_fields.append(Encodedfield(file))
for i in range(0, self.instance_fields_size):
self.instance_fields.append(Encodedfield(file))
for i in range(0, self.direct_methods_size):
self.direct_methods.append(Encodedmethod(file))
for i in range(0, self.virtual_methods_size):
self.virtual_methods.append(Encodedmethod(file))
else:
self.static_field_size = 0
self.instance_fields_size = 0
self.direct_methods_size = 0
self.virtual_methods_size = 0
self.static_fields = []
self.instance_fields = []
self.direct_methods = []
self.virtual_methods = []
def addstaticfield(self, field_idx, accessflag):
self.static_field_size += 1
field = Encodedfield(None, 2)
field.addfield(field_idx, accessflag)
self.static_fields.append(field)
def addinstancefield(self, field_idx, accessflag):
self.instance_fields_size += 1
field = Encodedfield(None, 2)
field.addfield(field_idx, accessflag)
self.instance_fields.append(field)
def adddirectmethod(self, method_idx, accessflag, code_ref):
method = Encodedmethod(None, 2)
method.addmethod(method_idx, accessflag, code_ref)
self.direct_methods_size += 1
self.direct_methods.append(method)
def addvirtualmethod(self, method_idx, accessflag, code_ref):
method = Encodedmethod(None, 2)
method.addmethod(method_idx, accessflag, code_ref)
self.virtual_methods_size += 1
self.virtual_methods.append(method)
    def commit(self): # call this when everything is done: recompute the *_idx_diff values from the absolute indices
if self.static_field_size > 0:
# self.static_fields.sort() # since each field added has the largest index
# there is no need to sort the list
last = 0
for i in range(0, self.static_field_size):
self.static_fields[i].field_idx_diff = self.static_fields[i].field_idx - last
last = self.static_fields[i].field_idx
if self.instance_fields_size > 0:
last = 0
for i in range(0, self.instance_fields_size):
self.instance_fields[i].field_idx_diff = self.instance_fields[i].field_idx - last
last = self.instance_fields[i].field_idx
if self.direct_methods_size > 0:
last = 0
for i in range(0, self.direct_methods_size):
self.direct_methods[i].method_idx_diff = self.direct_methods[i].method_idx - last
last = self.direct_methods[i].method_idx
if self.virtual_methods_size > 0:
last = 0
for i in range(0, self.virtual_methods_size):
self.virtual_methods[i].method_idx_diff = self.virtual_methods[i].method_idx - last
last = self.virtual_methods[i].method_idx
def copytofile(self, file):
writeunsignedleb128(self.static_field_size, file)
writeunsignedleb128(self.instance_fields_size, file)
writeunsignedleb128(self.direct_methods_size, file)
writeunsignedleb128(self.virtual_methods_size, file)
for i in range(0, self.static_field_size):
self.static_fields[i].copytofile(file)
for i in range(0, self.instance_fields_size):
self.instance_fields[i].copytofile(file)
for i in range(0, self.direct_methods_size):
self.direct_methods[i].copytofile(file)
for i in range(0, self.virtual_methods_size):
self.virtual_methods[i].copytofile(file)
    # besides adding the reference, we also need to set the correct index
def getreference(self, dexmaplist):
last = 0
for i in range(0, self.static_field_size):
self.static_fields[i].field_idx = last + self.static_fields[i].field_idx_diff
last = self.static_fields[i].field_idx
last = 0
for i in range(0, self.instance_fields_size):
self.instance_fields[i].field_idx = last + self.instance_fields[i].field_idx_diff
last = self.instance_fields[i].field_idx
last = 0
for i in range(0, self.direct_methods_size):
self.direct_methods[i].getreference(dexmaplist)
self.direct_methods[i].method_idx = last + self.direct_methods[i].method_idx_diff
last = self.direct_methods[i].method_idx
last = 0
for i in range(0, self.virtual_methods_size):
self.virtual_methods[i].getreference(dexmaplist)
self.virtual_methods[i].method_idx = last + self.virtual_methods[i].method_idx_diff
last = self.virtual_methods[i].method_idx
def makeoffset(self, off):
self.start = off
off += unsignedleb128forlen(self.static_field_size)
off += unsignedleb128forlen(self.instance_fields_size)
off += unsignedleb128forlen(self.direct_methods_size)
off += unsignedleb128forlen(self.virtual_methods_size)
for i in range(0, self.static_field_size):
off = self.static_fields[i].makeoffset(off)
for i in range(0, self.instance_fields_size):
off = self.instance_fields[i].makeoffset(off)
for i in range(0, self.direct_methods_size):
off = self.direct_methods[i].makeoffset(off)
for i in range(0, self.virtual_methods_size):
off = self.virtual_methods[i].makeoffset(off)
self.len = off - self.start
return off
def printf(self):
print ("static field size: ", self.static_field_size)
print ("instance fields size: ", self.instance_fields_size)
print ("direct methods size: ", self.direct_methods_size)
print ("virtual methods size: ", self.virtual_methods_size)
for i in range(0, self.static_field_size):
self.static_fields[i].printf()
for i in range(0, self.instance_fields_size):
self.instance_fields[i].printf()
for i in range(0, self.direct_methods_size):
self.direct_methods[i].printf()
for i in range(0, self.virtual_methods_size):
self.virtual_methods[i].printf()
class TryItem:
def __init__(self, file):
self.start = file.tell()
self.start_addr = struct.unpack("I", file.read(4))[0] # in file
self.insn_count = struct.unpack("H", file.read(2))[0] # in file
self.handler_off = struct.unpack("H", file.read(2))[0] # in file
self.len = 0
def copytofile(self, file):
file.write(struct.pack("I", self.start_addr))
file.write(struct.pack("H", self.insn_count))
file.write(struct.pack("H", self.handler_off))
def makeoffset(self, off):
self.start = off
self.len = 4 + 2 + 2
return off + self.len
def printf(self):
print ("start_Addr: ", self.start_addr)
print ("insn_count: ", self.insn_count)
print ("handler_off: ", self.handler_off)
print ()
class EncodedTypeAddrPair:
def __init__(self, file):
self.type_idx = readunsignedleb128(file)
self.addr = readunsignedleb128(file)
def copytofile(self, file):
writeunsignedleb128(self.type_idx, file)
writeunsignedleb128(self.addr, file)
def makeoffset(self, off):
off += unsignedleb128forlen(self.type_idx)
off += unsignedleb128forlen(self.addr)
return off
def printf(self):
print ("type idx: ", self.type_idx)
print ("addr: ", self.addr)
print ()
class EncodedhandlerItem:
def __init__(self, file):
self.start = file.tell()
self.len = 0
self.size = readsignedleb128(file)
self.handlers = []
# print("start handler item", abs(self.size))
for i in range(0, abs(self.size)):
self.handlers.append(EncodedTypeAddrPair(file))
if self.size <= 0:
self.catch_all_addr = readunsignedleb128(file)
def copytofile(self, file):
writesignedleb128(self.size, file)
for i in range(0, abs(self.size)):
self.handlers[i].copytofile(file)
if self.size <= 0:
writeunsignedleb128(self.catch_all_addr, file)
def makeoffset(self, off):
self.start = off
off += signedleb128forlen(self.size)
for i in range(0, abs(self.size)):
off = self.handlers[i].makeoffset(off)
if self.size <= 0:
off += unsignedleb128forlen(self.catch_all_addr)
self.len = off - self.start
return off
class EncodedhandlerList:
def __init__(self, file):
self.start = file.tell()
self.len = 0
self.size = readunsignedleb128(file)
self.list = []
for i in range(0, self.size):
self.list.append(EncodedhandlerItem(file))
def copytofile(self, file):
file.seek(self.start, 0)
writeunsignedleb128(self.size, file)
for i in range(0, self.size):
self.list[i].copytofile(file)
def makeoffset(self, off):
self.start = off
off += unsignedleb128forlen(self.size)
for i in range(0, self.size):
off = self.list[i].makeoffset(off)
return off
# alignment: 4bytes
class CodeItem:
def __init__(self, file, mode=1):
if mode == 1:
self.start = file.tell()
self.len = 0
self.register_size = struct.unpack("H", file.read(2))[0] # in file
self.ins_size = struct.unpack("H", file.read(2))[0] # in file
self.outs_size = struct.unpack("H", file.read(2))[0] # in file
self.tries_size = struct.unpack("H", file.read(2))[0] # in file
self.debug_info_off = struct.unpack("I", file.read(4))[0] # in file
self.insns_size = struct.unpack("I", file.read(4))[0] # in file
self.insns = []
self.debugRef = None
for i in range(0, self.insns_size):
self.insns.append(struct.unpack("H", file.read(2))[0])
if self.tries_size != 0 and self.insns_size % 2 == 1:
self.padding = struct.unpack("H", file.read(2))[0]
self.tries = []
for i in range(0, self.tries_size):
self.tries.append(TryItem(file))
if self.tries_size != 0:
self.handler = EncodedhandlerList(file)
align = file.tell() % 4 # for alignment
if align != 0:
file.read(4-align)
else:
self.start = 0
self.len = 0
self.register_size = 0
self.ins_size = 0
self.outs_size = 0
self.tries_size = 0
self.debug_info_off = 0
self.insns_size = 0
self.insns = []
self.debugRef = None
self.padding = 0
self.tries = []
self.handler = None
def addcode(self, reg_size, insize, outsize, triessize, debugoff, inssize, insnslist, debugref, trieslist, handlerref):
self.register_size = reg_size
self.ins_size = insize
self.outs_size = outsize
self.tries_size = triessize
self.debug_info_off = debugoff
self.insns_size = inssize
self.insns = insnslist
self.debugRef = debugref
self.tries = trieslist
self.handler = handlerref
def copytofile(self, file):
file.seek(self.start, 0)
file.write(struct.pack("H", self.register_size))
file.write(struct.pack("H", self.ins_size))
file.write(struct.pack("H", self.outs_size))
file.write(struct.pack("H", self.tries_size))
if self.debugRef is not None:
file.write(struct.pack("I", self.debugRef.start))
else:
file.write(struct.pack("I", 0))
file.write(struct.pack("I", self.insns_size))
for i in range(0, self.insns_size):
file.write(struct.pack("H", self.insns[i]))
if self.tries_size != 0 and self.insns_size % 2 == 1:
file.write(struct.pack("H", self.padding))
for i in range(0, self.tries_size):
self.tries[i].copytofile(file)
if self.tries_size != 0:
self.handler.copytofile(file)
align = file.tell() % 4 # for alignment
if align != 0:
for i in range(0, 4-align):
file.write(struct.pack("B", 0))
# print("code item addr:", file.tell())
def makeoffset(self, off):
align = off % 4
if align != 0:
off += (4 - align)
self.start = off
off += (4 * 2 + 2 * 4) # 4 ushort and 2 uint
off += (2 * self.insns_size)
if self.tries_size != 0 and self.insns_size % 2 == 1: # for padding
off += 2
for i in range(0, self.tries_size):
off = self.tries[i].makeoffset(off)
if self.tries_size != 0:
off = self.handler.makeoffset(off)
self.len = off - self.start
return off
def getreference(self, dexmaplist):
self.debugRef = dexmaplist[0x2003].getreference(self.debug_info_off)
def printf(self):
print("registers_size:", self.register_size)
print("ins_size, outs_size, tries_size:", self.ins_size, self.outs_size, self.tries_size)
print("debug info of:", self.debug_info_off)
print("insn_size:", self.insns_size)
for i in range(0, self.insns_size):
print(self.insns[i])
tmp = Instruction.InstructionSet(self.insns)
tmp.printf()
# alignment: none
class AnnotationItem:
Visibity = {0: 'VISIBITITY_BUILD', 1: 'VISIBILITY_RUNTIME', 2: 'VISIBILITY_SYSTEM'}
def __init__(self, file):
self.start = file.tell()
self.len = 0
self.visibility = struct.unpack("B", file.read(1))[0] # infile
self.annotation = EncodedAnnotation(file)
def copytofile(self, file):
file.write(struct.pack("B", self.visibility))
self.annotation.copytofile(file)
def makeoffset(self, off):
self.start = off
off += 1
off = self.annotation.makeoffset(off)
self.len = off - self.start
return off
# alignment: none
class EncodedArrayItem:
def __init__(self, file):
self.start = file.tell()
self.len = 0
self.value = EncodedArray(file)
def copytofile(self, file):
self.value.copytofile(file)
def makeoffset(self, off):
# if self.start == 1096008:
self.start = off
off = self.value.makeoffset(off)
self.len = off - self.start
return off
def printf(self):
print("None for EncodedArrayItem by now")
class FieldAnnotation:
def __init__(self, file):
self.field_idx = struct.unpack("I", file.read(4))[0] # in file
self.annotations_off = struct.unpack("I", file.read(4))[0] # in file, offset of annotation_set_item
self.annotations_off_ref = None
def copytofile(self, file):
file.write(struct.pack("I", self.field_idx))
file.write(struct.pack("I", self.annotations_off_ref.start))
def makeoffset(self, off):
off += 4 * 2
return off
def getreference(self, dexmaplist):
self.annotations_off_ref = dexmaplist[0x1003].getreference(self.annotations_off)
class MethodAnnotation:
def __init__(self, file):
self.method_idx = struct.unpack("I", file.read(4))[0] # in file
self.annotations_off = struct.unpack("I", file.read(4))[0] # in file
self.annotations_off_ref = None
def copytofile(self, file):
file.write(struct.pack("I", self.method_idx))
file.write(struct.pack("I", self.annotations_off_ref.start))
def makeoffset(self, off):
off += 4 * 2
return off
def getreference(self, dexmaplist):
self.annotations_off_ref = dexmaplist[0x1003].getreference(self.annotations_off)
class ParamterAnnotation:
def __init__(self, file):
self.method_idx = struct.unpack("I", file.read(4))[0] # in file
self.annotations_off = struct.unpack("I", file.read(4))[0] # in file. offset of "annotation_set_ref_list"
self.annotations_off_ref = None
def copytofile(self, file):
file.write(struct.pack("I", self.method_idx))
file.write(struct.pack("I", self.annotations_off_ref.start))
def makeoffset(self, off):
off += 4 * 2
return off
def getreference(self, dexmaplist):
self.annotations_off_ref = dexmaplist[0x1002].getreference(self.annotations_off)
# alignment: 4 bytes
class AnnotationsDirItem:
def __init__(self, file):
self.start = file.tell()
self.len = 0
self.class_annotations_off = struct.unpack("I", file.read(4))[0] # in file
self.fields_size = struct.unpack("I", file.read(4))[0] # in file
self.annotated_methods_size = struct.unpack("I", file.read(4))[0] # in file
self.annotate_parameters_size = struct.unpack("I", file.read(4))[0] # in file
self.field_annotations = [] # field_annotation[size]
self.method_annotations = []
self.parameter_annotations = []
self.class_annotations_ref = None
for i in range(0, self.fields_size):
self.field_annotations.append(FieldAnnotation(file))
for i in range(0, self.annotated_methods_size):
self.method_annotations.append(MethodAnnotation(file))
for i in range(0, self.annotate_parameters_size):
self.parameter_annotations.append(ParamterAnnotation(file))
def copytofile(self, file):
if self.class_annotations_ref is not None:
file.write(struct.pack("I", self.class_annotations_ref.start))
else:
file.write(struct.pack("I", self.class_annotations_off))
file.write(struct.pack("I", self.fields_size))
file.write(struct.pack("I", self.annotated_methods_size))
file.write(struct.pack("I", self.annotate_parameters_size))
for i in range(0, self.fields_size):
self.field_annotations[i].copytofile(file)
for i in range(0, self.annotated_methods_size):
self.method_annotations[i].copytofile(file)
for i in range(0, self.annotate_parameters_size):
self.parameter_annotations[i].copytofile(file)
def makeoffset(self, off):
self.start = off
off += 4 * 4
for i in range(0, self.fields_size):
off = self.field_annotations[i].makeoffset(off)
for i in range(0, self.annotated_methods_size):
off = self.method_annotations[i].makeoffset(off)
for i in range(0, self.annotate_parameters_size):
off = self.parameter_annotations[i].makeoffset(off)
self.len = off - self.start
return off
def getreference(self, dexmaplist):
self.class_annotations_ref = dexmaplist[0x1003].getreference(self.class_annotations_off)
for i in range(0, self.fields_size):
self.field_annotations[i].getreference(dexmaplist)
for i in range(0, self.annotated_methods_size):
self.method_annotations[i].getreference(dexmaplist)
for i in range(0, self.annotate_parameters_size):
self.parameter_annotations[i].getreference(dexmaplist)
def printf(self):
print("None for AnnotationDirItem by now")
# alignment: none
class DebugInfo:
def __init__(self, file, mode=1):
if mode == 1:
self.start = file.tell()
self.len = 0
self.line_start = readunsignedleb128(file)
self.parameters_size = readunsignedleb128(file)
self.parameter_names = []
for i in range(0, self.parameters_size):
self.parameter_names.append(readunsignedleb128p1(file))
self.debug = []
while 1:
onebyte = struct.unpack("B", file.read(1))[0]
self.debug.append(onebyte)
if onebyte == 0:
break
elif onebyte == 1:
self.debug.append(readunsignedleb128(file))
elif onebyte == 2:
self.debug.append(readsignedleb128(file))
elif onebyte == 3:
self.debug.append(readunsignedleb128(file))
self.debug.append(readunsignedleb128p1(file))
self.debug.append(readunsignedleb128p1(file))
elif onebyte == 4:
self.debug.append(readunsignedleb128(file))
self.debug.append(readunsignedleb128p1(file))
self.debug.append(readunsignedleb128p1(file))
self.debug.append(readunsignedleb128p1(file))
elif onebyte == 5:
self.debug.append(readunsignedleb128(file))
elif onebyte == 6:
self.debug.append(readunsignedleb128(file))
elif onebyte == 9:
self.debug.append(readunsignedleb128p1(file))
else:
self.start = 0
self.len = 0
self.line_start = 0
self.parameters_size = 0
self.parameter_names = []
self.debug = []
def adddebugitem(self, linestart, paramsize, names_list, debug_list):
self.line_start = linestart
self.parameters_size = paramsize
self.parameter_names = names_list
self.debug = debug_list
def copytofile(self, file):
file.seek(self.start, 0)
writeunsignedleb128(self.line_start, file)
writeunsignedleb128(self.parameters_size, file)
for i in range(0, self.parameters_size):
# print(self.parameter_names[i])
# if i == self.parameters_size-1:
# writeunsignedleb128p1alignshort(self.parameter_names[i], file)
# else:
writeunsignedleb128p1(self.parameter_names[i], file)
index = 0
while 1:
onebyte = self.debug[index]
file.write(struct.pack("B", onebyte))
index += 1
if onebyte == 0:
break
elif onebyte == 1:
writeunsignedleb128(self.debug[index], file)
index += 1
elif onebyte == 2:
writesignedleb128(self.debug[index], file)
index += 1
elif onebyte == 3:
writeunsignedleb128(self.debug[index], file)
writeunsignedleb128p1(self.debug[index+1], file)
writeunsignedleb128p1(self.debug[index+2], file)
index += 3
elif onebyte == 4:
writeunsignedleb128(self.debug[index], file)
writeunsignedleb128p1(self.debug[index+1], file)
writeunsignedleb128p1(self.debug[index+2], file)
writeunsignedleb128p1(self.debug[index+3], file)
index += 4
elif onebyte == 5:
writeunsignedleb128(self.debug[index], file)
index += 1
elif onebyte == 6:
writeunsignedleb128(self.debug[index], file)
index += 1
elif onebyte == 9:
writeunsignedleb128p1(self.debug[index], file)
index += 1
def printf(self):
print(self.line_start, self.parameters_size)
def makeoffset(self, off):
self.start = off
off += unsignedleb128forlen(self.line_start)
off += unsignedleb128forlen(self.parameters_size)
for i in range(0, self.parameters_size):
off += unsignedleb128p1forlen(self.parameter_names[i])
index = 0
while 1:
onebyte = self.debug[index]
off += 1
index += 1
if onebyte == 0:
break
elif onebyte == 1:
off += unsignedleb128forlen(self.debug[index])
index += 1
elif onebyte == 2:
off += signedleb128forlen(self.debug[index])
index += 1
elif onebyte == 3:
off += unsignedleb128forlen(self.debug[index])
off += unsignedleb128p1forlen(self.debug[index+1])
off += unsignedleb128p1forlen(self.debug[index+2])
index += 3
elif onebyte == 4:
off += unsignedleb128forlen(self.debug[index])
off += unsignedleb128p1forlen(self.debug[index+1])
off += unsignedleb128p1forlen(self.debug[index+2])
off += unsignedleb128p1forlen(self.debug[index+3])
index += 4
elif onebyte == 5:
off += unsignedleb128forlen(self.debug[index])
index += 1
elif onebyte == 6:
off += unsignedleb128forlen(self.debug[index])
index += 1
elif onebyte == 9:
off += unsignedleb128p1forlen(self.debug[index])
index += 1
self.len = off - self.start
return off
class DexMapItem:
Constant = {0: 'TYPE_HEADER_ITEM', 1: 'TYPE_STRING_ID_ITEM', 2: 'TYPE_TYPE_ID_ITEM',
3: 'TYPE_PROTO_ID_ITEM', 4: 'TYPE_FIELD_ID_ITEM', 5: 'TYPE_METHOD_ID_ITEM',
6: 'TYPE_CLASS_DEF_ITEM', 0x1000: 'TYPE_MAP_LIST', 0x1001: 'TYPE_TYPE_LIST',
0x1002: 'TYPE_ANNOTATION_SET_REF_LIST', 0x1003: 'TYPE_ANNOTATION_SET_ITEM',
0x2000: 'TYPE_CLASS_DATA_ITEM', 0x2001: 'TYPE_CODE_ITEM', 0x2002: 'TYPE_STRING_DATA_ITEM',
0x2003: 'TYPE_DEBUG_INFO_ITEM', 0x2004: 'TYPE_ANNOTATION_ITEM', 0x2005: 'TYPE_ENCODED_ARRAY_ITEM',
0x2006: 'TYPE_ANNOTATIONS_DIRECTORY_ITEM'}
def __init__(self, file):
self.type = struct.unpack("H", file.read(2))[0]
self.unused = struct.unpack("H", file.read(2))[0]
self.size = struct.unpack("I", file.read(4))[0]
self.offset = struct.unpack("I", file.read(4))[0]
self.item = []
self.len = 0 # the length of the item
    def addstr(self, str): # append the new string at the last position and return its StringData
if self.type == 0x2002:
            strdata = StringData(None, 2) # create an empty StringData instance
strdata.addstr(str)
self.item.append(strdata)
self.size += 1
return strdata
else:
print("error in add string")
return None
def addstrID(self, strdata):
if self.type == 1:
stringid = DexStringID(None, 2)
stringid.addstrID(strdata)
self.item.append(stringid)
self.size += 1
else:
print("error in add string id")
def addtypeID(self, field):
if self.type == 4:
self.item.append(field)
self.size += 1
else:
print("error in add type id")
def addclassdata(self, classdata):
if self.type == 0x2000:
self.item.append(classdata)
self.size += 1
else:
print("error in add class data")
def addtypeid(self, index, str):
if self.type == 2:
type = DexTypeID(None, None, 2)
type.addtype(index, str)
self.item.append(type)
self.size += 1
else:
print("error in add type id")
def addmethodid(self, class_idx, proto_idx, name_idx):
method = DexMethodId(None, None, None, None, 2)
method.addmethod(class_idx, proto_idx, name_idx)
print("add method id", proto_idx)
self.item.append(method)
self.size += 1
def addclassdef(self, classdef):
if self.type == 6:
self.item.append(classdef)
self.size += 1
else:
print("error in add class def")
def addprotoid(self, short_idx, type_idx, paramref):
if self.type == 3:
proto = DexProtoId(None, None, None, 2)
proto.addproto(short_idx, type_idx, paramref)
self.item.append(proto)
self.size += 1
else:
print("error in add proto id")
def addtypelist(self, typeitem):
if self.type == 0x1001:
self.item.append(typeitem)
self.size += 1
else:
print("error in add type list")
def addcodeitem(self, codeitem):
if self.type == 0x2001:
self.item.append(codeitem)
self.size += 1
else:
print("error in add code item")
def adddebugitem(self, debugitem):
if self.type == 0x2003:
self.item.append(debugitem)
self.size += 1
else:
print("error in add debug item")
def copytofile(self, file):
file.seek(self.offset, 0)
if self.type <= 0x2006:
align = file.tell() % 4
if align != 0:
for i in range(0, 4-align):
file.write(struct.pack("B", 0))
print("copytofile:", DexMapItem.Constant[self.type], file.tell())
for i in range(0, self.size):
self.item[i].copytofile(file)
# if self.type == 0x2002:
# print("for debug", i, getstr(self.item[i].str))
def printf(self, index):
print ("type: ", DexMapItem.Constant[self.type])
print ("size: ", self.size)
print ("offset: ", self.offset)
if self.type == index:
for i in range(0, self.size):
self.item[i].printf()
print ()
def setitem(self, file, dexmapitem):
file.seek(self.offset)
for i in range(0, self.size):
if self.type == 1: # string
file.seek(self.offset+i*4, 0)
self.item.append(DexStringID(file))
elif self.type == 2:
file.seek(self.offset+i*4, 0)
self.item.append(DexTypeID(file, dexmapitem[1].item)) # make sure has already build string table
elif self.type == 3:
file.seek(self.offset+i*12, 0)
self.item.append(DexProtoId(file, dexmapitem[1].item, dexmapitem[2].item))
elif self.type == 4:
file.seek(self.offset+i*8, 0)
self.item.append(DexFieldId(file, dexmapitem[1].item, dexmapitem[2].item))
elif self.type == 5:
file.seek(self.offset+i*8, 0)
self.item.append(DexMethodId(file, dexmapitem[1].item, dexmapitem[2].item, dexmapitem[3].item))
elif self.type == 6:
file.seek(self.offset+i*32, 0)
self.item.append(DexClassDef(file, dexmapitem[1].item, dexmapitem[2].item))
elif self.type == 0x1001: # TYPE_TYPE_LIST
self.item.append(TypeItem(file, dexmapitem[2].item))
elif self.type == 0x1002: # TYPE_ANNOTATION_SET_REF_LIST
self.item.append(AnnotationsetrefList(file))
elif self.type == 0x1003: # TYPE_ANNOTATION_SET_ITEM
self.item.append(AnnotationsetItem(file))
elif self.type == 0x2000: # TYPE_CLASS_DATA_ITEM
self.item.append(ClassdataItem(file))
elif self.type == 0x2001: # TYPE_CODE_ITEM
self.item.append(CodeItem(file))
elif self.type == 0x2002: # TYPE_STRING_DATA_ITEM
self.item.append(StringData(file))
elif self.type == 0x2003: # TYPE_DEBUG_INFO_ITEM
self.item.append(DebugInfo(file))
elif self.type == 0x2004: # TYPE_ANNOTATION_ITEM
self.item.append(AnnotationItem(file))
elif self.type == 0x2005: # TYPE_ENCODED_ARRAY_ITEM
self.item.append(EncodedArrayItem(file))
elif self.type == 0x2006: # TYPE_ANNOTATIONS_DIRECTORY_ITEM
self.item.append(AnnotationsDirItem(file))
def makeoffset(self, off):
if self.type < 0x2000 or self.type == 0x2001 or self.type == 0x2006:
align = off % 4
if align != 0:
off += (4 - align)
self.offset = off
if self.type == 0: # header
self.len = 112
elif self.type == 1: # string id
self.len = 4 * self.size
elif self.type == 2: # type id
self.len = 4 * self.size
elif self.type == 3: # proto id
self.len = 12 * self.size
elif self.type == 4: # field id
self.len = 8 * self.size
elif self.type == 5: # method id
self.len = 8 * self.size
elif self.type == 6: # class def
self.len = 32 * self.size
elif self.type == 0x1000: # map list, resolve specially in dexmaplist class
pass
elif 0x1001 <= self.type <= 0x2006: # type list, annotation ref set list, annotation set item...
for i in range(0, self.size):
off = self.item[i].makeoffset(off)
# if self.type == 0x2002:
# print("for debug", i, off)
self.len = off - self.offset
if self.type == 0x2000:
print("the off is:", off)
if self.type <= 6:
return off + self.len
else:
return off
def getref(self, dexmaplist):
for i in range(0, self.size):
self.item[i].getreference(dexmaplist)
def getreference(self, addr):
if addr == 0:
return None
i = 0
for i in range(0, self.size):
if self.item[i].start == addr:
return self.item[i]
if i >= self.size:
os._exit(addr)
return None
def getrefbystr(self, str): # for modify the string data
if self.type == 0x2002:
for i in range(0, self.size):
if getstr(self.item[i].str) == str:
return self.item[i]
else:
print("error occur here", self.type)
return None
def getindexbyname(self, str): # search for type id item
for i in range(0, self.size):
if self.item[i].str == str:
print("find index of", DexMapItem.Constant[self.type], str)
return i
print("did not find it in", DexMapItem.Constant[self.type])
return -1
def getindexbyproto(self, short_idx, return_type_idx, param_list, length): # called by item, index of 3
for i in range(0, self.size):
if short_idx == self.item[i].shortyIdx and return_type_idx == self.item[i].returnTypeIdx:
if self.item[i].ref is not None:
if self.item[i].ref.equal(param_list, length):
return i
return -1
class DexMapList:
Seq = (0, 1, 2, 3, 4, 5, 6, 0x1000, 0x1001, 0x1002, 0x1003, 0x2001, 0x2000, 0x2002,
0x2003, 0x2004, 0x2005, 0x2006)
def __init__(self, file, offset):
file.seek(offset, 0)
self.start = offset
self.size = struct.unpack("I", file.read(4))[0]
mapitem = []
self.dexmapitem = {}
for i in range(0, self.size):
mapitem.append(DexMapItem(file))
for i in range(0, self.size):
mapitem[i].setitem(file, self.dexmapitem)
self.dexmapitem[mapitem[i].type] = mapitem[i]
def copy(self, file):
for i in range(0, len(DexMapList.Seq)):
index = DexMapList.Seq[i]
if index in self.dexmapitem.keys():
print(index, "start at:", file.tell())
if index != 0x1000:
self.dexmapitem[index].copytofile(file)
else:
self.copytofile(file)
def copytofile(self, file):
print("output map list", file.tell())
file.seek(self.start, 0)
file.write(struct.pack("I", self.size))
for i in range(0, len(DexMapList.Seq)):
index = DexMapList.Seq[i]
if index in self.dexmapitem.keys():
# print(self.dexmapitem[index].type)
file.write(struct.pack("H", self.dexmapitem[index].type))
file.write(struct.pack("H", self.dexmapitem[index].unused))
file.write(struct.pack("I", self.dexmapitem[index].size))
file.write(struct.pack("I", self.dexmapitem[index].offset))
def makeoff(self):
off = 0
for i in range(0, len(DexMapList.Seq)):
index = DexMapList.Seq[i]
if index in self.dexmapitem.keys():
align = off % 4
if align != 0:
off += (4 - align)
if index != 0x1000:
off = self.dexmapitem[index].makeoffset(off)
else:
off = self.makeoffset(off)
return off
def makeoffset(self, off):
self.start = off
off += (4 + self.size * 12)
self.dexmapitem[0x1000].offset = self.start
return off
def getreference(self):
self.dexmapitem[1].getref(self.dexmapitem)
self.dexmapitem[3].getref(self.dexmapitem)
self.dexmapitem[6].getref(self.dexmapitem)
if 0x1002 in self.dexmapitem.keys():
self.dexmapitem[0x1002].getref(self.dexmapitem)
if 0x1003 in self.dexmapitem.keys():
self.dexmapitem[0x1003].getref(self.dexmapitem)
self.dexmapitem[0x2000].getref(self.dexmapitem)
self.dexmapitem[0x2001].getref(self.dexmapitem)
if 0x2006 in self.dexmapitem.keys():
self.dexmapitem[0x2006].getref(self.dexmapitem)
def getrefbystr(self, str):
return self.dexmapitem[0x2002].getrefbystr(str)
def printf(self, index):
print ("DexMapList:")
print ("size: ", self.size)
for i in self.dexmapitem:
self.dexmapitem[i].printf(index)
# default: 0 create from file 1 create from memory
class DexFile:
def __init__(self, filename, mode=0):
if mode == 0:
file = open(filename, 'rb')
self.dexheader = DexHeader(file)
self.dexmaplist = DexMapList(file, self.dexheader.map_off)
self.dexmaplist.dexmapitem[0].item.append(self.dexheader)
self.dexmaplist.getreference()
file.close()
def copytofile(self, filename):
if os.path.exists(filename):
os.remove(filename)
file = open(filename, 'wb+')
file.seek(0, 0)
self.makeoffset()
self.dexmaplist.copy(file)
        rest = self.dexheader.file_size - file.tell()
for i in range(0, rest):
file.write(struct.pack("B", 0))
file_sha = get_file_sha1(file)
tmp = bytes(file_sha)
i = 0
file.seek(12)
while i < 40:
num = (ACSII[tmp[i]] << 4) + ACSII[tmp[i+1]]
file.write(struct.pack("B", num))
i += 2
csum = checksum(file, self.dexheader.file_size)
print("checksum:", hex(csum), "file size:", self.dexheader.file_size)
file.seek(8)
file.write(struct.pack("I", csum))
file.close()
def printf(self, index):
if index == 0:
self.dexheader.printf()
else:
self.dexmaplist.printf(index)
def printclasscode(self, class_name, method_name):
index = self.dexmaplist.dexmapitem[2].getindexbyname(class_name)
if index < 0:
print("did not find the class", class_name)
return
count = self.dexmaplist.dexmapitem[6].size
classcoderef = None
for i in range(0, count):
if self.dexmaplist.dexmapitem[6].item[i].classIdx == index:
print("the class def index is :", i)
self.dexmaplist.dexmapitem[6].item[i].printf()
classdataref = self.dexmaplist.dexmapitem[6].item[i].classDataRef
flag = False
if classdataref is not None:
for i in range(0, classdataref.direct_methods_size):
methodref = self.dexmaplist.dexmapitem[5].item[classdataref.direct_methods[i].method_idx]
print(methodref.name, classdataref.direct_methods[i].method_idx)
if methodref.name == method_name:
print("find the direct method:", methodref.classstr, methodref.name,
classdataref.direct_methods[i].access_flags, classdataref.direct_methods[i].code_off)
classcoderef = classdataref.direct_methods[i].coderef
if classcoderef is not None:
classcoderef.printf()
else:
print("the code item is None")
flag = True
break
if flag:
break
print("did not find the direct method")
for j in range(0, classdataref.virtual_methods_size):
methodref = self.dexmaplist.dexmapitem[5].item[classdataref.virtual_methods[j].method_idx]
print(methodref.name)
if methodref.name == method_name:
print("find the virtual method:", methodref.classstr, methodref.name,
classdataref.virtual_methods[j].access_flags, classdataref.virtual_methods[j].code_off)
classcoderef = classdataref.virtual_methods[j].coderef
classcoderef.printf()
flag = True
break
if flag is False:
print("did not find the virtual method")
# if flag: # find the class data item, now get and print the code item
# classcoderef.printf()
# print("print done")
# else:
# print("sonething wrong here")
# with open(method_name, "wb") as file:
# classcoderef.copytofile(file)
# file.close()
break
if classcoderef is not None:
classcoderef.printf()
def makeoffset(self):
off = self.dexmaplist.makeoff()
align = off % 4
if align != 0:
off += (4 - align)
self.dexheader.makeoffset(self.dexmaplist.dexmapitem)
self.dexheader.file_size = off
self.dexheader.data_size = off - self.dexheader.map_off
def modifystr(self, src, dst):
strData = self.dexmaplist.getrefbystr(src)
if strData is not None:
print("find string", src)
strData.modify(dst)
def addstr(self, str):
strdata = self.dexmaplist.dexmapitem[0x2002].addstr(str)
strdata.printf()
self.dexmaplist.dexmapitem[1].addstrID(strdata)
return self.dexmaplist.dexmapitem[1].size-1 # return the index of the str
def addtype(self, str):
index = self.addstr(str)
self.dexmaplist.dexmapitem[2].addtypeid(index, str)
return self.dexmaplist.dexmapitem[2].size-1
def addfield(self, classidx, type_str, name_str):
field = DexFieldId(None, None, None, 2)
str_idx = self.dexmaplist.dexmapitem[1].getindexbyname(name_str)
if str_idx < 0:
str_idx = self.addstr(name_str)
if type_str in TypeDescriptor.keys(): # transform the type str to type descriptor
type_str = TypeDescriptor[type_str]
type_idx = self.dexmaplist.dexmapitem[2].getindexbyname(type_str)
if type_idx < 0:
print("did not find this type in type ids", type_str)
type_idx = self.addtype(type_str)
field.addfield(classidx, type_idx, str_idx)
self.dexmaplist.dexmapitem[4].addtypeID(field)
return self.dexmaplist.dexmapitem[4].size-1
# classtype: Lcom/cc/test/Dexparse;
def addclass(self, classtype, accessflag, superclass, sourcefile):
item = DexClassDef(None, None, None, 2)
strdata = self.dexmaplist.getrefbystr(classtype)
if strdata is not None:
print("This class is existing", classtype)
return
type_index = self.addtype(classtype)
super_index = self.dexmaplist.dexmapitem[2].getindexbyname(superclass)
if super_index < 0: # did not find it
print("This super class is not exiting", superclass)
return
source_index = self.dexmaplist.dexmapitem[1].getindexbyname(sourcefile)
if source_index < 0:
source_index = self.addstr(sourcefile)
item.addclassdef(type_index, accessflag, super_index, source_index)
self.dexmaplist.dexmapitem[6].addclassdef(item)
return item
def addclassData(self, classdataref):
self.dexmaplist.dexmapitem[0x2000].addclassdata(classdataref)
# add proto id and return the index,
# if already exist just return the index
def addproto(self, proto_list, return_str):
size = len(proto_list)
proto = ""
if return_str in ShortyDescriptor.keys():
proto += ShortyDescriptor[return_str]
else:
proto += "L"
for i in range(0, size):
str = proto_list[i]
if str in ShortyDescriptor.keys():
proto += ShortyDescriptor[str]
else:
proto += 'L' # for reference of class or array
short_idx = self.dexmaplist.dexmapitem[1].getindexbyname(proto)
if short_idx < 0:
print("did not find this string in string ids", proto)
short_idx = self.addstr(proto)
if return_str in TypeDescriptor.keys(): # transform to type descriptor
return_str = TypeDescriptor[return_str]
type_idx = self.dexmaplist.dexmapitem[2].getindexbyname(return_str)
if type_idx < 0:
print("did not find this type in type ids", return_str)
type_idx = self.addtype(return_str)
proto_idx = self.dexmaplist.dexmapitem[3].getindexbyproto(short_idx, type_idx, proto_list, size)
if proto_idx >= 0:
return proto_idx
typeItem = TypeItem(None, None, 2)
type_list = []
str_list = []
for i in range(0, size):
type_str = proto_list[i]
if type_str in TypeDescriptor.keys():
type_str = TypeDescriptor[type_str]
type_index = self.dexmaplist.dexmapitem[2].getindexbyname(type_str)
if type_index < 0:
print("did not find this param in type ids", type_str)
type_index = self.addtype(type_str)
type_list.append(type_index)
str_list.append(type_str)
typeItem.addtypeItem(type_list, str_list)
self.dexmaplist.dexmapitem[0x1001].addtypelist(typeItem)
self.dexmaplist.dexmapitem[3].addprotoid(short_idx, type_idx, typeItem)
return self.dexmaplist.dexmapitem[3].size-1
def addmethod(self, class_idx, proto_list, return_str, name):
name_idx = self.dexmaplist.dexmapitem[1].getindexbyname(name)
if name_idx < 0:
name_idx = self.addstr(name)
self.dexmaplist.dexmapitem[5].addmethodid(class_idx, self.addproto(proto_list, return_str), name_idx)
return self.dexmaplist.dexmapitem[5].size-1
def addcode(self, ref):
self.dexmaplist.dexmapitem[0x2001].addcodeitem(ref)
def adddebug(self, debugitem):
self.dexmaplist.dexmapitem[0x2003].adddebugitem(debugitem)
def getmethodItem(self, class_name, method_name):
index = self.dexmaplist.dexmapitem[2].getindexbyname(class_name)
if index < 0:
print("did not find the class", class_name)
return
else:
print("find the class, index is :", index)
count = self.dexmaplist.dexmapitem[6].size
encoded_method = None
method_idx = 0
def_idx = 0
for i in range(0, count):
if self.dexmaplist.dexmapitem[6].item[i].classIdx == index:
def_idx = i
self.dexmaplist.dexmapitem[6].item[i].printf()
classdataref = self.dexmaplist.dexmapitem[6].item[i].classDataRef
flag = False
if classdataref is not None:
for i in range(0, classdataref.direct_methods_size):
methodref = self.dexmaplist.dexmapitem[5].item[classdataref.direct_methods[i].method_idx]
print(methodref.name, classdataref.direct_methods[i].method_idx)
if methodref.name == method_name:
print("find the direct method:", methodref.classstr, methodref.name,
classdataref.direct_methods[i].access_flags, classdataref.direct_methods[i].code_off)
encoded_method = classdataref.direct_methods[i]
method_idx = classdataref.direct_methods[i].method_idx
flag = True
break
if flag:
break
print("did not find the direct method")
for j in range(0, classdataref.virtual_methods_size):
methodref = self.dexmaplist.dexmapitem[5].item[classdataref.virtual_methods[j].method_idx]
print(methodref.name)
if methodref.name == method_name:
print("find the virtual method:", methodref.classstr, methodref.name,
classdataref.virtual_methods[j].access_flags, classdataref.virtual_methods[j].code_off)
encoded_method = classdataref.virtual_methods[j]
method_idx = classdataref.virtual_methods[j].method_idx
flag = True
break
if flag is False:
print("did not find the virtual method")
break
return {"method": encoded_method, "classidx": index, "methodidx": method_idx, "defidx": def_idx}
def verifyclass(self, def_idx):
classdef = self.dexmaplist.dexmapitem[6].item[def_idx]
classdef.accessFlags |= 0x00010000
def gettypeid(self, type):
return self.dexmaplist.dexmapitem[2].getindexbyname(type)
def jiaguAll(dexfile, outfile):
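    # For each method to protect: remember its original access flags and code-item offset,
    # flip the method to native (and set modified = 1 so its code_off is written out as 0),
    # append one 16-byte record per method -- class idx, method idx, original access flags,
    # original code offset -- after the normal end of the dex, then patch the sha1 signature
    # and the checksum in the header.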
method_list = [] # record all method need to protect
tmp_method = dexfile.getmethodItem("Lcom/cc/test/MainActivity;", "onCreate")
method_list.append({"access": tmp_method["method"].access_flags, "ref": tmp_method["method"].coderef,
"classidx": tmp_method["classidx"], "methodidx": tmp_method["methodidx"]})
tmp_method["method"].access_flags = int(Access_Flag['native'] | Access_Flag['public'])
tmp_method["method"].modified = 1
# change the access flag, make it native
dexfile.makeoffset() # make offset
    if os.path.exists(outfile): # if it already exists, delete it
        print("the file already exists, just replace it")
os.remove(outfile)
file = open(outfile, 'wb+')
file.seek(0, 0)
size = len(method_list)
filesize = dexfile.dexheader.file_size # in order to adjust the dex file
dexfile.dexheader.file_size += 16 * size # each injected data need 16 bytes
dexfile.dexmaplist.copy(file)
file.seek(filesize, 0)
print("file size :", filesize, " size : ", size)
for i in range(0, size):
file.write(struct.pack("I", method_list[i]["classidx"]))
file.write(struct.pack("I", method_list[i]["methodidx"]))
file.write(struct.pack("I", method_list[i]["access"]))
file.write(struct.pack("I", method_list[i]["ref"].start))
print("inject data :", method_list[i]["classidx"], method_list[i]["methodidx"])
        # assume that the code ref is not None, otherwise it makes no sense (nothing to protect)
file_sha = get_file_sha1(file)
tmp = bytes(file_sha)
i = 0
file.seek(12)
while i < 40:
num = (ACSII[tmp[i]] << 4) + ACSII[tmp[i+1]]
file.write(struct.pack("B", num))
i += 2
csum = checksum(file, dexfile.dexheader.file_size)
print("checksum:", hex(csum), "file size:", dexfile.dexheader.file_size)
file.seek(8)
file.write(struct.pack("I", csum))
file.close()
if __name__ == '__main__':
dexfile = DexFile("classes.dex")
# jiaguAll(dexfile, "classescp.dex")
# dexfile.printclasscode("Lcom/cc/test/MainActivity;", "onCreate")
# dexfile.printf(3)
# dexfile.addstr("DexParse.java")
# dexfile.addstr("Lcom/cc/test/DexParse.java")
# dexfile.modifystr("A Text From CwT", "A Text From DexParse")
# dexfile.printf()
    # note: you need to delete the file classescp.dex first, otherwise
    # the new dex file will be appended to the old one
# dexfile.copytofile("classescp.dex")
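    # Minimal end-to-end sketch, kept commented out like the examples above (file names are
    # placeholders; only methods defined in this file are used):
    #   dex = DexFile("classes.dex")              # parse header + map list and resolve references
    #   dex.modifystr("old text", "new text")     # patch an entry in the string_data section
    #   dex.copytofile("classes_patched.dex")     # recompute offsets, sha1 and checksum, then write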
| 40.958851 | 124 | 0.551744 | 84,445 | 0.883713 | 0 | 0 | 0 | 0 | 0 | 0 | 9,978 | 0.104419 |
fd71ae1315e427ea9c9874263b95024d2ffb8696 | 1,852 | py | Python | api/myapi/serializers.py | UmmuRasul/sbvbn | 3d4705b9eb5e6bc996028ecc0a8ec43a435ef18b | ["MIT"] | null | null | null | api/myapi/serializers.py | UmmuRasul/sbvbn | 3d4705b9eb5e6bc996028ecc0a8ec43a435ef18b | ["MIT"] | null | null | null | api/myapi/serializers.py | UmmuRasul/sbvbn | 3d4705b9eb5e6bc996028ecc0a8ec43a435ef18b | ["MIT"] | null | null | null |
from rest_framework import serializers
from api.models import User, UserProfile, Post, News, Video
from datetime import datetime
class UserProfileSerializer(serializers.ModelSerializer):
class Meta:
model = UserProfile
fields = ('address', 'country', 'city', 'zip', 'photo')
class UserSerializer(serializers.HyperlinkedModelSerializer):
profile = UserProfileSerializer(required=True)
class Meta:
model = User
fields = ('url', 'email', 'first_name', 'last_name', 'password', 'profile')
extra_kwargs = {'password': {'write_only': True}}
def create(self, validated_data):
profile_data = validated_data.pop('profile')
password = validated_data.pop('password')
user = User(**validated_data)
user.set_password(password)
user.save()
UserProfile.objects.create(user=user, **profile_data)
return user
def update(self, instance, validated_data):
profile_data = validated_data.pop('profile')
profile = instance.profile
instance.email = validated_data.get('email', instance.email)
instance.save()
profile.address = profile_data.get('address', profile.address)
profile.country = profile_data.get('country', profile.country)
profile.city = profile_data.get('city', profile.city)
profile.zip = profile_data.get('zip', profile.zip)
profile.photo = profile_data.get('photo', profile.photo)
profile.save()
return instance
class PostSerializer(serializers.ModelSerializer):  # ModelSerializer so that Meta.model / Meta.fields actually generate fields
class Meta:
model = Post
fields = '__all__'
class NewSerializer(serializers.ModelSerializer):
class Meta:
model = News
fields = '__all__'
class VideoSerializer(serializers.ModelSerializer):
class Meta:
model = Video
        fields = '__all__'
31.389831 | 83 | 0.665227 | 1,710 | 0.923326 | 0 | 0 | 0 | 0 | 0 | 0 | 210 | 0.113391 |
fd71c4f7dcacba2ce5484fe215f8d27faba98441 | 6,603 | py | Python | src/morphforgecontrib/simulation/channels/hh_style/neuron/mm_neuron_alphabetabeta.py | mikehulluk/morphforge | 2a95096f144ed4ea487decb735ce66706357d3c7 | ["BSD-2-Clause"] | 1 | 2021-01-21T11:31:59.000Z | 2021-01-21T11:31:59.000Z | src/morphforgecontrib/simulation/channels/hh_style/neuron/mm_neuron_alphabetabeta.py | mikehulluk/morphforge | 2a95096f144ed4ea487decb735ce66706357d3c7 | ["BSD-2-Clause"] | null | null | null | src/morphforgecontrib/simulation/channels/hh_style/neuron/mm_neuron_alphabetabeta.py | mikehulluk/morphforge | 2a95096f144ed4ea487decb735ce66706357d3c7 | ["BSD-2-Clause"] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
from ..core import StdChlAlphaBetaBeta
from morphforge.units import qty
from morphforge import units
from hocmodbuilders.mmwriter_alphabetabeta import NEURONChlWriterAlphaBetaBeta
from morphforge.simulation.neuron.hocmodbuilders import HocModUtils
from morphforge.simulation.neuron import NEURONChl_Base
from morphforge.constants.standardtags import StandardTags
from morphforge.simulation.neuron.core.neuronsimulationenvironment import NEURONEnvironment
from morphforge.simulation.neuron.objects.neuronrecordable import NEURONRecordableOnLocation
class NEURONChl_AlphaBetaBeta_Record(NEURONRecordableOnLocation):
def __init__(self, alphabeta_beta_chl, modvar, **kwargs):
super(NEURONChl_AlphaBetaBeta_Record, self).__init__(**kwargs)
self.alphabeta_beta_chl = alphabeta_beta_chl
self.modvar = modvar
def build_mod(self, modfile_set):
pass
def build_hoc(self, hocfile_obj):
HocModUtils.create_record_from_modfile(
hocfile_obj,
vecname='RecVec%s' % self.name,
cell_location=self.cell_location,
modvariable=self.modvar,
mod_neuronsuffix=self.alphabeta_beta_chl.get_neuron_suffix(),
recordobj=self,
)
def get_description(self):
return '%s %s %s' % (self.modvar, self.alphabeta_beta_chl.name,
self.cell_location.get_location_description_str())
class NEURONChl_AlphaBetaBeta_CurrentDensityRecord(NEURONChl_AlphaBetaBeta_Record):
def __init__(self, **kwargs):
super(NEURONChl_AlphaBetaBeta_CurrentDensityRecord,
self).__init__(modvar='i', **kwargs)
def get_unit(self):
return units.parse_unit_str('mA/cm2')
def get_std_tags(self):
return [StandardTags.CurrentDensity]
class NEURONChl_AlphaBetaBeta_ConductanceDensityRecord(NEURONChl_AlphaBetaBeta_Record):
def __init__(self, **kwargs):
super(NEURONChl_AlphaBetaBeta_ConductanceDensityRecord,
self).__init__(modvar='g', **kwargs)
def get_unit(self):
return qty('S/cm2')
def get_std_tags(self):
return [StandardTags.ConductanceDensity]
class NEURONChl_AlphaBetaBeta_StateVariableRecord(NEURONChl_AlphaBetaBeta_Record):
def __init__(self, state, **kwargs):
super(NEURONChl_AlphaBetaBeta_StateVariableRecord,
self).__init__(modvar=state, **kwargs)
def get_unit(self):
return qty('')
def get_std_tags(self):
return [StandardTags.StateVariable]
class NEURONChl_AlphaBetaBeta_StateVariableTauRecord(NEURONChl_AlphaBetaBeta_Record):
def __init__(self, state, **kwargs):
super(NEURONChl_AlphaBetaBeta_StateVariableTauRecord,
self).__init__(modvar=state + 'tau', **kwargs)
def get_unit(self):
return qty('ms')
def get_std_tags(self):
return [StandardTags.StateTimeConstant]
class NEURONChl_AlphaBetaBeta_StateVariableInfRecord(NEURONChl_AlphaBetaBeta_Record):
def __init__(self, state, **kwargs):
super(NEURONChl_AlphaBetaBeta_StateVariableInfRecord,
self).__init__(modvar=state + 'inf', **kwargs)
def get_unit(self):
return qty('')
def get_std_tags(self):
return [StandardTags.StateSteadyState]
class NEURONChl_AlphaBetaBeta(StdChlAlphaBetaBeta, NEURONChl_Base):
class Recordables(object):
CurrentDensity = StandardTags.CurrentDensity
def __init__(self, **kwargs):
super( NEURONChl_AlphaBetaBeta, self).__init__(**kwargs)
def build_hoc_section(self, cell, section, hocfile_obj, mta):
return NEURONChlWriterAlphaBetaBeta.build_hoc_section(cell=cell, section=section, hocfile_obj=hocfile_obj, mta=mta)
def create_modfile(self, modfile_set):
NEURONChlWriterAlphaBetaBeta.build_mod(alphabeta_beta_chl=self, modfile_set=modfile_set)
def get_recordable(self, what, name, cell_location, **kwargs):
recorders = {
StdChlAlphaBetaBeta.Recordables.CurrentDensity: NEURONChl_AlphaBetaBeta_CurrentDensityRecord,
}
return recorders[what](alphabeta_beta_chl=self, cell_location= cell_location, name=name, **kwargs )
def get_mod_file_changeables(self):
        # If this fails, then the attribute probably needs to be added to the list below:
change_attrs = set([
'conductance',
'beta2threshold',
'ion',
'eqn',
'conductance',
'statevars',
'reversalpotential',
])
assert set(self.__dict__) == set(['mm_neuronNumber','_name','_simulation',
'cachedNeuronSuffix']) | change_attrs
attrs = [
'ion',
'eqn',
'conductance',
'statevars',
'reversalpotential',
'beta2threshold',
]
return dict([(a, getattr(self, a)) for a in attrs])
# Register the channel
NEURONEnvironment.channels.register_plugin( StdChlAlphaBetaBeta, NEURONChl_AlphaBetaBeta)
| 33.015 | 123 | 0.69741 | 4,372 | 0.662123 | 0 | 0 | 0 | 0 | 0 | 0 | 1,878 | 0.284416 |
fd7312c0409e17edc8a594caad14c3eebd8edb1f | 5,344 | py | Python | cookie.py | cppchriscpp/fortune-cookie | 46e433e1ae06a8ad742b252d642f8620bde9e38b | ["MIT"] | null | null | null | cookie.py | cppchriscpp/fortune-cookie | 46e433e1ae06a8ad742b252d642f8620bde9e38b | ["MIT"] | null | null | null | cookie.py | cppchriscpp/fortune-cookie | 46e433e1ae06a8ad742b252d642f8620bde9e38b | ["MIT"] | null | null | null |
import markovify
import re
import nltk
import os
import urllib.request
from shutil import copyfile
# We need a temporary(ish) place to store the data we retrieve.
# If you are running this in a docker container you may want to mount a volume and use it.
# Also be sure to make a symlink between it and the assets directory. See our dockerfile for an example!
datadir = "./web/assets/data"
if 'DATA_DIR' in os.environ:
datadir = os.environ['DATA_DIR']
if not os.path.exists(datadir):
os.mkdir(datadir)
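# For example (the paths and image name below are only illustrative, not taken from this project):
#   docker run -v /srv/cookie-data:/data -e DATA_DIR=/data <your-image>
#   ln -s /data ./web/assets/data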
# Basically the example from the markovify documentation that uses parts of speech and stuff to make better sentences
class POSifiedText(markovify.Text):
def word_split(self, sentence):
words = re.split(self.word_split_pattern, sentence)
words = [ "::".join(tag) for tag in nltk.pos_tag(words) ]
return words
def word_join(self, words):
sentence = " ".join(word.split("::")[0] for word in words)
return sentence
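# e.g. nltk.pos_tag(["the", "cat", "sat"]) yields pairs like [("the", "DT"), ("cat", "NN"), ("sat", "VBD")],
# so the chain is trained on tokens such as "cat::NN", and word_join() strips the "::TAG" suffix
# back off whenever a sentence is generated.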
# Grab a list of fortunes from Github
if not os.path.exists(datadir+"/cookie.txt"):
urllib.request.urlretrieve("https://raw.githubusercontent.com/ianli/fortune-cookies-galore/master/fortunes.txt", datadir+"/cookie.txt")
# Grab the US constitution raw text
if not os.path.exists(datadir+'/const.txt'):
urllib.request.urlretrieve("https://www.usconstitution.net/const.txt", datadir+"/const.txt")
if not os.path.exists(datadir+'/tweeter.txt'):
urllib.request.urlretrieve("https://raw.githubusercontent.com/ElDeveloper/tweets/master/tweets_text.txt", datadir+"/tweeter.txt")
# Read both files into variables
with open(datadir+"/cookie.txt") as f:
text = f.read()
with open(datadir+'/const.txt') as f:
tswext = f.read()
with open(datadir+"/tweeter.txt") as f:
tweetext = f.read()
# Break up the text to make it more workable
cookie_text_split = text.split("\n")
const_text_split = tswext.split("\n")
tweet_text_split = tweetext.split("\n")
# Some cleanup to remove things in the fortune cookie file that aren't really fortunes.
# (There are some odd facts and quotes in here. This is a bit barbaric, but this is a fun project anyway! No need for perfection...)
def excluded(string):
if string.startswith("Q:"):
return False
if "\"" in string:
return False
if "--" in string:
return False
return True
# Same thing for the constitution text - this just removes the comment at the top.
def exwifted(string):
if "[" in string:
return False
return True
# Apply the cleanups from above
cookie_text_split[:] = [x for x in cookie_text_split if excluded(x)]
const_text_split[:] = [x for x in const_text_split if exwifted(x)]
# Merge the text back into one big blob like markovify expects. (There's probably a better way to do this, but again, fun project. Efficiency's not that important...)
cookie_text_model = POSifiedText("\n".join(cookie_text_split))
const_text_model = POSifiedText("\n".join(const_text_split))
tweet_text_model = POSifiedText("\n".join(tweet_text_split))
# Combine them into a terrifying structure
const_and_cookie_model = markovify.combine([cookie_text_model, const_text_model])
tweet_and_cookie_model = markovify.combine([cookie_text_model, tweet_text_model], [4, 1])
everything_model = markovify.combine([cookie_text_model, const_text_model, tweet_text_model], [4, 1, 1])
# Print a couple lines to the terminal to show that everything's working...
print("Examples:")
for i in range(5):
print(const_and_cookie_model.make_short_sentence(240, tries=25))
# Now, open a temporary file and write some javascript surrounding our story.
with open(datadir+"/cookie.js.new", "w+") as file:
# NOTE: I don't escape anything here... with bad seed text it'd be quite possible to inject weird js, etc.
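    # A safer variant (not what this script does) would be to let the json module do the quoting,
    # e.g. roughly:
    #   import json
    #   file.write(json.dumps(cookie_text_model.make_short_sentence(240, tries=25)) + ",\n")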
file.write("window.fortuneCookies=[\n")
print("Running cookie")
    # Write 250 lines of junk into the js file. Note that leaving the trailing comma is ok, as javascript doesn't care.
for i in range(250):
file.write("\"" + cookie_text_model.make_short_sentence(240, tries=25) + "\",\n")
# Close it up!
file.write("];")
print("Running const + cookie")
file.write("window.constCookies=[\n")
for i in range(250):
file.write("\"" + const_and_cookie_model.make_short_sentence(240, tries=25) + "\",\n")
file.write("];")
print("Running const only")
file.write("window.constLines=[\n")
for i in range(250):
file.write("\"" + const_text_model.make_short_sentence(240, tries=25) + "\",\n")
file.write("];")
print("Running tweet only")
file.write("window.tweetLines=[\n")
for i in range(250):
file.write("\"" + tweet_text_model.make_short_sentence(240, tries=25) + "\",\n")
file.write("];")
print("Running tweet cookie")
file.write("window.tweetCookie=[\n")
for i in range(250):
file.write("\"" + tweet_and_cookie_model.make_short_sentence(240, tries=25) + "\",\n")
file.write("];")
print("Running everything")
file.write("window.everythingCookie=[\n")
for i in range(250):
file.write("\"" + everything_model.make_short_sentence(240, tries=25) + "\",\n")
file.write("];")
# Finally, copy our temp file over the old one, so clients can start seeing it.
copyfile(datadir+"/cookie.js.new", datadir+"/cookie.js")
| 36.60274 | 165 | 0.698915 | 342 | 0.063997 | 0 | 0 | 0 | 0 | 0 | 0 | 2,358 | 0.441243 |
fd76b6a6e3bed41850763cc3f44afdab15844d51 | 427 | py | Python | wsgi_microservice_middleware/__init__.py | presalytics/WSGI-Microservice-Middleware | 1dfcd1121d25569312d7c605d162cb52f38101e3 | ["MIT"] | 1 | 2020-08-13T05:31:01.000Z | 2020-08-13T05:31:01.000Z | wsgi_microservice_middleware/__init__.py | presalytics/WSGI-Microservice-Middleware | 1dfcd1121d25569312d7c605d162cb52f38101e3 | ["MIT"] | null | null | null | wsgi_microservice_middleware/__init__.py | presalytics/WSGI-Microservice-Middleware | 1dfcd1121d25569312d7c605d162cb52f38101e3 | ["MIT"] | null | null | null |
import environs
env = environs.Env()
env.read_env()
from wsgi_microservice_middleware.cors import CORSMiddleware
from wsgi_microservice_middleware.request_id import (
RequestIdFilter,
RequestIdMiddleware,
current_request_id,
RequestIdJsonLogFormatter
)
__all__ = [
'CORSMiddleware',
'RequestIdFilter',
'RequestIdMiddleware',
'current_request_id',
'RequestIdJsonLogFormatter'
]
| 17.791667 | 60 | 0.744731 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.236534 |
fd77738934a082ed69675adc328a1ec23a42bd8b | 686 | py | Python | car_manager.py | njiang6/turtle_crossing | 5445ca941bc53002299c60a0587d84f8a111f1be | ["Apache-2.0"] | 1 | 2021-03-24T02:21:03.000Z | 2021-03-24T02:21:03.000Z | car_manager.py | njiang6/turtle_crossing | 5445ca941bc53002299c60a0587d84f8a111f1be | ["Apache-2.0"] | null | null | null | car_manager.py | njiang6/turtle_crossing | 5445ca941bc53002299c60a0587d84f8a111f1be | ["Apache-2.0"] | null | null | null |
import turtle as t
import random
COLORS = ["red", "orange", "yellow", "green", "blue", "purple"]
STARTING_MOVE_DISTANCE = 5
MOVE_INCREMENT = 1
def next_level():
global STARTING_MOVE_DISTANCE
STARTING_MOVE_DISTANCE += MOVE_INCREMENT
class CarManager(t.Turtle):
def __init__(self):
super().__init__()
self.penup()
self.setheading(180)
self.y = round(random.randint(-250, 250) / 10) * 10
self.x = 300
self.goto(self.x, self.y)
self.shape("square")
self.shapesize(stretch_wid=1, stretch_len=2)
self.color(random.choice(COLORS))
def go_forward(self):
self.forward(STARTING_MOVE_DISTANCE)
| 23.655172 | 63 | 0.644315 | 440 | 0.641399 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 0.072886 |
fd78ccdbc7f44ee790bb4e0e5bb66afdadb94039 | 3,329 | py | Python | 2021/05_2/solution.py | budavariam/advent_of_code | 0903bcbb0df46371b6a340ca2be007dce6470c66 | ["MIT"] | null | null | null | 2021/05_2/solution.py | budavariam/advent_of_code | 0903bcbb0df46371b6a340ca2be007dce6470c66 | ["MIT"] | null | null | null | 2021/05_2/solution.py | budavariam/advent_of_code | 0903bcbb0df46371b6a340ca2be007dce6470c66 | ["MIT"] | 1 | 2022-02-11T13:14:50.000Z | 2022-02-11T13:14:50.000Z |
""" Advent of code 2021 day 05 / 2 """
import math
from os import path
import re
from collections import Counter
class Code(object):
def __init__(self, lines):
self.lines = lines
def printmap(self, dim, minx, miny, maxx, maxy):
for i in range(miny, maxy + 1):
ln = ""
for j in range(minx, maxx+1):
pos = f"{i}-{j}"
ln += str(dim.get(pos)) if dim.get(pos) is not None else '.'
print(ln)
print(dim)
def solve(self):
# print(self.lines)
minx, miny, maxx, maxy = 0, 0, 0, 0
dim = {}
cnt = 0
xa, xb, ya, yb = -1, -1, -1, -1
for line in self.lines:
x1, y1, x2, y2 = line
xa, xb = sorted([x1, x2])
ya, yb = sorted([y1, y2])
minx = min(minx, xa)
miny = min(miny, ya)
maxx = max(maxx, xb)
maxy = max(maxy, yb)
if x1 == x2:
# print("hor", y1, x1, y2, x2, ya, xa, yb, xb)
for i in range(ya, yb+1):
pos = f"{i}-{x1}"
if dim.get(pos) is not None:
dim[pos] += 1
else:
dim[pos] = 1
elif y1 == y2:
# print("vert", y1, x1, y2, x2, ya, xa, yb, xb)
for i in range(xa, xb+1):
pos = f"{y1}-{i}"
if dim.get(pos) is not None:
dim[pos] += 1
else:
dim[pos] = 1
else:
# print("diag", y1, x1, y2, x2, ya, xa, yb, xb)
if x1 < x2:
for i, x in enumerate(range(x1, x2+1)):
if y1 < y2:
pos = f"{y1+i}-{x}"
else:
pos = f"{y1-i}-{x}"
if dim.get(pos) is not None:
dim[pos] += 1
else:
dim[pos] = 1
else:
for i, x in enumerate(range(x2, x1+1)):
if y1 < y2:
pos = f"{y2-i}-{x}"
else:
pos = f"{y2+i}-{x}"
if dim.get(pos) is not None:
dim[pos] += 1
else:
dim[pos] = 1
# self.printmap(dim, minx, miny, maxx, maxy)
for i in dim.values():
if i > 1:
cnt += 1
return cnt
def preprocess(raw_data):
pattern = re.compile(r'(\d+),(\d+) -> (\d+),(\d+)')
processed_data = []
for line in raw_data.split("\n"):
match = re.match(pattern, line)
data = [int(match.group(1)), int(match.group(2)),
int(match.group(3)), int(match.group(4))]
# data = line
processed_data.append(data)
return processed_data
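# Illustrative note (the sample line is hypothetical, matching the puzzle's input format):
# a line such as "0,9 -> 5,9" is parsed by the pattern above into [0, 9, 5, 9],
# i.e. x1, y1, x2, y2 as unpacked later in Code.solve().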
def solution(data):
""" Solution to the problem """
lines = preprocess(data)
solver = Code(lines)
return solver.solve()
if __name__ == "__main__":
with(open(path.join(path.dirname(__file__), 'input.txt'), 'r')) as input_file:
print(solution(input_file.read()))
| 31.40566 | 82 | 0.393812 | 2,533 | 0.760889 | 0 | 0 | 0 | 0 | 0 | 0 | 431 | 0.129468 |
fd7a3d5f8bd77ce667a1424c233439cb51d4d806 | 2,032 | py | Python | examples/plot_tissue_specific_corrections.py | imagejan/starfish | adf48f4b30cfdf44ac8c9cc78fc469665ce7d594 | [
"MIT"
]
| null | null | null | examples/plot_tissue_specific_corrections.py | imagejan/starfish | adf48f4b30cfdf44ac8c9cc78fc469665ce7d594 | [
"MIT"
]
| null | null | null | examples/plot_tissue_specific_corrections.py | imagejan/starfish | adf48f4b30cfdf44ac8c9cc78fc469665ce7d594 | [
"MIT"
]
| null | null | null | """
Tissue Corrections
==================
"""
###################################################################################################
# .. _tutorial_removing_autoflourescence:
#
# Removing autofluorescence
# =========================
#
# In addition to the bright spots (signal) that we want to detect, microscopy experiments on tissue
# slices often have a non-zero amount of auto-fluorescence from the cell bodies. This can be mitigated
# by "clearing" strategies whereby tissue lipids and proteins are digested, or computationally by
# estimating and subtracting the background values.
#
# We use the same test image from the previous section to demonstrate how this can work.
#
# Clipping
# --------
# The simplest way to remove background is to set a global, (linear) cut-off and clip out the
# background values.
import starfish
import starfish.data
from starfish.image import Filter
from starfish.types import Axes
experiment: starfish.Experiment = starfish.data.ISS(use_test_data=True)
field_of_view: starfish.FieldOfView = experiment["fov_001"]
image: starfish.ImageStack = field_of_view.get_image("primary")
###################################################################################################
# Next, create the clip filter. Here we clip at the 97th percentile, which optimally
# separates the spots from the background.
clip_50 = Filter.Clip(p_min=97)
clipped: starfish.ImageStack = clip_50.run(image)
###################################################################################################
# plot both images
import matplotlib.pyplot as plt
import xarray as xr
# get the images
orig_plot: xr.DataArray = image.sel({Axes.CH: 0, Axes.ROUND: 0}).xarray.squeeze()
clip_plot: xr.DataArray = clipped.sel({Axes.CH: 0, Axes.ROUND: 0}).xarray.squeeze()
f, (ax1, ax2) = plt.subplots(ncols=2)
ax1.imshow(orig_plot)
ax1.set_title("original")
ax2.imshow(clip_plot)
ax2.set_title("clipped")
###################################################################################################
#
| 35.034483 | 102 | 0.599902 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,301 | 0.640256 |
fd7a65079d9a251ce00ad45789e94722c4e7ad4e | 146 | py | Python | cru_alaska_temperature/__init__.py | sc0tts/cruAKtemp | 84dbfc424f5f36bb0f0055b5290f0ab2063ae225 | [
"MIT"
]
| null | null | null | cru_alaska_temperature/__init__.py | sc0tts/cruAKtemp | 84dbfc424f5f36bb0f0055b5290f0ab2063ae225 | [
"MIT"
]
| 7 | 2017-04-25T21:50:47.000Z | 2018-03-19T17:39:28.000Z | cru_alaska_temperature/__init__.py | sc0tts/cruAKtemp | 84dbfc424f5f36bb0f0055b5290f0ab2063ae225 | [
"MIT"
]
| 2 | 2019-02-18T22:42:07.000Z | 2020-08-31T23:37:17.000Z | from .alaska_temperature import AlaskaTemperature
from .bmi import AlaskaTemperatureBMI
__all__ = ["AlaskaTemperature", "AlaskaTemperatureBMI"]
| 24.333333 | 55 | 0.835616 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 41 | 0.280822 |
fd7b422625225dcfe35545919a8429eaaa584545 | 378 | py | Python | Qualification/1-ForegoneSolution/Solution.py | n1try/codejam-2019 | 3cedc74915eca7384adaf8f6a68eeb21ada1beaf | [
"MIT"
]
| null | null | null | Qualification/1-ForegoneSolution/Solution.py | n1try/codejam-2019 | 3cedc74915eca7384adaf8f6a68eeb21ada1beaf | [
"MIT"
]
| null | null | null | Qualification/1-ForegoneSolution/Solution.py | n1try/codejam-2019 | 3cedc74915eca7384adaf8f6a68eeb21ada1beaf | [
"MIT"
]
| null | null | null | import re
t = int(input())
for i in range(0, t):
chars = input()
m1, m2 = [None] * len(chars), [None] * len(chars)
for j in range(0, len(chars)):
m1[j] = "3" if chars[j] == "4" else chars[j]
m2[j] = "1" if chars[j] == "4" else "0"
s1 = ''.join(m1)
s2 = ''.join(m2)
print("Case #{}: {} {}".format(i + 1, s1, re.sub(r'^0*', '', s2))) | 29.076923 | 70 | 0.457672 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.116402 |
fd7bd590362f7ad441cb4aaacc481be5a9c4d64c | 1,645 | py | Python | 1.imdbData.py | batucimenn/imdbScraperOnWaybackMachine2 | e6d92b5c794a2603a05e986b587a796d2a80fd8d | [
"MIT"
]
| null | null | null | 1.imdbData.py | batucimenn/imdbScraperOnWaybackMachine2 | e6d92b5c794a2603a05e986b587a796d2a80fd8d | [
"MIT"
]
| null | null | null | 1.imdbData.py | batucimenn/imdbScraperOnWaybackMachine2 | e6d92b5c794a2603a05e986b587a796d2a80fd8d | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# coding: utf-8
# Scrape movie data from IMDb
# In[ ]:
import csv
import pandas as pd
# Year range to collect data.
# In[ ]:
startYear=int(input("startYear: "))
finishYear=int(input("finishYear: "))
# File path to save. Ex: C:\Users\User\Desktop\newFile
# In[ ]:
filePath = input("File path: "+"r'")+("/")
# Create csv and set the titles.
# In[ ]:
with open(filePath+str(startYear)+"_"+str(finishYear)+".csv", mode='w', newline='') as yeni_dosya:
yeni_yazici = csv.writer(yeni_dosya, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
yeni_yazici.writerow(['Title'+";"+'Film'+";"+'Year'])
yeni_dosya.close()
# Download title.basics.tsv.gz from https://datasets.imdbws.com/. Extract data.tsv, print it into csv.
# In[ ]:
with open("data.tsv",encoding="utf8") as tsvfile:
tsvreader = csv.reader(tsvfile, delimiter="\t")
for line in tsvreader:
try:
ceviri=int(line[5])
if(ceviri>=startYear and ceviri<=finishYear and (line[1]=="movie" or line[1]=="tvMovie")):
print(line[0]+";"+line[3]+";"+line[5]+";"+line[1])
line0=line[0].replace("\"","")
line5=line[5].replace("\"","")
with open(filePath+str(startYear)+"_"+str(finishYear)+".csv", mode='a', newline='') as yeni_dosya:
yeni_yazici = csv.writer(yeni_dosya, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
yeni_yazici.writerow([line0+";"+line[3]+";"+line5])
yeni_dosya.close()
except:
pass
| 26.532258 | 117 | 0.570821 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 499 | 0.303343 |
fd7c26cf48ae51b52e75c459ca5537852b6f4936 | 2,680 | py | Python | effective_python/metaclass_property/descriptor_demo.py | ftconan/python3 | eb63ba33960072f792ecce6db809866b38c402f8 | [
"MIT"
]
| 1 | 2018-12-19T22:07:56.000Z | 2018-12-19T22:07:56.000Z | effective_python/metaclass_property/descriptor_demo.py | ftconan/python3 | eb63ba33960072f792ecce6db809866b38c402f8 | [
"MIT"
]
| 12 | 2020-03-14T05:32:26.000Z | 2022-03-12T00:08:49.000Z | effective_python/metaclass_property/descriptor_demo.py | ftconan/python3 | eb63ba33960072f792ecce6db809866b38c402f8 | [
"MIT"
]
| 1 | 2018-12-19T22:08:00.000Z | 2018-12-19T22:08:00.000Z | """
@author: magician
@file: descriptor_demo.py
@date: 2020/1/14
"""
from weakref import WeakKeyDictionary
class Homework(object):
"""
Homework
"""
def __init__(self):
self._grade = 0
@property
def grade(self):
return self._grade
@grade.setter
def grade(self, value):
if not(0 <= value <= 100):
            raise ValueError('Grade must be between 0 and 100')
self._grade = value
# class Exam(object):
# """
# Exam
# """
# def __init__(self):
# self._writing_grade = 0
# self._math_grade = 0
#
# @staticmethod
# def _check_grade(value):
# if not(0 <= value <= 100):
# raise ValueError('Grade must be between 0 and 100')
#
# @property
# def writing_grade(self):
# return self._writing_grade
#
# @writing_grade.setter
# def writing_grade(self, value):
# self._check_grade(value)
# self._writing_grade = value
#
# @property
# def math_grade(self):
# return self._math_grade
#
# @math_grade.setter
# def math_grade(self, value):
# self._check_grade(value)
# self._math_grade = value
class Grade(object):
"""
Grade
"""
def __init__(self):
# self._value = 0
# keep instance status
# self._values = {}
# preventing memory leaks
self._values = WeakKeyDictionary()
def __get__(self, instance, instance_type):
# return self._value
if instance is None:
return self
return self._values.get(instance, 0)
def __set__(self, instance, value):
if not (0 <= value <= 100):
raise ValueError('Grade must be between 0 and 100')
# self._value = value
self._values[instance] = value
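# Design note summarising the commented-out alternatives above: a single class-level
# value (self._value) would be shared by every instance that uses the descriptor, and a
# plain dict (self._values = {}) would hold strong references to every instance ever
# graded, keeping them alive indefinitely. WeakKeyDictionary keys the grades by instance
# while still allowing those instances to be garbage-collected.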
class Exam(object):
"""
Exam
"""
math_grade = Grade()
writing_grade = Grade()
science_grade = Grade()
if __name__ == '__main__':
galileo = Homework()
galileo.grade = 95
# first_exam = Exam()
# first_exam.writing_grade = 82
# first_exam.science_grade = 99
# print('Writing', first_exam.writing_grade)
# print('Science', first_exam.science_grade)
#
# second_exam = Exam()
# second_exam.writing_grade = 75
# second_exam.science_grade = 99
# print('Second', second_exam.writing_grade, 'is right')
# print('First', first_exam.writing_grade, 'is wrong')
first_exam = Exam()
first_exam.writing_grade = 82
second_exam = Exam()
second_exam.writing_grade = 75
print('First ', first_exam.writing_grade, 'is right')
print('Second ', second_exam.writing_grade, 'is right')
| 23.304348 | 65 | 0.596642 | 1,081 | 0.403358 | 0 | 0 | 225 | 0.083955 | 0 | 0 | 1,451 | 0.541418 |
fd7c5d171d30796fbb3b1df9d4223d6476d4d998 | 3,584 | py | Python | afk-q-babyai/babyai/layers/aggrerator.py | IouJenLiu/AFK | db2b47bb3a5614b61766114b87f143e4a61a4a8d | [
"MIT"
]
| 1 | 2022-03-12T03:10:29.000Z | 2022-03-12T03:10:29.000Z | afk-q-babyai/babyai/layers/aggrerator.py | IouJenLiu/AFK | db2b47bb3a5614b61766114b87f143e4a61a4a8d | [
"MIT"
]
| null | null | null | afk-q-babyai/babyai/layers/aggrerator.py | IouJenLiu/AFK | db2b47bb3a5614b61766114b87f143e4a61a4a8d | [
"MIT"
]
| null | null | null | import torch
import numpy as np
import torch.nn.functional as F
def masked_softmax(x, m=None, axis=-1):
'''
x: batch x time x hid
m: batch x time (optional)
'''
x = torch.clamp(x, min=-15.0, max=15.0)
if m is not None:
m = m.float()
x = x * m
e_x = torch.exp(x - torch.max(x, dim=axis, keepdim=True)[0])
if m is not None:
e_x = e_x * m
softmax = e_x / (torch.sum(e_x, dim=axis, keepdim=True) + 1e-6)
return softmax
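# Shape note (an illustration, mirroring how this helper is called from
# MultiHeadAttention below): for attention scores x of shape (batch, len_q, len_k)
# and a 0/1 mask m of the same shape, masked_softmax(x, m, axis=2) yields, for each
# query position, a distribution over key positions in which masked keys receive
# (near-)zero weight.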
class ScaledDotProductAttention(torch.nn.Module):
''' Scaled Dot-Product Attention '''
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = torch.nn.Dropout(attn_dropout)
def forward(self, q, k, v, mask):
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn / self.temperature
attn = masked_softmax(attn, mask, 2)
__attn = self.dropout(attn)
output = torch.bmm(__attn, v)
return output, attn
class MultiHeadAttention(torch.nn.Module):
''' From Multi-Head Attention module
https://github.com/jadore801120/attention-is-all-you-need-pytorch'''
def __init__(self, block_hidden_dim, n_head, dropout=0.1, q_dim=128):
super().__init__()
self.q_dim = q_dim
self.n_head = n_head
self.block_hidden_dim = block_hidden_dim
self.w_qs = torch.nn.Linear(q_dim, n_head * block_hidden_dim, bias=False)
self.w_ks = torch.nn.Linear(block_hidden_dim, n_head * block_hidden_dim, bias=False)
self.w_vs = torch.nn.Linear(block_hidden_dim, n_head * block_hidden_dim, bias=False)
torch.nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (q_dim * 2)))
torch.nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (block_hidden_dim * 2)))
torch.nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (block_hidden_dim * 2)))
self.attention = ScaledDotProductAttention(temperature=np.power(block_hidden_dim, 0.5))
self.fc = torch.nn.Linear(n_head * block_hidden_dim, block_hidden_dim)
self.layer_norm = torch.nn.LayerNorm(self.block_hidden_dim)
torch.nn.init.xavier_normal_(self.fc.weight)
self.dropout = torch.nn.Dropout(dropout)
def forward(self, q, mask, k, v):
# q: batch x len_q x hid
# k: batch x len_k x hid
# v: batch x len_v x hid
# mask: batch x len_q x len_k
# output: batch x len_q x hid
# attn: batch x len_q x len_k
batch_size, len_q = q.size(0), q.size(1)
len_k, len_v = k.size(1), v.size(1)
assert mask.size(1) == len_q
assert mask.size(2) == len_k
residual = q
q = self.w_qs(q).view(batch_size, len_q, self.n_head, self.block_hidden_dim)
k = self.w_ks(k).view(batch_size, len_k, self.n_head, self.block_hidden_dim)
v = self.w_vs(v).view(batch_size, len_v, self.n_head, self.block_hidden_dim)
q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, self.block_hidden_dim) # (n*b) x lq x dk
k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, self.block_hidden_dim) # (n*b) x lk x dk
v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, self.block_hidden_dim) # (n*b) x lv x dv
mask = mask.repeat(self.n_head, 1, 1) # (n*b) x .. x ..
output, attn = self.attention(q, k, v, mask=mask)
attn = attn.view(self.n_head, batch_size, len_q, -1)
attn = torch.mean(attn, 0) # batch x lq x lk
output = None
return output, attn | 41.195402 | 103 | 0.624163 | 3,098 | 0.864397 | 0 | 0 | 0 | 0 | 0 | 0 | 457 | 0.127511 |
fd816f646c55f1654d9547e1f480c4843279f30e | 707 | py | Python | analysis_llt/ml/cv/neighbors.py | Tammy-Lee/analysis-llt | ea1bb62d614bb75dac68c010a0cc524a5be185f2 | [
"MIT"
]
| null | null | null | analysis_llt/ml/cv/neighbors.py | Tammy-Lee/analysis-llt | ea1bb62d614bb75dac68c010a0cc524a5be185f2 | [
"MIT"
]
| null | null | null | analysis_llt/ml/cv/neighbors.py | Tammy-Lee/analysis-llt | ea1bb62d614bb75dac68c010a0cc524a5be185f2 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
"""
create on 2019-03-20 04:17
author @lilia
"""
import warnings
from sklearn.neighbors import KNeighborsClassifier
from analysis_llt.ml.cv.base import BaseCV
class KNNCV(BaseCV):
fit_predict_proba_ = False
def __init__(self, n_neighbors=5, cv=None, random_state=None, verbose=0, **model_params):
super(KNNCV, self).__init__(cv=cv, random_state=random_state, verbose=verbose, **model_params)
self.n_neighbors = n_neighbors
if 'predict_proba' in model_params:
warnings.warn("SVC does not have predict_proba function")
def build_model(self):
knn = KNeighborsClassifier(n_neighbors=self.n_neighbors, **self.model_params)
return knn
| 29.458333 | 102 | 0.708628 | 535 | 0.756719 | 0 | 0 | 0 | 0 | 0 | 0 | 129 | 0.182461 |
fd8175ceff7997ec372ad498a63c3ba3b5e8e259 | 1,066 | py | Python | tests/test_oas_cache.py | maykinmedia/zgw-consumers | 9b0759d9b7c3590b245004afd4c5e5474785bf91 | [
"MIT"
]
| 2 | 2021-04-25T11:29:33.000Z | 2022-03-08T14:06:58.000Z | tests/test_oas_cache.py | maykinmedia/zgw-consumers | 9b0759d9b7c3590b245004afd4c5e5474785bf91 | [
"MIT"
]
| 27 | 2020-04-01T07:33:02.000Z | 2022-03-14T09:11:05.000Z | tests/test_oas_cache.py | maykinmedia/zgw-consumers | 9b0759d9b7c3590b245004afd4c5e5474785bf91 | [
"MIT"
]
| 2 | 2020-07-30T15:40:47.000Z | 2020-11-30T10:56:29.000Z | import threading
from zds_client.oas import schema_fetcher
def test_schema_fetch_twice(oas):
schema = oas.fetch()
assert isinstance(schema, dict)
assert oas.mocker.call_count == 1
oas.fetch()
# check that the cache is used
assert oas.mocker.call_count == 1
def test_clear_caches_in_between(oas):
schema = oas.fetch()
assert isinstance(schema, dict)
assert oas.mocker.call_count == 1
schema_fetcher.cache.clear()
oas.fetch()
assert oas.mocker.call_count == 2
def test_cache_across_threads(oas):
def _target():
# disable the local python cache
schema_fetcher.cache._local_cache = {}
oas.fetch()
thread1 = threading.Thread(target=_target)
thread2 = threading.Thread(target=_target)
# start thread 1 and let it complete, this ensures the schema is stored in the
# cache
thread1.start()
thread1.join()
# start thread 2 and let it complete, we can now verify the call count
thread2.start()
thread2.join()
assert oas.mocker.call_count == 1
| 21.755102 | 82 | 0.684803 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 217 | 0.203565 |
fd81f57132ba4b8e36862c9d9eb8179dcba9623a | 4,165 | py | Python | src/uproot_browser/tree.py | amangoel185/uproot-browser | 8181913ac04d0318b05256923d8980d6d3acaa7f | [
"BSD-3-Clause"
]
| 12 | 2022-03-18T11:47:26.000Z | 2022-03-25T13:57:08.000Z | src/uproot_browser/tree.py | amangoel185/uproot-browser | 8181913ac04d0318b05256923d8980d6d3acaa7f | [
"BSD-3-Clause"
]
| 7 | 2022-03-18T11:40:36.000Z | 2022-03-29T22:15:01.000Z | src/uproot_browser/tree.py | amangoel185/uproot-browser | 8181913ac04d0318b05256923d8980d6d3acaa7f | [
"BSD-3-Clause"
]
| 1 | 2022-03-21T14:37:07.000Z | 2022-03-21T14:37:07.000Z | """
Display tools for TTrees.
"""
from __future__ import annotations
import dataclasses
import functools
from pathlib import Path
from typing import Any, Dict
import uproot
from rich.console import Console
from rich.markup import escape
from rich.text import Text
from rich.tree import Tree
console = Console()
__all__ = ("make_tree", "process_item", "print_tree", "UprootItem", "console")
def __dir__() -> tuple[str, ...]:
return __all__
@dataclasses.dataclass
class UprootItem:
path: str
item: Any
@property
def is_dir(self) -> bool:
return isinstance(self.item, (uproot.reading.ReadOnlyDirectory, uproot.TTree))
def meta(self) -> dict[str, Any]:
return process_item(self.item)
def label(self) -> Text:
return process_item(self.item)["label"] # type: ignore[no-any-return]
@property
def children(self) -> list[UprootItem]:
if not self.is_dir:
return []
items = {key.split(";")[0] for key in self.item.keys()}
return [
UprootItem(f"{self.path}/{key}", self.item[key]) for key in sorted(items)
]
def make_tree(node: UprootItem, *, tree: Tree | None = None) -> Tree:
"""
Given an object, build a rich.tree.Tree output.
"""
if tree is None:
tree = Tree(**node.meta())
else:
tree = tree.add(**node.meta())
for child in node.children:
make_tree(child, tree=tree)
return tree
@functools.singledispatch
def process_item(uproot_object: Any) -> Dict[str, Any]:
"""
Given an unknown object, return a rich.tree.Tree output. Specialize for known objects.
"""
name = getattr(uproot_object, "name", "<unnamed>")
classname = getattr(uproot_object, "classname", uproot_object.__class__.__name__)
label = Text.assemble(
"❓ ",
(f"{name} ", "bold"),
(classname, "italic"),
)
return {"label": label}
@process_item.register
def _process_item_tfile(
uproot_object: uproot.reading.ReadOnlyDirectory,
) -> Dict[str, Any]:
"""
Given an TFile, return a rich.tree.Tree output.
"""
path = Path(uproot_object.file_path)
result = {
"label": Text.from_markup(
f":file_folder: [link file://{path}]{escape(path.name)}"
),
"guide_style": "bold bright_blue",
}
return result
@process_item.register
def _process_item_ttree(uproot_object: uproot.TTree) -> Dict[str, Any]:
"""
Given an tree, return a rich.tree.Tree output.
"""
label = Text.assemble(
"🌴 ",
(f"{uproot_object.name} ", "bold"),
f"({uproot_object.num_entries:g})",
)
result = {
"label": label,
"guide_style": "bold bright_green",
}
return result
@process_item.register
def _process_item_tbranch(uproot_object: uproot.TBranch) -> Dict[str, Any]:
"""
Given an branch, return a rich.tree.Tree output.
"""
jagged = isinstance(
uproot_object.interpretation, uproot.interpretation.jagged.AsJagged
)
icon = "🍃 " if jagged else "🍁 "
label = Text.assemble(
icon,
(f"{uproot_object.name} ", "bold"),
(f"{uproot_object.typename}", "italic"),
)
result = {"label": label}
return result
@process_item.register
def _process_item_th(uproot_object: uproot.behaviors.TH1.Histogram) -> Dict[str, Any]:
"""
Given an histogram, return a rich.tree.Tree output.
"""
icon = "📊 " if uproot_object.kind == "COUNT" else "📈 "
sizes = " × ".join(f"{len(ax)}" for ax in uproot_object.axes)
label = Text.assemble(
icon,
(f"{uproot_object.name} ", "bold"),
(f"{uproot_object.classname} ", "italic"),
f"({sizes})",
)
result = {"label": label}
return result
# pylint: disable-next=redefined-outer-name
def print_tree(entry: str, *, console: Console = console) -> None:
"""
Prints a tree given a specification string. Currently, that must be a
single filename. Colons are not allowed currently in the filename.
"""
upfile = uproot.open(entry)
tree = make_tree(UprootItem("/", upfile))
console.print(tree)
| 25.090361 | 90 | 0.623049 | 650 | 0.155391 | 0 | 0 | 3,006 | 0.718623 | 0 | 0 | 1,253 | 0.299546 |
fd83d8e2ff034305d143f2f77d3ed14a017cf93e | 5,256 | py | Python | backend/apps/tasks/models.py | HerlanAssis/simple-project-manager | 800c833ec0cbeba848264753d79c5ecedc54cc39 | [
"MIT"
]
| 1 | 2019-06-14T20:34:19.000Z | 2019-06-14T20:34:19.000Z | backend/apps/tasks/models.py | HerlanAssis/simple-project-manager | 800c833ec0cbeba848264753d79c5ecedc54cc39 | [
"MIT"
]
| 3 | 2020-02-11T23:42:20.000Z | 2020-06-25T17:35:48.000Z | backend/apps/tasks/models.py | HerlanAssis/simple-project-manager | 800c833ec0cbeba848264753d79c5ecedc54cc39 | [
"MIT"
]
| null | null | null | from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from apps.core.models import BaseModel
from apps.core.utils import HASH_MAX_LENGTH, create_hash, truncate
from django.utils.encoding import python_2_unicode_compatible
from django.utils import timezone
from django.db.models import F
import datetime
TODO = 'TODO'
DOING = 'DOING'
BLOCKED = 'BLOCKED'
DONE = 'DONE'
PROGRESS = (
(TODO, 'To Do'),
(DOING, 'Doing'),
(BLOCKED, 'Blocked'),
(DONE, 'Done'),
)
@python_2_unicode_compatible
class TaskManager(BaseModel):
project_name = models.CharField(max_length=256)
project_id = models.BigIntegerField(unique=True, editable=False)
owner = models.ForeignKey(
User, related_name="managed_tasks", on_delete=models.CASCADE)
invitation_code = models.CharField(
max_length=HASH_MAX_LENGTH, default=create_hash, unique=True, editable=False)
class Meta:
unique_together = ['owner', 'project_id']
@property
def qtd_overdue_tasks(self):
return self.tasks.exclude(status=DONE).filter(expected_date__lt=datetime.date.today()).count()
@property
def qtd_blocked_tasks(self):
return self.tasks.filter(status=BLOCKED).count()
@property
def qtd_tasks_completed_late(self):
return self.tasks.filter(status=DONE).filter(conclusion_date__gt=F('expected_date')).count()
@property
def qtd_tasks(self):
return self.tasks.all().count()
@property
def progress(self):
resultado = 0.0
try:
resultado = "{0:.2f}".format(self.tasks.filter(status=DONE).count()/self.tasks.all().count() * 100)
except ZeroDivisionError as e:
pass
return resultado
@property
def qtd_completed_tasks(self):
return self.tasks.filter(status=DONE).count()
@property
def qtd_open_tasks(self):
return self.tasks.exclude(status=DONE).count()
def notify(self, message, **kwargs):
created = kwargs['created']
message = "[{}]\n{}".format(self.project_name, message)
notify_model = self.vigilantes.model
notify_model.notify(self.vigilantes.all(), created, message)
def resetInvitationCode(self):
self.invitation_code = create_hash()
self.save()
def __str__(self):
return "{}".format(self.project_name)
@python_2_unicode_compatible
class Task(BaseModel):
status = models.CharField(
max_length=64,
choices=PROGRESS,
default=TODO,
)
title = models.CharField(max_length=32)
description = models.CharField(max_length=256, null=True, blank=True)
task_manager = models.ForeignKey(
TaskManager, related_name="tasks", on_delete=models.CASCADE)
owner = models.ForeignKey(
User, related_name="tasks", on_delete=models.CASCADE)
responsible = models.ForeignKey(
User, related_name="responsibilities_tasks", on_delete=models.CASCADE, null=True, blank=True)
expected_date = models.DateField(blank=True)
conclusion_date = models.DateTimeField(blank=True, null=True, editable=False)
def save(self, *args, **kwargs):
if self.status == DONE:
self.conclusion_date = datetime.datetime.now()
else:
self.conclusion_date = None
task = super(Task, self).save(*args, **kwargs)
return task
@property
def is_overdue(self):
if self.expected_date is None:
return False
return self.expected_date > datetime.date.today()
@property
def expires_today(self):
if self.expected_date is None:
return False
return self.expected_date == datetime.date.today() and self.status != DONE
# notify_task_manager
def notify(self, **kwargs):
responsible_name = "-"
if self.responsible is not None:
responsible_name = self.responsible.get_username()
message = "Código: {}\nTítulo: {}\nStatus: {}\nResponsável: {}".format(self.id, self.title, self.get_status_display(), responsible_name )
self.task_manager.notify(message, **kwargs)
def __str__(self):
return "{}-{}".format(self.id, self.title)
@python_2_unicode_compatible
class Note(BaseModel):
owner = models.ForeignKey(
User, related_name="notes", on_delete=models.CASCADE)
task = models.ForeignKey(
Task, related_name="task_notes", on_delete=models.CASCADE, blank=True)
description = models.CharField(max_length=256)
def __str__(self):
return truncate(self.description, 10)
@python_2_unicode_compatible
class Release(BaseModel):
completed_on = models.DateField()
closed = models.BooleanField(default=False)
is_final_release = models.BooleanField(default=False)
title = models.CharField(max_length=32)
description = models.CharField(max_length=256, blank=True)
task_manager = models.ForeignKey(
TaskManager, related_name="releases", on_delete=models.CASCADE)
def __str__(self):
return self.title
# method for updating
@receiver(post_save, sender=Task)
def notify(sender, instance, **kwargs):
instance.notify(**kwargs)
| 30.917647 | 145 | 0.68398 | 4,435 | 0.843316 | 0 | 0 | 4,654 | 0.884959 | 0 | 0 | 313 | 0.059517 |
fd84a04d43460db0ba028f9e178dd3ce7cffe504 | 2,650 | py | Python | eland/tests/dataframe/test_aggs_pytest.py | redNixon/eland | 1b9cb1db6d30f0662fe3679c7bb31e2c0865f0c3 | [
"Apache-2.0"
]
| null | null | null | eland/tests/dataframe/test_aggs_pytest.py | redNixon/eland | 1b9cb1db6d30f0662fe3679c7bb31e2c0865f0c3 | [
"Apache-2.0"
]
| null | null | null | eland/tests/dataframe/test_aggs_pytest.py | redNixon/eland | 1b9cb1db6d30f0662fe3679c7bb31e2c0865f0c3 | [
"Apache-2.0"
]
| null | null | null | # Copyright 2019 Elasticsearch BV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# File called _pytest for PyCharm compatibility
import numpy as np
from pandas.util.testing import assert_almost_equal
from eland.tests.common import TestData
class TestDataFrameAggs(TestData):
def test_basic_aggs(self):
pd_flights = self.pd_flights()
ed_flights = self.ed_flights()
pd_sum_min = pd_flights.select_dtypes(include=[np.number]).agg(["sum", "min"])
ed_sum_min = ed_flights.select_dtypes(include=[np.number]).agg(["sum", "min"])
# Eland returns all float values for all metric aggs, pandas can return int
# TODO - investigate this more
pd_sum_min = pd_sum_min.astype("float64")
assert_almost_equal(pd_sum_min, ed_sum_min)
pd_sum_min_std = pd_flights.select_dtypes(include=[np.number]).agg(
["sum", "min", "std"]
)
ed_sum_min_std = ed_flights.select_dtypes(include=[np.number]).agg(
["sum", "min", "std"]
)
print(pd_sum_min_std.dtypes)
print(ed_sum_min_std.dtypes)
assert_almost_equal(pd_sum_min_std, ed_sum_min_std, check_less_precise=True)
def test_terms_aggs(self):
pd_flights = self.pd_flights()
ed_flights = self.ed_flights()
pd_sum_min = pd_flights.select_dtypes(include=[np.number]).agg(["sum", "min"])
ed_sum_min = ed_flights.select_dtypes(include=[np.number]).agg(["sum", "min"])
# Eland returns all float values for all metric aggs, pandas can return int
# TODO - investigate this more
pd_sum_min = pd_sum_min.astype("float64")
assert_almost_equal(pd_sum_min, ed_sum_min)
pd_sum_min_std = pd_flights.select_dtypes(include=[np.number]).agg(
["sum", "min", "std"]
)
ed_sum_min_std = ed_flights.select_dtypes(include=[np.number]).agg(
["sum", "min", "std"]
)
print(pd_sum_min_std.dtypes)
print(ed_sum_min_std.dtypes)
assert_almost_equal(pd_sum_min_std, ed_sum_min_std, check_less_precise=True)
| 37.323944 | 86 | 0.672075 | 1,859 | 0.701509 | 0 | 0 | 0 | 0 | 0 | 0 | 988 | 0.37283 |
fd8681cae85c92327aba29d9f6d3628698abb698 | 1,811 | py | Python | frootspi_examples/launch/conductor.launch.py | SSL-Roots/FrootsPi | 3aff59342a9d3254d8b089b66aeeed59bcb66c7b | [
"Apache-2.0"
]
| 2 | 2021-11-27T10:57:01.000Z | 2021-11-27T11:25:52.000Z | frootspi_examples/launch/conductor.launch.py | SSL-Roots/FrootsPi | 3aff59342a9d3254d8b089b66aeeed59bcb66c7b | [
"Apache-2.0"
]
| 1 | 2018-07-31T13:29:57.000Z | 2018-07-31T13:36:50.000Z | frootspi_examples/launch/conductor.launch.py | SSL-Roots/FrootsPi | 3aff59342a9d3254d8b089b66aeeed59bcb66c7b | [
"Apache-2.0"
]
| null | null | null | # Copyright 2021 Roots
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import ComposableNodeContainer
from launch_ros.actions import PushRosNamespace
from launch_ros.descriptions import ComposableNode
def generate_launch_description():
declare_arg_robot_id = DeclareLaunchArgument(
'id', default_value='0',
description=('Set own ID.')
)
push_ns = PushRosNamespace(['robot', LaunchConfiguration('id')])
# robot_id = LaunchConfiguration('robot_id')
container = ComposableNodeContainer(
name='frootspi_container',
namespace='',
package='rclcpp_components',
        executable='component_container',  # component_container_mt is the multi-threaded variant
composable_node_descriptions=[
ComposableNode(
package='frootspi_conductor',
plugin='frootspi_conductor::Conductor',
name='frootspi_conductor',
extra_arguments=[{'use_intra_process_comms': True}],
),
],
output='screen',
)
return LaunchDescription([
declare_arg_robot_id,
push_ns,
container
])
| 34.826923 | 81 | 0.703479 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 837 | 0.461666 |
fd876869a60981b01094fa1c90ddae1cb851c885 | 1,639 | py | Python | src/vnf/l23filter/controllers/InitializeDbController.py | shield-h2020/vnsfs | 864bdd418d3910b86783044be94d2bdb07e95aec | [
"Apache-2.0"
]
| 2 | 2018-11-06T17:55:56.000Z | 2021-02-09T07:40:17.000Z | src/vnf/l23filter/controllers/InitializeDbController.py | shield-h2020/vnsfs | 864bdd418d3910b86783044be94d2bdb07e95aec | [
"Apache-2.0"
]
| null | null | null | src/vnf/l23filter/controllers/InitializeDbController.py | shield-h2020/vnsfs | 864bdd418d3910b86783044be94d2bdb07e95aec | [
"Apache-2.0"
]
| 4 | 2018-03-28T18:06:26.000Z | 2021-07-17T00:33:55.000Z | import logging
from sqlalchemy import create_engine, event
from configuration import config as cnf
from helpers.DbHelper import on_connect, db_session, assert_database_type
from models import Base, Flow
# from models.depreciated import Metric
logging.basicConfig()
logging.getLogger('sqlalchemy.engine').setLevel(logging.ERROR)
class InitializeDbController:
def create_DB(self):
mysqldbType = "mysql"
connection_string = None
# empty string
connection_string = mysqldbType + cnf.DATABASE_CONN_STRING
print(connection_string)
# if connection_string.startswith('sqlite'):
# db_file = re.sub("sqlite.*:///", "", connection_string)
# os.makedirs(os.path.dirname(db_file))
engine = create_engine(connection_string, echo=False)
# event.listen(engine, 'connect', on_connect)
conn = engine.connect()
conn.execute("commit")
conn.execute("CREATE DATABASE IF NOT EXISTS test;")
conn.close()
def init_DB(self):
# if connection_string.startswith('sqlite'):
# db_file = re.sub("sqlite.*:///", "", connection_string)
# os.makedirs(os.path.dirname(db_file))
# 3 commands for creating database
base = Base.Base()
Flow.Flow()
engine = assert_database_type()
base.metadata.create_all(engine)
response = "OK"
return response
def delete_DB(self):
engine = assert_database_type()
base = Base.Base()
for tbl in reversed(base.metadata.sorted_tables):
tbl.drop(engine, checkfirst=True)
| 26.015873 | 73 | 0.649786 | 1,303 | 0.794997 | 0 | 0 | 0 | 0 | 0 | 0 | 503 | 0.306894 |
fd88ba06d62178ae22d727dfd3879ac3e980173e | 166 | py | Python | tests/web_platform/css_flexbox_1/test_flexbox_flex_1_1_Npercent.py | fletchgraham/colosseum | 77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f | [
"BSD-3-Clause"
]
| null | null | null | tests/web_platform/css_flexbox_1/test_flexbox_flex_1_1_Npercent.py | fletchgraham/colosseum | 77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f | [
"BSD-3-Clause"
]
| null | null | null | tests/web_platform/css_flexbox_1/test_flexbox_flex_1_1_Npercent.py | fletchgraham/colosseum | 77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f | [
"BSD-3-Clause"
]
| 1 | 2020-01-16T01:56:41.000Z | 2020-01-16T01:56:41.000Z | from tests.utils import W3CTestCase
class TestFlexbox_Flex11Npercent(W3CTestCase):
vars().update(W3CTestCase.find_tests(__file__, 'flexbox_flex-1-1-Npercent'))
| 27.666667 | 80 | 0.807229 | 127 | 0.76506 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.162651 |
fd88c78266fc4209a289fbc25268f76bce338838 | 150 | py | Python | frontends/python/tests/analysis/constant_attribute.py | aardwolf-sfl/aardwolf | 33bfe3e0649a73aec7efa0fa80bff8077b550bd0 | [
"MIT"
]
| 2 | 2020-08-15T08:55:39.000Z | 2020-11-09T17:31:16.000Z | frontends/python/tests/analysis/constant_attribute.py | aardwolf-sfl/aardwolf | 33bfe3e0649a73aec7efa0fa80bff8077b550bd0 | [
"MIT"
]
| null | null | null | frontends/python/tests/analysis/constant_attribute.py | aardwolf-sfl/aardwolf | 33bfe3e0649a73aec7efa0fa80bff8077b550bd0 | [
"MIT"
]
| null | null | null | # AARD: function: __main__
# AARD: #1:1 -> :: defs: %1 / uses: [@1 4:1-4:22] { call }
'value: {}'.format(3)
# AARD: @1 = constant_attribute.py
| 21.428571 | 63 | 0.546667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 134 | 0.893333 |
fd88d6da1a9e31351274cdc8c9d06c97bd2fa421 | 884 | py | Python | rl_server/tensorflow/networks/layer_norm.py | parilo/tars-rl | 17595905a0d1bdc90fe3d8f793acb60de96ea897 | [
"MIT"
]
| 9 | 2019-03-11T11:02:12.000Z | 2022-03-10T12:53:25.000Z | rl_server/tensorflow/networks/layer_norm.py | parilo/tars-rl | 17595905a0d1bdc90fe3d8f793acb60de96ea897 | [
"MIT"
]
| 1 | 2021-01-06T20:18:33.000Z | 2021-01-06T20:19:53.000Z | rl_server/tensorflow/networks/layer_norm.py | parilo/tars-rl | 17595905a0d1bdc90fe3d8f793acb60de96ea897 | [
"MIT"
]
| 3 | 2019-01-19T03:32:26.000Z | 2020-11-29T18:15:57.000Z | from tensorflow.python.keras.layers import Layer
from tensorflow.python.keras import backend as K
class LayerNorm(Layer):
def __init__(self, axis=-1, eps=1e-6, **kwargs):
self.axis = axis
self.eps = eps
super(LayerNorm, self).__init__(**kwargs)
def build(self, input_shape):
input_dim = input_shape[self.axis]
self.gamma = self.add_weight(shape=(input_dim,), initializer="ones", name="gamma")
self.beta = self.add_weight(shape=(input_dim,), initializer="zeros", name="beta")
super(LayerNorm, self).build(input_shape)
def call(self, x):
mean = K.mean(x, axis=self.axis, keepdims=True)
std = K.std(x, axis=self.axis, keepdims=True)
out = self.gamma * (x - mean) / (std + self.eps) + self.beta
return out
def compute_output_shape(self, input_shape):
return input_shape
| 34 | 90 | 0.645928 | 783 | 0.885747 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.029412 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.