hexsha stringlengths 40-40 | size int64 5-2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3-248 | max_stars_repo_name stringlengths 5-125 | max_stars_repo_head_hexsha stringlengths 40-78 | max_stars_repo_licenses listlengths 1-10 | max_stars_count int64 1-191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24-24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24-24 ⌀ | max_issues_repo_path stringlengths 3-248 | max_issues_repo_name stringlengths 5-125 | max_issues_repo_head_hexsha stringlengths 40-78 | max_issues_repo_licenses listlengths 1-10 | max_issues_count int64 1-67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24-24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24-24 ⌀ | max_forks_repo_path stringlengths 3-248 | max_forks_repo_name stringlengths 5-125 | max_forks_repo_head_hexsha stringlengths 40-78 | max_forks_repo_licenses listlengths 1-10 | max_forks_count int64 1-105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24-24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24-24 ⌀ | content stringlengths 5-2.06M | avg_line_length float64 1-1.02M | max_line_length int64 3-1.03M | alphanum_fraction float64 0-1 | count_classes int64 0-1.6M | score_classes float64 0-1 | count_generators int64 0-651k | score_generators float64 0-1 | count_decorators int64 0-990k | score_decorators float64 0-1 | count_async_functions int64 0-235k | score_async_functions float64 0-1 | count_documentation int64 0-1.04M | score_documentation float64 0-1 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
473f4dc9a600b8c43aff418b905edef20e705990 | 266 | py | Python | Ex031 - Custo de viagem.py | MarcusMendes81/Python | 4af6653da324604930d24542a84a530348029d39 | [
"Apache-2.0"
]
| null | null | null | Ex031 - Custo de viagem.py | MarcusMendes81/Python | 4af6653da324604930d24542a84a530348029d39 | [
"Apache-2.0"
]
| null | null | null | Ex031 - Custo de viagem.py | MarcusMendes81/Python | 4af6653da324604930d24542a84a530348029d39 | [
"Apache-2.0"
]
| null | null | null | km = float(input('Digite qual a distância da sua viagem em km: '))
if km <= 200:
    preço = km * 0.50   # trips of up to 200 km cost R$0.50 per km
    print('O valor da sua viagem é de {:.2f}R$'.format(preço))
else:
    preço = km * 0.45   # longer trips get the cheaper R$0.45 per km rate
    print('O valor da sua viagem é de {:.2f}R$'.format(preço))
| 29.555556 | 67 | 0.586466 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 124 | 0.454212 |
473fc3dd716cb4c46c867f24ed27e15522aed178 | 1,802 | py | Python | tests/post/test_metrics_server.py | ssalaues/metalk8s | cca4a4c64fe9cd4d7b87717aa3fda1642144da4b | [
"Apache-2.0"
]
| null | null | null | tests/post/test_metrics_server.py | ssalaues/metalk8s | cca4a4c64fe9cd4d7b87717aa3fda1642144da4b | [
"Apache-2.0"
]
| null | null | null | tests/post/test_metrics_server.py | ssalaues/metalk8s | cca4a4c64fe9cd4d7b87717aa3fda1642144da4b | [
"Apache-2.0"
]
| null | null | null | import json
import kubernetes.config
import pytest_bdd
import pytest_bdd.parsers
import utils.helper
pytest_bdd.scenarios('features/metrics_server.feature')
@pytest_bdd.when('I wait for metrics-server to be initialized')
def wait_until_initialized(kubeconfig):
client = kubernetes.config.new_client_from_config(config_file=kubeconfig)
# It can take up to a minute before metrics-server scraped some stats, see
# https://github.com/kubernetes-incubator/metrics-server/issues/134 and
# https://github.com/kubernetes-incubator/metrics-server/issues/136
for _ in utils.helper.retry(90, wait=1):
try:
(_, response_code, _) = client.call_api(
'/api/v1/namespaces/kube-system/services'
'/https:metrics-server:443/proxy/healthz',
'GET', _preload_content=False)
except kubernetes.client.rest.ApiException as exc:
response_code = exc.status
if response_code == 200:
break
@pytest_bdd.when(pytest_bdd.parsers.parse('I GET a {kind} from {path}'))
def raw_request(request, kubeconfig, kind, path):
client = kubernetes.config.new_client_from_config(config_file=kubeconfig)
(response, response_code, response_headers) = client.call_api(
path, 'GET', _preload_content=False)
assert response_code == 200
assert response_headers['content-type'] == 'application/json'
request.raw_response = json.loads(response.data.decode('utf-8'))
assert request.raw_response['kind'] == kind
@pytest_bdd.then(pytest_bdd.parsers.parse(
'I should count as many nodes as {group_name} hosts'))
def node_count_match(request, inventory_obj, group_name):
assert len(request.raw_response['items']) == \
len(inventory_obj.get_groups_dict()[group_name])
| 34 | 78 | 0.712542 | 0 | 0 | 0 | 0 | 1,632 | 0.90566 | 0 | 0 | 514 | 0.285239 |
47405d3453babd25d32b1eab577b6c049070e9a0 | 52 | py | Python | src/config.py | kimminwyk/apache-log-parsing | 1fcf06dc349c37ab02b91c2c48e3dc8f6f5d4b4e | [
"MIT"
]
| 1 | 2021-03-31T00:14:38.000Z | 2021-03-31T00:14:38.000Z | src/config.py | kimminwyk/apache-log-parsing | 1fcf06dc349c37ab02b91c2c48e3dc8f6f5d4b4e | [
"MIT"
]
| null | null | null | src/config.py | kimminwyk/apache-log-parsing | 1fcf06dc349c37ab02b91c2c48e3dc8f6f5d4b4e | [
"MIT"
]
| null | null | null | default = False
actions = 'store_true'
ENC = 'utf-8' | 17.333333 | 22 | 0.692308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0.365385 |
4740a5904496fd65f865863d0eb457e3b97598a9 | 10,751 | py | Python | UniGrammar/tools/regExps/python/prelifter.py | UniGrammar/UniGrammar.py | 560152ff5ff3f0651171044f6b5046fd8d895960 | [
"Unlicense"
]
| 2 | 2020-06-17T16:57:23.000Z | 2022-02-24T13:29:01.000Z | UniGrammar/tools/regExps/python/prelifter.py | KOLANICH/UniGrammar.py | d83f2f605073898b454868b98e2ecdefc4d9f7ca | [
"Unlicense"
]
| null | null | null | UniGrammar/tools/regExps/python/prelifter.py | KOLANICH/UniGrammar.py | d83f2f605073898b454868b98e2ecdefc4d9f7ca | [
"Unlicense"
]
| null | null | null | import sre_constants as sc
import sre_parse as sp
import typing
import unicodedata
from pprint import pprint
from RichConsole import groups, rsjoin
from .knowledge import CAPTURE_GROUP, LITERAL_STR
class RXASTNode:
"""Current AST generated by sre_parse is badly structured and doesn't suit well for our purposes. So we lift into an own AST class for RX first, and then do multiple passes"""
__slots__ = ("name", "type", "argsDic")
HIGHLIGHT = (groups.Fore.lightmagentaEx, groups.Fore.lightcyanEx, groups.Fore.lightyellowEx)
def __init__(self, name: typing.Optional[str], typ: int, argsDic: typing.Mapping[str, typing.Any]):
self.__class__.name.__set__(self, name)
self.__class__.type.__set__(self, typ)
self.__class__.argsDic.__set__(self, argsDic)
def __getattr__(self, k: str):
try:
return self.argsDic[k]
except KeyError as ex:
raise AttributeError(*ex.args) from ex
def __setattr__(self, k: str, v):
if k in self.__class__.__slots__:
#raise Exception("Fucking shit, it must never be called in the case of slots")
getattr(self.__class__, k).__set__(self, v)
else:
self.argsDic[k] = v
def __rsrepr__(self):
return groups.Fore.lightgreenEx(self.__class__.__name__) + "(" + rsjoin(groups.Fore.lightblueEx(", "), (hl(getattr(self, k).__rsrepr__() if hasattr(getattr(self, k), "__rsrepr__") else repr(getattr(self, k))) for k, hl in zip(self.__class__.__slots__, self.__class__.HIGHLIGHT))) + ")"
def __repr__(self):
return str(self.__rsrepr__())
class RecursivePass:
"""A lot of flexibility is built into this class. Don't optimize it away!"""
__slots__ = ("preLifter",)
RECURSIVE_ARG = "children"
DEPENDS = None # Used for documenting passes dependencies, not enforced for now
def __init__(self, preLifter):
self.preLifter = preLifter
def shouldProcess(self, node):
return True
def processNode(self, parsed):
raise NotImplementedError
def processNodeIfNeeded(self, node):
if self.shouldProcess(node):
node = self.processNode(node)
return node
def shouldRecurse(self, node):
return hasattr(node, self.__class__.RECURSIVE_ARG)
def recurseIntoNodeIfNeeded(self, node):
if self.shouldRecurse(node):
node = self.recurseIntoNode(node)
return node
def processChild(self, node):
node = self.processNodeIfNeeded(node)
node = self.recurseIntoNodeIfNeeded(node)
return node
def processChildren(self, children):
return [self.processChild(el) for el in children]
def recurseIntoNode(self, node):
children = getattr(node, self.__class__.RECURSIVE_ARG)
children = self.processChildren(children)
setattr(node, self.__class__.RECURSIVE_ARG, children)
return node
def __call__(self, node):
return self.processChild(node)
class SingleElPreLifter:
__slots__ = ("visitor",)
def __init__(self, visitor):
self.visitor = visitor
def __call__(self, node):
print(node)
tp = node[0]
args = node[1]
if not isinstance(args, (list, tuple)):
args = (args,)
argNames = getattr(self.visitor, tp.name)
if isinstance(argNames, str):
argMapping = {argNames: args}
else:
argMapping = dict(zip(argNames, args))
return RXASTNode(None, tp, argMapping)
class REFirstPassVisitor:
ASSERT = ASSERT_NOT = ("relativePosition", "children")
SUBPATTERN = ("id", "children")
BRANCH = ("unkn", "children")
MAX_REPEAT = MIN_REPEAT = ("minCount", "maxCount", "children")
GROUPREF = ("backRef",)
GROUPREF_EXISTS = ("backRef", "trueBranch", "falseBranch")
IN = "args"
LITERAL = ("charCode",)
ANY = ("reserved",)
AT = ("where",)
singleElPreLifter = SingleElPreLifter(REFirstPassVisitor)
class RE_IN_FirstPassVisitor:
NEGATE = ()
LITERAL = ("charCode",)
CATEGORY = ("enumV",)
RANGE = ("start", "stop")
re_IN_FlatPreLifter = SingleElPreLifter(RE_IN_FirstPassVisitor)
class RecursivePreLifter(RecursivePass):
"""Creates the initial AST. It is a prerequisite of every other pass because `RecursivePass` assummes that this was done in order to operate. It was possible to implement `RecursivePreLifter` above `RecursivePass` only because recursing is done only after `processNode` is called"""
__slots__ = ()
DEPENDS = ()
def processNode(self, node):
node.children = [singleElPreLifter(el) for el in node.children]
return node
def shouldProcess(self, node):
return self.shouldRecurse(node)
RecursivePass.DEPENDS = (RecursivePreLifter,) # The default value.
class Lift_IN_args(RecursivePass):
"""Lifts `IN` subtrees. They are not lifted in `RecursivePreLifter` because they have own enum namespace, potentially conflicting with `OPCODES`."""
__slots__ = ()
DEPENDS = (RecursivePreLifter,)
def processNode(self, node):
node.children = [re_IN_FlatPreLifter(el) for el in node.args]
del node.argsDic["args"]
return node
def shouldProcess(self, node):
return node.type == sc.IN
class CombineLiterals(RecursivePass):
"""The regex engine used in python treats a sequence of fixed chars as ... sequence of fixed chars.
For our purposes it may make sense to combine them into words, because some grammars DSLs support keywords, it also allows more compact grammars"""
__slots__ = ()
DEPENDS = (RecursivePreLifter, Lift_IN_args) # without Lift_IN_args children in IN will be incorrect
def processNode(self, node):
processed = []
currentString = []
def finishStr():
nonlocal currentString
if currentString:
v = "".join(chr(el.charCode) for el in currentString)
if len(v) == 1:
typ = sc.LITERAL
else:
typ = LITERAL_STR
res = RXASTNode(None, typ, {"literal": v})
currentString = []
processed.append(res)
for el in node.children:
if el.type == sc.LITERAL:
currentString.append(el)
else:
finishStr()
processed.append(el)
finishStr()
node.children = processed
return node
def shouldProcess(self, node):
return hasattr(node, "children")
class AttrReplacerPass(RecursivePass):
"""Replaces one attr with another one"""
__slots__ = ("shouldDelete",)
DEPENDS = (RecursivePreLifter,)
SRC_ID_ATTR = None
TGT_ID_ATTR = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.shouldDelete = self.__class__.SRC_ID_ATTR != self.__class__.TGT_ID_ATTR
def remapValue(self, prevValue):
raise NotImplementedError
def processNode(self, node):
iD = getattr(node, self.__class__.SRC_ID_ATTR)
iD = self.remapValue(iD)
if self.shouldDelete:
del node.argsDic[self.__class__.SRC_ID_ATTR]
setattr(node, self.__class__.TGT_ID_ATTR, iD)
return node
def shouldProcess(self, node):
raise NotImplementedError
class DecodeIdsBackToNames(AttrReplacerPass):
"""id to name replacer"""
__slots__ = ()
DEPENDS = (RecursivePreLifter,)
SRC_ID_ATTR = "backRef"
TGT_ID_ATTR = "backRef"
def remapValue(self, iD):
if isinstance(iD, int):
iD = self.preLifter.groupsInvertedIdx.get(iD, iD)
if isinstance(iD, int):
iD = "cap_" + str(iD)
elif isinstance(iD, str):
pass
return iD
class AssignBackRefsNames(DecodeIdsBackToNames):
"""This pass
* assigns `name`s to backrefs in the case of named groups"""
__slots__ = ("shouldDelete",)
DEPENDS = (RecursivePreLifter,)
SRC_ID_ATTR = "backRef"
TGT_ID_ATTR = "backRef"
NODE_TYPES = frozenset({sc.GROUPREF, sc.GROUPREF_EXISTS})
def shouldProcess(self, node):
return node.type in self.__class__.NODE_TYPES
class AssignGroupsNamesAndCaptures(DecodeIdsBackToNames):
"""This pass
* assigns `name`s (used as ids in UG) to named capturing groups
* generates and assigns `name`s (used as ids in UG) to unnamed but indexed capturing groups
* separates capturing functionality from group functionality. So capturing groups are now 2 nodes, 1 for a group itself, another one for capturing, and uncaptured groups are only 1 node, for the group itself only."""
__slots__ = ()
DEPENDS = (RecursivePreLifter,)
SRC_ID_ATTR = "id"
TGT_ID_ATTR = "name"
def processNode(self, node):
res = None
node = super().processNode(node)
iD = node.name # it is set to anything only if `id` is not None, ...
if iD is not None: # ... , so it is a capturing group
res = RXASTNode(iD, CAPTURE_GROUP, {"children": [node]})
else:
res = node
return res
def shouldProcess(self, node):
return node.type == sc.SUBPATTERN and self.__class__.SRC_ID_ATTR in node.argsDic
class EliminateUnneededSingleItemGroups(RecursivePass):
"""The regexp engine tends to create groups even if there is a single item in it and it is non-capturing.
We replace the group node with its content in this case"""
__slots__ = ()
DEPENDS = (RecursivePreLifter, AssignGroupsNamesAndCaptures) # must be executed after AssignGroupsNamesAndCaptures, otherwise the info about captures and assigned ids may be lost
def processNode(self, node):
if len(node.children) == 1:
child = node.children[0]
if child.name is None:
child.name = node.name
return child
return node
def shouldProcess(self, node):
return node.type == sc.SUBPATTERN
class AssignLiteralsNames(RecursivePass):
__slots__ = ()
DEPENDS = (RecursivePreLifter, CombineLiterals) # though it doesn't require CombineLiterals be executed to run without an error, the results will be incorrect otherwise
def processNode(self, node):
node.name = "CHAR_" + unicodedata.name(node.literal).replace(" ", "_").upper()
return node
def shouldProcess(self, node):
return node.type == sc.LITERAL and node.name is None
class AssignKeywordsNames(RecursivePass):
__slots__ = ()
DEPENDS = (RecursivePreLifter, CombineLiterals)
def processNode(self, node):
node.name = "KW_" + node.literal
return node
def shouldProcess(self, node):
return node.type == LITERAL_STR and node.name is None
class PythonRXAST:
__slots__ = ("pattern", "root", "groupsInvertedIdx")
PASSES = [
RecursivePreLifter,
Lift_IN_args,
AssignGroupsNamesAndCaptures,
EliminateUnneededSingleItemGroups,
AssignBackRefsNames,
CombineLiterals,
AssignLiteralsNames,
AssignKeywordsNames,
]
def __init__(self, grammarText: str):
children = sp.parse(grammarText, sp.SRE_FLAG_VERBOSE)
self.__class__.pattern.__set__(self, children.pattern)
self.__class__.groupsInvertedIdx.__set__(self, {v: k for k, v in children.pattern.groupdict.items()})
root = RXASTNode(None, sc.SUBPATTERN, {"id": None, "children": list(children)})
for pasCtor in self.__class__.PASSES:
pas = pasCtor(self)
root = pas(root)
#print("Pass", pas.__class__.__name__, ": ", groups.Fore.lightgreenEx("OK"))
#pprint(root)
self.__class__.root.__set__(self, root)
print(root)
def __getattr__(self, k: str):
return getattr(self.pattern, k)
def __setattr__(self, k: str, v):
setattr(self.pattern, k, v)
def __repr__(self):
return repr(self.pattern)
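# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal example of driving the lifter, assuming the module's expectations
# about the shape of sre_parse output hold for the Python version in use:
def _example_lift_regex():
	"""Hedged example: lift a small regex into the pass-processed AST."""
	ast = PythonRXAST(r"(?P<word>[a-z]+)\s+(?P<num>[0-9]+)")
	# The processed root node and the underlying sre pattern stay accessible:
	return ast.root, ast.pattern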
| 27.217722 | 287 | 0.728583 | 10,307 | 0.958702 | 0 | 0 | 0 | 0 | 0 | 0 | 2,701 | 0.251232 |
474625e55d420bbe1645d7c53baf9bb6b0997f55 | 2,615 | py | Python | works/test/dssm/model.py | didazxc/autoflow | 6682102efae90b0af00ba920cd4fa2020694d5f2 | [
"MIT"
]
| null | null | null | works/test/dssm/model.py | didazxc/autoflow | 6682102efae90b0af00ba920cd4fa2020694d5f2 | [
"MIT"
]
| null | null | null | works/test/dssm/model.py | didazxc/autoflow | 6682102efae90b0af00ba920cd4fa2020694d5f2 | [
"MIT"
]
| null | null | null | import torch
from torch import nn
class UserModel(nn.Module):
def __init__(self, filter_sizes, userprofile_size, applist_size, output_size=32, char_embed_size=32,
chars_size=6000, out_channel_size=3):
super().__init__()
# embedding of lines and apps
self.embed = nn.Embedding(chars_size, char_embed_size)
# charCNN
self.convs = nn.ModuleList([
nn.Sequential(nn.Conv1d(char_embed_size, out_channel_size, filter_size))
for filter_size in filter_sizes
])
self.conv_out_size = out_channel_size * len(filter_sizes)
self.lines_output_layer = nn.Sequential(
nn.Dropout(0.5),
nn.Linear(self.conv_out_size, output_size),
nn.ReLU()
)
# combine
self.output_layer = nn.Sequential(
nn.Linear(output_size+applist_size+userprofile_size, output_size),
nn.ReLU(),
nn.Linear(output_size, output_size),
nn.ReLU()
)
def forward(self, lines, applist, userprofile):
# lines
lines_embed_out = self.embed(lines).permute(0, 2, 1)
conv_outs = [conv(lines_embed_out) for conv in self.convs]
pool_outs = [nn.functional.max_pool1d(out, out.shape[-1]) for out in conv_outs]
convs_out = torch.cat(pool_outs, dim=1).view(-1, self.conv_out_size)
lines_output = self.lines_output_layer(convs_out)
# combine
output = torch.cat([lines_output, applist, userprofile], dim=-1)
output = self.output_layer(output)
return output
class DS(nn.Module):
def __init__(self, userprofile_size=1384, applist_size=1000, embed_size=32,
vid_table_size=22261, aid_table_size=13727):
super().__init__()
self.user_embed = UserModel([3, 5, 10], userprofile_size, applist_size, output_size=embed_size)
self.vid_embed = nn.Embedding(vid_table_size, embed_size)
self.aid_embed = nn.Embedding(aid_table_size, embed_size)
self.video_output_layer = nn.Sequential(
nn.Linear(2*embed_size, embed_size),
nn.ReLU(),
nn.Linear(embed_size, embed_size),
nn.ReLU()
)
def forward(self, vid, aid, lines, applist, userprofile):
user = self.user_embed(lines, applist, userprofile)
video = self.video_output_layer(torch.cat([self.vid_embed(vid), self.aid_embed(aid)], dim=-1))
cosine = torch.cosine_similarity(user, video, dim=-1)
return cosine
@staticmethod
def get_cate(x):
return 1 if x >= 0.5 else 0
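# --- Usage sketch (illustrative; the tensor shapes below are assumptions
# inferred from the layer sizes above, they are not documented anywhere) ---
def _example_forward():
    """Hedged example: one forward pass of DS on random dummy inputs."""
    model = DS()
    batch = 4
    vid = torch.randint(0, 22261, (batch,))        # video ids
    aid = torch.randint(0, 13727, (batch,))        # author ids
    lines = torch.randint(0, 6000, (batch, 50))    # char ids, assumed sequence length 50
    applist = torch.rand(batch, 1000)              # app-list features
    userprofile = torch.rand(batch, 1384)          # user-profile features
    return model(vid, aid, lines, applist, userprofile)   # cosine scores, shape (batch,)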
| 38.455882 | 104 | 0.636711 | 2,574 | 0.984321 | 0 | 0 | 70 | 0.026769 | 0 | 0 | 63 | 0.024092 |
474730af1ea1af916a12ede7e8f6ca83e5e468a8 | 952 | py | Python | pastila/schema.py | harlov/pastila | 27ebe862bc25d5cc1a14766e5eec8f48853098c3 | [
"MIT"
]
| null | null | null | pastila/schema.py | harlov/pastila | 27ebe862bc25d5cc1a14766e5eec8f48853098c3 | [
"MIT"
]
| null | null | null | pastila/schema.py | harlov/pastila | 27ebe862bc25d5cc1a14766e5eec8f48853098c3 | [
"MIT"
]
| null | null | null | from pastila.fields import Field
class Schema(object):
data = None
def __init__(self):
self.data = {}
def load(self, data):
for field, value in data.items():
self.load_to_field(field, value)
self.validate()
def validate(self):
for name, field in self.fields.items():
field.validate(self.data[name], self)
def dump(self):
data = {}
for name, field in self.fields.items():
data[name] = field.dump()
return data
def __getattr__(self, item):
return self.data[item]
@property
def fields(self):
return {
name: field for name, field in
filter(lambda x: isinstance(x[1], Field), self.__class__.__dict__.items())
}
def load_to_field(self, field, value):
if field not in self.fields:
return None
self.data[field] = self.fields[field].load(value)
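# --- Usage sketch (illustrative). The concrete field classes shipped by
# pastila.fields are not visible from this file, so the example below only
# assumes that Field subclasses can be instantiated without arguments and
# implement load/validate/dump the way Schema uses them above:
#
#     class IntegerField(Field):
#         def load(self, value):
#             self.value = int(value)
#             return self.value
#         def validate(self, value, schema):
#             pass
#         def dump(self):
#             return getattr(self, 'value', None)
#
#     class PersonSchema(Schema):
#         age = IntegerField()
#
#     person = PersonSchema()
#     person.load({'age': '42'})
#     assert person.age == 42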
| 22.666667 | 86 | 0.570378 | 916 | 0.962185 | 0 | 0 | 188 | 0.197479 | 0 | 0 | 0 | 0 |
4747b2ce44f8edfeed9025dd5538e36cd606b1dc | 436 | py | Python | labs/lab10/demo/src/cpu_multiprocessing.py | Alvant/AdvancedPython2020 | c5a6439f100ff6a32168366f6a74e5ca0e79d0bc | [
"MIT"
]
| null | null | null | labs/lab10/demo/src/cpu_multiprocessing.py | Alvant/AdvancedPython2020 | c5a6439f100ff6a32168366f6a74e5ca0e79d0bc | [
"MIT"
]
| 7 | 2021-09-12T17:15:27.000Z | 2022-03-01T17:50:18.000Z | labs/lab10/demo/src/cpu_multiprocessing.py | Alvant/AdvancedPython | b036d124d0ccaa325b356db1a189e6ef10e1f21f | [
"MIT"
]
| 2 | 2021-09-26T10:00:25.000Z | 2021-11-07T19:01:33.000Z | import multiprocessing
import time
from typing import List
from constants import CPU_BIG_NUMBERS
from utils import show_execution_time
def cpu_bound(number: int) -> int:
return sum(i * i for i in range(number))
def find_sums(numbers: List[int]) -> None:
with multiprocessing.Pool() as pool:
pool.map(cpu_bound, numbers)
if __name__ == "__main__":
show_execution_time(func=lambda: find_sums(CPU_BIG_NUMBERS))
| 20.761905 | 64 | 0.743119 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.022936 |
4747e422b3230b40d458ca7afafc2b2ff6c690f8 | 304 | py | Python | whatsapp_tracker/bases/selenium_bases/base_selenium_keyboard.py | itay-bardugo/whatsapp_tracker | c53a309b08bf47597c8191ec0a155a1fe1536842 | [
"MIT"
]
| 1 | 2021-09-25T12:22:35.000Z | 2021-09-25T12:22:35.000Z | whatsapp_tracker/bases/selenium_bases/base_selenium_keyboard.py | itay-bardugo/whatsapp_tracker | c53a309b08bf47597c8191ec0a155a1fe1536842 | [
"MIT"
]
| null | null | null | whatsapp_tracker/bases/selenium_bases/base_selenium_keyboard.py | itay-bardugo/whatsapp_tracker | c53a309b08bf47597c8191ec0a155a1fe1536842 | [
"MIT"
]
| null | null | null | from abc import ABCMeta
from whatsapp_tracker.bases.selenium_bases.base_selenium_kit import BaseSeleniumKit
from whatsapp_tracker.mixins.seleniun_keyboard_press_mixin import SeleniumKeyBoardPressMixin
class BaseSeleniumKeyboard(BaseSeleniumKit, SeleniumKeyBoardPressMixin, metaclass=ABCMeta):
...
| 33.777778 | 92 | 0.875 | 99 | 0.325658 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
4749535b6dc42716b629b0a869d4e93214bb93a8 | 3,056 | py | Python | assignments/assignment2/model.py | RuslanOm/dlcourse_ai | f4c85497dc4affb942cacb363f17ce63b39c1bd7 | [
"MIT"
]
| null | null | null | assignments/assignment2/model.py | RuslanOm/dlcourse_ai | f4c85497dc4affb942cacb363f17ce63b39c1bd7 | [
"MIT"
]
| null | null | null | assignments/assignment2/model.py | RuslanOm/dlcourse_ai | f4c85497dc4affb942cacb363f17ce63b39c1bd7 | [
"MIT"
]
| null | null | null | import numpy as np
from layers import FullyConnectedLayer, ReLULayer, softmax_with_cross_entropy, l2_regularization
class TwoLayerNet:
""" Neural network with two fully connected layers """
def __init__(self, n_input, n_output, hidden_layer_size, reg):
"""
Initializes the neural network
Arguments:
n_input, int - dimension of the model input
n_output, int - number of classes to predict
hidden_layer_size, int - number of neurons in the hidden layer
reg, float - L2 regularization strength
"""
self.reg = reg
self.fc1 = FullyConnectedLayer(
n_input=n_input, n_output=hidden_layer_size)
self.fc2 = FullyConnectedLayer(
n_input=hidden_layer_size, n_output=n_output)
self.relu = ReLULayer()
def compute_loss_and_gradients(self, X, y):
"""
Computes total loss and updates parameter gradients
on a batch of training examples
Arguments:
X, np array (batch_size, input_features) - input data
y, np array of int (batch_size) - classes
"""
# Before running forward and backward pass through the model,
# clear parameter gradients aggregated from the previous pass
# TODO Set parameter gradient to zeros
# Hint: using self.params() might be useful!
for _, par in self.fc1.params().items():
par.grad *= 0
for _, par in self.fc2.params().items():
par.grad *= 0
# TODO Compute loss and fill param gradients
# by running forward and backward passes through the model
x1 = self.relu.forward(self.fc1.forward(X))
out = self.fc2.forward(x1)
loss_cre, dprediction = softmax_with_cross_entropy(out, y)
loss_l2_w1, dW1_l2 = l2_regularization(self.fc1.W.value, self.reg)
loss_l2_w2, dW2_l2 = l2_regularization(self.fc2.W.value, self.reg)
loss = loss_cre + loss_l2_w1 + loss_l2_w2
d_relu = self.fc2.backward(dprediction)
d_inp = self.relu.backward(d_relu)
_ = self.fc1.backward(d_inp)
self.fc1.W.grad += dW1_l2
self.fc2.W.grad += dW2_l2
# After that, implement l2 regularization on all params
# Hint: self.params() is useful again!
return loss
def predict(self, X):
"""
Produces classifier predictions on the set
Arguments:
X, np array (test_samples, num_features)
Returns:
y_pred, np.array of int (test_samples)
"""
# TODO: Implement predict
# Hint: some of the code of the compute_loss_and_gradients
# can be reused
# pred = np.zeros(X.shape[0], np.int)
x1 = self.relu.forward(self.fc1.forward(X))
out = self.fc2.forward(x1)
return np.argmax(out, axis=1)
def params(self):
result = {
'W1': self.fc1.W,
'W2': self.fc2.W,
'B1': self.fc1.B,
'B2': self.fc2.B,
}
return result
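# --- Usage sketch (illustrative; the Param objects returned by params() are
# assumed to expose .value and .grad, as they are used above) ---
def _example_sgd_step(X, y, learning_rate=1e-2):
    """Hedged example: one forward/backward pass plus a vanilla SGD update."""
    model = TwoLayerNet(n_input=X.shape[1], n_output=10,
                        hidden_layer_size=100, reg=1e-4)
    loss = model.compute_loss_and_gradients(X, y)
    for param in model.params().values():
        param.value -= learning_rate * param.grad
    return loss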
| 32.510638 | 96 | 0.612238 | 2,936 | 0.960733 | 0 | 0 | 0 | 0 | 0 | 0 | 1,353 | 0.442736 |
474a6941a53b9666a0f1df165d06680657ea0def | 3,066 | py | Python | ukol.py | tenhobi/NI-MPI | 5c6c4f5fd28487e807315ce6da33b81f0db4908d | [
"MIT"
]
| null | null | null | ukol.py | tenhobi/NI-MPI | 5c6c4f5fd28487e807315ce6da33b81f0db4908d | [
"MIT"
]
| null | null | null | ukol.py | tenhobi/NI-MPI | 5c6c4f5fd28487e807315ce6da33b81f0db4908d | [
"MIT"
]
| null | null | null | import numpy as np
class Solver:
def __init__(self, matrix, vector, initialVector, precision, gamma):
self.initialVector = initialVector
self.precision = precision
self.matrix = matrix
self.bVector = vector
self.gamma = gamma
# lower triangular part
self.l = np.tril(matrix, -1)
# upper triangular part
self.u = np.triu(matrix, 1)
# diagonal component
self.d = np.diag(np.diag(matrix))
# init Q - must be set by subclases
self.q = None
self.qinv = None
def solve(self):
"""Starts to compute iterations and then returns count of iterations and result."""
iterationCount = 0
x = None
if self.canConverge():
x = self.initialVector
while self.isNotPreciseEnough(x):
iterationCount = iterationCount + 1
x = self.doIteration(x)
return iterationCount, x
def canConverge(self):
"""Can converge if the value of spectral radius is less than 1."""
e = np.identity(self.matrix.shape[0], dtype = np.float64)
return self.getSpectralRadius(e - self.qinv @ self.matrix) < 1
def isNotPreciseEnough(self, iteration):
"""Chech whether precision is not already sufficient."""
return (np.linalg.norm(self.matrix @ iteration - self.bVector) / np.linalg.norm(self.bVector)) > self.precision
def doIteration(self, lastIteration):
"""Does next iteration."""
return self.qinv @ (self.q - self.matrix) @ lastIteration + self.qinv @ self.bVector
def getSpectralRadius(self, matrix):
"""Returns max absolute eigenvalue of matrix, aka spectral radius."""
return max(abs(np.linalg.eigvals(matrix)))
class JacobiSolver(Solver):
def __init__(self, matrix, vector, initialVector, precision, gamma):
super().__init__(matrix, vector, initialVector, precision, gamma)
self.q = self.d
self.qinv = np.linalg.inv(self.q)
class GaussSeidelSolver(Solver):
def __init__(self, matrix, vector, initialVector, precision, gamma, omega = 1):
super().__init__(matrix, vector, initialVector, precision, gamma)
self.omega = omega
self.q = (1 / omega) * self.d + self.l
self.qinv = np.linalg.inv(self.q)
### ----- config
# parameters
gamma = 3
omega = 1
precision = 10**-6
# matrix
matrix = np.zeros((20, 20), dtype = np.float64)
np.fill_diagonal(matrix, gamma)
np.fill_diagonal(matrix[:, 1:], -1) # upper part
np.fill_diagonal(matrix[1:, :], -1) # lower part
# vector b
bVector = np.full((20, 1), gamma - 2, dtype = np.float64)
bVector[0] = bVector[0] + 1
bVector[-1] = bVector[-1] + 1
# initial vector
initialVector = np.zeros(bVector.shape, dtype = np.float64)
### ----- solver
# use one of these:
#solver = JacobiSolver(matrix, bVector, initialVector, precision, gamma)
solver = GaussSeidelSolver(matrix, bVector, initialVector, precision, gamma, omega)
iteration_count, solution = solver.solve()
print('Iterations:', iteration_count)
print('Solution:', solution)
| 31.285714 | 119 | 0.628506 | 2,362 | 0.770385 | 0 | 0 | 0 | 0 | 0 | 0 | 594 | 0.193738 |
474bb5e6e62ca2caee71dee1fac0a250c57c5dda | 12,491 | py | Python | src/python/dxpy/utils/__init__.py | psung/dx-toolkit | f3a430c5e24184215eb4a9883a179edf07bfa08b | [
"Apache-2.0"
]
| null | null | null | src/python/dxpy/utils/__init__.py | psung/dx-toolkit | f3a430c5e24184215eb4a9883a179edf07bfa08b | [
"Apache-2.0"
]
| null | null | null | src/python/dxpy/utils/__init__.py | psung/dx-toolkit | f3a430c5e24184215eb4a9883a179edf07bfa08b | [
"Apache-2.0"
]
| null | null | null | # Copyright (C) 2013-2014 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities shared by dxpy modules.
"""
from __future__ import (print_function, unicode_literals)
import os, json, collections, concurrent.futures, traceback, sys, time, gc
import dateutil.parser
from .thread_pool import PrioritizingThreadPool
from .. import logger
from ..compat import basestring
def _force_quit(signum, frame):
# traceback.print_stack(frame)
os._exit(os.EX_IOERR)
# os.abort()
def get_futures_threadpool(max_workers):
#import signal
#if force_quit_on_sigint:
# signal.signal(signal.SIGINT, _force_quit)
#return concurrent.futures.ThreadPoolExecutor(max_workers=max_workers)
return PrioritizingThreadPool(max_workers=max_workers)
def wait_for_a_future(futures, print_traceback=False):
"""
Return the next future that completes. If a KeyboardInterrupt is
received, then the entire process is exited immediately. See
wait_for_all_futures for more notes.
"""
while True:
try:
future = next(concurrent.futures.as_completed(futures, timeout=10000000000))
break
except concurrent.futures.TimeoutError:
pass
except KeyboardInterrupt:
if print_traceback:
traceback.print_stack()
else:
print('')
os._exit(os.EX_IOERR)
return future
def wait_for_all_futures(futures, print_traceback=False):
"""
Wait indefinitely for all futures in the input iterable to complete.
Use a timeout to enable interrupt handling.
Call os._exit() in case of KeyboardInterrupt. Otherwise, the atexit registered handler in concurrent.futures.thread
will run, and issue blocking join() on all worker threads, requiring us to listen to events in worker threads
in order to enable timely exit in response to Ctrl-C.
Note: This still doesn't handle situations where Ctrl-C is pressed elsewhere in the code and there are worker
threads with long-running tasks.
Note: os._exit() doesn't work well with interactive mode (e.g. ipython). This may help:
import __main__ as main; if hasattr(main, '__file__'): os._exit() else: os.exit()
"""
try:
while True:
waited_futures = concurrent.futures.wait(futures, timeout=60)
if len(waited_futures.not_done) == 0:
break
except KeyboardInterrupt:
if print_traceback:
traceback.print_stack()
else:
print('')
os._exit(os.EX_IOERR)
def response_iterator(request_iterator, thread_pool, max_active_tasks=4, num_retries=0, retry_after=90, queue_id=''):
"""
:param request_iterator: This is expected to be an iterator producing inputs for consumption by the worker pool.
:type request_iterator: iterator of callable_, args, kwargs
:param thread_pool: thread pool to submit the requests to
:type thread_pool: PrioritizingThreadPool
:param max_active_tasks: The maximum number of tasks that may be either running or waiting for consumption of their result.
:type max_active_tasks: int
:param num_retries: The number of times to retry the request.
:type num_retries: int
:param retry_after: The number of seconds to wait before retrying the request.
:type retry_after: number
:param queue_id: hashable object to divide incoming requests into independent queues
:type queue_id: object
Rate-limited asynchronous multithreaded task runner.
Consumes tasks from *request_iterator*. Yields their results in order, while allowing up to *max_active_tasks* to run
simultaneously. Unlike concurrent.futures.Executor.map, prevents new tasks from starting while there are
*max_active_tasks* or more unconsumed results.
**Retry behavior**: If *num_retries* is positive, the task runner uses a simple heuristic to retry slow requests.
If there are 4 or more tasks in the queue, and all but the first one are done, the first task will be discarded
after *retry_after* seconds and resubmitted with the same parameters. This will be done up to *num_retries* times.
If retries are used, tasks should be idempotent.
"""
# Debug fallback
#for _callable, args, kwargs in request_iterator:
# yield _callable(*args, **kwargs)
#return
num_results_yielded = 0
next_request_index = 0
def make_priority_fn(request_index):
# The more pending requests are between the data that has been
# returned to the caller and this data, the less likely this
# data is to be needed soon. This results in a higher number
# here (and therefore a lower priority).
return lambda: request_index - num_results_yielded
def submit(callable_, args, kwargs, retries=num_retries):
"""
Submit the task.
Return (future, (callable_, args, kwargs), retries)
"""
future = thread_pool.submit_to_queue(queue_id, make_priority_fn(next_request_index), callable_, *args, **kwargs)
return (future, (callable_, args, kwargs), retries)
def resubmit(callable_, args, kwargs, retries):
"""
Submit the task.
Return (future, (callable_, args, kwargs), retries)
"""
logger.warn("{}: Retrying {} after timeout".format(__name__, callable_))
# TODO: resubmitted tasks should be prioritized higher
return submit(callable_, args, kwargs, retries=retries-1)
# Each item is (future, (callable_, args, kwargs), retries):
#
# future: Future for the task being performed
# callable_, args, kwargs: callable and args that were supplied
# retries: number of additional times they request may be retried
tasks_in_progress = collections.deque()
for _i in range(max_active_tasks):
try:
callable_, args, kwargs = next(request_iterator)
# print "Submitting (initial batch):", callable_, args, kwargs
tasks_in_progress.append(submit(callable_, args, kwargs))
next_request_index += 1
except StopIteration:
break
while len(tasks_in_progress) > 0:
future, callable_and_args, retries = tasks_in_progress.popleft()
try:
result = future.result(timeout=retry_after)
except concurrent.futures.TimeoutError:
# print "Timeout while waiting for", f, "which has", f.retries, "retries left"
if retries > 0 and len(tasks_in_progress) > 2 and all(f.done() for (f, _callable, _retries) in tasks_in_progress):
# The stale future will continue to run and will reduce the effective size of the pool by 1. If too many
# futures are retried, the pool will block until one of the stale futures quits.
# f.cancel() doesn't work because there's no way to interrupt a thread.
prev_callable, prev_args, prev_kwargs = callable_and_args
future, callable_and_args, retries = resubmit(prev_callable, prev_args, prev_kwargs, retries)
next_request_index += 1
tasks_in_progress.appendleft((future, callable_and_args, retries))
continue
except KeyboardInterrupt:
print('')
os._exit(os.EX_IOERR)
del future # Free the future we just consumed now, instead of next
# time around the loop
gc.collect()
try:
callable_, args, kwargs = next(request_iterator)
except StopIteration:
pass
else:
tasks_in_progress.append(submit(callable_, args, kwargs))
next_request_index += 1
yield result
del result
num_results_yielded += 1
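def _example_response_iterator():
    """Hedged usage sketch (not part of the original API docs): feed (callable, args, kwargs)
    tuples through response_iterator and collect the results in order."""
    def square(x):
        return x * x
    pool = get_futures_threadpool(max_workers=4)
    requests = ((square, (i,), {}) for i in range(10))
    return list(response_iterator(requests, pool, max_active_tasks=4))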
def string_buffer_length(buf):
orig_pos = buf.tell()
buf.seek(0, os.SEEK_END)
buf_len = buf.tell()
buf.seek(orig_pos)
return buf_len
def normalize_time_input(t, future=False):
"""
Converts inputs such as:
"2012-05-01"
"-5d"
1352863174
to milliseconds since epoch. See http://labix.org/python-dateutil and :meth:`normalize_timedelta`.
"""
error_msg = 'Error: Could not parse {t} as a timestamp or timedelta. Expected a date format or an integer with a single-letter suffix: s=seconds, m=minutes, h=hours, d=days, w=weeks, M=months, y=years, e.g. "-10d" indicates 10 days ago'
if isinstance(t, basestring):
try:
t = normalize_timedelta(t)
except ValueError:
try:
t = int(time.mktime(dateutil.parser.parse(t).timetuple())*1000)
except ValueError:
raise ValueError(error_msg.format(t=t))
now = int(time.time()*1000)
if t < 0 or (future and t < now):
t += now
return t
def normalize_timedelta(timedelta):
"""
Given a string like "1w" or "-5d", convert it to an integer in milliseconds.
Integers without a suffix are interpreted as seconds.
Note: not related to the datetime timedelta class.
"""
try:
return int(timedelta) * 1000
except ValueError as e:
t, suffix = timedelta[:-1], timedelta[-1:]
suffix_multipliers = {'s': 1000, 'm': 1000*60, 'h': 1000*60*60, 'd': 1000*60*60*24, 'w': 1000*60*60*24*7,
'M': 1000*60*60*24*30, 'y': 1000*60*60*24*365}
if suffix not in suffix_multipliers:
raise ValueError()
return int(t) * suffix_multipliers[suffix]
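# Examples (all values in milliseconds):
#   normalize_timedelta("10")  ->  10000        (plain integers are seconds)
#   normalize_timedelta("-5d") -> -432000000    (5 days ago)
#   normalize_timedelta("1w")  ->  604800000    (one week)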
# See http://stackoverflow.com/questions/4126348
class OrderedDefaultdict(collections.OrderedDict):
def __init__(self, *args, **kwargs):
newdefault = None
newargs = ()
if args:
newdefault = args[0]
if not (newdefault is None or callable(newdefault)):
raise TypeError('first argument must be callable or None')
newargs = args[1:]
self.default_factory = newdefault
super(self.__class__, self).__init__(*newargs, **kwargs)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self):
args = self.default_factory if self.default_factory else tuple()
return type(self), args, None, None, self.items()
def group_array_by_field(array, field='group'):
groups = OrderedDefaultdict(list)
for item in array:
if field not in item and None not in groups:
groups[None] = []
groups[item.get(field)].append(item)
return groups
def merge(d, u):
"""
Recursively updates a dictionary.
Example: merge({"a": {"b": 1, "c": 2}}, {"a": {"b": 3}}) = {"a": {"b": 3, "c": 2}}
"""
for k, v in u.items():
if isinstance(v, collections.Mapping):
r = merge(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
def _dict_raise_on_duplicates(ordered_pairs):
"""
Reject duplicate keys.
"""
d = {}
for k, v in ordered_pairs:
if k in d:
raise ValueError("duplicate key: %r" % (k,))
else:
d[k] = v
return d
def json_load_raise_on_duplicates(*args, **kwargs):
"""
Like json.load(), but raises an error on duplicate keys.
"""
kwargs['object_pairs_hook'] = _dict_raise_on_duplicates
return json.load(*args, **kwargs)
def json_loads_raise_on_duplicates(*args, **kwargs):
"""
Like json.loads(), but raises an error on duplicate keys.
"""
kwargs['object_pairs_hook'] = _dict_raise_on_duplicates
return json.loads(*args, **kwargs)
def warn(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
# Moved to the bottom due to circular imports
from .exec_utils import run, convert_handlers_to_dxlinks, parse_args_as_job_input, entry_point, DXJSONEncoder
| 39.15674 | 241 | 0.660716 | 802 | 0.064206 | 5,191 | 0.415579 | 0 | 0 | 0 | 0 | 5,868 | 0.469778 |
474cbd8308c2b1410045a858c934a70c927f171d | 2,946 | py | Python | mkserialisable.py | cedadev/ipython_project | fd9c85c20d3689435684cf4b681dfaab1c0a825b | [
"BSD-3-Clause-Clear"
]
| null | null | null | mkserialisable.py | cedadev/ipython_project | fd9c85c20d3689435684cf4b681dfaab1c0a825b | [
"BSD-3-Clause-Clear"
]
| null | null | null | mkserialisable.py | cedadev/ipython_project | fd9c85c20d3689435684cf4b681dfaab1c0a825b | [
"BSD-3-Clause-Clear"
]
| null | null | null | """Routines to make objects serialisable.
Each of the functions in this module makes a specific type of object
serialisable. In most cases, this module needs to be imported and the function
run in both the serialising and the unserialising environments.
Here's a summary (see function documentation for details):
mk_ellipsis: Ellipsis.
mk_slots: classes with __slots__ but not __dict__.
mk_netcdf: netCDF4.
mk_cf: cf.
"""
import copy_reg
_done = []
# ellipsis
def mk_ellipsis ():
"""Make the Ellipsis builtin serialisable."""
if 'ellipsis' in _done:
return
copy_reg.pickle(type(Ellipsis), lambda e: 'Ellipsis')
# slots
def _construct_slots (cls, attrs):
o = object.__new__(cls)
for k, v in attrs.iteritems():
setattr(o, k, v)
return o
def _reduce_slots (o):
attrs = dict((k, getattr(o, k)) for k in o.__slots__ if hasattr(o, k))
return _construct_slots, (type(o), attrs)
def mk_slots (*objs):
"""Make the classes that have __slots__ but not __dict__ serialisable.
Takes a number of types (new-style classes) to make serialisable.
"""
for cls in objs:
copy_reg.pickle(cls, _reduce_slots)
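# Usage sketch (illustrative, not part of the original module): a module-level
# class that defines __slots__ (and therefore has no __dict__) pickles once it
# is registered, e.g.
#
#     class Point(object):
#         __slots__ = ("x", "y")
#
#     mk_slots(Point)
#     p = Point(); p.x, p.y = 1, 2
#     import pickle
#     q = pickle.loads(pickle.dumps(p))   # q.x == 1, q.y == 2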
# netcdf
def mk_netcdf ():
"""Make objects in the netCDF4 module serialisable.
Depends on ncserialisable; see that module's documentation for details. This
replaces the netCDF4 module with ncserialisable directly through sys.modules;
to access netCDF4 directly, use ncserialisable.netCDF4.
Call this before importing any module that uses netCDF4.
"""
if 'netcdf' in _done:
return
import sys
from nc_ipython import ncserialisable
sys.modules['netCDF4'] = ncserialisable
# cf
def _construct_cf_units (attrs):
u = object.__new__(cf.Units)
for k, v in attrs.iteritems():
setattr(u, k, v)
if hasattr(u, 'units'):
u.units = u.units
return u
def _reduce_cf_units (u):
attrs = dict((k, getattr(u, k)) for k in u.__slots__ if hasattr(u, k))
return _construct_cf_units, (attrs,)
def mk_cf ():
"""Make objects in the cf module serialisable.
Calls mk_netcdf, and so depends on ncserialisable.
Call this before importing cf.
"""
if 'cf' in _done:
return
mk_netcdf()
global cf
import cf
mk_slots(
cf.data.ElementProperties,
cf.Data,
cf.data.SliceData,
#cf.Units,
cf.pp.Variable,
cf.pp.VariableCalc,
cf.pp.VariableCalcBounds,
cf.pp.VariableBounds,
#cf.org_field.SliceVariable,
#cf.org_field.SliceCoordinate,
#cf.org_field.SliceField,
#cf.org_field.SliceVariableList,
#cf.org_field.SliceFieldList,
#cf.org_field.Flags,
cf.field.SliceField,
cf.field.SliceFieldList,
cf.Flags,
cf.coordinate.SliceCoordinate,
cf.variable.SliceVariable,
cf.variable.SliceVariableList
)
copy_reg.pickle(cf.Units, _reduce_cf_units) | 23.95122 | 79 | 0.67685 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,326 | 0.450102 |
474d460f8c93e195c3909578035ee5726563b9b7 | 773 | py | Python | flox_aws/configure.py | getflox/flox-aws | 9642e291afa3ddedd9eed566b43a640e9bb0c537 | [
"MIT"
]
| null | null | null | flox_aws/configure.py | getflox/flox-aws | 9642e291afa3ddedd9eed566b43a640e9bb0c537 | [
"MIT"
]
| null | null | null | flox_aws/configure.py | getflox/flox-aws | 9642e291afa3ddedd9eed566b43a640e9bb0c537 | [
"MIT"
]
| null | null | null | from floxcore.config import Configuration, ParamDefinition
class AWSConfiguration(Configuration):
def parameters(self):
return (
ParamDefinition("region", "AWS Default region"),
ParamDefinition("role_arn", "Role (ARN) to be assumed", default="", filter_empty=False),
ParamDefinition("mfa_arn", "Multi factor authentication device ARN", default="", filter_empty=False),
ParamDefinition("kms", "Default KMS key to be used for encryption", default=""),
)
def secrets(self):
return (
ParamDefinition("key_id", "AWS Key Id", secret=True, default=""),
ParamDefinition("key_secret", "AWS Secret Key", secret=True, default=""),
)
def schema(self):
pass
| 36.809524 | 113 | 0.630013 | 711 | 0.919793 | 0 | 0 | 0 | 0 | 0 | 0 | 219 | 0.283312 |
474d5c6d6e9e1b1ea76c41c137dcb55b272f1e24 | 1,769 | py | Python | api_teacher.py | sabertooth9/KUET-Teachers-Web-Scrapper- | 0b1cea4f3e60f440b882bef971a942e42d0ab048 | [
"Apache-2.0"
]
| 1 | 2019-12-21T13:35:46.000Z | 2019-12-21T13:35:46.000Z | api_teacher.py | sabertooth9/KUET-Teachers-Web-Scrapper- | 0b1cea4f3e60f440b882bef971a942e42d0ab048 | [
"Apache-2.0"
]
| null | null | null | api_teacher.py | sabertooth9/KUET-Teachers-Web-Scrapper- | 0b1cea4f3e60f440b882bef971a942e42d0ab048 | [
"Apache-2.0"
]
| null | null | null | from flask import Flask
from flask_restful import Api, Resource, reqparse
from kuet_teacher_data import get_data
app = Flask(__name__)
api = Api(app)
data = get_data()
class Teacher_data(Resource):
def get(self,id="CSE"):
if(id=='ALL' or id=='all'):
return data, 200
for datac in data:
if(datac==id):
return data.get(datac),200
return "Not Found",404
def contains(j,id):
if(id in j.get("name").lower()):
return True
if(id in j.get("weblink").lower()):
return True
if(id in j.get("designation").lower()):
return True
if(id in j.get("image").lower()):
return True
if(id in j.get("phone").lower()):
return True
if(id in j.get("mail").lower()):
return True
return False
class search_dept_teacher(Resource):
def get(self,dept,id):
ans = []
id = id.lower()
for datac in data:
if(datac == dept or dept.lower()=='all'):
for j in data.get(datac):
if(contains(j,id)):
ans.append(j)
if(len(ans) > 0):
return ans, 200
return "Not Found", 404
class search_teacher(Resource):
def get(self, id):
ans = []
id = id.lower()
for datac in data:
for j in data.get(datac):
if(contains(j, id)):
ans.append(j)
if(len(ans) > 0):
return ans, 200
return "Not Found", 404
api.add_resource(Teacher_data,"/data","/data/","/data/<string:id>")
api.add_resource(search_dept_teacher,"/find/<string:dept>/<string:id>")
api.add_resource(search_teacher,"/find/<string:id>")
if __name__ == "__main__":
app.run()
| 26.402985 | 71 | 0.544375 | 963 | 0.544375 | 0 | 0 | 0 | 0 | 0 | 0 | 197 | 0.111362 |
474e6ccd3a09db8bd61fb6310dfa022bed136ad4 | 892 | py | Python | demo/singleperson.py | Neerajj9/Computer-Vision-based-Offside-Detection-in-soccer | 744bfc636463f24c4f78f25684864c2ce4abb43f | [
"MIT"
]
| 8 | 2020-10-17T14:54:53.000Z | 2022-02-09T11:03:01.000Z | demo/singleperson.py | Neerajj9/Computer-Vision-based-Offside-Detection-in-soccer | 744bfc636463f24c4f78f25684864c2ce4abb43f | [
"MIT"
]
| 4 | 2021-01-03T16:02:29.000Z | 2021-11-23T03:26:01.000Z | demo/singleperson.py | Neerajj9/Computer-Vision-based-Offside-Detection-in-soccer | 744bfc636463f24c4f78f25684864c2ce4abb43f | [
"MIT"
]
| 2 | 2021-04-10T07:05:55.000Z | 2021-09-19T23:22:18.000Z | import os
import sys
sys.path.append(os.path.dirname(__file__) + "/../")
from scipy.misc import imread
from util.config import load_config
from nnet import predict
from util import visualize
from dataset.pose_dataset import data_to_input
cfg = load_config("demo/pose_cfg.yaml")
# Load and setup CNN part detector
sess, inputs, outputs = predict.setup_pose_prediction(cfg)
# Read image from file
file_name = "demo/image.png"
image = imread(file_name, mode='RGB')
image_batch = data_to_input(image)
# Compute prediction with the CNN
outputs_np = sess.run(outputs, feed_dict={inputs: image_batch})
scmap, locref, _ = predict.extract_cnn_output(outputs_np, cfg)
# Extract maximum scoring location from the heatmap, assume 1 person
pose = predict.argmax_pose_predict(scmap, locref, cfg.stride)
# Visualise
visualize.show_heatmaps(cfg, image, scmap, pose)
visualize.waitforbuttonpress()
| 25.485714 | 68 | 0.784753 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 215 | 0.241031 |
474f1702de3c8a82c6ec6ce0f179522a071b3434 | 19,771 | py | Python | simulators/iam_module.py | jason-neal/companion_simulations | b5773e5539011d492b7128d0dd2778041ce50d52 | [
"MIT"
]
| 1 | 2018-09-04T19:06:44.000Z | 2018-09-04T19:06:44.000Z | simulators/iam_module.py | jason-neal/companion_simulations | b5773e5539011d492b7128d0dd2778041ce50d52 | [
"MIT"
]
| 85 | 2017-03-25T22:37:02.000Z | 2022-03-01T16:49:14.000Z | simulators/iam_module.py | jason-neal/companion_simulations | b5773e5539011d492b7128d0dd2778041ce50d52 | [
"MIT"
]
| 1 | 2017-08-18T10:56:39.000Z | 2017-08-18T10:56:39.000Z | import datetime
import logging
import os
import warnings
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from logutils import BraceMessage as __
from tqdm import tqdm
import simulators
from mingle.models.broadcasted_models import inherent_alpha_model
from mingle.utilities.chisqr import chi_squared
from mingle.utilities.norm import chi2_model_norms, continuum, arbitrary_rescale, arbitrary_minimums
from mingle.utilities.phoenix_utils import load_starfish_spectrum
from mingle.utilities.simulation_utilities import check_inputs, spec_max_delta
from simulators.common_setup import setup_dirs, sim_helper_function
from numpy import float64, ndarray
from spectrum_overload.spectrum import Spectrum
from typing import Dict, List, Optional, Tuple, Union
def iam_helper_function(star: str, obsnum: Union[int, str], chip: int, skip_params: bool = False) -> Tuple[
str, Dict[str, Union[str, float, List[Union[str, float]]]], str]:
"""Specifies parameter files and output directories given observation parameters."""
return sim_helper_function(star, obsnum, chip, skip_params=skip_params, mode="iam")
def setup_iam_dirs(star: str) -> None:
basedir = setup_dirs(star, mode="iam")
os.makedirs(os.path.join(basedir, "grid_plots"), exist_ok=True)
os.makedirs(os.path.join(basedir, "fudgeplots"), exist_ok=True)
return None
def iam_analysis(obs_spec, model1_pars, model2_pars, rvs=None, gammas=None,
verbose=False, norm=False, save_only=True, chip=None,
prefix=None, errors=None, area_scale=False, wav_scale=True, norm_method="scalar", fudge=None):
"""Run two component model over all model combinations."""
rvs = check_inputs(rvs)
gammas = check_inputs(gammas)
if isinstance(model1_pars, list):
logging.debug(__("Number of close model_pars returned {0}", len(model1_pars)))
if isinstance(model2_pars, list):
logging.debug(__("Number of close model_pars returned {0}", len(model2_pars)))
# Solution Grids to return
iam_grid_chisqr_vals = np.empty((len(model1_pars), len(model2_pars)))
args = [model2_pars, rvs, gammas, obs_spec]
kwargs = {"norm": norm, "save_only": save_only, "chip": chip,
"prefix": prefix, "verbose": verbose, "errors": errors,
"area_scale": area_scale, "wav_scale": wav_scale,
"norm_method": norm_method, "fudge": fudge,
}
for ii, params1 in enumerate(tqdm(model1_pars)):
iam_grid_chisqr_vals[ii] = iam_wrapper(ii, params1, *args, **kwargs)
if save_only:
return None
else:
return iam_grid_chisqr_vals # Just output the best value for each model pair
def continuum_alpha(model1: Spectrum, model2: Spectrum, chip: Optional[int] = None) -> float64:
"""Inherent flux ratio between the continuum of the two models.
Assumes already scaled by area.
Takes mean alpha of chip or full
"""
assert not np.any(np.isnan(model1.xaxis))
assert not np.any(np.isnan(model1.flux))
assert not np.any(np.isnan(model2.xaxis))
assert not np.any(np.isnan(model2.flux))
# Fit models with continuum
cont1 = continuum(model1.xaxis, model1.flux, method="exponential")
cont2 = continuum(model2.xaxis, model2.flux, method="exponential")
# Masking for individual chips
if chip is None:
chip = -1 # Full Crires range
all_limits = {-1: [2111, 2169], 1: [2111, 2124], 2: [2125, 2139], 3: [2140, 2152], 4: [2153, 2169]}
chip_limits = all_limits[chip]
mask1 = (model1.xaxis > chip_limits[0]) * (model1.xaxis < chip_limits[1])
mask2 = (model2.xaxis > chip_limits[0]) * (model2.xaxis < chip_limits[1])
continuum_ratio = cont2[mask2] / cont1[mask1]
alpha_ratio = np.nanmean(continuum_ratio)
return alpha_ratio
def iam_wrapper(num, params1, model2_pars, rvs, gammas, obs_spec, norm=False,
verbose=False, save_only=True, chip=None, prefix=None, errors=None,
area_scale=True, wav_scale=True, grid_slices=False, norm_method="scalar",
fudge=None):
"""Wrapper for iteration loop of iam. params1 fixed, model2_pars are many.
fudge is multiplicative on companion spectrum.
"""
if prefix is None:
sf = os.path.join(
simulators.paths["output_dir"], obs_spec.header["OBJECT"].upper(),
"iam_{0}_{1}-{2}_part{6}_host_pars_[{3}_{4}_{5}].csv".format(
obs_spec.header["OBJECT"].upper(), int(obs_spec.header["MJD-OBS"]), chip,
params1[0], params1[1], params1[2], num))
prefix = os.path.join(
simulators.paths["output_dir"], obs_spec.header["OBJECT"].upper()) # for fudge
else:
sf = "{0}_part{4}_host_pars_[{1}_{2}_{3}].csv".format(
prefix, params1[0], params1[1], params1[2], num)
save_filename = sf
if os.path.exists(save_filename) and save_only:
print("'{0}' exists, so not repeating calculation.".format(save_filename))
return None
else:
if not save_only:
iam_grid_chisqr_vals = np.empty(len(model2_pars))
for jj, params2 in enumerate(model2_pars):
if verbose:
print(("Starting iteration with parameters: "
"{0}={1},{2}={3}").format(num, params1, jj, params2))
# Main Part
rv_limits = observation_rv_limits(obs_spec, rvs, gammas)
obs_spec = obs_spec.remove_nans()
assert ~np.any(np.isnan(obs_spec.flux)), "Observation has nan"
# Load phoenix models and scale by area and wavelength limit
mod1_spec, mod2_spec = \
prepare_iam_model_spectra(params1, params2, limits=rv_limits,
area_scale=area_scale, wav_scale=wav_scale)
# Estimated flux ratio from models
inherent_alpha = continuum_alpha(mod1_spec, mod2_spec, chip)
# Combine model spectra with iam model
mod1_spec.plot(label=params1)
mod2_spec.plot(label=params2)
plt.close()
if fudge or (fudge is not None):
fudge_factor = float(fudge)
mod2_spec.flux *= fudge_factor # fudge factor multiplication
mod2_spec.plot(label="fudged {0}".format(params2))
plt.title("fudges models")
plt.legend()
fudge_prefix = os.path.basename(os.path.normpath(prefix))
fname = os.path.join(simulators.paths["output_dir"],
obs_spec.header["OBJECT"].upper(), "iam", "fudgeplots",
"{1}_fudged_model_spectra_factor={0}_num={2}_iter_{3}.png".format(fudge_factor,
fudge_prefix,
num, jj))
plt.savefig(fname)
plt.close()
warnings.warn("Using a fudge factor = {0}".format(fudge_factor))
iam_grid_func = inherent_alpha_model(mod1_spec.xaxis, mod1_spec.flux, mod2_spec.flux,
rvs=rvs, gammas=gammas)
iam_grid_models = iam_grid_func(obs_spec.xaxis)
# Continuum normalize all iam_gird_models
def axis_continuum(flux):
"""Continuum to apply along axis with predefined variables parameters."""
return continuum(obs_spec.xaxis, flux, splits=20, method="exponential", top=20)
iam_grid_continuum = np.apply_along_axis(axis_continuum, 0, iam_grid_models)
iam_grid_models = iam_grid_models / iam_grid_continuum
# RE-NORMALIZATION
if chip == 4:
# Quadratically renormalize anyway
obs_spec = renormalization(obs_spec, iam_grid_models, normalize=True, method="quadratic")
obs_flux = renormalization(obs_spec, iam_grid_models, normalize=norm, method=norm_method)
if grid_slices:
# Long execution plotting.
plot_iam_grid_slices(obs_spec.xaxis, rvs, gammas, iam_grid_models,
star=obs_spec.header["OBJECT"].upper(),
xlabel="wavelength", ylabel="rv", zlabel="gamma",
suffix="iam_grid_models", chip=chip)
old_shape = iam_grid_models.shape
# Arbitrary_normalization of observation
iam_grid_models, arb_norm = arbitrary_rescale(iam_grid_models,
*simulators.sim_grid["arb_norm"])
# print("Arbitrary Normalized iam_grid_model shape.", iam_grid_models.shape)
assert iam_grid_models.shape == (*old_shape, len(arb_norm))
# Calculate Chi-squared
obs_flux = np.expand_dims(obs_flux, -1) # expand on last axis to match rescale
iam_norm_grid_chisquare = chi_squared(obs_flux, iam_grid_models, error=errors)
# Take minimum chi-squared value along Arbitrary normalization axis
iam_grid_chisquare, arbitrary_norms = arbitrary_minimums(iam_norm_grid_chisquare, arb_norm)
npix = obs_flux.shape[0] # Number of pixels used
if grid_slices:
# Long execution plotting.
plot_iam_grid_slices(rvs, gammas, arb_norm, iam_norm_grid_chisquare,
star=obs_spec.header["OBJECT"].upper(),
xlabel="rv", ylabel="gamma", zlabel="Arbitrary Normalization",
suffix="iam_grid_chisquare", chip=chip)
if not save_only:
iam_grid_chisqr_vals[jj] = iam_grid_chisquare.ravel()[np.argmin(iam_grid_chisquare)]
save_full_iam_chisqr(save_filename, params1, params2,
inherent_alpha, rvs, gammas,
iam_grid_chisquare, arbitrary_norms, npix, verbose=verbose)
if save_only:
return None
else:
return iam_grid_chisqr_vals
def renormalization(spectrum: Union[ndarray, Spectrum], model_grid: ndarray, normalize: bool = False,
method: Optional[str] = "scalar") -> ndarray:
"""Re-normalize the flux of spectrum to the continuum of the model_grid.
Broadcast out spectrum to match the dimensions of model_grid.
Parameters
----------
spectrum: Spectrum
model_grid: np.ndarray
normalize: bool
method: str ("scalar", "linear")
Returns
-------
norm_flux: np.ndarray
"""
if normalize:
if method not in ["scalar", "linear"]:
raise ValueError("Renormalization method '{}' is not in ['scalar', 'linear']".format(method))
logging.info(__("{} Re-normalizing to observations!", method))
norm_flux = chi2_model_norms(spectrum.xaxis, spectrum.flux,
model_grid, method=method)
else:
warnings.warn("Not Scalar Re-normalizing to observations!")
norm_flux = spectrum.flux[:]
# Extend dimensions of norm_flux until they match the grid.
while norm_flux.ndim < model_grid.ndim:
norm_flux = norm_flux[:, np.newaxis]
    assert norm_flux.ndim == model_grid.ndim
return norm_flux
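# Illustrative usage sketch (not from the original pipeline; `spectrum` is assumed
# to expose `.xaxis` and `.flux`, `model_grid` is the stacked model array above):
def _renormalization_usage_sketch(spectrum, model_grid):
    # normalize=False takes the cheap path: the observed flux is only broadcast out
    # to the grid dimensions, no continuum matching is performed.
    flux = renormalization(spectrum, model_grid, normalize=False)
    assert flux.ndim == model_grid.ndim
    return flux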
def observation_rv_limits(obs_spec: Spectrum, rvs: Union[int, List[int]], gammas: Union[int, List[int]]) -> List[
float64]:
"""Calculate wavelength limits needed to cover RV shifts used."""
delta = spec_max_delta(obs_spec, rvs, gammas)
obs_min, obs_max = min(obs_spec.xaxis), max(obs_spec.xaxis)
return [obs_min - 1.1 * delta, obs_max + 1.1 * delta]
def prepare_iam_model_spectra(params1: Union[List[float], List[Union[int, float]]],
params2: Union[List[float], List[Union[int, float]], Tuple[int, float, float]],
limits: Union[List[float64], Tuple[int, int], List[int]], area_scale: bool = True,
wav_scale: bool = True) -> Tuple[Spectrum, Spectrum]:
"""Load spectra with same settings."""
if not area_scale:
warnings.warn("Not using area_scale. This is incorrect for paper.")
if not wav_scale:
warnings.warn("Not using wav_scale. This is incorrect for paper.")
mod1_spec = load_starfish_spectrum(params1, limits=limits,
hdr=True, normalize=False, area_scale=area_scale,
flux_rescale=True, wav_scale=wav_scale)
mod2_spec = load_starfish_spectrum(params2, limits=limits,
hdr=True, normalize=False, area_scale=area_scale,
flux_rescale=True, wav_scale=wav_scale)
assert len(mod1_spec.xaxis) > 0 and len(mod2_spec.xaxis) > 0
assert np.allclose(mod1_spec.xaxis, mod2_spec.xaxis)
# Check correct models are loaded
assert mod1_spec.header["PHXTEFF"] == params1[0]
assert mod1_spec.header["PHXLOGG"] == params1[1]
assert mod1_spec.header["PHXM_H"] == params1[2]
assert mod2_spec.header["PHXTEFF"] == params2[0]
assert mod2_spec.header["PHXLOGG"] == params2[1]
assert mod2_spec.header["PHXM_H"] == params2[2]
return mod1_spec, mod2_spec
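# Illustrative usage sketch (hypothetical parameter values and wavelength limits;
# the [teff, logg, fe/h] ordering follows the header assertions above):
def _prepare_iam_models_sketch():
    host_params = [5800, 4.5, 0.0]
    comp_params = [3500, 5.0, 0.0]
    limits = [2110, 2165]
    return prepare_iam_model_spectra(host_params, comp_params, limits,
                                     area_scale=True, wav_scale=True)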
def save_full_iam_chisqr(filename: str, params1: List[Union[int, float]], params2: List[Union[int, float]],
alpha: Union[int, float64], rvs: Union[ndarray, List[int]], gammas: Union[ndarray, List[int]],
iam_grid_chisquare: ndarray, arbitrary_norms: ndarray, npix: int,
verbose: bool = False) -> None:
"""Save the iterations chisqr values to a cvs."""
rv_grid, g_grid = np.meshgrid(rvs, gammas, indexing='ij')
# assert A.shape == rv_grid.shape
assert rv_grid.shape == g_grid.shape
assert g_grid.shape == iam_grid_chisquare.shape
data = {"rv": rv_grid.ravel(), "gamma": g_grid.ravel(),
"chi2": iam_grid_chisquare.ravel(), "arbnorm": arbitrary_norms.ravel()}
columns = ["rv", "gamma", "chi2", "arbnorm"]
len_c = len(columns)
df = pd.DataFrame(data=data, columns=columns)
# Update all rows with same value.
for par, value in zip(["teff_2", "logg_2", "feh_2"], params2):
df[par] = value
columns = ["teff_2", "logg_2", "feh_2"] + columns
if "[{0}_{1}_{2}]".format(params1[0], params1[1], params1[2]) not in filename:
# Need to add the model values.
for par, value in zip(["teff_1", "logg_1", "feh_1"], params1):
df[par] = value
columns = ["teff_1", "logg_1", "feh_1"] + columns
df["alpha"] = alpha
df["npix"] = npix
columns = columns[:-len_c] + ["alpha", "npix"] + columns[-len_c:]
df = df.round(decimals={"logg_2": 1, "feh_2": 1, "alpha": 4,
"rv": 3, "gamma": 3, "chi2": 4})
exists = os.path.exists(filename)
if exists:
df[columns].to_csv(filename, sep=',', mode="a", index=False, header=False)
else:
# Add header at the top only
df[columns].to_csv(filename, sep=',', mode="a", index=False, header=True)
if verbose:
print("Saved chi-squared values to {0}".format(filename))
return None
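# Resulting csv layout, following the column bookkeeping above (the host columns are
# only present when params1 is not already encoded in the filename):
#     teff_1, logg_1, feh_1, teff_2, logg_2, feh_2, alpha, npix, rv, gamma, chi2, arbnorm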
def plot_iam_grid_slices(x, y, z, grid, xlabel=None, ylabel=None, zlabel=None, suffix=None, star=None,
chip=None):
"""Slice up 3d grid and plot slices.
This is very slow!"""
os.makedirs(os.path.join(simulators.paths["output_dir"], star.upper(), "grid_plots"), exist_ok=True)
x_grid, y_grid, z_grid = np.meshgrid(x, y, z, indexing="ij")
if xlabel is None:
xlabel = "x"
if ylabel is None:
ylabel = "y"
if zlabel is None:
zlabel = "z"
if len(z) > 1:
for ii, y_val in enumerate(y):
plt.subplot(111)
try:
xii = x_grid[:, ii, :]
zii = z_grid[:, ii, :]
grid_ii = grid[:, ii, :]
plt.contourf(xii, zii, grid_ii)
except IndexError:
print("grid.shape", grid.shape)
print("shape of x, y, z", x.shape, y.shape, z.shape)
print("shape of x_grid, y_grid, z_grid", x_grid.shape, y_grid.shape, z_grid.shape)
print("index value", ii, "y_val ", y_val)
raise
plt.xlabel(xlabel)
plt.ylabel(zlabel)
plt.title("Grid slice for {0}={1}".format(ylabel, y_val))
plot_name = os.path.join(simulators.paths["output_dir"], star, "iam", "grid_plots",
"y_grid_slice_{0}_chip-{1}_{2}_{3}_{4}_{5}_{6}_{7}.png".format(star, chip, xlabel,
ylabel, zlabel, ii,
suffix,
datetime.datetime.now()))
plt.savefig(plot_name)
plt.close(plt.gcf())
for jj, z_val in enumerate(z):
plt.subplot(111)
try:
xjj = x_grid[:, :, jj]
yjj = y_grid[:, :, jj]
grid_jj = grid[:, :, jj]
plt.contourf(xjj, yjj, grid_jj)
except IndexError:
print("shape of x, y, z", x.shape, y.shape, z.shape)
print("shape of x_grid, y_grid, z_grid", x_grid.shape, y_grid.shape, z_grid.shape)
print("index value", jj, "y_val ", z_val)
raise
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title("Grid slice for {0}={1}".format(zlabel, z_val))
plot_name = os.path.join(simulators.paths["output_dir"], star, "iam", "grid_plots",
"z__grid_slice_{0}_chip-{1}_{2}_{3}_{4}_{5}_{6}_{7}.png".format(star, chip, xlabel,
ylabel, zlabel, jj,
suffix,
datetime.datetime.now()))
plt.savefig(plot_name)
plt.close(plt.gcf())
def target_params(params: Dict[str, Union[str, float, int]], mode: Optional[str] = "iam") -> Union[
Tuple[List[Union[int, float]], List[Union[int, float]]], List[Union[int, float]], Tuple[List[float], List[float]]]:
"""Extract parameters from dict for each target.
Includes logic for handling missing companion logg/fe_h.
"""
host_params = [params["temp"], params["logg"], params["fe_h"]]
# Specify the companion logg and metallicity in the parameter files.
if params.get("comp_logg", None) is None:
logging.warning(__("Logg for companion 'comp_logg' is not set for {0}", params.get("name", params)))
print("mode in target params", mode)
if mode == "iam":
comp_logg = params.get("comp_logg", params["logg"]) # Set equal to host if not given
comp_fe_h = params.get("comp_fe_h", params["fe_h"]) # Set equal to host if not given
comp_temp = params.get("comp_temp", 999999) # Will go to largest grid
comp_params = [comp_temp, comp_logg, comp_fe_h]
return host_params, comp_params
elif mode == "bhm":
return host_params
else:
raise ValueError("Mode={} is invalid".format(mode))
| 45.450575 | 125 | 0.589247 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,294 | 0.217187 |
4752834006b4c7e38581b00b04934cecb3b712df | 4,910 | py | Python | src/etl/transform.py | fmirani/etl_project | 969990143c3075f193565cec309a2f0333038a8b | [
"MIT"
]
| null | null | null | src/etl/transform.py | fmirani/etl_project | 969990143c3075f193565cec309a2f0333038a8b | [
"MIT"
]
| null | null | null | src/etl/transform.py | fmirani/etl_project | 969990143c3075f193565cec309a2f0333038a8b | [
"MIT"
]
| null | null | null | import pandas as pd
from datetime import datetime, timedelta
from bs4 import BeautifulSoup as bs
from etl.logger import get_logger
from etl.main import ETL
logger = get_logger("transform")
def transform_data(service: str, data_file: str) -> pd.DataFrame:
"""
Simple function to guide the request to the right function
"""
if service == "youtube":
return transform_youtube_data(data_file)
else:
return transform_netflix_data(data_file)
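# Illustrative usage sketch (hypothetical file paths): the dispatcher only routes on the
# service name, so any value other than "youtube" falls through to the Netflix parser.
def _transform_data_sketch():
    yt_df = transform_data("youtube", "data/watch-history.html")
    nf_df = transform_data("netflix", "data/NetflixViewingHistory.csv")
    return yt_df, nf_df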
def transform_youtube_data(filename: str) -> pd.DataFrame:
"""
Function to fetch youtube data from the history file
1. Create a new dataframe to put data in
2. parse the html file to find required data
3. Format the data as needed
4. Populate the dataframe
"""
logger.info("Transforming YouTube data now")
instance = ETL()
simulated = instance.get_sim_status()
simulate_offset = instance.get_simul_days()
data = pd.DataFrame(
columns=[
"Timestamp",
"Source",
"Type",
"Name",
"Season",
"Episode",
"Category",
"Link",
]
)
link = []
timestamp = []
# Open the watch history html file and parse through it for relevant data
with open(filename, encoding="utf8") as f:
soup = bs(f, "html.parser")
tags = soup.find_all(
"div",
{"class": "content-cell mdl-cell mdl-cell--6-col mdl-typography--body-1"},
)
for i, tag in enumerate(tags):
a_pointer = tag.find("a")
dt = a_pointer.next_sibling.next_sibling
date_time = datetime.strptime(str(dt)[:-4], "%b %d, %Y, %I:%M:%S %p")
# If data fetching is simulated
if (
simulated
and date_time + timedelta(days=simulate_offset) > datetime.now()
):
continue
timestamp.append(date_time)
link.append(a_pointer.text)
# Populate the dataframe with the data
data["Timestamp"] = timestamp
data["Source"] = "YouTube"
data["Type"] = "Video"
data["Link"] = link
# Log a warning if the DataFrame is being returned empty
if data.shape[0] < 1:
logger.warning(f"DataFrame does not contain any data")
# Return dataframe
return data
def transform_netflix_data(filename: str) -> pd.DataFrame:
"""
Function to fetch netflix data from the history file
1. Create a new dataframe to put data in
2. parse the csv file to find required data
3. Format the data as needed
4. Populate the dataframe
"""
logger.info("Transforming Netflix data now")
instance = ETL()
simulated = instance.get_sim_status()
simulate_offset = instance.get_simul_days()
data = pd.DataFrame(
columns=[
"Timestamp",
"Source",
"Type",
"Name",
"Season",
"Episode",
"Category",
"Link",
]
)
# Read csv data into a separate dataframe
try:
# Reading data from csv file
nf_data = pd.read_csv(filename)
except Exception as e:
logger.error(f"Unable to read csv file '{filename}' : ", e)
logger.warning(f"File does not contain valid data")
return data
# Import Timestamp column to our datadrame as datetime
# Set "Source" column to "Netflix"
# Import Name column to our dataframe
data["Timestamp"] = pd.to_datetime(nf_data["Date"], format="%m/%d/%y")
data["Source"] = "Netflix"
data["Name"] = nf_data["Title"]
# Keywords to identify if a title is a TV series
keywds = ["Season", "Series", "Limited", "Part", "Volume", "Chapter"]
# Set "Type" column to either "Movie" or "TV Series"
data.loc[data["Name"].str.contains("|".join(keywds)), "Type"] = "TV Series"
data.loc[data["Type"].isnull(), "Type"] = "Movie"
# Wherever Type is "TV Series" split the Title column
# in three: Name, Season and Episode
data.loc[data["Type"] == "TV Series", "Name"] = nf_data["Title"].str.rsplit(
":", n=2, expand=True
)[0]
data.loc[data["Type"] == "TV Series", "Season"] = nf_data["Title"].str.rsplit(
":", n=2, expand=True
)[1]
data.loc[data["Type"] == "TV Series", "Episode"] = nf_data["Title"].str.rsplit(
":", n=2, expand=True
)[2]
# Some cleaning needed in Episode column
data["Episode"] = data["Episode"].str.strip()
# If data fetching is simulated
if simulated:
data = data.loc[
pd.to_datetime(data["Timestamp"])
< datetime.now() - timedelta(days=simulate_offset)
]
# return DataFrame
return data
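# Worked example (illustrative title): "The Crown: Season 1: Episode 3" matches the
# "Season" keyword, so it is typed as a TV Series, and rsplit(":", n=2) yields
# Name="The Crown", Season=" Season 1" and Episode="Episode 3" (after the strip above).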
| 31.075949 | 87 | 0.57169 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,017 | 0.410794 |
47529769d5bba7253b76bc8e8bf54fbb83c5bfd5 | 646 | py | Python | projects/migrations/0003_auto_20210719_1152.py | Tajeu2001/awwards | 3a7b068f5d66336d4881a91b6a49338dc6f900d6 | [
"MIT"
]
| null | null | null | projects/migrations/0003_auto_20210719_1152.py | Tajeu2001/awwards | 3a7b068f5d66336d4881a91b6a49338dc6f900d6 | [
"MIT"
]
| null | null | null | projects/migrations/0003_auto_20210719_1152.py | Tajeu2001/awwards | 3a7b068f5d66336d4881a91b6a49338dc6f900d6 | [
"MIT"
]
| null | null | null | # Generated by Django 2.2.24 on 2021-07-19 11:52
import cloudinary.models
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('projects', '0002_project_technologies'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='profile_pic',
field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='image'),
),
migrations.AlterField(
model_name='project',
name='photo',
field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='image'),
),
]
| 25.84 | 90 | 0.623839 | 535 | 0.828173 | 0 | 0 | 0 | 0 | 0 | 0 | 137 | 0.212074 |
4752de7c01e8d42225f7e14fb6052b77754c4e72 | 3,567 | py | Python | student/urls.py | masoodazhar/-school-management-system | 6525b3d29d12f03e05d362d81b7c5855806f57d9 | [
"Apache-2.0"
]
| 1 | 2022-01-20T10:20:05.000Z | 2022-01-20T10:20:05.000Z | student/urls.py | masoodazhar/-school-management-system | 6525b3d29d12f03e05d362d81b7c5855806f57d9 | [
"Apache-2.0"
]
| null | null | null | student/urls.py | masoodazhar/-school-management-system | 6525b3d29d12f03e05d362d81b7c5855806f57d9 | [
"Apache-2.0"
]
| 1 | 2022-01-20T10:20:31.000Z | 2022-01-20T10:20:31.000Z |
from django.urls import path
from academic.views import SectionCreate, SectionUpdate, SectionDelete
from .views import (
StudentView,
AttendanceMark,
AttendanceSearch,
AttendanceView,
IndividualMarksView,
AdmissionCreate,
AdmissionView,
AdmissionDelete,
AdmissionUpdate,
AdmissionDetail,
StudentMarkSearch,
StudentMarkCreate,
MarkDistributionCreate,
MarkDistributionUpdate,
MarkDistributionDelete,
ExamsView,
ExamsDetail,
ExamsCreate,
ExamsUpdate,
ExamsDelete,
get_class_asignments,
SendEmail_SaveData,
SendEmailForExam,
get_fee,
get_subject_by_class,
get_already_marks,
getting_marks_from_calculated
)
app_name = 'student'
urlpatterns = [
path('', StudentView, name='student_view'),
path('admission/create/', AdmissionCreate.as_view(), name='admission_create'),
path('admission/view/', AdmissionView.as_view(), name='admission_view'),
path('admission/view/<int:pk>/detail', AdmissionDetail.as_view(), name='admission_detail'),
path('admission/view/<int:pk>/update', AdmissionUpdate.as_view(), name='admission_update'),
path('admission/view/<int:pk>/delete', AdmissionDelete.as_view(), name='admission_delete'),
path('createSection/', SectionCreate.as_view(), name='create_section'),
path('updateSection/<int:pk>', SectionUpdate.as_view(), name='update_section'),
path('deleteSection/<int:pk>/delete', SectionDelete.as_view(), name='delete_section'),
path('viewexams/view', ExamsView.as_view(), name='view_exams'),
path('createexams/', ExamsCreate.as_view(), name='create_exams'),
path('detailexams/<int:pk>/detail', ExamsDetail.as_view(), name='detail_exams'),
path('updateexams/<int:pk>/edit', ExamsUpdate.as_view(), name='update_exams'),
path('deleteexams/<int:pk>/delete', ExamsDelete.as_view(), name='delete_exams'),
path('SendEmail_SaveData/', SendEmail_SaveData, name='SendEmail_SaveData'),
path('sendemailforexam/', SendEmailForExam.as_view(), name='sendemailforexam'),
path('attendance/view', AttendanceView.as_view(), name='attendance_view'),
path('attendance/search', AttendanceSearch.as_view(), name='attendance_search'),
path('attendance/mark', AttendanceMark.as_view(), name='attendance_mark'),
path('student_mark/search', StudentMarkSearch.as_view(), name='student_mark'),
path('student_mark/add', StudentMarkCreate.as_view(), name='student_mark_add'),
path('mark_distribution/create', MarkDistributionCreate.as_view(), name='mark_distribution_create'),
path('mark_distribution/<int:pk>/update', MarkDistributionUpdate.as_view(), name='mark_distribution_update'),
path('mark_distribution/<int:pk>/delete', MarkDistributionDelete.as_view(), name='mark_distribution_delete'),
path('report/<int:student_name>/info', IndividualMarksView.as_view(), name='view_individual_marks'),
path('report/<int:student_name>/info?year&tab', IndividualMarksView.as_view(), name='view_individual_marks2'),
path('get_fee', get_fee, name="get_fee"),
path('get_subject_by_class/', get_subject_by_class , name="get_subject_by_class"),
path('get_already_marks/', get_already_marks , name="get_already_marks"),
path('get_class_asignments/<int:pk>/class/<int:class_name>/subject/<int:subject>', get_class_asignments , name="get_class_asignments"),
path('getting_marks_from_calculated/', getting_marks_from_calculated , name="getting_marks_from_calculated")
]
| 51.695652 | 140 | 0.724699 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,359 | 0.380992 |
475312b91a3851e28b34370218eea8c022b8aff7 | 5,598 | py | Python | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/settings/base.py | mistalaba/cookiecutter-simple-django | 979780b3dea3b0ab51e780cc7d98e9ba66004b09 | [
"BSD-3-Clause"
]
| null | null | null | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/settings/base.py | mistalaba/cookiecutter-simple-django | 979780b3dea3b0ab51e780cc7d98e9ba66004b09 | [
"BSD-3-Clause"
]
| null | null | null | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/settings/base.py | mistalaba/cookiecutter-simple-django | 979780b3dea3b0ab51e780cc7d98e9ba66004b09 | [
"BSD-3-Clause"
]
| 1 | 2021-10-21T01:45:32.000Z | 2021-10-21T01:45:32.000Z | import os
import sys
# PATH vars
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
ROOT_DIR = lambda *x: os.path.join(BASE_DIR, *x)
APPS_DIR = os.path.join(ROOT_DIR(), "apps")
sys.path.insert(0, APPS_DIR)
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'CHANGE THIS!!!'
ALLOWED_HOSTS = []
INSTALLED_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.humanize", # Handy template tags
{%- if cookiecutter.use_cms == 'django-cms' %}
"djangocms_admin_style",
"cms",
"menus",
"treebeard",
"sekizai",
{%- elif cookiecutter.use_cms == 'wagtail' %}
'wagtail.contrib.forms',
'wagtail.contrib.redirects',
'wagtail.embeds',
'wagtail.sites',
'wagtail.users',
'wagtail.snippets',
'wagtail.documents',
'wagtail.images',
'wagtail.search',
'wagtail.admin',
'wagtail.core',
'modelcluster',
'taggit',
{%- endif %}
"django.contrib.admin",
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
{%- if cookiecutter.use_cms == 'django-cms' %}
'cms.middleware.user.CurrentUserMiddleware',
'cms.middleware.page.CurrentPageMiddleware',
'cms.middleware.toolbar.ToolbarMiddleware',
'cms.middleware.language.LanguageCookieMiddleware',
'cms.middleware.utils.ApphookReloadMiddleware',
{%- elif cookiecutter.use_cms == 'wagtail' %}
'wagtail.core.middleware.SiteMiddleware',
'wagtail.contrib.redirects.middleware.RedirectMiddleware',
{%- endif %}
]
ROOT_URLCONF = '{{cookiecutter.project_slug}}.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = '{{cookiecutter.project_slug}}.wsgi.application'
LANGUAGE_CODE = 'en'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
SITE_ID = 1
STATIC_ROOT = str(ROOT_DIR("staticfiles"))
STATIC_URL = "/static/"
STATICFILES_DIRS = [str(ROOT_DIR("static"))]
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [
ROOT_DIR('templates'),
],
'OPTIONS': {
# 'debug': DEBUG,
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request',
{%- if cookiecutter.use_cms == 'django-cms' %}
'sekizai.context_processors.sekizai',
'cms.context_processors.cms_settings',
{%- elif cookiecutter.use_cms == 'wagtail' %}
{%- endif %}
],
},
}
]
PASSWORD_HASHERS = [
"django.contrib.auth.hashers.Argon2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher",
"django.contrib.auth.hashers.BCryptSHA256PasswordHasher",
]
AUTH_PASSWORD_VALIDATORS = [
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator'},
{'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator'},
{'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'},
]
{%- if cookiecutter.use_cms == 'django-cms' %}
LANGUAGES = [
('en', 'English'),
('dk', 'Danish'),
]
CMS_TEMPLATES = [
('home.html', 'Home page template'),
]
{%- endif %}
LOGGING = {
"version": 1,
"disable_existing_loggers": True,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
"loggers": {
"django.db.backends": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
},
# Errors logged by the SDK itself
"sentry_sdk": {"level": "ERROR", "handlers": ["console"], "propagate": False},
"django.security.DisallowedHost": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
},
},
}
{%- if cookiecutter.use_cms == 'wagtail' %}
WAGTAIL_SITE_NAME = '{{ cookiecutter.project_name }}'
{%- endif %}
# .local.py overrides all the common settings.
try:
from .local import * # noqa
except ImportError:
pass
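# A minimal .local override might, for example, set (illustrative values only;
# nothing here requires the file to exist thanks to the ImportError guard above):
#     DEBUG = True
#     ALLOWED_HOSTS = ["localhost"]
#     DATABASES = {"default": {...}}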
| 29.463158 | 89 | 0.642551 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,387 | 0.605038 |
47534f46ad8c13b23baa8376ad80237dfc62bd3c | 827 | py | Python | wenben_project/reco_sys/server/__init__.py | nameli0722/git- | 29343ef0eec598aa262c59825d567044ef393f44 | [
"MIT"
]
| null | null | null | wenben_project/reco_sys/server/__init__.py | nameli0722/git- | 29343ef0eec598aa262c59825d567044ef393f44 | [
"MIT"
]
| null | null | null | wenben_project/reco_sys/server/__init__.py | nameli0722/git- | 29343ef0eec598aa262c59825d567044ef393f44 | [
"MIT"
]
| null | null | null | import happybase
from settings.default import DefaultConfig
import redis
pool = happybase.ConnectionPool(size=10, host='hadoop-master', port=9090)
# Recall data
redis_client = redis.StrictRedis(host=DefaultConfig.REDIS_HOST,
port=DefaultConfig.REDIS_PORT,
db=10,
decode_responses=True)
# Redis database used for caching
cache_client = redis.StrictRedis(host=DefaultConfig.REDIS_HOST,
port=DefaultConfig.REDIS_PORT,
db=8,
decode_responses=True)
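# Usage sketch (hypothetical table/key names): the module-level pool and redis clients
# are shared singletons for the rest of the recommendation service, e.g.
#     with pool.connection() as conn:
#         history_table = conn.table("history_recall")
#     recalled = redis_client.zrevrange("recall:user:1", 0, 9)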
from pyspark import SparkConf
from pyspark.sql import SparkSession
# Spark configuration
conf = SparkConf()
conf.setAll(DefaultConfig.SPARK_GRPC_CONFIG)
SORT_SPARK = SparkSession.builder.config(conf=conf).getOrCreate() | 34.458333 | 73 | 0.629988 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 73 | 0.08538 |
475531486c43c1c7dffcddd44fa86cc2df76cfd3 | 783 | py | Python | alimama/alimama/items.py | Yoochao/tbk | b3a6aff905cf9d7dbe161813e1bbe968d86b5320 | [
"Apache-2.0"
]
| null | null | null | alimama/alimama/items.py | Yoochao/tbk | b3a6aff905cf9d7dbe161813e1bbe968d86b5320 | [
"Apache-2.0"
]
| null | null | null | alimama/alimama/items.py | Yoochao/tbk | b3a6aff905cf9d7dbe161813e1bbe968d86b5320 | [
"Apache-2.0"
]
| null | null | null | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class AlimamaItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
url = scrapy.Field()
img = scrapy.Field()
title = scrapy.Field()
coupon = scrapy.Field()
price = scrapy.Field()
monthly_sales = scrapy.Field()
commission_rate = scrapy.Field()
commission = scrapy.Field()
shop_prom_url = scrapy.Field()
prom_short_url = scrapy.Field()
prom_short_ull_c = scrapy.Field()
prom_long_url = scrapy.Field()
prom_long_url_c = scrapy.Field()
prom_taotoken_url = scrapy.Field()
prom_taotoken_url_c = scrapy.Field()
pass
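# Usage sketch (hypothetical CSS selectors): items are filled field-by-field inside a
# spider callback and yielded to the pipelines, e.g.
#     item = AlimamaItem()
#     item["title"] = response.css("span.title::text").get()
#     item["coupon"] = response.css("span.coupon::text").get()
#     yield item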
| 26.1 | 51 | 0.67433 | 614 | 0.784163 | 0 | 0 | 0 | 0 | 0 | 0 | 212 | 0.270754 |
47567885350f4cfe55660b2d4ce384bb43abd614 | 2,516 | py | Python | Programming_Assignment_5/proj5/oracle.py | csMOOC/Maryland.Cryptography | 58b6111a918599772e5a476764bb3b9897b61283 | [
"MIT"
]
| 1 | 2017-11-18T15:34:59.000Z | 2017-11-18T15:34:59.000Z | Programming_Assignment_5/proj5/oracle.py | csMOOC/Maryland.Cryptography | 58b6111a918599772e5a476764bb3b9897b61283 | [
"MIT"
]
| null | null | null | Programming_Assignment_5/proj5/oracle.py | csMOOC/Maryland.Cryptography | 58b6111a918599772e5a476764bb3b9897b61283 | [
"MIT"
]
| null | null | null | sign_sock = None
vrfy_sock = None
MAX_PACKET_LEN = 8192
NOT_BINARY_STR_ERR = -1
MISSING_DELIMITER_ERR = -2
ORIGINAL_MSG_ERR = -3
def Oracle_Connect():
import socket
global sign_sock
global vrfy_sock
sign_sock, vrfy_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM), socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sign_sock.connect(('54.165.60.84', 8080))
vrfy_sock.connect(('54.165.60.84', 8081))
except socket.error as e:
print e
return -1
print "Connected to server successfully."
return 0
def Oracle_Disconnect():
if not sign_sock or not vrfy_sock:
print "[WARNING]: You haven't connected to the server yet."
return -1
sign_sock.close()
vrfy_sock.close()
print "Connection closed successfully."
return 0
# Packet Structure: < message >
# Message may be either a long integer, or a binary string
def Sign(msg):
if not sign_sock or not vrfy_sock:
print "[WARNING]: You haven't connected to the server yet."
return -1
    if msg < 0:
        print "[ERROR]: Message cannot be negative!"
        return -1
if type(msg) is long or type(msg) is int:
msg = bin(msg)[2:]
pkt = msg + "X"
sign_sock.send(pkt)
resp = sign_sock.recv(MAX_PACKET_LEN)
try:
sigma = int(resp, 2)
except ValueError as e:
sigma = int(resp)
if sigma == NOT_BINARY_STR_ERR:
print "[ERROR]: Message was not a valid binary string."
if sigma == ORIGINAL_MSG_ERR:
print "[ERROR]: You cannot request a signature on the original messgae!"
return sigma
# Packet Structure: < message | ":" | signature >
# Message and signature may be either long integers, or binary strings
def Verify(msg, sigma):
if not sign_sock or not vrfy_sock:
print "[WARNING]: You haven't conected to the server yet."
return -1
if msg < 0 or sigma < 0:
print "[ERROR]: Message and signature cannot be negative!"
return -1
if type(msg) is long or type(msg) is int:
msg = bin(msg)[2:]
if type(sigma) is long or type(sigma) is int:
sigma = bin(sigma)[2:]
pkt = msg + ":" + sigma + "X"
vrfy_sock.send(pkt)
match = int(vrfy_sock.recv(MAX_PACKET_LEN))
if match == NOT_BINARY_STR_ERR:
print "[ERROR]: Message and/or signature were not valid binary strings."
elif match == MISSING_DELIMITER_ERR:
print "[ERROR]: Missing delimiter between message and signature."
return match
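# Typical session with the oracle (illustrative values; the verdict convention of
# Verify() is assumed to be 1 on success):
#     Oracle_Connect()
#     sigma = Sign(0b1011)          # signature on the bit string "1011"
#     ok = Verify(0b1011, sigma)    # server's verdict on (message, signature)
#     Oracle_Disconnect()
# Sign() refuses the original challenge message (ORIGINAL_MSG_ERR above), so signatures
# must be requested on other messages.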
| 27.955556 | 127 | 0.647059 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 801 | 0.318362 |
47595ec1c220dcb1c916744d399484c33fcb99c6 | 5,932 | py | Python | medical_test/models.py | AlexanderUstinovv/medical_test | b740fb599aa4097eb44d35a844fef838c4ccf6ad | [
"BSD-3-Clause"
]
| null | null | null | medical_test/models.py | AlexanderUstinovv/medical_test | b740fb599aa4097eb44d35a844fef838c4ccf6ad | [
"BSD-3-Clause"
]
| null | null | null | medical_test/models.py | AlexanderUstinovv/medical_test | b740fb599aa4097eb44d35a844fef838c4ccf6ad | [
"BSD-3-Clause"
]
| null | null | null | from django.contrib.auth.models import User
from django.db import models
class Measurement(models.Model):
class Meta:
db_table = 'measurement'
verbose_name = 'Измерение'
verbose_name_plural = 'Измерения'
name = models.CharField(max_length=10, verbose_name='Единицы измерения')
description = models.TextField(verbose_name='Описание')
def __str__(self):
return self.name
class Age(models.Model):
class Meta:
db_table = 'age'
verbose_name = 'Возраст'
verbose_name_plural = 'Возрасты'
value = models.IntegerField(default=21, verbose_name='Возраст')
def __str__(self):
return str(self.value)
class MedicalProcedure(models.Model):
class Meta:
db_table = 'medical_procedure'
verbose_name = 'Медицинская процедура'
verbose_name_plural = 'Медицинские процедуры'
# TODO: find the max length of this field
name = models.CharField(max_length=100, verbose_name='Название')
description = models.TextField(verbose_name='Описание')
female = models.BooleanField(verbose_name='Для женщин', default=True)
male = models.BooleanField(verbose_name='Для мужчин', default=True)
age = models.ManyToManyField(Age, verbose_name='Возраст')
def __str__(self):
return self.name
class Parameter(models.Model):
class Meta:
db_table = 'value'
verbose_name = 'Параметр'
verbose_name_plural = 'Параметры'
name = models.CharField(max_length=100, verbose_name='Название')
description = models.TextField(verbose_name='Описание')
measurement = models.ForeignKey(Measurement,
verbose_name='Единицы измерения',
on_delete=models.CASCADE)
medical_procedure = models.ForeignKey(MedicalProcedure,
verbose_name='Мед. процедура',
on_delete=models.CASCADE)
female_maximum_border = models.DecimalField(max_digits=7, decimal_places=2,
verbose_name='Верхняя граница женщины', default=0)
female_minimum_border = models.DecimalField(max_digits=7, decimal_places=2,
verbose_name='Нижняя граница женщины', default=0)
male_maximum_border = models.DecimalField(max_digits=7, decimal_places=2,
verbose_name='Врехняя граница мужчины', default=0)
male_minimum_border = models.DecimalField(max_digits=7, decimal_places=2,
verbose_name='Нижняя граница мужчины', default=0)
def __str__(self):
return self.name
class MedicalProcedureResult(models.Model):
class Meta:
db_table = 'medical_procedure_result'
verbose_name = 'Результат мед. процедуры'
verbose_name_plural = 'Результаты мед. процедур'
medical_procedure = models.ForeignKey(MedicalProcedure,
verbose_name='Процедура',
on_delete=models.CASCADE)
value = models.IntegerField(verbose_name='Значение')
user = models.ForeignKey(User,
verbose_name='Пользователь',
on_delete=models.CASCADE)
result = models.BooleanField(default=False, verbose_name='Значение в норме')
def __str__(self):
return self.medical_procedure.name
class ParameterValue(models.Model):
class Meta:
db_table = 'parameter_value'
verbose_name = 'Результат параметра'
verbose_name_plural = 'Результаты параметров'
medical_procedure_result = models.ForeignKey(MedicalProcedureResult,
verbose_name='Результат процедуры',
on_delete=models.CASCADE)
value = models.DecimalField(max_digits=6, decimal_places=2,
verbose_name='Значение', default=0)
parameter = models.ForeignKey(Parameter,
verbose_name='Название параметра',
on_delete=models.CASCADE)
def __str__(self):
return self.parameter.name
class CardiovascularScore(models.Model):
class Meta:
db_table = 'cardiovascular_score'
verbose_name = 'Шкала Score сердечно-сосудистого риска'
verbose_name_plural = 'Шкалы Score сердечно-сосудистого риска'
MALE = 'M'
FEMALE = 'F'
SEX_TYPES = (
(MALE, 'Муж.'),
(FEMALE, 'Жен.'),
)
sex = models.CharField(max_length=6, choices=SEX_TYPES, verbose_name='Пол')
smoking = models.BooleanField(default=False, verbose_name='Курящий')
cholesterol_min = models.DecimalField(max_digits=6, decimal_places=2,
verbose_name='Уровень холестирина нижняя граница',
default=0)
cholesterol_max = models.DecimalField(max_digits=6, decimal_places=2,
verbose_name='Уровень холестирина верхняя граница',
default=0)
systolic_pressure_max = models.DecimalField(max_digits=6, decimal_places=2,
verbose_name='Систолическое давление верхняя граница',
default=0)
systolic_pressure_min = models.DecimalField(max_digits=6, decimal_places=2,
verbose_name='Систолическое давление нижняя граница',
default=0)
age_min = models.IntegerField(verbose_name='Возраст нижняя граница')
age_max = models.IntegerField(verbose_name='Возраст верхняя граница')
result = models.IntegerField(verbose_name='Процент риска заболевания')
| 41.774648 | 102 | 0.607552 | 6,562 | 0.985877 | 0 | 0 | 0 | 0 | 0 | 0 | 1,766 | 0.265325 |
475a825b4f6b8ec63bbe41abb911aed22f74dd8a | 11,153 | py | Python | idarest/idarest_master.py | sfinktah/idarest75 | ab2549b12e174aaef32ab6c933fe09b1232a8cce | [
"MIT"
]
| null | null | null | idarest/idarest_master.py | sfinktah/idarest75 | ab2549b12e174aaef32ab6c933fe09b1232a8cce | [
"MIT"
]
| null | null | null | idarest/idarest_master.py | sfinktah/idarest75 | ab2549b12e174aaef32ab6c933fe09b1232a8cce | [
"MIT"
]
| null | null | null | import socket
try:
from .idarest_mixins import IdaRestConfiguration
except:
from idarest_mixins import IdaRestConfiguration
# idarest_master_plugin_t.config['master_debug'] = False
# idarest_master_plugin_t.config['master_info'] = False
# idarest_master_plugin_t.config['api_prefix'] = '/ida/api/v1.0'
# idarest_master_plugin_t.config['master_host'] = "127.0.0.1"
# idarest_master_plugin_t.config['master_port'] = 28612 # hash('idarest75') & 0xffff
MENU_PATH = 'Edit/Other'
try:
import idc
import ida_idaapi
import ida_kernwin
import idaapi
import idautils
from PyQt5 import QtWidgets
except:
class idc:
@staticmethod
def msg(s):
if idarest_master_plugin_t.config['master_debug']: print(s)
class ida_idaapi:
plugin_t = object
PLUGIN_SKIP = PLUGIN_UNL = PLUGIN_KEEP = 0
class idarest_master_plugin_t(IdaRestConfiguration, ida_idaapi.plugin_t):
flags = ida_idaapi.PLUGIN_UNL
comment = "IDA Rest API Master Controller"
help = "Keeps track of idarest75 clients"
wanted_name = "idarest75 master"
wanted_hotkey = ""
def init(self):
super(idarest_master_plugin_t, self).__init__()
self.load_configuration()
if idarest_master_plugin_t.config['master_info']: print("[idarest_master_plugin_t::init]")
self.master = None
if not idarest_master_plugin_t.test_bind_port(idarest_master_plugin_t.config['master_port']):
if idarest_master_plugin_t.config['master_info']: print("[idarest_master_plugin_t::init] skipping (port is already bound)")
return idaapi.PLUGIN_SKIP
self.master = idarest_master()
idarest_master_plugin_t.instance = self
return idaapi.PLUGIN_KEEP
def run(*args):
pass
def term(self):
if self.master:
self.master.stop()
pass
@staticmethod
def test_bind_port(port):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
try:
# s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((idarest_master_plugin_t.config['master_host'], port))
except socket.error as e:
return False
return True
def idarest_master():
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
import threading
import urllib.request, urllib.error, urllib.parse as urlparse
import requests
import json
import time
import re
def asBytes(s):
if isinstance(s, str):
return s.encode('utf-8')
return s
class HTTPRequestError(BaseException):
def __init__(self, msg, code):
self.msg = msg
self.code = code
class Handler(BaseHTTPRequestHandler):
hosts = dict()
def log_message(self, format, *args):
return
def register(self, args):
host, port = args['host'], args['port']
key = host + ':' + port
if key in self.hosts:
if idarest_master_plugin_t.config['master_debug']: print("[idarest_master::Handler::register] replacing existing host {}".format(key))
self.hosts[key] = value = dict({
'host': args['host'],
'port': args['port'],
'idb': args['idb'],
'alive': time.time(),
'failed': 0,
})
return value
def unregister(self, args):
host, port = args['host'], args['port']
key = host + ':' + port
if key in self.hosts:
if idarest_master_plugin_t.config['master_debug']: print("[idarest_master::Handler::unregister] removing existing host {}".format(key))
value = self.hosts.pop(key)
else:
value = dict({
'host': args['host'],
'port': args['port'],
'error': 'not registered',
})
return value
@staticmethod
def get_json(hosts, args, readonly=False):
# r = requests.post(self.url, data=self.args)
results = dict()
start = time.time()
if readonly:
for k, host in hosts.items():
if idarest_master_plugin_t.config['master_debug']: print("alive: {}".format(start - host['alive']))
if start - host['alive'] < 90:
results[host['idb']] = 'http://{}:{}{}/'.format(host['host'], host['port'], idarest_master_plugin_t.config['api_prefix'])
else:
results[host['idb']] = start - host['alive']
return results
for k, host in hosts.items():
start = time.time()
url = 'http://{}:{}{}/echo'.format(host['host'], host['port'], idarest_master_plugin_t.config['api_prefix'])
try:
connect_timeout = 10
read_timeout = 10
r = requests.get(url, params=args, timeout=(connect_timeout, read_timeout))
if r.status_code == 200:
hosts[k]['alive'] = start
hosts[k]['rtime'] = r.elapsed.total_seconds()
# hosts[k]['info'] = r.json()
results[k] = host
except Exception as e:
results[k] = str(type(e))
hosts[k]['failed'] += 1
if hosts[k]['failed'] > 4:
hosts.pop(k)
return results
def show(self, args):
return self.get_json(self.hosts, {'ping': time.time()}, readonly=True)
def _extract_query_map(self):
query = urlparse.urlparse(self.path).query
qd = urlparse.parse_qs(query)
args = {}
for k, v in qd.items():
if len(v) != 1:
raise HTTPRequestError(
"Query param specified multiple times : " + k,
400)
args[k.lower()] = v[0]
if idarest_master_plugin_t.config['master_debug']: print('args["{}"]: "{}"'.format(k.lower(), v[0]))
return args
def send_origin_headers(self):
if self.headers.get('Origin', '') == 'null':
self.send_header('Access-Control-Allow-Origin', self.headers.get('Origin'))
self.send_header('Vary', 'Origin')
def do_GET(self):
try:
args = self._extract_query_map()
except HTTPRequestError as e:
self.send_error(e.code, e.msg)
return
path = re.sub(r'.*/', '', urlparse.urlparse(self.path).path)
if path == 'register':
message = self.register(args)
elif path == 'unregister':
message = self.unregister(args)
elif path == 'show':
message = self.show(args)
else:
self.send_error(400, "unknown route: " + path)
return
self.send_response(200)
self.send_origin_headers()
self.end_headers()
self.wfile.write(asBytes(json.dumps(message)))
return
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
allow_reuse_address = True
# https://stackoverflow.com/questions/323972/is-there-any-way-to-kill-a-thread
class Timer(threading.Thread):
def __init__(self, *args, **kwargs):
super(Timer, self).__init__(*args, **kwargs)
self._stop_event = threading.Event()
def run(self):
if idarest_master_plugin_t.config['master_info']: print("[idarest_master::Timer::run] started")
while True:
if self._stop_event.wait(60.0):
break
result = Handler.get_json(Handler.hosts, {'ping': time.time()})
if idarest_master_plugin_t.config['master_debug']: print("[idarest_master::Timer::run] {}".format(result))
if idarest_master_plugin_t.config['master_info']: print("[idarest_master::Timer::run] stopped")
# if not self.running:
# self.running = True
# while self.running:
# time.sleep(60.0 - ((time.time() - self.starttime) % 60.0))
# if idarest_master_plugin_t.config['master_debug']: print(Handler.get_json(Handler.hosts, {'ping': time.time()}))
# if idarest_master_plugin_t.config['master_info']: print("[idarest_master::Timer::run] stopped")
def stop(self):
if self.is_alive():
if self.stopped():
if idarest_master_plugin_t.config['master_info']: print("[idarest_master::Timer::stop] already stopping...")
else:
if idarest_master_plugin_t.config['master_info']: print("[idarest_master::Timer::stop] stopping...")
self._stop_event.set()
else:
if idarest_master_plugin_t.config['master_info']: print("[idarest_master::Timer::stop] not running")
def stopped(self):
return self._stop_event.is_set()
class Worker(threading.Thread):
def __init__(self, host, port):
threading.Thread.__init__(self)
self.httpd = ThreadedHTTPServer((host, port), Handler)
self.host = host
self.port = port
def run(self):
if idarest_master_plugin_t.config['master_info']: print("[idarest_master::Worker::run] master httpd starting...")
self.httpd.serve_forever()
if idarest_master_plugin_t.config['master_info']: print("[idarest_master::Worker::run] master httpd started (well stopped now, i guess)")
def stop(self):
if idarest_master_plugin_t.config['master_info']: print("[idarest_master::Worker::stop] master httpd shutdown...")
self.httpd.shutdown()
if idarest_master_plugin_t.config['master_info']: print("[idarest_master::Worker::stop] master httpd server_close...")
self.httpd.server_close()
if idarest_master_plugin_t.config['master_info']: print("[idarest_master::Worker::stop] master httpd stopped")
class Master:
def __init__(self):
self.worker = Worker('127.0.0.1', 28612)
self.worker.start()
self.test_worker = Timer()
self.test_worker.start()
def stop(self):
self.worker.stop()
self.test_worker.stop()
def main():
if idarest_master_plugin_t.config['master_info']: print("[idarest_master::main] starting master")
master = Master()
# main.master = master
return master
return main()
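# Endpoints exposed by the master (inferred from Handler.do_GET above; the client
# host/port/idb values below are hypothetical, the master defaults to 127.0.0.1:28612):
#     GET http://127.0.0.1:28612/ida/api/v1.0/register?host=127.0.0.1&port=2000&idb=sample.idb
#     GET http://127.0.0.1:28612/ida/api/v1.0/unregister?host=127.0.0.1&port=2000
#     GET http://127.0.0.1:28612/ida/api/v1.0/show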
def PLUGIN_ENTRY():
globals()['instance'] = idarest_master_plugin_t()
return globals()['instance']
if __name__ == "__main__":
master = idarest_master()
| 38.725694 | 151 | 0.56505 | 9,612 | 0.861831 | 0 | 0 | 2,074 | 0.185959 | 0 | 0 | 2,621 | 0.235004 |
475b01893fd7ae3f1c364df0596288e775da3bcd | 11,750 | py | Python | wordbot.py | spyth/wordbot | 8213d18fcd7602a5b2bab9c83898b9f29a1cf3d4 | [
"MIT"
]
| null | null | null | wordbot.py | spyth/wordbot | 8213d18fcd7602a5b2bab9c83898b9f29a1cf3d4 | [
"MIT"
]
| null | null | null | wordbot.py | spyth/wordbot | 8213d18fcd7602a5b2bab9c83898b9f29a1cf3d4 | [
"MIT"
]
| null | null | null | import logging
import json
from datetime import datetime
from telegram.ext import (Updater, CommandHandler, MessageHandler, Filters,
Job, CallbackQueryHandler)
from telegram import (ChatAction, ParseMode, InlineKeyboardButton, InlineKeyboardMarkup)
from peewee import fn
import pytz
from word import word_query
from model import User, UserVocabularyMapping, Vocabulary, init as model_init
logger = logging.getLogger(__name__)
class WordBot(object):
def __init__(self, BOT_TOKEN, COUNT_CHECK=5, timezone='Asia/Hong_Kong', notify_time='23:00'):
self.updater = Updater(token=BOT_TOKEN)
dispatcher = self.updater.dispatcher
self.COUNT_CHECK = COUNT_CHECK
start_handler = CommandHandler('start', self.start)
test_handler = CommandHandler('test', self.test)
review_handler = CommandHandler('review', self.review)
query_handler = MessageHandler(Filters.text, self.query)
dispatcher.add_handler(start_handler)
dispatcher.add_handler(query_handler)
dispatcher.add_handler(review_handler)
dispatcher.add_handler(test_handler)
dispatcher.add_handler(CallbackQueryHandler(self.reply_button_callback))
# add daily reminder
if notify_time:
try:
tz = pytz.timezone(timezone)
utc_now = pytz.utc.localize(datetime.utcnow())
tz_now = utc_now.astimezone(tz)
hour, minute = tuple(map(int, notify_time.split(':')))
expect_time = tz_now.replace(hour=hour, minute=minute, second=0, microsecond=0)
delay = (int((expect_time - tz_now).total_seconds()) + 24 * 60 * 60) % (24 * 60 * 60)
self.updater.job_queue.run_daily(self.daily_remind, time=delay)
except:
logger.warning('oops, daily reminder start failed!')
raise
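    # Worked example (assumed values): with timezone "Asia/Hong_Kong", a local time of
    # 18:30 and notify_time "23:00", expect_time - tz_now is 4.5 h, so
    #     delay = (16200 + 86400) % 86400 = 16200 seconds;
    # if the local time were already 23:30 the difference is -1800 s and the modulo
    # wraps it to 84600 s, i.e. tomorrow's reminder.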
def run(self):
self.updater.start_polling()
self.updater.idle()
@staticmethod
def daily_remind(bot, job):
for u in User.select():
bot.send_message(chat_id=u.tgid, text="👩🏫 Would you like to /review or /test vocabulary?")
@staticmethod
def start(bot, update):
bot.sendChatAction(chat_id=update.message.chat_id, action=ChatAction.TYPING)
user, new_created = User.get_or_create(tgid=str(update.message.from_user.id))
if new_created:
bot.send_message(chat_id=update.message.chat_id, text="Hi!")
else:
bot.send_message(chat_id=update.message.chat_id, text="Hi, nice to see you again.")
@staticmethod
def query(bot, update):
bot.sendChatAction(chat_id=update.message.chat_id, action=ChatAction.TYPING)
vocabulary = word_query(update.message.text)
if vocabulary is not None:
response = str(vocabulary)
else:
response = '👽 500'
bot.send_message(chat_id=update.message.chat_id, text=response, parse_mode=ParseMode.HTML)
if vocabulary and vocabulary.audio:
bot.send_audio(chat_id=update.message.chat_id, audio=open(vocabulary.audio, 'rb'))
user, new_created = User.get_or_create(tgid=str(update.message.from_user.id))
if vocabulary is not None:
mapping, new_created = UserVocabularyMapping.get_or_create(user=user, vocabulary=vocabulary)
if (not new_created) and mapping.check_times > 0:
mapping.update(check_times=0).execute()
@staticmethod
def review(bot, update):
bot.sendChatAction(chat_id=update.message.chat_id, action=ChatAction.TYPING)
user, new_created = User.get_or_create(tgid=str(update.message.from_user.id))
if new_created or user.uservocabularymapping_set.count() == 0:
bot.send_message(chat_id=update.message.chat_id, text="you don't have any vocabulary yet!")
return None
keyboard = [[InlineKeyboardButton('🔁',
callback_data='{"command": "review", "type": "order", "arg": 0, "check": 0}'),
InlineKeyboardButton('🔀',
callback_data='{"command": "review", "type": "shuffle", "arg": 0, "check": 0}')]]
reply_markup = InlineKeyboardMarkup(keyboard, one_time_keyboard=True)
bot.send_message(chat_id=update.message.chat_id,
text="🐰 OK! Let's start to review.\nPlease select the play mode.",
reply_markup=reply_markup)
@staticmethod
def test(bot, update):
# bot.sendChatAction(chat_id=update.message.chat_id, action=ChatAction.TYPING)
word = Vocabulary.select(Vocabulary.id, Vocabulary.word).order_by(fn.Random()).limit(1).first()
reply = "**%s**?" % word.word
keyboard = [[InlineKeyboardButton("❓",
callback_data='{"command": "test", "type": "ask", "arg": %d}' % word.id),
InlineKeyboardButton("✅",
callback_data='{"command": "test", "type": "check", "arg": %d}' % word.id)]]
reply_markup = InlineKeyboardMarkup(keyboard)
bot.send_message(chat_id=update.message.chat_id, text=reply,
reply_markup=reply_markup, parse_mode=ParseMode.MARKDOWN)
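    # The inline-keyboard payloads used above and in review() share a small JSON schema
    # (inferred from the handlers below, not formally specified):
    #     {"command": "review"|"test", "type": "order"|"shuffle"|"ask"|"check"|"next",
    #      "arg": <mapping or vocabulary id>, "check": 0|1}
    # "check" is only carried by review buttons and "arg" is omitted for {"type": "next"}.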
def reply_button_callback(self, bot, update):
query = update.callback_query
chat_id = query.message.chat_id
try:
data = json.loads(query.data)
except:
data = None
if not (data and type(data) == dict and 'command' in data):
bot.edit_message_text(text="unknown command🕴",
chat_id=chat_id,
message_id=query.message.message_id)
logger.warning(query)
return
if data['command'] == 'review':
# bot.sendChatAction(chat_id=chat_id, action=ChatAction.TYPING)
_id = data['arg']
if data['check'] == 1:
UserVocabularyMapping.update(check_times=UserVocabularyMapping.check_times + 1) \
.where(UserVocabularyMapping.id == _id).execute()
mapping = UserVocabularyMapping.get(id=_id)
if mapping.check_times >= self.COUNT_CHECK:
reply_text = str(mapping.vocabulary) + '\n' + '🎉' * self.COUNT_CHECK
else:
reply_text = str(mapping.vocabulary) + '\n' + '⭐️' * mapping.check_times
bot.edit_message_text(text=reply_text, chat_id=chat_id, message_id=query.message.message_id)
else:
# clear the previous reply button
bot.edit_message_reply_markup(chat_id=chat_id, message_id=query.message.message_id)
if data['type'] == 'order':
mapping_query = UserVocabularyMapping.select().join(User) \
.where((UserVocabularyMapping.id > _id) & (User.tgid == str(chat_id)) \
& (UserVocabularyMapping.check_times < self.COUNT_CHECK)) \
.order_by(UserVocabularyMapping.id).limit(1)
# repeat
if _id > 0 and mapping_query.count() == 0:
mapping_query = UserVocabularyMapping.select().join(User) \
.where((User.tgid == str(chat_id)) \
& (UserVocabularyMapping.check_times < self.COUNT_CHECK)) \
.order_by(UserVocabularyMapping.id).limit(1)
if mapping_query.count() > 0:
mapping = mapping_query[0]
reply = str(mapping.vocabulary)
keyboard = [[InlineKeyboardButton("✅",
callback_data='{"command": "review", "type": "order", "arg": %d, "check": 1}' % mapping.id),
InlineKeyboardButton("⏭",
callback_data='{"command": "review", "type": "order", "arg": %d, "check": 0}' % mapping.id), ]]
reply_markup = InlineKeyboardMarkup(keyboard)
bot.send_message(chat_id=chat_id, text=reply, reply_markup=reply_markup)
else:
bot.send_message(chat_id=chat_id, text="end🕴")
# shuffle
else:
mapping_query = UserVocabularyMapping.select().join(User) \
.where((User.tgid == str(chat_id)) \
& (UserVocabularyMapping.check_times < self.COUNT_CHECK)) \
.order_by(fn.Random()).limit(1)
if mapping_query.count() > 0:
mapping = mapping_query[0]
reply = str(mapping.vocabulary)
keyboard = [[InlineKeyboardButton("✅",
callback_data='{"command": "review", "type": "shuffle", "arg": %d, "check": 1}' % mapping.id),
InlineKeyboardButton("⏭",
callback_data='{"command": "review", "type": "shuffle", "arg": %d, "check": 0}' % mapping.id), ]]
reply_markup = InlineKeyboardMarkup(keyboard)
bot.send_message(chat_id=chat_id, text=reply, reply_markup=reply_markup)
else:
bot.send_message(chat_id=chat_id, text="end🕴")
elif data['command'] == 'test':
if data['type'] == 'next':
bot.edit_message_reply_markup(chat_id=chat_id, message_id=query.message.message_id)
self.test(bot, query)
else:
try:
_id = data['arg']
word = Vocabulary.get(id=_id)
except Vocabulary.DoesNotExist:
bot.edit_message_text(text='oops!', chat_id=chat_id, message_id=query.message.message_id)
return
user, _ = User.get_or_create(tgid=str(chat_id))
mapping, new_created = UserVocabularyMapping.get_or_create(user=user, vocabulary=word)
if data['type'] == 'check':
extra_msg = '\n' + '⭐️' * (mapping.check_times + 1)
UserVocabularyMapping.update(check_times=UserVocabularyMapping.check_times + 1) \
.where(UserVocabularyMapping.id == mapping.id).execute()
else:
extra_msg = '\n' + '😆'
if (not new_created) and mapping.check_times > 0:
UserVocabularyMapping.update(check_times=0).where(
UserVocabularyMapping.id == mapping.id).execute()
keyboard = [[InlineKeyboardButton("⏭", callback_data='{"command": "test", "type": "next"}')]]
reply_markup = InlineKeyboardMarkup(keyboard)
bot.edit_message_text(text=str(word) + extra_msg, chat_id=chat_id,
message_id=query.message.message_id, reply_markup=reply_markup)
else:
pass
if __name__ == '__main__':
logging.basicConfig(filename='spam.log',
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
import os
file_path = os.path.abspath(os.path.dirname(__file__))
if not os.path.exists(os.path.join(file_path, "audio")):
os.mkdir(os.path.join(file_path, "audio"))
if not os.path.isfile(os.path.join(file_path, 'bot.db')):
model_init()
import config
bot = WordBot(config.BOT_TOKEN, timezone=config.TIMEZONE, notify_time=config.NOTIFY_TIME)
bot.run()
| 50.213675 | 151 | 0.575149 | 10,773 | 0.912425 | 0 | 0 | 3,339 | 0.282798 | 0 | 0 | 1,353 | 0.114593 |
475e2827e051fef8ddf4351c0f8e2268a7395759 | 599 | py | Python | setup.py | jcanode/small_nn | 0e7fa58a52b45b2221b66bd0a67bd7395934133c | [
"MIT"
]
| 1 | 2020-07-06T20:43:23.000Z | 2020-07-06T20:43:23.000Z | setup.py | jcanode/small_nn | 0e7fa58a52b45b2221b66bd0a67bd7395934133c | [
"MIT"
]
| null | null | null | setup.py | jcanode/small_nn | 0e7fa58a52b45b2221b66bd0a67bd7395934133c | [
"MIT"
]
| 1 | 2020-07-04T18:11:43.000Z | 2020-07-04T18:11:43.000Z | import setuptools
from setuptools import setup, find_namespace_packages
setuptools.setup(
name="small_nn-jcanode",
version="0.0.1",
author="Justin Canode",
author_email="[email protected]",
description="A small Neural Network Framework",
long_description_content_type="text/markdown",
url="https://github.com/jcanode/small_nn",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| 28.52381 | 53 | 0.677796 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 266 | 0.444073 |
475f6eec7136ec2401074e0d2a574922b79ef08a | 1,829 | py | Python | cosPreprints.py | leouieda/cospy | fa0994e5a41896cd17f76fdad08b20bdb4dd112a | [
"MIT"
]
| null | null | null | cosPreprints.py | leouieda/cospy | fa0994e5a41896cd17f76fdad08b20bdb4dd112a | [
"MIT"
]
| 8 | 2019-06-10T12:56:56.000Z | 2019-07-02T16:49:40.000Z | cosPreprints.py | leouieda/cospy | fa0994e5a41896cd17f76fdad08b20bdb4dd112a | [
"MIT"
]
| null | null | null | import os
import sys
import utils
import extras.downloadStats as stats
import extras.downloadManuscript as dm
import extras.unpaywall as up
def main():
# get the configuration parameters from environment variables
cosApiToken = os.environ['cosApiToken'] # for accessing COS API
emailAddress = os.environ['emailAddress'] # for accessing Unpaywall API
# command line inputs
# downloadDir - directory to download papers to, also where logs are stored
downloadDir = sys.argv[1]
# print out status messages as we go along (True or False)
verbose = sys.argv[2]
# start date - YYYY-MM-DD we should start the index from
startDate = sys.argv[3]
endDate = sys.argv[4]
df = utils.getProviders( cosApiToken )
    # separator for log file
s1 = ';'
# the API returns all preprints that were created
# some papers may not be available for download
# due to moderation problems or retraction
# keep track of how many preprints we actually get
numPreprints = 0
# check that the download directory includes
# the trailing /
lc = downloadDir[-1]
if (lc != '/'):
downloadDir += '/'
# preprint provider
provider = 'eartharxiv'
# set up log files based on provider
log = downloadDir + provider + '.log'
# get the papers
manuscripts = utils.getManuscripts(cosApiToken, provider, startDate, endDate, verbose)
# example downloading PDF
dm.download( manuscripts['downloadURL'][0], '/Users/narock/Desktop/test.pdf')
# example getting download statistics
downloads = stats.getDownloadStats( cosApiToken, manuscripts['cosID'][0] )
print( manuscripts['cosID'][0], 'downloaded', downloads, 'times')
# example calling unpaywall
manuscripts, statistics = up.callUnpaywall( manuscripts, emailAddress )
print( statistics )
main()
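# Example invocation (hypothetical paths/dates): cosApiToken and emailAddress must be
# set in the environment and four positional arguments are expected, e.g.
#     python cosPreprints.py /data/preprints/ True 2019-01-01 2019-02-01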
| 29.983607 | 89 | 0.708037 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 889 | 0.486058 |
47604d58582e9e00c559605b1ca161c8c2ddf27a | 1,212 | py | Python | RVFS/account/urls.py | cahudson94/Raven-Valley-Forge-Shop | 52f46381eafa9410d8e9c759366ef7490dcb1de9 | [
"MIT"
]
| 2 | 2018-02-12T01:32:16.000Z | 2021-08-23T19:29:08.000Z | RVFS/account/urls.py | cahudson94/Raven-Valley-Forge-Shop | 52f46381eafa9410d8e9c759366ef7490dcb1de9 | [
"MIT"
]
| 1 | 2018-05-23T03:42:20.000Z | 2018-05-23T03:42:20.000Z | RVFS/account/urls.py | cahudson94/Raven-Valley-Forge-Shop | 52f46381eafa9410d8e9c759366ef7490dcb1de9 | [
"MIT"
]
| null | null | null | """."""
from django.urls import path, reverse_lazy
from account.views import (AccountView,
InfoFormView,
EditAccountView,
AddAddressView,
AddressListView,
DeleteAddress)
from django.contrib.auth import views as auth_views
urlpatterns = [
path('', AccountView.as_view(), name='account'),
path('add-address/', AddAddressView.as_view(), name='add_add'),
path('address-list/', AddressListView.as_view(), name='add_list'),
path('delete-address/<int:pk>/', DeleteAddress.as_view(), name='del_add'),
path('edit/<int:pk>/', EditAccountView.as_view(), name='edit_acc'),
path('info-form/<int:pk>/', InfoFormView.as_view(), name='info_reg'),
path('change_password/', auth_views.PasswordChangeView.as_view(
template_name='password_reset/change_password.html',
success_url=reverse_lazy('change_password_done')),
name='change_password'),
path('change_password_done/', auth_views.PasswordChangeDoneView.as_view(
template_name='password_reset/change_password_done.html',
),
name='change_password_done')
]
| 43.285714 | 78 | 0.634488 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 339 | 0.279703 |
47615b2e9cbae2e821ca67ff9a73e485fdec4592 | 209 | wsgi | Python | index.wsgi | webgovernor/dungeonsheet | 59800249f0394af0fc08d7ca23e68faf9d0d2920 | [
"MIT"
]
| null | null | null | index.wsgi | webgovernor/dungeonsheet | 59800249f0394af0fc08d7ca23e68faf9d0d2920 | [
"MIT"
]
| null | null | null | index.wsgi | webgovernor/dungeonsheet | 59800249f0394af0fc08d7ca23e68faf9d0d2920 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
import os
import sys
sys.path.insert(0, '/home/nullism/web/dnd.nullism.com/')
from main import app
conf = {}
conf['SECRET_KEY'] = 'CHANGEME'
app.config.update(conf)
application = app
| 14.928571 | 56 | 0.712919 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 79 | 0.37799 |
4763416997df781bd573e3c234672ddfd3a200da | 394 | py | Python | examples/spot/sub_account/sub_account_futures_asset_transfer_history.py | Banging12/binance-connector-python | dc6fbbd0bb64fb08d73ad8b31e0b81d776efa30b | [
"MIT"
]
| 512 | 2021-06-15T08:52:44.000Z | 2022-03-31T09:49:53.000Z | examples/spot/sub_account/sub_account_futures_asset_transfer_history.py | Banging12/binance-connector-python | dc6fbbd0bb64fb08d73ad8b31e0b81d776efa30b | [
"MIT"
]
| 75 | 2021-06-20T13:49:50.000Z | 2022-03-30T02:45:31.000Z | examples/spot/sub_account/sub_account_futures_asset_transfer_history.py | Banging12/binance-connector-python | dc6fbbd0bb64fb08d73ad8b31e0b81d776efa30b | [
"MIT"
]
| 156 | 2021-06-18T11:56:36.000Z | 2022-03-29T16:34:22.000Z | #!/usr/bin/env python
import logging
from binance.spot import Spot as Client
from binance.lib.utils import config_logging
config_logging(logging, logging.DEBUG)
key = ""
secret = ""
spot_client = Client(key, secret)
logging.info(
spot_client.sub_account_futures_asset_transfer_history(
email="",
        futuresType=1,  # 1: USDT-margined Futures, 2: Coin-margined Futures
)
)
| 20.736842 | 73 | 0.730964 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 78 | 0.19697 |
476347b456fac42ec555a12cc44e71eae3a0e9fc | 236 | py | Python | src/file_format/utils/json_util.py | tys-hiroshi/test-github-actions-python | c47172949fbdd6ddffe889ca0f91eb11a741021d | [
"MIT"
]
| null | null | null | src/file_format/utils/json_util.py | tys-hiroshi/test-github-actions-python | c47172949fbdd6ddffe889ca0f91eb11a741021d | [
"MIT"
]
| 2 | 2021-05-21T09:36:42.000Z | 2021-05-28T03:55:44.000Z | src/file_format/utils/json_util.py | tys-hiroshi/test-github-actions-python | c47172949fbdd6ddffe889ca0f91eb11a741021d | [
"MIT"
]
| 1 | 2020-08-06T06:21:34.000Z | 2020-08-06T06:21:34.000Z | # -*- coding:utf-8 -*-
import json
class JsonUtil(object):
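    """Small helper that loads a JSON file into ``self.content`` on construction."""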
def __init__(self, jsonFilePath):
with open(jsonFilePath, 'r', encoding='utf-8') as f:
self.content = json.load(f)
if __name__ == '__main__':
pass | 21.454545 | 60 | 0.610169 | 162 | 0.686441 | 0 | 0 | 0 | 0 | 0 | 0 | 42 | 0.177966 |
4765153b7928cbef30b909b4bfadf17b59957999 | 864 | py | Python | jubox/test/notebook/test_get.py | Miksus/jubox | daaf1e223e0a7c0a3bf9ae03b88d629c0f99d4d5 | [
"MIT"
]
| 1 | 2020-04-26T05:18:45.000Z | 2020-04-26T05:18:45.000Z | jubox/test/notebook/test_get.py | Miksus/jubox | daaf1e223e0a7c0a3bf9ae03b88d629c0f99d4d5 | [
"MIT"
]
| null | null | null | jubox/test/notebook/test_get.py | Miksus/jubox | daaf1e223e0a7c0a3bf9ae03b88d629c0f99d4d5 | [
"MIT"
]
| null | null | null |
import pytest
from jubox import JupyterNotebook, RawCell, CodeCell, MarkdownCell
def test_get_tags():
nb = JupyterNotebook([
RawCell("first cell"),
RawCell("second cell", tags=["tagged"]),
        RawCell("third cell", tags=["Not this", "tagged"]),
])
nb_of_tags = nb.get(tags=["tagged"], not_tags=["Not this"])
assert isinstance(nb_of_tags, JupyterNotebook)
assert 1 == len(nb_of_tags.node.cells)
assert "second cell" == nb_of_tags.node.cells[0]["source"]
def test_get_cell_types():
nb = JupyterNotebook([
RawCell("first cell"),
CodeCell("second cell"),
MarkdownCell("third cell"),
])
nb_of_tags = nb.get(cell_type=["code"])
assert isinstance(nb_of_tags, JupyterNotebook)
assert 1 == len(nb_of_tags.node.cells)
assert "second cell" == nb_of_tags.node.cells[0]["source"] | 32 | 66 | 0.653935 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 166 | 0.19213 |
476520ec9bb115901083a55e2fef94cc44775b6c | 2,453 | py | Python | azure-mgmt/tests/test_graphrbac.py | HydAu/AzureSDKForPython | 5cbe34e9e0b8ea1faacc9f205633ccc0b885c0f3 | [
"Apache-2.0"
]
| null | null | null | azure-mgmt/tests/test_graphrbac.py | HydAu/AzureSDKForPython | 5cbe34e9e0b8ea1faacc9f205633ccc0b885c0f3 | [
"Apache-2.0"
]
| null | null | null | azure-mgmt/tests/test_graphrbac.py | HydAu/AzureSDKForPython | 5cbe34e9e0b8ea1faacc9f205633ccc0b885c0f3 | [
"Apache-2.0"
]
| null | null | null | # coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import unittest
import azure.graphrbac
from testutils.common_recordingtestcase import record
from tests.mgmt_testcase import HttpStatusCode, AzureMgmtTestCase
class GraphRbacTest(AzureMgmtTestCase):
def setUp(self):
super(GraphRbacTest, self).setUp()
self.graphrbac_client = self.create_basic_client(
azure.graphrbac.GraphRbacManagementClient,
tenant_id=self.settings.AD_DOMAIN
)
@record
def test_graphrbac_users(self):
user = self.graphrbac_client.user_operations.create(
azure.graphrbac.models.UserCreateParameters(
user_principal_name="testbuddy@{}".format(self.settings.AD_DOMAIN),
account_enabled=False,
display_name='Test Buddy',
mail_nickname='testbuddy',
password_profile=azure.graphrbac.models.UserCreateParametersPasswordProfile(
password='MyStr0ngP4ssword',
force_change_password_next_login=True
)
)
)
self.assertEqual(user.display_name, 'Test Buddy')
user = self.graphrbac_client.user_operations.get(user.object_id)
self.assertEqual(user.display_name, 'Test Buddy')
users = self.graphrbac_client.user_operations.list(
filter="displayName eq 'Test Buddy'"
)
users = list(users)
self.assertEqual(len(users), 1)
self.assertEqual(users[0].display_name, 'Test Buddy')
self.graphrbac_client.user_operations.delete(user.object_id)
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| 37.738462 | 92 | 0.622911 | 1,402 | 0.571079 | 0 | 0 | 1,123 | 0.457434 | 0 | 0 | 952 | 0.38778 |
47679dc98d3116e71ccabc039214caa5e8184ec0 | 5,556 | py | Python | HudlProject/HudlProject.py | zifanyang/Hudl-Python-Selenium-Project | 9f4427e2c4da39e3432e4432da2d40464cdbb60b | [
"MIT"
]
| null | null | null | HudlProject/HudlProject.py | zifanyang/Hudl-Python-Selenium-Project | 9f4427e2c4da39e3432e4432da2d40464cdbb60b | [
"MIT"
]
| null | null | null | HudlProject/HudlProject.py | zifanyang/Hudl-Python-Selenium-Project | 9f4427e2c4da39e3432e4432da2d40464cdbb60b | [
"MIT"
]
| null | null | null | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import unittest
from time import sleep
path="C:\chromedriver.exe"
url="http://www.hudl.com/login"
#username:[email protected]
#password:test1234
class TestLogin(unittest.TestCase):
#Open url
def setUp(self):
self.driver=webdriver.Chrome(path)
self.driver.get(url)
self.driver.maximize_window()
sleep(1)
def test_Login(self):#Login successful
un = "[email protected]"
pw = "test1234"
driver=self.driver
username=self.driver.find_element_by_name('username')
username.clear()
username.send_keys(un)
password=self.driver.find_element_by_name('password')
password.clear()
password.send_keys(pw)
self.driver.find_element_by_id('logIn').click()
sleep(3)
a=driver.current_url
self.assertEqual(a,'https://www.hudl.com/home',msg='Login successfully')
def test_LoginWithTabAndEnter(self):#Tab & Enter
un = "[email protected]"
pw = "test1234"
driver=self.driver
self.driver.find_element_by_name('username').send_keys(un + Keys.TAB + pw + Keys.ENTER)
sleep(3)
a=driver.current_url
self.assertEqual(a,'https://www.hudl.com/home',msg='Login successfully')
def test_LoginWithWrongPw(self):#Wrong password
un = "[email protected]"
pw = "wrongpassword"
driver = self.driver
username = self.driver.find_element_by_name('username')
username.clear()
username.send_keys(un)
password = self.driver.find_element_by_name('password')
password.clear()
password.send_keys(pw)
self.driver.find_element_by_id('logIn').click()
sleep(3)
self.assertIn("We didn't recognize that email and/or password.",self.driver.page_source)
def test_LoginWithWrongUn(self):#Wrong username
un = "[email protected]"
pw = "test1234"
driver = self.driver
username = self.driver.find_element_by_name('username')
username.clear()
username.send_keys(un)
password = self.driver.find_element_by_name('password')
password.clear()
password.send_keys(pw)
self.driver.find_element_by_id('logIn').click()
sleep(3)
self.assertIn("We didn't recognize that email and/or password.", self.driver.page_source)
def test_LoginWithWrongUnAndPw(self):#Wrong username and wrong password
un = "[email protected]"
pw = "wrongpassword"
driver = self.driver
username = self.driver.find_element_by_name('username')
username.clear()
username.send_keys(un)
password = self.driver.find_element_by_name('password')
password.clear()
password.send_keys(pw)
self.driver.find_element_by_id('logIn').click()
sleep(3)
self.assertIn("We didn't recognize that email and/or password.", self.driver.page_source)
def test_LoginWithNoUn(self):#No username
un = "[email protected]"
pw = "test1234"
driver = self.driver
username = self.driver.find_element_by_name('username')
username.clear()
password = self.driver.find_element_by_name('password')
password.clear()
password.send_keys(pw)
self.driver.find_element_by_id('logIn').click()
sleep(3)
self.assertIn("We didn't recognize that email and/or password.", self.driver.page_source)
def test_LoginWithNoPw(self):#No password
un = "[email protected]"
pw = "test1234"
driver = self.driver
username = self.driver.find_element_by_name('username')
username.clear()
username.send_keys(un)
password = self.driver.find_element_by_name('password')
password.clear()
self.driver.find_element_by_id('logIn').click()
sleep(3)
self.assertIn("We didn't recognize that email and/or password.", self.driver.page_source)
def test_LoginWithNoUnAndPw(self):#No username and no password
un = "[email protected]"
pw = "test1234"
driver = self.driver
username = self.driver.find_element_by_name('username')
username.clear()
password = self.driver.find_element_by_name('password')
password.clear()
self.driver.find_element_by_id('logIn').click()
sleep(3)
self.assertIn("We didn't recognize that email and/or password.", self.driver.page_source)
def test_RememberMe(self):#Remember me
un = "[email protected]"
pw = "test1234"
driver=self.driver
username=self.driver.find_element_by_name('username')
username.clear()
username.send_keys(un)
password=self.driver.find_element_by_name('password')
password.clear()
password.send_keys(pw)
self.driver.find_element_by_class_name('form__label--custom').click()
self.driver.find_element_by_id('logIn').click()
sleep(3)
a=driver.current_url
self.assertEqual(a,'https://www.hudl.com/home',msg='Login successfully')
def test_NeedHelp(self):#Need help
driver = self.driver
self.driver.find_element_by_id("forgot-password-link").click()
sleep(3)
self.assertIn("Login Help", self.driver.page_source)
def tearDown(self):
self.driver.quit()
if __name__ == "__main__":
unittest.main()
| 37.04 | 97 | 0.656407 | 5,278 | 0.949964 | 0 | 0 | 0 | 0 | 0 | 0 | 1,336 | 0.240461 |
476814d4d4009b43a794a354ff6f84a93d5e7e37 | 1,013 | py | Python | project/notes/serializers.py | J0hnGann0n/modern-django | a8f51806b3f6903847a20b7241044d3a88f4a775 | [
"MIT"
]
| 1 | 2017-06-22T22:35:07.000Z | 2017-06-22T22:35:07.000Z | project/notes/serializers.py | J0hnGann0n/modern-django | a8f51806b3f6903847a20b7241044d3a88f4a775 | [
"MIT"
]
| null | null | null | project/notes/serializers.py | J0hnGann0n/modern-django | a8f51806b3f6903847a20b7241044d3a88f4a775 | [
"MIT"
]
| null | null | null | from django.contrib.auth.models import User
from rest_framework import serializers
# MODEL IMPORTS
from project.notes.models import Note, NoteItem
class UserSerializer(serializers.HyperlinkedModelSerializer):
notes = serializers.HyperlinkedRelatedField(many=True, read_only=True, view_name='note-detail')
class Meta:
model = User
fields = ('url', 'username', 'email', 'groups', 'notes')
class NoteSerializer(serializers.HyperlinkedModelSerializer):
items = serializers.HyperlinkedRelatedField(many=True, read_only=True, view_name='noteitem-detail')
class Meta:
model = Note
fields = ('pk', 'owner', 'title', 'items')
def create(self, validated_data):
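        # The owner comes from the serializer context (set by the view), not from client-supplied data.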
title = validated_data.get('title', None)
owner = self.context.get('user')
return Note.objects.create(owner=owner, title=title)
class NoteItemSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = NoteItem
fields = ('pk', 'note', 'text')
| 29.794118 | 103 | 0.702863 | 856 | 0.845015 | 0 | 0 | 0 | 0 | 0 | 0 | 136 | 0.134255 |
4768a467a8a3d8637a1d9c22d0a7cdad0dc93e1c | 4,286 | py | Python | src/main/python/TriggerTextExtractFromS3Image/trigger_text_extract_from_s3_image.py | aws-samples/social-graph-based-people-recommender-using-amazon-neptune-and-textract | 50e54945032d0eb4b47d9072d4c1d66cd169070a | [
"MIT-0"
]
| null | null | null | src/main/python/TriggerTextExtractFromS3Image/trigger_text_extract_from_s3_image.py | aws-samples/social-graph-based-people-recommender-using-amazon-neptune-and-textract | 50e54945032d0eb4b47d9072d4c1d66cd169070a | [
"MIT-0"
]
| null | null | null | src/main/python/TriggerTextExtractFromS3Image/trigger_text_extract_from_s3_image.py | aws-samples/social-graph-based-people-recommender-using-amazon-neptune-and-textract | 50e54945032d0eb4b47d9072d4c1d66cd169070a | [
"MIT-0"
]
| null | null | null | #!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# vim: tabstop=2 shiftwidth=2 softtabstop=2 expandtab
import sys
import json
import os
import urllib.parse
import traceback
import datetime
import boto3
DRY_RUN = (os.getenv('DRY_RUN', 'false') == 'true')
AWS_REGION = os.getenv('REGION_NAME', 'us-east-1')
KINESIS_STREAM_NAME = os.getenv('KINESIS_STREAM_NAME', 'octember-bizcard-img')
DDB_TABLE_NAME = os.getenv('DDB_TABLE_NAME', 'OctemberBizcardImg')
def write_records_to_kinesis(kinesis_client, kinesis_stream_name, records):
import random
random.seed(47)
def gen_records():
record_list = []
for rec in records:
payload = json.dumps(rec, ensure_ascii=False)
partition_key = 'part-{:05}'.format(random.randint(1, 1024))
record_list.append({'Data': payload, 'PartitionKey': partition_key})
return record_list
MAX_RETRY_COUNT = 3
record_list = gen_records()
for _ in range(MAX_RETRY_COUNT):
try:
response = kinesis_client.put_records(Records=record_list, StreamName=kinesis_stream_name)
print("[DEBUG]", response, file=sys.stderr)
break
except Exception as ex:
import time
traceback.print_exc()
time.sleep(2)
else:
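    # for/else: reached only when every retry attempt failed to break out of the loop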
raise RuntimeError('[ERROR] Failed to put_records into kinesis stream: {}'.format(kinesis_stream_name))
def update_process_status(ddb_client, table_name, item):
def ddb_update_item():
s3_bucket = item['s3_bucket']
s3_key = item['s3_key']
image_id = os.path.basename(s3_key)
status = item['status']
modified_time = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')
response = ddb_client.update_item(
TableName=table_name,
Key={
"image_id": {
"S": image_id
}
},
UpdateExpression="SET s3_bucket = :s3_bucket, s3_key = :s3_key, mts = :mts, #status = :status",
ExpressionAttributeNames={
'#status': 'status'
},
ExpressionAttributeValues={
":s3_bucket": {
"S": s3_bucket
},
":s3_key": {
"S": s3_key
},
":mts": {
"N": "{}".format(modified_time)
},
":status": {
"S": status
}
}
)
return response
try:
print("[DEBUG] try to update_process_status", file=sys.stderr)
res = ddb_update_item()
print('[DEBUG]', res, file=sys.stderr)
except Exception as ex:
traceback.print_exc()
raise ex
def lambda_handler(event, context):
kinesis_client = boto3.client('kinesis', region_name=AWS_REGION)
ddb_client = boto3.client('dynamodb', region_name=AWS_REGION)
for record in event['Records']:
try:
bucket = record['s3']['bucket']['name']
key = urllib.parse.unquote_plus(record['s3']['object']['key'], encoding='utf-8')
record = {'s3_bucket': bucket, 's3_key': key}
print("[INFO] object created: ", record, file=sys.stderr)
write_records_to_kinesis(kinesis_client, KINESIS_STREAM_NAME, [record])
update_process_status(ddb_client, DDB_TABLE_NAME, {'s3_bucket': bucket, 's3_key': key, 'status': 'START'})
except Exception as ex:
traceback.print_exc()
if __name__ == '__main__':
s3_event = '''{
"Records": [
{
"eventVersion": "2.0",
"eventSource": "aws:s3",
"awsRegion": "us-east-1",
"eventTime": "1970-01-01T00:00:00.000Z",
"eventName": "ObjectCreated:Put",
"userIdentity": {
"principalId": "EXAMPLE"
},
"requestParameters": {
"sourceIPAddress": "127.0.0.1"
},
"responseElements": {
"x-amz-request-id": "EXAMPLE123456789",
"x-amz-id-2": "EXAMPLE123/5678abcdefghijklambdaisawesome/mnopqrstuvwxyzABCDEFGH"
},
"s3": {
"s3SchemaVersion": "1.0",
"configurationId": "testConfigRule",
"bucket": {
"name": "octember-use1",
"ownerIdentity": {
"principalId": "EXAMPLE"
},
"arn": "arn:aws:s3:::octember-use1"
},
"object": {
"key": "bizcard-raw-img/edy_bizcard.jpg",
"size": 638,
"eTag": "0123456789abcdef0123456789abcdef",
"sequencer": "0A1B2C3D4E5F678901"
}
}
}
]
}'''
event = json.loads(s3_event)
lambda_handler(event, {})
| 27.651613 | 112 | 0.614792 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,757 | 0.409939 |
4768b72ec107983342fe43250644f8d66a20e6f5 | 483 | py | Python | example/retreive_soil_data.py | Smeaol22/ssurgo_provider | 63bc2251ef031da013af4cf0b252cc48cad4965e | [
"BSD-2-Clause"
]
| null | null | null | example/retreive_soil_data.py | Smeaol22/ssurgo_provider | 63bc2251ef031da013af4cf0b252cc48cad4965e | [
"BSD-2-Clause"
]
| null | null | null | example/retreive_soil_data.py | Smeaol22/ssurgo_provider | 63bc2251ef031da013af4cf0b252cc48cad4965e | [
"BSD-2-Clause"
]
| null | null | null | from pathlib import Path
from src.main import retrieve_soil_composition
# This example is base on geodatabase obtain from ssurgo on Ohio area
ssurgo_folder_path = Path().absolute().parent / 'resources' / 'SSURGO' / 'soils_GSSURGO_oh_3905571_01' \
/ 'soils' / 'gssurgo_g_oh' / 'gSSURGO_OH.gdb'
coordinates = [(40.574234, -83.292448), (40.519224, -82.799437), (40.521048, -82.790174)]
soil_data_list = retrieve_soil_composition(coordinates, ssurgo_folder_path)
| 43.909091 | 104 | 0.732919 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 154 | 0.318841 |
476b0de22ff656e62822acafc870dbc852e2d7da | 592 | py | Python | dynamic_programming_1/solution/1010.py | gpgun0/baekjoon_ | 0a3e87b9eafe1a6af4234ebbd2eebb7f67156414 | [
"MIT"
]
| null | null | null | dynamic_programming_1/solution/1010.py | gpgun0/baekjoon_ | 0a3e87b9eafe1a6af4234ebbd2eebb7f67156414 | [
"MIT"
]
| null | null | null | dynamic_programming_1/solution/1010.py | gpgun0/baekjoon_ | 0a3e87b9eafe1a6af4234ebbd2eebb7f67156414 | [
"MIT"
]
| null | null | null | class Solution:
def combination(self, m, n):
if dp[m][n]:
return dp[m][n]
if m <= n:
dp[m][n] = 1
return dp[m][n]
if n == 1:
dp[m][n] = m
return dp[m][n]
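        # Pascal's rule: C(m, n) = C(m-1, n-1) + C(m-1, n), memoized in the global dp table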
dp[m][n] = self.combination(m-1, n-1) + self.combination(m-1, n)
return dp[m][n]
def main(self):
n, m = map(int, input().split())
return self.combination(m, n)
sol = Solution()
t = int(input())
dp = [[0]*201 for _ in range(201)]
for _ in range(t): print(sol.main()) | 22.769231 | 73 | 0.429054 | 478 | 0.807432 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
476bf06f808044f61843876f04c9e9d7d4868ec8 | 901 | py | Python | palindrome_test.py | gcrowder/palindrome | 31bc1ab62f849dbbfef8e31b0d0d081e7bf2aced | [
"MIT"
]
| null | null | null | palindrome_test.py | gcrowder/palindrome | 31bc1ab62f849dbbfef8e31b0d0d081e7bf2aced | [
"MIT"
]
| null | null | null | palindrome_test.py | gcrowder/palindrome | 31bc1ab62f849dbbfef8e31b0d0d081e7bf2aced | [
"MIT"
]
| null | null | null | import unittest
from palindrome import is_palindrome
class TestPalindrome(unittest.TestCase):
def test_even_numbers(self):
self.assertTrue(is_palindrome('toot'))
def test_odd_numbers(self):
self.assertTrue(is_palindrome('tot'))
def test_simple_values(self):
self.assertTrue(is_palindrome('stunt nuts'))
def test_complete_sentences(self):
self.assertTrue(is_palindrome('Lisa Bonet ate no basil.'))
def test_complex_sentences(self):
self.assertTrue(is_palindrome('A man, a plan, a cat, a ham, a yak, a yam, a hat, a canal: Panama!'))
def test_multiple_sentences(self):
self.assertTrue(is_palindrome('Doc, note, I dissent. A fast never prevents a fatness. I diet on cod.'))
def test_non_palindromes(self):
self.assertFalse(is_palindrome('i am not a palindrome'))
if __name__ == '__main__':
unittest.main()
| 30.033333 | 111 | 0.703663 | 796 | 0.883463 | 0 | 0 | 0 | 0 | 0 | 0 | 221 | 0.245283 |
476d9258256823afd9857d168e1439ac9c883a29 | 1,738 | py | Python | fireroadApi/fireroad_utils.py | zhang-lucy/coursehose | 21cbda0e7cc12a9d201585dbdd53d2eeacfade96 | [
"MIT"
]
| 1 | 2020-09-20T17:29:24.000Z | 2020-09-20T17:29:24.000Z | fireroadApi/fireroad_utils.py | zhang-lucy/coursehose | 21cbda0e7cc12a9d201585dbdd53d2eeacfade96 | [
"MIT"
]
| null | null | null | fireroadApi/fireroad_utils.py | zhang-lucy/coursehose | 21cbda0e7cc12a9d201585dbdd53d2eeacfade96 | [
"MIT"
]
| null | null | null | import requests
import json
import re
def get_course_requirements(course_id):
link = "http://fireroad.mit.edu/requirements/get_json/" + course_id
r = requests.get(link)
j = r.json()["reqs"]
return j
def get_all_course_requirements():
major_reqs = {}
major_id_link = "https://fireroad.mit.edu/requirements/list_reqs/"
majors = requests.get(major_id_link).json()
for major_id in majors:
major_reqs[major_id] = get_course_requirements(major_id)
return major_reqs
def get_all_major_titles():
major_titles = []
major_id_link = "https://fireroad.mit.edu/requirements/list_reqs/"
majors = requests.get(major_id_link).json()
for major_id, major_info in majors.items():
if 'major' in major_id:
major_titles.append(major_info['medium-title'].split()[0] + ': ' + major_info['title-no-degree'])
major_titles.sort(key=major_sort_key)
return major_titles
def major_sort_key(title):
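    # Pad single-digit course numbers with a leading "0" so they sort before double-digit ones.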
if title[0].isnumeric() and not title[1].isnumeric():
return '0' + title.replace(":", " ")
return title.replace(":", " ")
def get_all_courses():
link = "https://fireroad.mit.edu/courses/all?full=true"
courses = requests.get(link).json()
courses_dict = {}
for course_info in courses:
courses_dict[course_info['subject_id']] = course_info
return courses_dict
def save(data, file_name):
with open(file_name, 'w') as f:
json.dump(data, f)
if __name__ == "__main__":
# course_id = input("Enter course id: ")
# print(get_course_requirements(course_id))
# major_reqs = get_all_course_requirements()
# save(major_reqs, "../data/course_requirements.json")
# major_titles = get_all_major_titles()
# save(major_titles, "../data/major_titles.json")
all_courses = get_all_courses()
save(all_courses, "../data/allCourses.json")
| 28.966667 | 100 | 0.733026 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 578 | 0.332566 |
476df3fbd08fe0e200196618406d29ff175f3d41 | 4,423 | py | Python | examples/compare_transactions_speed_with_hive.py | TheCrazyGM/bhive | 1494e90a99123ecfc5efbd927258f9ba59443e2e | [
"MIT"
]
| 2 | 2020-03-21T23:50:22.000Z | 2020-03-25T19:10:48.000Z | examples/compare_transactions_speed_with_hive.py | TheCrazyGM/bhive | 1494e90a99123ecfc5efbd927258f9ba59443e2e | [
"MIT"
]
| null | null | null | examples/compare_transactions_speed_with_hive.py | TheCrazyGM/bhive | 1494e90a99123ecfc5efbd927258f9ba59443e2e | [
"MIT"
]
| 1 | 2020-03-21T23:50:25.000Z | 2020-03-21T23:50:25.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import bytes
from builtins import chr
from builtins import range
from builtins import super
import random
from pprint import pprint
from binascii import hexlify
from collections import OrderedDict
from bhivebase import (
transactions,
memo,
operations,
objects
)
from bhivebase.objects import Operation
from bhivebase.signedtransactions import Signed_Transaction
from bhivegraphenebase.account import PrivateKey
from bhivegraphenebase import account
from bhivebase.operationids import getOperationNameForId
from bhivegraphenebase.py23 import py23_bytes, bytes_types
from bhive.amount import Amount
from bhive.asset import Asset
from bhive.hive import Hive
import time
from hive import Hive as hiveHive
from hivebase.account import PrivateKey as hivePrivateKey
from hivebase.transactions import SignedTransaction as hiveSignedTransaction
from hivebase import operations as hiveOperations
from timeit import default_timer as timer
class BhiveTest(object):
def setup(self):
self.prefix = u"HIVE"
self.default_prefix = u"STM"
self.wif = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"
self.ref_block_num = 34294
self.ref_block_prefix = 3707022213
self.expiration = "2016-04-06T08:29:27"
self.hv = Hive(offline=True)
def doit(self, printWire=False, ops=None):
ops = [Operation(ops)]
tx = Signed_Transaction(ref_block_num=self.ref_block_num,
ref_block_prefix=self.ref_block_prefix,
expiration=self.expiration,
operations=ops)
start = timer()
tx = tx.sign([self.wif], chain=self.prefix)
end1 = timer()
tx.verify([PrivateKey(self.wif, prefix=u"STM").pubkey], self.prefix)
end2 = timer()
        return end1 - start, end2 - end1  # (sign duration, verify duration)
class HiveTest(object):
def setup(self):
self.prefix = u"HIVE"
self.default_prefix = u"STM"
self.wif = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"
self.ref_block_num = 34294
self.ref_block_prefix = 3707022213
self.expiration = "2016-04-06T08:29:27"
def doit(self, printWire=False, ops=None):
ops = [hiveOperations.Operation(ops)]
tx = hiveSignedTransaction(ref_block_num=self.ref_block_num,
ref_block_prefix=self.ref_block_prefix,
expiration=self.expiration,
operations=ops)
start = timer()
tx = tx.sign([self.wif], chain=self.prefix)
end1 = timer()
tx.verify([hivePrivateKey(self.wif, prefix=u"STM").pubkey], self.prefix)
end2 = timer()
        return end1 - start, end2 - end1  # (sign duration, verify duration)
if __name__ == "__main__":
steem_test = HiveTest()
bsteem_test = BhiveTest()
steem_test.setup()
bsteem_test.setup()
steem_times = []
bsteem_times = []
loops = 50
for i in range(0, loops):
print(i)
opHive = hiveOperations.Transfer(**{
"from": "foo",
"to": "baar",
"amount": "111.110 HIVE",
"memo": "Fooo"
})
opBhive = operations.Transfer(**{
"from": "foo",
"to": "baar",
"amount": Amount("111.110 HIVE", hive_instance=Hive(offline=True)),
"memo": "Fooo"
})
t_s, t_v = steem_test.doit(ops=opHive)
steem_times.append([t_s, t_v])
t_s, t_v = bsteem_test.doit(ops=opBhive)
bsteem_times.append([t_s, t_v])
steem_dt = [0, 0]
bsteem_dt = [0, 0]
for i in range(0, loops):
steem_dt[0] += steem_times[i][0]
steem_dt[1] += steem_times[i][1]
bsteem_dt[0] += bsteem_times[i][0]
bsteem_dt[1] += bsteem_times[i][1]
print("hive vs bhive:\n")
print("hive: sign: %.2f s, verification %.2f s" % (steem_dt[0] / loops, steem_dt[1] / loops))
print("bhive: sign: %.2f s, verification %.2f s" % (bsteem_dt[0] / loops, bsteem_dt[1] / loops))
print("------------------------------------")
print("bhive is %.2f %% (sign) and %.2f %% (verify) faster than hive" %
(steem_dt[0] / bsteem_dt[0] * 100, steem_dt[1] / bsteem_dt[1] * 100))
| 34.286822 | 101 | 0.628759 | 1,824 | 0.41239 | 0 | 0 | 0 | 0 | 0 | 0 | 509 | 0.11508 |
476f15ab6f480fa347487aaf78b33358b554c7b0 | 2,509 | py | Python | src/main/python/widgets/dialogs/rename_user_dialog.py | ivov/admin-stock | e2e1d53436878b6db68dcb85d0cca31223066ffb | [
"MIT"
]
| 8 | 2019-11-02T22:32:30.000Z | 2021-08-16T08:29:39.000Z | src/main/python/widgets/dialogs/rename_user_dialog.py | ivov/admin-stock | e2e1d53436878b6db68dcb85d0cca31223066ffb | [
"MIT"
]
| null | null | null | src/main/python/widgets/dialogs/rename_user_dialog.py | ivov/admin-stock | e2e1d53436878b6db68dcb85d0cca31223066ffb | [
"MIT"
]
| 3 | 2019-12-10T16:23:49.000Z | 2021-11-01T20:22:16.000Z | from PyQt5 import QtWidgets, QtCore
from utils.styling import rename_user_dialog_title_style
class RenameUserDialog(QtWidgets.QDialog):
def __init__(self, parent=None):
super(RenameUserDialog, self).__init__(parent)
self.setWindowFlags(
QtCore.Qt.Dialog
| QtCore.Qt.CustomizeWindowHint
| QtCore.Qt.WindowCloseButtonHint
)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.setFixedHeight(100)
self.settings = QtCore.QSettings("solutronic", "admin_stock")
self.username = self.settings.value("username")
title = QtWidgets.QLabel("Editar usuario")
title.setAlignment(QtCore.Qt.AlignCenter)
title.setStyleSheet(rename_user_dialog_title_style)
self.name_label = QtWidgets.QLabel("Nombre:")
self.name_label.setAlignment(QtCore.Qt.AlignCenter)
self.name_label.setFixedWidth(45)
self.name_field = QtWidgets.QLineEdit()
self.name_field.setPlaceholderText("Nombre...")
self.name_field.setFixedWidth(115)
horizontal_section = QtWidgets.QHBoxLayout()
horizontal_section.addWidget(self.name_label)
horizontal_section.addWidget(self.name_field)
back_button = QtWidgets.QPushButton("× Cerrar")
back_button.setShortcut("Alt+c")
self.save_button = QtWidgets.QPushButton("≡ Guardar")
self.save_button.setShortcut("Alt+g")
self.save_button.setEnabled(False)
self.save_button.setDefault(True)
bottom_section = QtWidgets.QHBoxLayout()
bottom_section.addWidget(back_button)
bottom_section.addWidget(self.save_button)
layout = QtWidgets.QVBoxLayout()
layout.addWidget(title)
layout.addLayout(horizontal_section)
layout.addLayout(bottom_section)
self.setLayout(layout)
self.name_field.textChanged.connect(self.on_name_field_change)
back_button.clicked.connect(self.close)
self.save_button.clicked.connect(self.save_name_and_update_statusbar)
def on_name_field_change(self):
if self.name_field.text() != "":
self.save_button.setEnabled(True)
elif self.name_field.text() == "":
self.save_button.setEnabled(False)
def save_name_and_update_statusbar(self):
self.settings.setValue("username", self.name_field.text())
main_window = self.parent().parent().parent().parent()
main_window.set_statusbar()
self.close()
| 35.842857 | 77 | 0.687923 | 2,416 | 0.961783 | 0 | 0 | 0 | 0 | 0 | 0 | 123 | 0.048965 |
4771eb8f2f256c1d66ba7f41f070c11396af58fb | 6,053 | py | Python | bluetooth-audio/btspeaker.py | b23prodtm/balena-sound | 1ee886241485a302f88176c7dd880e986cf768c3 | [
"Apache-2.0"
]
| null | null | null | bluetooth-audio/btspeaker.py | b23prodtm/balena-sound | 1ee886241485a302f88176c7dd880e986cf768c3 | [
"Apache-2.0"
]
| 3 | 2019-11-20T17:49:37.000Z | 2020-06-01T23:09:10.000Z | bluetooth-audio/btspeaker.py | b23prodtm/balena-sound | 1ee886241485a302f88176c7dd880e986cf768c3 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/python
import bluetooth, sys, os, re, subprocess, time, getopt
BT_BLE = int(os.getenv('BT_BLE', 0))
BT_SCAN_TIMEOUT = int(os.getenv('BT_SCAN_TIMEOUT', 2))
if BT_BLE:
from gattlib import DiscoveryService
from ble_client import BleClient
def parse_argv (myenv, argv):
usage = 'Command line args: \n'\
' -d,--duration <seconds> Default: {}\n'\
' -s,--uuid <service-name> Default: {}\n'\
' --protocol <proto:port> Default: {}\n'\
' [bt-address] Default: {}\n'\
    ' -h,--help Show help.\n'.format(myenv["BT_SCAN_TIMEOUT"], myenv["service"], myenv["proto-port"], myenv["BTSPEAKER_SINK"])
try:
opts, args = getopt.getopt(argv[1:], "u:d:s:h",["help", "duration=", "uuid=", "protocol="])
except getopt.GetoptError:
print(usage)
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
print(usage)
sys.exit()
if opt in ("-d", "--duration"):
myenv['BT_SCAN_TIMEOUT'] = arg
elif opt in ("-s", "--uuid"):
myenv['service'] = arg
elif opt in ("--protocol"):
myenv['proto-port'] = arg
elif re.compile("([0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}").match(arg):
myenv["BTSPEAKER_SINK"] = arg
else:
      print("Wrong argument %s !" % opt)
print(usage)
def bt_service(addr, proto_port="", serv=""):
for services in bluetooth.find_service(address=addr):
if len(serv) > 0 and (services["name"] is serv or services["service-id"] is serv):
return bt_connect(services["protocol"], addr, services["port"])
else:
print(" UUID: %s (%s)" % (services["name"], services["service-id"]))
print(" Protocol: %s, %s, %s" % (services["protocol"], addr, services["port"]))
if proto_port != "" and re.compile("[^:]+:[0-9]+").match(proto_port):
s = proto_port.find(":")
proto = proto_port[0:s]
port = proto_port[s+1:]
return bt_connect(proto, addr, port)
def bt_connect(proto, addr, port):
timeout = 0
while timeout < 5:
try:
print(" Attempting %s connection to %s (%s)" % (proto, addr, port))
s = bluetooth.BluetoothSocket(int(proto))
s.connect((addr,int(port)))
print("Success")
return s
except bluetooth.btcommon.BluetoothError as err:
print("%s\n" % (err))
print(" Fail, probably timeout. Attempting reconnection... (%s)" % (timeout))
timeout += 1
time.sleep(1)
print(" Service or Device not found")
return None
#------------------------------------------------------------------------------
# Connects to Audio Service (Audio Sink, Audio Source, more in bluetoothctl <<EOF
# info <address>
# EOF
# raise bluetooth.btcommon.BluetoothError
def bt_connect_service(nearby_devices, bt_addr="00:00:00:00:00:00", proto_port="", serv=""):
sock = None
for addr, name in nearby_devices:
if bt_addr == "00:00:00:00:00:00":
print(" - %s , %s:" % (addr, name))
sock = bt_service(addr, proto_port, serv)
if sock:
sock.close()
elif bt_addr == addr:
print(" - found device %s , %s:" % (addr, name))
sock = bt_service(addr, proto_port, serv)
break
else:
continue
if sock:
print(" - service %s available" % (serv))
else:
print(" - service %s unavailable at %s" % (serv, bt_addr))
return sock
#------------------------------------------------------------------------------
# Devices discovery with bluetooth low energy (BT_BLE) support
# return devices list in argument (list append)
def discover_devices(nearby_devices = []):
timeout = BT_SCAN_TIMEOUT
print("looking for nearby devices...")
try:
nearby_devices += bluetooth.discover_devices(lookup_names = True, flush_cache = True, duration = timeout)
print("found %d devices" % len(nearby_devices))
if BT_BLE:
service = DiscoveryService()
try:
devices = service.discover(timeout)
for addr, name in devices.items():
if not name or name is "":
b = BleClient(addr)
name = b.request_data().decode('utf-8')
b.disconnect()
nearby_devices += ((addr, name))
except RuntimeError as err:
print("~ BLE ~ Error ", err)
else:
print("found %d devices (ble)" % len(devices.items()))
return nearby_devices
except bluetooth.btcommon.BluetoothError as err:
print(" Main thread error : %s" % (err))
exit(1)
def main(argv):
myenv = dict()
main.defaults = dict()
main.defaults = {
"file":argv[0],
"BT_SCAN_TIMEOUT":"5",
"service":"Audio Sink",
"BTSPEAKER_SINK":"00:00:00:00:00:00",
"proto-port": str(bluetooth.L2CAP) + ":25"
}
myenv.update(main.defaults)
myenv.update(os.environ)
parse_argv(myenv, argv)
print("looking for nearby devices...")
try:
nearby_devices = discover_devices()
print("found %d devices" % len(nearby_devices))
print("discovering %s services... %s" % (myenv["BTSPEAKER_SINK"], myenv["service"]))
sock = bt_connect_service(nearby_devices, myenv["BTSPEAKER_SINK"], myenv["proto-port"], myenv["service"])
if sock:
# pair the new device as known device
print("bluetooth pairing...")
ps = subprocess.Popen("printf \"pair %s\\nexit\\n\" \"$1\" | bluetoothctl", shell=True, stdout=subprocess.PIPE)
print(ps.stdout.read())
ps.stdout.close()
ps.wait()
sock.close()
except bluetooth.btcommon.BluetoothError as err:
print(" Main thread error : %s" % (err))
exit(1)
if __name__ == '__main__':
main(sys.argv)
| 38.801282 | 123 | 0.544358 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,885 | 0.311416 |
477431852e85302c53a80c1bc87e296848214e08 | 267 | py | Python | scripts/turnOnOffGPIO.py | yomboprime/RaspiGarage | c6bd7b92066ec087cc0d59afe6664e8ae5cb46ed | [
"MIT"
]
| null | null | null | scripts/turnOnOffGPIO.py | yomboprime/RaspiGarage | c6bd7b92066ec087cc0d59afe6664e8ae5cb46ed | [
"MIT"
]
| null | null | null | scripts/turnOnOffGPIO.py | yomboprime/RaspiGarage | c6bd7b92066ec087cc0d59afe6664e8ae5cb46ed | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
import os
import sys
if not os.getegid() == 0:
sys.exit( 'Script must be run as root' )
from pyA20.gpio import gpio
from pyA20.gpio import port
pin = port.PA12
gpio.init()
gpio.setcfg(pin, gpio.OUTPUT)
gpio.output(pin, int(sys.argv[1])) | 16.6875 | 44 | 0.700375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 0.183521 |
4774eea99e7f6cd1515737dfa6e6653b0d95171b | 4,369 | py | Python | bedpe_longrange/bedpe2longrange.py | ChenfuShi/tools_for_HiChIP | 0faa8b26a7c53922dd2de977d7df442dd2caeed7 | [
"BSD-3-Clause"
]
| 5 | 2019-05-09T19:31:26.000Z | 2021-12-06T02:57:48.000Z | bedpe_longrange/bedpe2longrange.py | ChenfuShi/tools_for_HiChIP | 0faa8b26a7c53922dd2de977d7df442dd2caeed7 | [
"BSD-3-Clause"
]
| null | null | null | bedpe_longrange/bedpe2longrange.py | ChenfuShi/tools_for_HiChIP | 0faa8b26a7c53922dd2de977d7df442dd2caeed7 | [
"BSD-3-Clause"
]
| 4 | 2019-06-01T11:30:41.000Z | 2022-03-11T02:01:52.000Z | #########################################
# Author: Chenfu Shi
# Email: [email protected]
# BSD-3-Clause License
# Copyright 2019 Chenfu Shi
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#########################################
# converts bedpe to longrange format, writing each interaction twice (once anchored on each end).
# lets the user choose which field to keep as the score and optionally apply -log10, e.g. for p-values or q-values
import argparse
import subprocess
import math
import os
parser = argparse.ArgumentParser(description='Tool to convert bedpe files to long_range format. Uses bgzip and tabix to compress and index the file')
parser.add_argument("-i",'--input', dest='inputfile', action='store', required=True,
help='input file name')
parser.add_argument("-o",'--output', dest='outputfile', action='store', required=False,
help='ouput file name. Will add .gz automatically')
parser.add_argument("-f",'--field', dest='field', action='store', type=int, default=8, required=False,
help='field to store as score. Default 8th field. For MAPS use 9 for FDR')
parser.add_argument('-l', '--log' ,action='store_true', dest='log', help='do -log10 of score')
args = parser.parse_args()
if args.outputfile:
outputname=args.outputfile
else:
outputname=args.inputfile + ".washu.bed"
inputname=args.inputfile
if not os.path.isfile(inputname):
raise Exception("input file couldn't be opened")
ID_counter = 1
with open(outputname, "w") as outputfile, open(args.inputfile , "r") as inputfile:
for line in inputfile:
data = line.split("\t")
chr1 = data[0].strip()
if not data[1].strip().isdigit():
# check that the line contains data instead of header
continue
start1 = data[1].strip()
end1 = data[2].strip()
chr2 = data[3].strip()
start2 = data[4].strip()
end2 = data[5].strip()
score = data[args.field-1].strip()
        # if the chromosome name lacks the "chr" prefix, add it for WashU browser compatibility
if chr1[0:3] != "chr":
chr1 = "chr" + chr1
chr2 = "chr" + chr2
if args.log == True:
try:
score = str(-math.log10(float(score)))
except ValueError:
# in case the score is zero
score = 384
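        # write the interaction twice, once anchored on each end, as the longrange format expects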
outputfile.write("{}\t{}\t{}\t{}:{}-{},{}\t{}\t{}\n".format(chr1,start1,end1,chr2,start2,end2,score,str(ID_counter),"."))
ID_counter = ID_counter + 1
outputfile.write("{}\t{}\t{}\t{}:{}-{},{}\t{}\t{}\n".format(chr2,start2,end2,chr1,start1,end1,score,str(ID_counter),"."))
ID_counter = ID_counter + 1
# automatically sort, compress and index the output file
subprocess.run(["sort","-o",outputname,"-k1,1","-k2,2n",outputname])
subprocess.run(["bgzip",outputname])
subprocess.run(["tabix","-p","bed",outputname+".gz"]) | 48.010989 | 757 | 0.676127 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,597 | 0.594415 |
4775e3e337b36b06c6774e19748eb643d6196b5f | 11,501 | py | Python | 0_get_Fink_features_xmatch.py | anaismoller/KNTrap | 9e1bc85576ab16c4cb6d4d4da74482061029d207 | [
"Apache-2.0"
]
| null | null | null | 0_get_Fink_features_xmatch.py | anaismoller/KNTrap | 9e1bc85576ab16c4cb6d4d4da74482061029d207 | [
"Apache-2.0"
]
| null | null | null | 0_get_Fink_features_xmatch.py | anaismoller/KNTrap | 9e1bc85576ab16c4cb6d4d4da74482061029d207 | [
"Apache-2.0"
]
| null | null | null | # Year 2022
# Authors: Anais Möller based on fink-broker.org code
import os
import sys
import glob
import logging
import argparse
import numpy as np
import pandas as pd
from tqdm import tqdm
from pathlib import Path
from functools import partial
from astropy.table import Table
from astropy import units as u
from astropy.coordinates import SkyCoord
import multiprocessing
from concurrent.futures import ProcessPoolExecutor
# my utils
from utils import xmatch
from utils import mag_color
from utils import query_photoz_datalab as photoz
def setup_logging(logpathname):
logger = None
# Create logger using python logging module
logging_handler_out = logging.StreamHandler(sys.stdout)
logging_handler_out.setLevel(logging.DEBUG)
logging_handler_err = logging.StreamHandler(sys.stderr)
logging_handler_err.setLevel(logging.WARNING)
logger = logging.getLogger("localLogger")
logger.setLevel(logging.INFO)
logger.addHandler(logging_handler_out)
logger.addHandler(logging_handler_err)
# create file handler which logs even debug messages
fh = logging.FileHandler(f"{logpathname}", mode="w")
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
return logger
def process_fn(inputs):
fn, fil = inputs
return fn(fil)
def read_file(fname, suffix=None):
try:
df_tmp = Table.read(fname, format="ascii").to_pandas()
if "unforced" in suffix:
df = pd.read_table(fname, header=None, skiprows=1, delim_whitespace=True)
if len(df.columns) == 16:
df.columns = [
"MJD",
"dateobs",
"photcode",
"filt",
"flux_c",
"dflux_c",
"type",
"chisqr",
"ZPTMAG_c",
"m",
"dm",
"ra",
"dec",
"cmpfile",
"tmpl",
"ROBOT_score",
]
else:
df.columns = [
"MJD",
"dateobs",
"photcode",
"filt",
"flux_c",
"dflux_c",
"type",
"chisqr",
"ZPTMAG_c",
"m",
"dm",
"ra",
"dec",
"cmpfile",
"tmpl",
]
df["ROBOT_score"] = np.nan
df_tmp = df.copy()
return df_tmp
except Exception:
print("File corrupted or empty", fname)
df_tmp = pd.DataFrame()
return df_tmp
def process_single_file(fname, suffix=".forced.difflc"):
# read file and convert to pandas
df_tmp = read_file(fname, suffix=suffix)
# process data if available
if len(df_tmp) > 0 and set(["ra", "dec"]).issubset(df_tmp.keys()):
# get id
idx = Path(fname).stem.replace(suffix, "")
# get ra,dec, idx for xmatch
ra_tmp, dec_tmp = df_tmp["ra"][0], df_tmp["dec"][0]
# convert to degrees
coo = SkyCoord(ra_tmp, dec_tmp, unit=(u.hourangle, u.deg))
out_ra = coo.ra.degree
out_dec = coo.dec.degree
# get color, dmag and rate
(
dmag_i,
dmag_g,
dmag_rate_i,
dmag_rate_g,
color,
color_avg,
max_mag_i,
max_mag_g,
min_mag_i,
min_mag_g,
mean_mag_i,
mean_mag_g,
std_mag_i,
std_mag_g,
df_tmp,
) = mag_color.last_color_rate(df_tmp)
# other features
ndet = len(df_tmp)
tmp_mag = df_tmp["magnitude"].values
# clean
del df_tmp
df_out = pd.DataFrame()
df_out["id"] = [idx]
df_out["ra"] = [out_ra]
df_out["dec"] = [out_dec]
df_out["max_mag_i"] = [max_mag_i]
df_out["max_mag_g"] = [max_mag_g]
df_out["min_mag_i"] = [min_mag_i]
df_out["min_mag_g"] = [min_mag_g]
df_out["mean_mag_i"] = [mean_mag_i]
df_out["mean_mag_g"] = [mean_mag_g]
df_out["std_mag_i"] = [std_mag_i]
df_out["std_mag_g"] = [std_mag_g]
df_out["dmag_i"] = [dmag_i]
df_out["dmag_g"] = [dmag_g]
df_out["dmag_rate_i"] = [dmag_rate_i]
df_out["dmag_rate_g"] = [dmag_rate_g]
df_out["color"] = [color]
df_out["color_avg"] = [color_avg]
df_out["ndet"] = [ndet]
df_out["two_mags_gt_225"] = [len(np.where(tmp_mag < 22.5)[0]) >= 2]
df_out["two_mags_gt_235"] = [len(np.where(tmp_mag < 23.5)[0]) >= 2]
if "unforced" in suffix:
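            # tag these columns as unforced photometry so they can later be merged with the forced results on "id"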
df_out = df_out.add_suffix("_unforced")
df_out = df_out.rename(columns={"id_unforced": "id"})
else:
df_out = pd.DataFrame()
return df_out
if __name__ == "__main__":
"""Process light-curves with Fink inspired features & xmatches
https://github.com/astrolabsoftware/fink-filters
"""
parser = argparse.ArgumentParser(description="Compute candidate features + xmatch")
parser.add_argument(
"--path_field",
type=str,
default="data/S82sub8_tmpl",
help="Path to field",
)
parser.add_argument(
"--path_out",
type=str,
default="./Fink_outputs",
help="Path to outputs",
)
parser.add_argument(
"--path_robot",
type=str,
default="../ROBOT_masterlists",
help="Path to ROBOT outputs",
)
parser.add_argument(
"--debug",
action="store_true",
help="Debug: loop processing (slow)",
)
parser.add_argument(
"--test",
action="store_true",
help="one file processed only",
)
args = parser.parse_args()
os.makedirs(args.path_out, exist_ok=True)
os.makedirs("logs/", exist_ok=True)
cwd = os.getcwd()
logpathname = f"{cwd}/logs/{Path(args.path_field).stem}_preprocess"
logger = setup_logging(logpathname)
# read files
list_files = glob.glob(f"{args.path_field}/*/*/*.forced.difflc.txt")
print(f"{len(list_files)} files found in {args.path_field}")
if args.test:
print(list_files)
print("Processing only one file", list_files[0])
df = process_single_file(list_files[0])
elif args.debug:
print(list_files)
# no parallel
list_proc = []
for fil in list_files:
logger.info(fil)
list_proc.append(process_single_file(fil))
df = pd.concat(list_proc)
else:
# Read and process files faster with ProcessPoolExecutor
max_workers = multiprocessing.cpu_count()
# use parallelization to speed up processing
# Split list files in chunks of size 10 or less
# to get a progress bar and alleviate memory constraints
num_elem = len(list_files)
num_chunks = num_elem // 10 + 1
list_chunks = np.array_split(np.arange(num_elem), num_chunks)
logger.info(f"Dividing processing in {num_chunks} chunks")
process_fn_file = partial(process_single_file)
list_fn = []
for fmt in list_files:
list_fn.append(process_fn_file)
list_processed = []
for chunk_idx in tqdm(list_chunks, desc="Process", ncols=100):
# Process each file in the chunk in parallel
with ProcessPoolExecutor(max_workers=max_workers) as executor:
start, end = chunk_idx[0], chunk_idx[-1] + 1
# Need to cast to list because executor returns an iterator
list_pairs = list(zip(list_fn[start:end], list_files[start:end]))
list_processed += list(executor.map(process_fn, list_pairs))
df = pd.concat(list_processed)
print("NOT PARALLEL= UNFORCED PHOTOMETRY")
list_files_un = glob.glob(f"{args.path_field}/*/*/*.unforced.difflc.txt")
list_unforced = []
list_idx = []
if args.test:
list_files_un = [list_files_un[0]]
for fil in list_files_un:
list_unforced.append(process_single_file(fil, suffix=".unforced.difflc"))
df_unforced = pd.concat(list_unforced)
if len(df_unforced) > 0:
df = pd.merge(df, df_unforced, on="id", how="left")
logger.info("SIMBAD xmatch")
z, sptype, typ, ctlg = xmatch.cross_match_simbad(
df["id"].to_list(), df["ra"].to_list(), df["dec"].to_list()
)
logger.info("Finished SIMBAD xmatch")
# save in df
df["simbad_type"] = typ
df["simbad_ctlg"] = ctlg
df["simbad_sptype"] = sptype
df["simbad_redshift"] = z
logger.info("GAIA xmatch")
source, ragaia, decgaia, plx, plxerr, gmag, angdist = xmatch.cross_match_gaia(
df["id"].to_list(),
df["ra"].to_list(),
df["dec"].to_list(),
ctlg="vizier:I/345/gaia2",
)
(
source_edr3,
ragaia_edr3,
decgaia_edr3,
plx_edr3,
plxerr_edr3,
gmag_edr3,
angdist_edr3,
) = xmatch.cross_match_gaia(
df["id"].to_list(),
df["ra"].to_list(),
df["dec"].to_list(),
ctlg="vizier:I/350/gaiaedr3",
)
logger.info("Finished GAIA xmatch")
# save in df
df["gaia_DR2_source"] = source
df["gaia_DR2_ra"] = ragaia
df["gaia_DR2_dec"] = decgaia
df["gaia_DR2_parallax"] = plx
df["gaia_DR2_parallaxerr"] = plxerr
df["gaia_DR2_gmag"] = gmag
df["gaia_DR2_angdist"] = angdist
df["gaia_eDR3_source"] = source_edr3
df["gaia_eDR3_ra"] = ragaia_edr3
df["gaia_eDR3_dec"] = decgaia_edr3
df["gaia_eDR3_parallax"] = plx_edr3
df["gaia_eDR3_parallaxerr"] = plxerr_edr3
df["gaia_eDR3_gmag"] = gmag_edr3
df["gaia_eDR3_angdist"] = angdist_edr3
logger.info("USNO-A.20 xmatch")
(source_usno, angdist_usno,) = xmatch.cross_match_usno(
df["id"].to_list(),
df["ra"].to_list(),
df["dec"].to_list(),
ctlg="vizier:I/252/out",
)
df["USNO_source"] = source_usno
df["USNO_angdist"] = angdist_usno
logger.info("Legacy Survey xmatch")
list_ls_df = []
for (idx, ra, dec) in df[["id", "ra", "dec"]].values:
list_ls_df.append(photoz.query_coords_ls(idx, ra, dec, radius_arcsec=10))
df_ls = pd.concat(list_ls_df)
logger.info("Finished Legacy Survey xmatch")
df = pd.merge(df, df_ls, on="id")
# add ROBOT scores
# You may need to add the field caldate format as Simon's output
# TO DO these next lines should give you that
field = Path(args.path_field).stem.replace("_tmpl", "")
caldate = Path(args.path_field).parent.parent.stem
# TO DO just change the name here
robot_path = f"{args.path_robot}/caldat{caldate}/{field}_{caldate}_masterlist.csv"
if Path(robot_path).exists():
df_robot = pd.read_csv(
robot_path,
delimiter=";",
)
df_robot = df_robot.rename(columns={"Cand_ID": "id"})
df = pd.merge(df, df_robot, on="id", how="left")
else:
print(f"NO ROBOT MASTERLIST FOUND {robot_path}")
outprefix = str(Path(args.path_field).stem)
# outname = f"{args.path_out}/{outprefix}.csv"
# df.to_csv(outname, index=False, sep=";")
outname = f"{args.path_out}/{outprefix}.pickle"
df.to_pickle(outname)
logger.info(f"Saved output {outname}")
| 31.423497 | 87 | 0.578558 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,105 | 0.269953 |
4777fbaa35309f630f4ae07e90c983a041f15298 | 1,990 | py | Python | 2017/day11/src/main.py | stenbein/AdventOfCode | 3e8c24f7140dd9cdc687e176272af6a1302a9ca5 | [
"MIT"
]
| 3 | 2018-04-08T10:40:52.000Z | 2018-12-06T02:37:23.000Z | 2017/day11/main.py | stenbein/AdventOfCode | 3e8c24f7140dd9cdc687e176272af6a1302a9ca5 | [
"MIT"
]
| 2 | 2018-04-10T11:44:18.000Z | 2022-02-22T21:25:54.000Z | 2017/day11/src/main.py | stenbein/AdventOfCode | 3e8c24f7140dd9cdc687e176272af6a1302a9ca5 | [
"MIT"
]
| null | null | null | #!/usr/bin/python3
'''Day 11 of the 2017 advent of code'''
class HexCounter():
'''A hex maze walker object for
keeping track of our position'''
def __init__(self):
self.x = 0
self.y = 0
self.z = 0
self.furthest = 0
def move(self, direction):
'''map the direction to a state change'''
if direction == "n":
self.y += 1
self.x -= 1
elif direction == "s":
self.y -= 1
self.x += 1
elif direction == "ne":
self.z += 1
self.x -= 1
elif direction == "nw":
self.z -= 1
self.y += 1
elif direction == "se":
self.z += 1
self.y -= 1
elif direction == "sw":
self.z -= 1
self.x += 1
else:
raise ValueError("Undefined direction: ", direction)
temp = self.max()
if temp > self.furthest:
self.furthest = temp
def max(self):
'''accounting for negative distance along the grid'''
total = 0
maxx = abs(self.x)
maxy = abs(self.y)
maxz = abs(self.z)
total = abs(max(maxx, maxy, maxz))
return total
def part_one(data):
"""Return the answer to part one of this day"""
hexer = HexCounter()
for coord in data:
hexer.move(coord)
return hexer.max()
def part_two(data):
"""Return the answer to part two of this day"""
hexer = HexCounter()
for coord in data:
hexer.move(coord)
return hexer.furthest
if __name__ == "__main__":
DATA = ""
with open("input", "r") as f:
for line in f:
DATA += line.rstrip() #hidden newline in file input
COORDS = DATA.split(",")
print("Part 1: {}".format(part_one(COORDS)))
print("Part 2: {}".format(part_two(COORDS)))
| 20.515464 | 65 | 0.477387 | 1,243 | 0.624623 | 0 | 0 | 0 | 0 | 0 | 0 | 439 | 0.220603 |
477aed8ccac48ab95361c65ae3e38de923016db9 | 8,291 | py | Python | kai_reduce.py | skterry/jackknifeKAI | 8c85b32acd9166d87e7f5dc78943f2ad38f19809 | [
"MIT"
]
| null | null | null | kai_reduce.py | skterry/jackknifeKAI | 8c85b32acd9166d87e7f5dc78943f2ad38f19809 | [
"MIT"
]
| null | null | null | kai_reduce.py | skterry/jackknifeKAI | 8c85b32acd9166d87e7f5dc78943f2ad38f19809 | [
"MIT"
]
| null | null | null | # Copied from /u/jlu/data/microlens/20aug22os/reduce/reduce.py
##################################################
#
# General Notes:
# -- python uses spaces to figure out the beginnings
# and ends of functions/loops/etc. So make sure
# to preserve spacings properly (indent). This
# is easy to do if you use emacs with python mode
# and color coding.
# -- You will probably need to edit almost every
# single line of the go() function.
# -- If you need help on the individual function calls,
# then in the pyraf prompt, import the module and
# then print the documentation for that function:
# --> print nirc2.nirc2log.__doc__
# --> print range.__doc__
#
##################################################
# Import python and iraf modules
from pyraf import iraf as ir
import numpy as np
import os, sys
import glob
# Import our own custom modules
from kai.reduce import calib
from kai.reduce import sky
from kai.reduce import data
from kai.reduce import util
from kai.reduce import dar
from kai.reduce import kai_util
from kai import instruments
##########
# Change the epoch, instrument, and distortion solution.
##########
epoch = '19may27'
nirc2 = instruments.NIRC2()
##########
# Make electronic logs
# - run this first thing for a new observing run.
##########
def makelog_and_prep_images():
"""Make an electronic log from all the files in the ../raw/ directory.
The file will be called nirc2.log and stored in the same directory.
@author Jessica Lu
@author Sylvana Yelda
"""
    kai_util.makelog('../raw', instrument=nirc2)
# If you are reducing OSIRIS, you need to flip the images first.
#raw_files = glob.glob('../raw/i*.fits')
#osiris.flip_images(raw_files)
# Download weather data we will need.
dar.get_atm_conditions('2019')
return
###############
# Analyze darks
###############
# def analyze_darks():
# """Analyze the dark_calib results
# """
# util.mkdir('calib')
# os.chdir('calib')
#
# first_dark = 16
# calib.analyzeDarkCalib(first_dark) # Doesn't support OSIRIS yet
#
# os.chdir('../')
##########
# Reduce
##########
def go_calib():
"""Do the calibration reduction.
@author Jessica Lu
@author Sylvana Yelda
"""
####################
#
# Calibration files:
# everything created under calib/
#
####################
# Darks - created in subdir darks/
# - darks needed to make bad pixel mask
# - store the resulting dark in the file name that indicates the
# integration time (2.8s) and the coadds (10ca).
# -- If you use the OSIRIS image, you must include the full filename in the list.
#darkFiles = ['i200809_a003{0:03d}_flip'.format(ii) for ii in range(3, 7+1)]
#calib.makedark(darkFiles, 'dark_2.950s_10ca_3rd.fits', instrument=osiris)
# darkFiles = ['i200822_s003{0:03d}_flip'.format(ii) for ii in range(28, 32+1)]
# calib.makedark(darkFiles, 'dark_5.901s_1ca_4rd.fits', instrument=osiris)
# darkFiles = ['i200822_s020{0:03d}_flip'.format(ii) for ii in range(2, 10+1)]
# calib.makedark(darkFiles, 'dark_11.802s_4ca_4rd.fits', instrument=osiris)
# darkFiles = ['i200822_s021{0:03d}_flip'.format(ii) for ii in range(2, 10+1)]
# calib.makedark(darkFiles, 'dark_5.901s_8ca_1rd.fits', instrument=osiris)
# Flats - created in subdir flats/
#offFiles = ['i200809_a013{0:03d}_flip'.format(ii) for ii in range(2, 11+1, 2)]
#onFiles = ['i200811_a002{0:03d}_flip'.format(ii) for ii in range(2, 13+1, 2)]
#calib.makeflat(onFiles, offFiles, 'flat_kp_tdOpen.fits', instrument=osiris)
# Masks (assumes files were created under calib/darks/ and calib/flats/)
#calib.makemask('dark_2.950s_10ca_3rd.fits', 'flat_kp_tdOpen.fits',
# 'supermask.fits', instrument=osiris)
darkFiles = list(range(67, 72+1))
calib.makedark(darkFiles, 'dark_30.0s_1ca.fits', instrument=nirc2)
# Flats - created in subdir flats/
offFiles = list(range(11, 16+1))
    onFiles = list(range(1, 6+1))
calib.makeflat(onFiles, offFiles, 'flat_ks.fits', instrument=nirc2)
# Masks
calib.makemask('dark_30.0s_1ca.fits', 'flat_ks.fits',
'supermask.fits')
def go():
"""
Do the full data reduction.
"""
##########
#
# OB06284
#
##########
##########
# Kp-band reduction
##########
target = 'OB06284'
#-----OSIRIS------
#sci_files = ['i200810_a004{0:03d}_flip'.format(ii) for ii in range(2, 6+1)]
#sci_files += ['i200822_a012{0:03d}_flip'.format(ii) for ii in range(2, 25+1)] #Add second dataset (on same night). [Optional]
#sky_files = ['i200810_a007{0:03d}_flip'.format(ii) for ii in range(2, 6+1)] #16+1
#refSrc = [1071, 854] # This is the target
#sky.makesky(sky_files, target, 'kp_tdOpen', instrument=osiris)
#data.clean(sci_files, target, 'kp_tdOpen', refSrc, refSrc, field=target, instrument=osiris)
#data.calcStrehl(sci_files, 'kp_tdOpen', field=target, instrument=osiris)
#data.combine(sci_files, 'kp_tdOpen', epoch, field=target,
# trim=0, weight='strehl', submaps=3, instrument=osiris)
#-----------------
#-----NIRC2-------
sci_files = list(range(133, 136+1))
sky_files = list(range(224, 233+1))
refSrc1 = [353., 469.] #This is the target
sky.makesky(sky_files, 'nite1', 'ks', instrument=nirc2)
data.clean(sci_files, 'nite1', 'ks', refSrc1, refSrc1, instrument=nirc2)
data.calcStrehl(sci_files, 'ks', instrument=nirc2)
data.combine(sci_files, 'ks', '27maylgs', trim=1, weight='strehl',
submaps=3, instrument=nirc2)
#-----------------
os.chdir('../')
##########
#
# KB200101
#
##########
##########
# Kp-band reduction
##########
# util.mkdir('kp')
# os.chdir('kp')
# -- If you have more than one position angle, make sure to
# clean them seperatly.
# -- Strehl and Ref src should be the pixel coordinates of a bright
# (but non saturated) source in the first exposure of sci_files.
# -- If you use the OSIRIS image, you must include the full filename in the list.
# target = 'OB060284'
# sci_files = ['i200822_a014{0:03d}_flip'.format(ii) for ii in range(2, 28+1)]
# sci_files += ['i200822_a015{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]
# sci_files += ['i200822_a016{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]
# sky_files = ['i200822_a017{0:03d}_flip'.format(ii) for ii in range(2, 6+1)]
# refSrc = [975, 1006] # This is the target
# Alternative star to try (bright star to right of target): [1158, 994]
# sky.makesky(sky_files, target, 'kp_tdOpen', instrument=osiris)
# data.clean(sci_files, target, 'kp_tdOpen', refSrc, refSrc, field=target, instrument=osiris)
# data.calcStrehl(sci_files, 'kp_tdOpen', field=target, instrument=osiris)
# data.combine(sci_files, 'kp_tdOpen', epoch, field=target,
# trim=1, weight='strehl', submaps=3, instrument=osiris)
#
def jackknife():
"""
Do the Jackknife data reduction.
"""
##########
#
# OB06284
#
##########
##########
# Kp-band reduction
##########
target = 'OB06284'
#sci_files = ['i200810_a004{0:03d}_flip'.format(ii) for ii in range(2, 26+1)] OG
sci_files = ['i200810_a004{0:03d}_flip'.format(ii) for ii in range(2, 26+1)]
# sci_files += ['i200822_a012{0:03d}_flip'.format(ii) for ii in range(2, 25+1)]
sky_files = ['i200810_a007{0:03d}_flip'.format(ii) for ii in range(2, 6+1)] #16+1
refSrc = [1071, 854] # This is the target
# Alternative star to try (bright star to bottom of target): [1015, 581.9]
sky.makesky(sky_files, target, 'kp_tdOpen', instrument=osiris)
for i in enumerate(sci_files, start=1):
jack_list = sci_files[:]
jack_list.remove(i[1])
data.clean(jack_list, target, 'kp_tdOpen', refSrc, refSrc, field=target, instrument=osiris)
data.calcStrehl(jack_list, 'kp_tdOpen', field=target, instrument=osiris)
data.combine(jack_list, 'kp_tdOpen', epoch, field=target,
trim=0, weight='strehl', instrument=osiris, outSuffix=str(i[0]))
os.chdir('reduce')
| 34.119342 | 130 | 0.621517 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,039 | 0.72838 |
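# --- Illustrative sketch (not part of the original reduction script) ---
# The jackknife() loop above does a leave-one-out reduction: each pass copies
# the science-frame list, drops a single frame, and combines the rest. The
# frame names below are made up; only the list construction is demonstrated.
example_sci_files = ['frame_a', 'frame_b', 'frame_c']
for idx, dropped in enumerate(example_sci_files, start=1):
    jack_list = list(example_sci_files)
    jack_list.remove(dropped)
    print(idx, 'drops', dropped, '->', jack_list)
# 1 drops frame_a -> ['frame_b', 'frame_c']
# 2 drops frame_b -> ['frame_a', 'frame_c']
# 3 drops frame_c -> ['frame_a', 'frame_b']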
477d41ca1c89e9e563388b140b23288438ed0562 | 7,613 | py | Python | Clients/pyClient/GUI.py | JulianWww/AlphaZero | 8eb754659793305eba7b9e636eeab37d9ccd45f7 | [
"MIT"
]
| 1 | 2021-12-05T13:26:17.000Z | 2021-12-05T13:26:17.000Z | Clients/pyClient/GUI.py | JulianWww/AlphaZero | 8eb754659793305eba7b9e636eeab37d9ccd45f7 | [
"MIT"
]
| null | null | null | Clients/pyClient/GUI.py | JulianWww/AlphaZero | 8eb754659793305eba7b9e636eeab37d9ccd45f7 | [
"MIT"
]
| null | null | null | import tkinter as tk
from PIL import Image, ImageTk
from game import Game
from threading import Thread
import time
from gameSaver import sendFull
from Client import DummyAgent
class ConsoleAgent:
"""Agent running in the console for testing only"""
def render(self, state):
"render the state to the console"
state.consoleRender()
def getAction(self, state):
"get the Action the player wants to perform function will be called until valid output is found"
return state.actionModifier(int(input("your Action: ")))
def winScreen(self, state):
"dummy for now"
pass
class GUI(tk.Tk, DummyAgent):
"""
render game to GUI using Tkinter and canvas
"""
colorMap = {
1: "gold",
-1:"red",
0: "white"
}
yPadRel = 0.1
_canvasy = 450
_canvasx = 500
_dotSize = 0.45
_lastState = None
_win = 0
_winLinesRendered = False
winLines_kwargs = {
"fill": "#00FF00",
"width": 10
}
def __init__(self, state, game, replayer):
super(GUI, self).__init__()
self.replayer = replayer
self.title("Connect4 AlphaZero Client")
self.geometry("500x500")
self.bind("<Configure> ", self._resize)
self.yPad = 60
self.action = -1
self.canvas = tk.Canvas(self, height=self._canvasy, width=self._canvasx, bg="#FFFFFF")
self.canvas.bind("<Button-1>", self._writeAction)
self.canvas.place(x=0, y=self.yPad)
self.playerLabel = tk.Label(self, text="testText",font=("Arial", self.yPad//2))
self.playerLabel.place(x=0, y=0)
self._drawBoard()
self._drawStones(state)
self.game = game
def _resize(self, event):
"""callback for resizing of the window"""
if event.widget == self:
self.yPad = int(self.yPadRel * event.width)
self.canvas.place(x=0, y=self.yPad)
self.playerLabel.config(font=("Arial", self.yPad//2))
self._canvasy = event.height - self.yPad
self._canvasx = event.width
self.canvas.config(height=self._canvasy, width=self._canvasx)
self.render(self._lastState)
def _getDxDy(self):
"get the dx and dy neded internaly to compute field and stone sizes"
return self._canvasx / 8, self._canvasy / 7
def render(self, state):
"render the state"
self._drawBoard()
if state is not None:
self._lastState = state
self._drawStones(state.board)
if state.player == 1:
self.playerLabel.config(text = "Yellow's Turn", fg="#808080")
else:
self.playerLabel.config(text = "Red's Turn", fg="#808080")
self.renderWinLines(state)
if self._lastState is not None:
if self._lastState.isDone:
self._renderEndMsg()
def _drawBoard(self):
"render 7x6 board using lines"
self.canvas.delete("all")
dx, dy = self._getDxDy()
ofset = 0.5
for x in range(8):
self.canvas.create_line(dx*(x+ofset), dy*ofset, dx*(x+ofset), self._canvasy - dy*ofset)
for y in range(7):
self.canvas.create_line(dx*ofset, dy*(y+ofset), self._canvasx - dx*ofset, dy*(y+ofset))
def _drawStones(self, state):
"place stones in board"
dx, dy = self._getDxDy()
for x in range(1, 8):
for y in range(1, 7):
if state[Game.encodeAction(x-1, y-1)] != 0:
Xpos = dx * x
Ypos = dy * y
Ysize= self._dotSize * dy
Xsize= self._dotSize * dx
color = self.colorMap[state[Game.encodeAction(x-1, y-1)]]
self.canvas.create_oval(
Xpos - Xsize, Ypos-Ysize,
Xpos+Xsize, Ypos+Ysize,
fill=color, width=0
)
def _renderEndMsg(self):
"render the message at the end of the game"
args = (self._canvasx//2, self._canvasy//2)
fontSize = min(self._canvasx//10, self._canvasy//2)
kwargs = {
"font": f"Times {fontSize} bold",
"anchor": "c",
}
if self.replayer is None:
if self._win == 1:
txt = self.canvas.create_text(*args, **kwargs, fill="green",
text="You Win")
sendFull(self.game.actions, -1)
elif self._win == -1:
txt = self.canvas.create_text(*args, **kwargs, fill="black", text="You Lose")
sendFull(self.game.actions, 1)
elif self._win == 0:
txt = self.canvas.create_text(*args, **kwargs, fill="black", text="Tie")
sendFull(self.game.actions, 0)
def _writeAction(self, event):
"""
callback from canvas mouse left click.
Converts the click position to a grid position and then to an action, which is saved.
"""
dx, dy = self._getDxDy()
XPos = (event.x - dx * 0.5) // dx
YPos = (event.y - dy * 0.5) // dy
self.action = int(XPos + 7*YPos)
def getAction(self, state):
"""Make playerLable black and wait for an action to be written."""
self.playerLabel.config(fg="#000000")
self.action = -1
while self.action == -1:
time.sleep(0.1)
if self.replayer is None:
return self.action
else:
return self.replayer.getAction(state)
def drawLineOverTime(self, x1, y1, x2, y2, steps, dt, args=(), **kwargs):
"draw a line from (x1, y1) to (x2, y2) over time"
line = self.canvas.create_line(x1, y1, x1, y1, *args, **kwargs)
dx = (x2 - x1) / steps
dy = (y2 - y1) / steps
for idx in range(steps+1):
time.sleep(dt)
self.canvas.delete(line)
line = self.canvas.create_line(x1, y1, x1+dx*idx, y1+dy*idx, *args, **kwargs)
def getPos(self, pos):
"get action to canvas postion"
a, b = Game.decodeAction(pos)
dx, dy = self._getDxDy()
return (a+1)*dx, (b+1)*dy
def winScreen(self, game, _win):
"show win screen"
self._win = 2
self.render(game)
self._winLinesRendered = False
dx, dy = self._getDxDy()
threads = []
if game is not None:
for a, b in game.ends:
x1, y1 = self.getPos(a)
x2, y2 = self.getPos(b)
currentThread = Thread(
target=self.drawLineOverTime,
args=(
x1,y1,
x2,y2,
20,0.01
),
kwargs = self.winLines_kwargs
)
currentThread.start()
threads.append(currentThread)
for thread in threads:
thread.join()
del threads
self._win = _win
if game.tie:
self._win = 0
self._winLinesRendered = True
def renderWinLines(self, game):
if self._winLinesRendered:
if game.isDone:
for a, b in game.ends:
x1, y1 = self.getPos(a)
x2, y2 = self.getPos(b)
self.canvas.create_line(x1,y1,x2,y2, **self.winLines_kwargs)
| 31.853556 | 104 | 0.523053 | 7,432 | 0.976225 | 0 | 0 | 0 | 0 | 0 | 0 | 1,075 | 0.141206 |
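# --- Illustrative sketch (not part of the original GUI.py) ---
# _writeAction() above maps a canvas click onto the 7x6 board: the click is
# shifted by half a cell, integer-divided by the cell size, and flattened with
# action = column + 7 * row. The canvas size and click below are hypothetical.
canvas_w, canvas_h = 500, 450
dx, dy = canvas_w / 8, canvas_h / 7      # cell size, as in _getDxDy()
click_x, click_y = 200, 150              # hypothetical click position
col = int((click_x - dx * 0.5) // dx)    # -> 2
row = int((click_y - dy * 0.5) // dy)    # -> 1
action = col + 7 * row                   # -> 9
print(action)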
477f20974aea53dd1ab2a88bb5960250f6eba452 | 1,007 | py | Python | Leetcoding-Actions/Explore-Monthly-Challenges/2020-12/07-spiralMatrix-ii.py | shoaibur/SWE | 1e114a2750f2df5d6c50b48c8e439224894d65da | [
"MIT"
]
| 1 | 2020-11-14T18:28:13.000Z | 2020-11-14T18:28:13.000Z | Leetcoding-Actions/Explore-Monthly-Challenges/2020-12/07-spiralMatrix-ii.py | shoaibur/SWE | 1e114a2750f2df5d6c50b48c8e439224894d65da | [
"MIT"
]
| null | null | null | Leetcoding-Actions/Explore-Monthly-Challenges/2020-12/07-spiralMatrix-ii.py | shoaibur/SWE | 1e114a2750f2df5d6c50b48c8e439224894d65da | [
"MIT"
]
| null | null | null | class Solution:
def generateMatrix(self, n: int) -> List[List[int]]:
matrix = [[None] * n for _ in range(n)]
startRow, endRow = 0, n - 1
startCol, endCol = 0, n - 1
count = 1
while startRow <= endRow and startCol <= endCol:
for col in range(startCol, endCol + 1):
matrix[startRow][col] = count
count += 1
for row in range(startRow + 1, endRow + 1):
matrix[row][endCol] = count
count += 1
for col in range(endCol - 1, startCol - 1, -1):
matrix[endRow][col] = count
count += 1
for row in range(endRow - 1, startRow, -1):
matrix[row][startCol] = count
count += 1
startRow, endRow = startRow + 1, endRow - 1
startCol, endCol = startCol + 1, endCol - 1
return matrix
| 32.483871 | 59 | 0.440914 | 1,006 | 0.999007 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
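# --- Illustrative usage sketch (not part of the original LeetCode file) ---
# Assuming the Solution class above is in scope, the boundary walk fills the
# outer ring clockwise and then the centre cell:
expected = [[1, 2, 3],
            [8, 9, 4],
            [7, 6, 5]]
assert Solution().generateMatrix(3) == expected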
47809d65f9b6492472bef06377ad2f44a10c127c | 1,742 | py | Python | motto/core.py | attakei/jamproject | f3a677f4f95c112b89fb38957e6ba1a3c923ea85 | [
"Apache-2.0"
]
| null | null | null | motto/core.py | attakei/jamproject | f3a677f4f95c112b89fb38957e6ba1a3c923ea85 | [
"Apache-2.0"
]
| 1 | 2020-01-05T14:04:35.000Z | 2020-01-05T14:04:35.000Z | motto/core.py | attakei/motto | f3a677f4f95c112b89fb38957e6ba1a3c923ea85 | [
"Apache-2.0"
]
| null | null | null | """Core classes for motto
"""
from typing import Any, Callable, ClassVar, Dict, List, Optional, Tuple, Union
from typing_extensions import Protocol, TypedDict
class Message(object):
"""Reporting message class.
This object is attached to paragraphs by skills.
Its responsibility is only to tell the user "what" happened, not "where".
"""
def __init__(self, body: str):
self.body: str = body
"""message body"""
class Report(object):
"""Report by skills.
"""
def __init__(self):
self._messages: List[Message] = []
def __repr__(self):
cnt = len(self)
if cnt == 0:
return "[No reports]"
if cnt == 1:
return "[1 report]"
return f"[{cnt} reports]"
def __len__(self) -> int:
return len(self._messages)
def add(self, msg: Message):
self._messages.append(msg)
class Token(Protocol):
"""Token interface for tokenize engine.
Based from Janome.
"""
surface: ClassVar[str]
class Sentence(object):
"""Sentence dataset and accessor.
"""
def __init__(self, tokens: Union[Tuple[Token, ...], List[Token]]):
self._tokens: Tuple[Token, ...] = tuple(tokens) if isinstance(
tokens, list
) else tokens
def __repr__(self) -> str:
cnt = len(self)
if cnt == 0:
return "[no tokens]"
if cnt == 1:
return "[1 token]"
return f"[{cnt} tokens]"
def __len__(self) -> int:
return len(self._tokens)
def __getitem__(self, key) -> Token:
return self._tokens[key]
SkillParams = Dict[str, Any]
SkillProc = Callable[[Sentence, SkillParams], Optional[Message]]
Config = Dict[str, Any]
| 22.050633 | 78 | 0.588978 | 1,449 | 0.831803 | 0 | 0 | 0 | 0 | 0 | 0 | 434 | 0.249139 |
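# --- Illustrative sketch (not part of the original motto/core.py) ---
# A SkillProc is any callable taking (Sentence, SkillParams) and returning an
# optional Message. The token class, skill, and threshold below are
# hypothetical; they only show how the classes above fit together.
class _FakeToken:
    def __init__(self, surface: str) -> None:
        self.surface = surface  # satisfies the Token protocol's `surface`


def too_long_sentence(sentence: Sentence, params: SkillParams) -> Optional[Message]:
    limit = params.get("max_tokens", 3)
    if len(sentence) > limit:
        return Message(f"sentence has {len(sentence)} tokens (limit {limit})")
    return None


report = Report()
sent = Sentence([_FakeToken("a"), _FakeToken("b"), _FakeToken("c"), _FakeToken("d")])
msg = too_long_sentence(sent, {"max_tokens": 3})
if msg is not None:
    report.add(msg)
print(report)  # -> [1 report]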
4780e099e5c87546b20f86a673d8da78571df7e4 | 361 | py | Python | tests/test_bad_seeds.py | jklynch/diffrascape | 350bed352fa6c9b30739e3748b7ea57b365f1944 | [
"BSD-3-Clause"
]
| null | null | null | tests/test_bad_seeds.py | jklynch/diffrascape | 350bed352fa6c9b30739e3748b7ea57b365f1944 | [
"BSD-3-Clause"
]
| null | null | null | tests/test_bad_seeds.py | jklynch/diffrascape | 350bed352fa6c9b30739e3748b7ea57b365f1944 | [
"BSD-3-Clause"
]
| null | null | null | from diffrascape.env import BadSeeds
def test_construct():
bad_seeds = BadSeeds()
bad_seeds_states = bad_seeds.states()
print(f"### states: {bad_seeds_states}")
assert bad_seeds_states["shape"][0] == 6
bad_seeds_actions = bad_seeds.actions()
print(f"### actions: {bad_seeds_actions}")
assert bad_seeds_actions["num_values"] == 3
| 25.785714 | 47 | 0.695291 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 87 | 0.240997 |
47846ea157171c9a8c93b748212ec54fd187b2e7 | 1,418 | py | Python | samples/bot-handoff-es6/emulate-users.py | microsoftly/botbuilder-js | 538cb479b8596cdc209f5d70aa1a9000a0e6b360 | [
"MIT"
]
| 1 | 2021-03-16T05:14:30.000Z | 2021-03-16T05:14:30.000Z | samples/bot-handoff-es6/emulate-users.py | microsoftly/botbuilder-js | 538cb479b8596cdc209f5d70aa1a9000a0e6b360 | [
"MIT"
]
| 1 | 2018-03-26T05:25:54.000Z | 2018-03-26T05:26:18.000Z | samples/bot-handoff-es6/emulate-users.py | microsoftly/botbuilder-js | 538cb479b8596cdc209f5d70aa1a9000a0e6b360 | [
"MIT"
]
| 1 | 2018-03-26T04:16:42.000Z | 2018-03-26T04:16:42.000Z | #!/usr/bin/python
import json, subprocess, sys, platform
from os.path import expanduser
if len (sys.argv) < 2 :
print("Usage: python " + sys.argv[0] + " username(s)")
sys.exit (1)
HOME=expanduser("~")
# determine paths
SYSTEM=platform.system()
if SYSTEM == 'Darwin':
SERVERJSON=HOME+'/Library/Application Support/botframework-emulator/botframework-emulator/server.json'
EMULATORPATH=HOME+'/Applications/botframework-emulator.app/'
elif SYSTEM == 'Windows':
SERVERJSON=HOME+'/AppData/Roaming/botframework-emulator/botframework-emulator/server.json'
EMULATORPATH=HOME+'/AppData/Local/botframework/botframework-emulator.exe'
else:
print("System " + SYSTEM + " not yet supported.")
sys.exit (1)
# read the server config file
with open(SERVERJSON, "r") as jsonFile:
data = json.load(jsonFile)
args=sys.argv[1:]
for arg in args:
# add user if not present
if data["users"]["usersById"].get(arg) is None:
data["users"]["usersById"][arg]={"id": arg,"name": arg}
# set current user
data["users"]["currentUserId"]=arg
# write server config file
with open(SERVERJSON, "w") as jsonFile:
json.dump(data, jsonFile, sort_keys=False, indent=2, separators=(',', ': '))
# launch emulator
if SYSTEM == 'Darwin':
subprocess.call(["/usr/bin/open", "-n", EMULATORPATH])
elif SYSTEM == 'Windows':
subprocess.call([EMULATORPATH])
| 33.761905 | 106 | 0.673484 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 603 | 0.425247 |
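# --- Illustrative sketch (not part of the original script) ---
# Running e.g. `python emulate-users.py alice bob` applies the loop above to
# server.json; the same user-registration logic on an in-memory dict:
data = {"users": {"usersById": {}, "currentUserId": None}}
for arg in ["alice", "bob"]:
    if data["users"]["usersById"].get(arg) is None:
        data["users"]["usersById"][arg] = {"id": arg, "name": arg}
    data["users"]["currentUserId"] = arg
# data["users"]["currentUserId"] == "bob", and both users are registered.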
4784a44a26bc244fa67aa7f304ae6b3a0ac78c99 | 7,141 | py | Python | originstamp_client/models/timestamp_response.py | OriginStampTimestamping/originstamp-python-client | a13c3d51eac6dd3a920b7b74e079531fe7ab17a2 | [
"MIT"
]
| 9 | 2018-11-06T06:43:46.000Z | 2020-09-26T03:29:41.000Z | originstamp_client/models/timestamp_response.py | OriginStampTimestamping/originstamp-python-client | a13c3d51eac6dd3a920b7b74e079531fe7ab17a2 | [
"MIT"
]
| 1 | 2019-05-06T10:49:23.000Z | 2019-05-13T09:30:01.000Z | originstamp_client/models/timestamp_response.py | OriginStampTimestamping/originstamp-python-client | a13c3d51eac6dd3a920b7b74e079531fe7ab17a2 | [
"MIT"
]
| 1 | 2020-10-02T17:31:47.000Z | 2020-10-02T17:31:47.000Z | # coding: utf-8
"""
OriginStamp Client
OpenAPI spec version: 3.0
OriginStamp Documentation: https://docs.originstamp.com
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class TimestampResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'comment': 'str',
'created': 'bool',
'date_created': 'int',
'hash_string': 'str',
'timestamps': 'list[TimestampData]'
}
attribute_map = {
'comment': 'comment',
'created': 'created',
'date_created': 'date_created',
'hash_string': 'hash_string',
'timestamps': 'timestamps'
}
def __init__(self, comment=None, created=None, date_created=None, hash_string=None, timestamps=None): # noqa: E501
"""TimestampResponse - a model defined in Swagger""" # noqa: E501
self._comment = None
self._created = None
self._date_created = None
self._hash_string = None
self._timestamps = None
self.discriminator = None
if comment is not None:
self.comment = comment
if created is not None:
self.created = created
if date_created is not None:
self.date_created = date_created
if hash_string is not None:
self.hash_string = hash_string
if timestamps is not None:
self.timestamps = timestamps
@property
def comment(self):
"""Gets the comment of this TimestampResponse. # noqa: E501
The comment which was added in the submission of the hash. # noqa: E501
:return: The comment of this TimestampResponse. # noqa: E501
:rtype: str
"""
return self._comment
@comment.setter
def comment(self, comment):
"""Sets the comment of this TimestampResponse.
The comment which was added in the submission of the hash. # noqa: E501
:param comment: The comment of this TimestampResponse. # noqa: E501
:type: str
"""
self._comment = comment
@property
def created(self):
"""Gets the created of this TimestampResponse. # noqa: E501
Field is set to true if it is a novel hash.If the flag is false, the hash was already submitted before. # noqa: E501
:return: The created of this TimestampResponse. # noqa: E501
:rtype: bool
"""
return self._created
@created.setter
def created(self, created):
"""Sets the created of this TimestampResponse.
Field is set to true if it is a novel hash.If the flag is false, the hash was already submitted before. # noqa: E501
:param created: The created of this TimestampResponse. # noqa: E501
:type: bool
"""
self._created = created
@property
def date_created(self):
"""Gets the date_created of this TimestampResponse. # noqa: E501
The time when your hash was submitted to OriginStamp. The date is returned in the following format: [ms] since 1.1.1970 (unix epoch), timezone: UTC. This is not considered as a true timestamp. # noqa: E501
:return: The date_created of this TimestampResponse. # noqa: E501
:rtype: int
"""
return self._date_created
@date_created.setter
def date_created(self, date_created):
"""Sets the date_created of this TimestampResponse.
The time when your hash was submitted to OriginStamp. The date is returned in the following format: [ms] since 1.1.1970 (unix epoch), timezone: UTC. This is not considered as a true timestamp. # noqa: E501
:param date_created: The date_created of this TimestampResponse. # noqa: E501
:type: int
"""
self._date_created = date_created
@property
def hash_string(self):
"""Gets the hash_string of this TimestampResponse. # noqa: E501
The submitted hash in hex representation. # noqa: E501
:return: The hash_string of this TimestampResponse. # noqa: E501
:rtype: str
"""
return self._hash_string
@hash_string.setter
def hash_string(self, hash_string):
"""Sets the hash_string of this TimestampResponse.
The submitted hash in hex representation. # noqa: E501
:param hash_string: The hash_string of this TimestampResponse. # noqa: E501
:type: str
"""
self._hash_string = hash_string
@property
def timestamps(self):
"""Gets the timestamps of this TimestampResponse. # noqa: E501
Contains all the timestamp data of your hash until now. # noqa: E501
:return: The timestamps of this TimestampResponse. # noqa: E501
:rtype: list[TimestampData]
"""
return self._timestamps
@timestamps.setter
def timestamps(self, timestamps):
"""Sets the timestamps of this TimestampResponse.
Contains all the timestamp data of your hash until now. # noqa: E501
:param timestamps: The timestamps of this TimestampResponse. # noqa: E501
:type: list[TimestampData]
"""
self._timestamps = timestamps
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(TimestampResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TimestampResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 31.183406 | 214 | 0.604537 | 6,844 | 0.958409 | 0 | 0 | 3,701 | 0.518275 | 0 | 0 | 3,993 | 0.559165 |
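# --- Illustrative usage sketch (not part of the generated client file) ---
# Assuming the TimestampResponse class above is in scope, a response can be
# built field by field and serialised with to_dict(); the values below are
# placeholders, and `timestamps` is omitted because it needs TimestampData.
resp = TimestampResponse(
    comment="invoice #42",
    created=True,
    date_created=1580000000000,   # ms since the Unix epoch, UTC
    hash_string="a3f5...",
)
print(resp.to_dict()["hash_string"])  # -> a3f5...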
4785a94446b7de09fa814de98568043480d82523 | 68,531 | py | Python | benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_ml/backup_results_unknownr/EightThreads_sjeng/power.py | TugberkArkose/MLScheduler | e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | [
"Unlicense"
]
| null | null | null | benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_ml/backup_results_unknownr/EightThreads_sjeng/power.py | TugberkArkose/MLScheduler | e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | [
"Unlicense"
]
| null | null | null | benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_ml/backup_results_unknownr/EightThreads_sjeng/power.py | TugberkArkose/MLScheduler | e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | [
"Unlicense"
]
| null | null | null | power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.398053,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.689285,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.395324,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.48266,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.393459,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.65134,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0144298,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.104345,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.106717,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.104345,
'Execution Unit/Register Files/Runtime Dynamic': 0.121147,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.252141,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.66048,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 2.87235,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00419365,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00419365,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00370425,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.00146219,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.001533,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0136245,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0383651,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.10259,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.347508,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.348441,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 0.850528,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.012026,
'L2/Runtime Dynamic': 0.00380648,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.72231,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.19954,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0804019,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0804019,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.10353,
'Load Store Unit/Runtime Dynamic': 1.67646,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.198257,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.396515,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0703622,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0704855,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0571378,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.680136,
'Memory Management Unit/Runtime Dynamic': 0.127623,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 23.9775,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0203543,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.207453,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.227807,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 5.75858,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.172918,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.27891,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.140784,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.592612,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.197769,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.20986,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00725295,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0524482,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.05364,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0524482,
'Execution Unit/Register Files/Runtime Dynamic': 0.060893,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.110494,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.289484,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.55105,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00231727,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00231727,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00210054,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000858113,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000770543,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00750563,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0192808,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0515655,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.28001,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.174533,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.17514,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.65771,
'Instruction Fetch Unit/Runtime Dynamic': 0.428024,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.00668734,
'L2/Runtime Dynamic': 0.00210046,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.48583,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.602875,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0403988,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0403987,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.6766,
'Load Store Unit/Runtime Dynamic': 0.842506,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0996164,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.199232,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0353542,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0354206,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.203939,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0287124,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.42078,
'Memory Management Unit/Runtime Dynamic': 0.064133,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 16.5611,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00780158,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.088257,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0960586,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.98388,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.17342,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.27972,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.141193,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.594333,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.198341,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.21098,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00727401,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0526,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0537958,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0526,
'Execution Unit/Register Files/Runtime Dynamic': 0.0610698,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.110814,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.290296,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.55377,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00232294,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00232294,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00210565,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000860183,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000772781,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00752432,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0193292,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0517153,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.28954,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.174827,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.175648,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.6677,
'Instruction Fetch Unit/Runtime Dynamic': 0.429044,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.00641252,
'L2/Runtime Dynamic': 0.00203156,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.48824,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.603977,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0404768,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0404768,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.67938,
'Load Store Unit/Runtime Dynamic': 0.844071,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0998089,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.199618,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0354225,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0354872,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.204531,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0287537,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.42149,
'Memory Management Unit/Runtime Dynamic': 0.0642409,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 16.5754,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00782423,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0885153,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0963395,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.98949,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.172891,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.278866,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.140762,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.59252,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.197736,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.2098,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00725182,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0524396,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0536316,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0524396,
'Execution Unit/Register Files/Runtime Dynamic': 0.0608834,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.110475,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.289485,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.55095,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00231872,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00231872,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00210203,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000858816,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000770423,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0075099,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0192865,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0515575,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.27949,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.17487,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.175112,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.65717,
'Instruction Fetch Unit/Runtime Dynamic': 0.428336,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.00635145,
'L2/Runtime Dynamic': 0.00200557,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.48542,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.602622,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0403853,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0403852,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.67612,
'Load Store Unit/Runtime Dynamic': 0.842173,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0995834,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.199166,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0353425,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.035407,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.203907,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0287582,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.420728,
'Memory Management Unit/Runtime Dynamic': 0.0641652,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 16.5596,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00780036,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0882409,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0960413,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.98367,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 0.40658959087042323,
'Runtime Dynamic': 0.40658959087042323,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.033795,
'Runtime Dynamic': 0.0199046,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 73.7075,
'Peak Power': 106.82,
'Runtime Dynamic': 14.7355,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 73.6737,
'Total Cores/Runtime Dynamic': 14.7156,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.033795,
'Total L3s/Runtime Dynamic': 0.0199046,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}} | 74.979212 | 124 | 0.681721 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 46,943 | 0.684989 |
4785cd8056f1f3e45758e79a377162fd96261ea3 | 6,450 | py | Python | folder_tree/migrations/0001_initial.py | JoenyBui/django-folder-tree | a742f3d69ad01f5403ce43578240a6cc54fa6956 | ["BSD-3-Clause"] | null | null | null | folder_tree/migrations/0001_initial.py | JoenyBui/django-folder-tree | a742f3d69ad01f5403ce43578240a6cc54fa6956 | ["BSD-3-Clause"] | null | null | null | folder_tree/migrations/0001_initial.py | JoenyBui/django-folder-tree | a742f3d69ad01f5403ce43578240a6cc54fa6956 | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-13 22:10
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import folder_tree.models
import mptt.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='GeneralFile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=255, null=True)),
('is_executable', models.BooleanField(default=False)),
('is_locked', models.BooleanField(default=False)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now_add=True)),
('file_type', models.IntegerField(choices=[(0, 'txt'), (1, 'png'), (2, 'jpg'), (3, 'jpeg'), (4, 'gif'), (5, 'bmp'), (6, 'mpg'), (7, 'mpeg'), (8, 'mov'), (9, 'avi'), (10, 'wmv'), (11, 'csv'), (12, 'pdf'), (13, 'xls'), (14, 'xlsx'), (15, 'doc'), (16, 'docx'), (17, 'ppt'), (18, 'pptx'), (-1, 'unknown')], default=-1)),
('file', models.FileField(default='default.txt', upload_to=folder_tree.models.user_file_path)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ImageFile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=255, null=True)),
('is_executable', models.BooleanField(default=False)),
('is_locked', models.BooleanField(default=False)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now_add=True)),
('file_type', models.IntegerField(choices=[(1, 'png'), (2, 'jpg'), (3, 'jpeg'), (4, 'gif'), (5, 'bmp')], default=-1)),
('photo', models.ImageField(upload_to=folder_tree.models.user_file_path)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Trash',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='TreeFolder',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('is_locked', models.BooleanField(default=False)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now_add=True)),
('lft', models.PositiveIntegerField(db_index=True, editable=False)),
('rght', models.PositiveIntegerField(db_index=True, editable=False)),
('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
('level', models.PositiveIntegerField(db_index=True, editable=False)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='TreeProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='ProjectFolder',
fields=[
('treefolder_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='folder_tree.TreeFolder')),
('app_type', models.IntegerField(choices=[(1, 'WhAM')], default=1)),
],
options={
'abstract': False,
},
bases=('folder_tree.treefolder',),
),
migrations.AddField(
model_name='treeprofile',
name='root_folder',
field=models.ForeignKey(blank=True, default=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='folder_tree.TreeFolder'),
),
migrations.AddField(
model_name='treeprofile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='treefolder',
name='parent',
field=mptt.fields.TreeForeignKey(blank=True, default=0, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='folder_tree.TreeFolder'),
),
migrations.AddField(
model_name='treefolder',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='trash',
name='prev',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='folder_tree.TreeFolder'),
),
migrations.AddField(
model_name='trash',
name='profile',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='folder_tree.TreeProfile'),
),
migrations.AddField(
model_name='imagefile',
name='folder',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='folder_tree.ProjectFolder'),
),
migrations.AddField(
model_name='generalfile',
name='folder',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='folder_tree.ProjectFolder'),
),
]
| 47.426471 | 332 | 0.582171 | 6,181 | 0.958295 | 0 | 0 | 0 | 0 | 0 | 0 | 975 | 0.151163 |
47867c6666c70dddf7e4d9d1220c5f5b48b81ac1 | 428 | py | Python | groupdocs_viewer_cloud/apis/__init__.py | groupdocs-viewer-cloud/groupdocs-viewer-cloud-python | e734944b0b84c91804aa29e4ad5619c1a7334188 | ["MIT"] | 1 | 2020-06-07T12:39:44.000Z | 2020-06-07T12:39:44.000Z | groupdocs_viewer_cloud/apis/__init__.py | groupdocs-viewer-cloud/groupdocs-viewer-cloud-python | e734944b0b84c91804aa29e4ad5619c1a7334188 | ["MIT"] | 1 | 2021-03-24T01:53:23.000Z | 2021-03-26T08:20:31.000Z | groupdocs_viewer_cloud/apis/__init__.py | groupdocs-viewer-cloud/groupdocs-viewer-cloud-python | e734944b0b84c91804aa29e4ad5619c1a7334188 | ["MIT"] | null | null | null |
from __future__ import absolute_import
# flake8: noqa
# import apis
from groupdocs_viewer_cloud.apis.file_api import FileApi
from groupdocs_viewer_cloud.apis.folder_api import FolderApi
from groupdocs_viewer_cloud.apis.info_api import InfoApi
from groupdocs_viewer_cloud.apis.license_api import LicenseApi
from groupdocs_viewer_cloud.apis.storage_api import StorageApi
from groupdocs_viewer_cloud.apis.view_api import ViewApi
| 35.666667 | 62 | 0.880841 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.063084 |
4786a66a40aa4e0d9cb192b3447ac559f77b749b | 481 | py | Python | models/model.py | BaoLocPham/hum2song | 706b7fdf838944e2aabe0ae331c0867cb67f6fbc | ["MIT"] | null | null | null | models/model.py | BaoLocPham/hum2song | 706b7fdf838944e2aabe0ae331c0867cb67f6fbc | ["MIT"] | null | null | null | models/model.py | BaoLocPham/hum2song | 706b7fdf838944e2aabe0ae331c0867cb67f6fbc | ["MIT"] | null | null | null |
from models.wrap_mobilenet import *
from models.wrap_resnet import *
from models.wrap_vgg import *
from models.wrap_alexnet import *
def get_model(config="resnet"):
if "resnet" in config.backbone:
model = get_resnet(config=config)
elif "mobilenet" in config.backbone:
model = get_mobilenet(config)
elif "vgg" in config.backbone:
model = get_vgg(config)
elif "alexnet" in config.backbone:
model = get_alexnet(config)
return model | 32.066667 | 41 | 0.702703 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 41 | 0.085239 |
4786b42fdf67185bb43692e8ca877d2f9dba7530 | 4,571 | py | Python | models/segnet.py | AntixK/Neural-Blocks | 018a44bbb703fc848234b95a3e604576bd9df88f | ["MIT"] | 3 | 2019-07-23T12:35:50.000Z | 2021-02-23T04:20:31.000Z | models/segnet.py | AntixK/Neural-Blocks | 018a44bbb703fc848234b95a3e604576bd9df88f | ["MIT"] | null | null | null | models/segnet.py | AntixK/Neural-Blocks | 018a44bbb703fc848234b95a3e604576bd9df88f | ["MIT"] | 1 | 2019-07-21T06:07:12.000Z | 2019-07-21T06:07:12.000Z |
import torch
import torch.nn as nn
from NeuralBlocks.blocks.convnormrelu import ConvNormRelu
class segnetDown(nn.Module):
def __init__(self, in_channels, out_channels, norm=None, num_conv = 2):
super(segnetDown, self).__init__()
module = []
if num_conv < 2:
raise ValueError("SegNet needs at least 2 conv layers i.e. num_conv >= 2")
"""
For SegNet, the down sampling layers have the form
conv (in_channels, out_channels) + BN + ReLU
conv (out_channels, out_channels) + BN + ReLU
"""
num_filters= [in_channels] + (num_conv)*[out_channels]
for i in range(num_conv):
module.append(ConvNormRelu(num_filters[i], num_filters[i + 1],
kernel_size=3, stride=1, padding=1, norm=norm))
self.layer = nn.Sequential(*module)
#print(self.layer)
self.maxpool_argmax = nn.MaxPool2d(2,2, return_indices=True)
def forward(self, input):
output = self.layer(input)
        unpooled_size = output.size()
        output, indices = self.maxpool_argmax(output)
        return output, indices, unpooled_size
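# Illustrative note: with the 3x3, stride-1, padding-1 convolutions above, an input
# of shape (N, in_channels, H, W) comes back as (N, out_channels, H/2, W/2), plus
# the max-pooling indices and the pre-pooling size that segnetUp later passes to
# nn.MaxUnpool2d.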
class segnetUp(nn.Module):
def __init__(self, in_channels, out_channels, is_deconv, num_conv = 2, norm = 'BN'):
super(segnetUp, self).__init__()
if num_conv < 2:
raise ValueError("SegNet needs at least 2 conv layers i.e. num_conv >= 2")
num_filters = [in_channels]*(num_conv) + [out_channels]
"""
For SegNet, the up sampling layers have the form
conv (in_channels, in_channels) + BN + ReLU
conv (in_channels, out_channels) + BN + ReLU
"""
if is_deconv:
self.up = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=2, stride=2)
else:
self.up = nn.MaxUnpool2d(2,2)
module = []
for i in range(num_conv):
module.append(ConvNormRelu(num_filters[i], num_filters[i + 1],
kernel_size = 3, stride= 1, padding=1, norm=norm))
self.layer = nn.Sequential(*module)
def forward(self, input, indices, output_size):
output = self.up(input = input, indices = indices, output_size=output_size)
output =self.layer(output)
return output
class SegNet(nn.Module):
def __init__(self, in_channels, n_class, norm = 'BN', filters = None, is_deconv = False):
super(SegNet,self).__init__()
self.is_deconv = is_deconv
if filters is None:
filters = [64,128,256,512, 512]
if len(filters) < 3:
raise ValueError('Number filters must be at least 3.')
filters.insert(0, in_channels) # To account for the initial channels
modules= []
# Downsampling phase
for i in range(1, len(filters)):
if i < 3:
modules.append(segnetDown(filters[i-1], filters[i], norm, num_conv=2))
else:
modules.append(segnetDown(filters[i-1], filters[i], norm, num_conv=3))
self.down_layers = nn.ModuleList(modules)
# Upsampling Phase
filters[0] = n_class # To account for the final number of classes
modules = []
for i in range(len(filters)-1,0,-1):
if i > 2:
modules.append(segnetUp(filters[i], filters[i-1], self.is_deconv,
num_conv = 3, norm = norm))
else:
modules.append(segnetUp(filters[i], filters[i-1], self.is_deconv,
num_conv = 2, norm = norm))
self.up_layers = nn.ModuleList(modules)
# print(self.up_layers)
def forward(self, input):
x = input
unpool_args = []
for i, module in enumerate(self.down_layers):
x, ind, unpool_shape = module(x)
unpool_args.append([ind, unpool_shape])
result = x
N = len(self.up_layers)-1 # Variable to traverse unpool_args from reverse
"""
Note that the parameters for the up layers are the result of the
previous layer and the unpool args from the corresponding up layer.
i.e. the unpool_args must be traversed from reverse.
"""
for i, module in enumerate(self.up_layers):
result = module(result, *unpool_args[N-i])
return result
if __name__ == "__main__":
s = SegNet(3, 10, norm = 'BN')
inp = torch.randn(32,3,128, 128) #M x C x H x W
s.train()
result = s(inp)
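    # Illustrative expectation: `result` should have shape (32, 10, 128, 128),
    # i.e. one score map per class at the input resolution.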
| 34.628788 | 93 | 0.584992 | 4,320 | 0.945089 | 0 | 0 | 0 | 0 | 0 | 0 | 990 | 0.216583 |
4788b5896e827da02e7c6002f5c921308b7b3159 | 15,484 | py | Python | Contents/Resources/parse_log.py | nathangrigg/Latex.bbpackage | f53b33c6c7c90cd9aaf1da61591cf35e5aa722b3 | ["BSD-3-Clause"] | 8 | 2015-10-21T02:03:28.000Z | 2020-12-27T04:04:46.000Z | Contents/Resources/parse_log.py | nathangrigg/Latex.bbpackage | f53b33c6c7c90cd9aaf1da61591cf35e5aa722b3 | ["BSD-3-Clause"] | null | null | null | Contents/Resources/parse_log.py | nathangrigg/Latex.bbpackage | f53b33c6c7c90cd9aaf1da61591cf35e5aa722b3 | ["BSD-3-Clause"] | 5 | 2015-01-15T14:01:30.000Z | 2021-09-05T20:22:09.000Z |
#!/usr/bin/python
# This script derived from a piece of the rubber project
# http://launchpad.net/rubber
# (c) Emmanuel Beffara, 2002--2006
#
# Modified by Nathan Grigg, January 2012
import re
import string
import sys
import getopt
#---- Log parser ----{{{1
re_loghead = re.compile("This is [0-9a-zA-Z-]*")
re_rerun = re.compile("LaTeX Warning:.*Rerun")
re_file = re.compile("(\\((?P<file>[^\n\t(){}]*[^ \n\t(){}])|\\))")
re_badbox = re.compile(r"(Ov|Und)erfull \\[hv]box ")
re_line = re.compile(r"(l\.(?P<line>[0-9]+)( (?P<code>.*))?$|<\*>)")
re_cseq = re.compile(r".*(?P<seq>(\\|\.\.\.)[^ ]*) ?$")
re_macro = re.compile(r"^(?P<macro>\\.*) ->")
re_page = re.compile("\[(?P<num>[0-9]+)\]")
re_atline = re.compile(
"( detected| in paragraph)? at lines? (?P<line>[0-9]*)(--(?P<last>[0-9]*))?")
re_reference = re.compile("LaTeX Warning: Reference `(?P<ref>.*)' \
on page (?P<page>[0-9]*) undefined on input line (?P<line>[0-9]*)\\.$")
re_label = re.compile("LaTeX Warning: (?P<text>Label .*)$")
re_warning = re.compile(
"(LaTeX|Package)( (?P<pkg>.*))? Warning: (?P<text>.*)$")
re_online = re.compile("(; reported)? on input line (?P<line>[0-9]*)")
re_ignored = re.compile("; all text was ignored after line (?P<line>[0-9]*).$")
class LogCheck (object):
"""
This class performs all the extraction of information from the log file.
For efficiency, the instances contain the whole file as a list of strings
so that it can be read several times with no disk access.
"""
#-- Initialization {{{2
def __init__ (self):
self.lines = None
def read (self, name):
"""
Read the specified log file, checking that it was produced by the
right compiler. Returns true if the log file is invalid or does not
exist.
"""
self.lines = None
try:
file = open(name)
except IOError:
return 2
line = file.readline()
if not line:
file.close()
return 1
if not re_loghead.match(line):
file.close()
return 1
self.lines = file.readlines()
file.close()
return 0
#-- Process information {{{2
def errors (self):
"""
Returns true if there was an error during the compilation.
"""
skipping = 0
for line in self.lines:
if line.strip() == "":
skipping = 0
continue
if skipping:
continue
m = re_badbox.match(line)
if m:
skipping = 1
continue
if line[0] == "!":
# We check for the substring "pdfTeX warning" because pdfTeX
# sometimes issues warnings (like undefined references) in the
# form of errors...
if string.find(line, "pdfTeX warning") == -1:
return 1
return 0
def run_needed (self):
"""
Returns true if LaTeX indicated that another compilation is needed.
"""
for line in self.lines:
if re_rerun.match(line):
return 1
return 0
#-- Information extraction {{{2
def continued (self, line):
"""
Check if a line in the log is continued on the next line. This is
needed because TeX breaks messages at 79 characters per line. We make
this into a method because the test is slightly different in Metapost.
"""
return len(line) == 79
def parse (self, errors=0, boxes=0, refs=0, warnings=0):
"""
Parse the log file for relevant information. The named arguments are
booleans that indicate which information should be extracted:
- errors: all errors
- boxes: bad boxes
- refs: warnings about references
- warnings: all other warnings
The function returns a generator. Each generated item is a dictionary
that contains (some of) the following entries:
- kind: the kind of information ("error", "box", "ref", "warning")
- text: the text of the error or warning
- code: the piece of code that caused an error
- file, line, last, pkg: as used by Message.format_pos.
"""
if not self.lines:
return
last_file = None
pos = [last_file]
page = 1
parsing = 0 # 1 if we are parsing an error's text
skipping = 0 # 1 if we are skipping text until an empty line
something = 0 # 1 if some error was found
prefix = None # the prefix for warning messages from packages
accu = "" # accumulated text from the previous line
macro = None # the macro in which the error occurs
cseqs = {} # undefined control sequences so far
for line in self.lines:
line = line[:-1] # remove the line feed
# TeX breaks messages at 79 characters, just to make parsing
# trickier...
if not parsing and self.continued(line):
accu += line
continue
line = accu + line
accu = ""
# Text that should be skipped (from bad box messages)
if prefix is None and line == "":
skipping = 0
continue
if skipping:
continue
# Errors (including aborted compilation)
if parsing:
if error == "Undefined control sequence.":
# This is a special case in order to report which control
# sequence is undefined.
m = re_cseq.match(line)
if m:
seq = m.group("seq")
if cseqs.has_key(seq):
# This prevents reporting a sequence more than once
error = None
else:
cseqs[seq] = None
error = "Undefined control sequence %s." % m.group("seq")
# Checks if the error is the definition of a macro
m = re_macro.match(line)
if m:
macro = m.group("macro")
# Extracts the line number
m = re_line.match(line)
if m:
parsing = 0
skipping = 1
pdfTeX = string.find(line, "pdfTeX warning") != -1
if error is not None and ((pdfTeX and warnings) or (errors and not pdfTeX)):
if pdfTeX:
d = {
"kind": "warning",
"pkg": "pdfTeX",
"text": error[error.find(":")+2:]
}
else:
d = {
"kind": "error",
"text": error
}
d.update( m.groupdict() )
m = re_ignored.search(error)
if m:
d["file"] = last_file
if d.has_key("code"):
del d["code"]
d.update( m.groupdict() )
elif pos[-1] is None:
d["file"] = last_file
else:
d["file"] = pos[-1]
if macro is not None:
d["macro"] = macro
macro = None
yield d
elif line[0] == "!":
error = line[2:]
elif line[0:3] == "***":
parsing = 0
skipping = 1
if errors:
yield {
"kind": "abort",
"text": error,
"why" : line[4:],
"file": last_file
}
elif line[0:15] == "Type X to quit ":
parsing = 0
skipping = 0
if errors:
yield {
"kind": "error",
"text": error,
"file": pos[-1]
}
continue
if len(line) > 0 and line[0] == "!":
error = line[2:]
parsing = 1
continue
if line == "Runaway argument?":
error = line
parsing = 1
continue
# Long warnings
if prefix is not None:
if line[:len(prefix)] == prefix:
text.append(string.strip(line[len(prefix):]))
else:
text = " ".join(text)
m = re_online.search(text)
if m:
info["line"] = m.group("line")
text = text[:m.start()] + text[m.end():]
if warnings:
info["text"] = text
d = { "kind": "warning" }
d.update( info )
yield d
prefix = None
continue
# Undefined references
m = re_reference.match(line)
if m:
if refs:
d = {
"kind": "warning",
"text": _("Reference `%s' undefined.") % m.group("ref"),
"file": pos[-1]
}
d.update( m.groupdict() )
yield d
continue
m = re_label.match(line)
if m:
if refs:
d = {
"kind": "warning",
"file": pos[-1]
}
d.update( m.groupdict() )
yield d
continue
# Other warnings
if line.find("Warning") != -1:
m = re_warning.match(line)
if m:
info = m.groupdict()
info["file"] = pos[-1]
info["page"] = page
if info["pkg"] is None:
del info["pkg"]
prefix = ""
else:
prefix = ("(%s)" % info["pkg"])
prefix = prefix.ljust(m.start("text"))
text = [info["text"]]
continue
# Bad box messages
m = re_badbox.match(line)
if m:
if boxes:
mpos = { "file": pos[-1], "page": page }
m = re_atline.search(line)
if m:
md = m.groupdict()
for key in "line", "last":
if md[key]: mpos[key] = md[key]
line = line[:m.start()]
d = {
"kind": "warning",
"text": line
}
d.update( mpos )
yield d
skipping = 1
continue
# If there is no message, track source names and page numbers.
last_file = self.update_file(line, pos, last_file)
page = self.update_page(line, page)
def get_errors (self):
return self.parse(errors=1)
def get_boxes (self):
return self.parse(boxes=1)
def get_references (self):
return self.parse(refs=1)
def get_warnings (self):
return self.parse(warnings=1)
def update_file (self, line, stack, last):
"""
Parse the given line of log file for file openings and closings and
update the list `stack'. Newly opened files are at the end, therefore
stack[1] is the main source while stack[-1] is the current one. The
first element, stack[0], contains the value None for errors that may
happen outside the source. Return the last file from which text was
read (the new stack top, or the one before the last closing
parenthesis).
"""
m = re_file.search(line)
while m:
if line[m.start()] == '(':
last = m.group("file")
stack.append(last)
else:
last = stack[-1]
del stack[-1]
line = line[m.end():]
m = re_file.search(line)
return last
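    # Illustrative trace (hypothetical log line): "(./chapter1.tex (./figs/plot.tex))"
    # pushes ./chapter1.tex and then ./figs/plot.tex onto the stack as they are
    # opened, and pops each again at the matching closing parenthesis.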
def update_page (self, line, before):
"""
Parse the given line and return the number of the page that is being
built after that line, assuming the current page before the line was
`before'.
"""
ms = re_page.findall(line)
if ms == []:
return before
return int(ms[-1]) + 1
# command line options
def parse_options(cmdline):
try:
opts, args = getopt.getopt(
cmdline, "h", ["boxes","errors","help","refs","warnings"])
except getopt.GetoptError, e:
sys.stderr.write(e.msg + "\n")
sys.exit(1)
d = {"boxes": 0, "errors": 0, "refs": 0, "warnings": 0}
# set a default option
if len(opts) == 0:
d["errors"] = 1
for opt,arg in opts:
if opt in ("-h","--help"):
help()
else:
d[opt[2:]] = 1
if len(args) != 1:
sys.stderr.write("One log file is required\n")
sys.exit(1)
file = args[0]
return d,file
def help():
print ("""\
usage: rubber [options] logfile
available options:
--boxes display overfull/underfull box messages
--errors display error messages
  --help       display this message and exit
--refs display missing reference messages
--warnings display all other warnings
""")
sys.exit()
# applescript compatible output
def applescript_output(d,directory):
out = []
if "kind" in d:
if d["kind"] == "error":
out.append("Error")
else:
out.append("Warning")
else:
return
if "file" in d:
file = d["file"]
# convert relative path to absolute path
if file[0] == "/":
out.append(file)
else:
if file[:2] == "./":
file = file[2:]
out.append(directory + file)
else:
out.append("")
if "line" in d and d["line"]:
out.append(d["line"])
else:
out.append("0")
message = ""
if "text" in d:
message += d["text"]
if "pkg" in d:
message += " (Package %s) " % d["pkg"]
if "page" in d:
message += " (On page %d)" % d["page"]
out.append(message)
return "\t".join(out)
if __name__ == "__main__":
options,file = parse_options(sys.argv[1:])
directory = file[:file.rfind('/') + 1]
check = LogCheck()
check.read(file)
for d in check.parse(errors=options["errors"], boxes=options["boxes"],
refs=options["refs"], warnings=options["warnings"]):
print applescript_output(d, directory)
| 33.227468 | 96 | 0.452855 | 12,018 | 0.776156 | 8,218 | 0.530741 | 0 | 0 | 0 | 0 | 5,291 | 0.341708 |
478b01eea05155c0e098a1b65909d95f41833301 | 5,665 | py | Python | barbican-8.0.0/barbican/objects/container_consumer_meta.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | ["Apache-2.0"] | 177 | 2015-01-02T09:35:53.000Z | 2022-02-26T01:43:55.000Z | barbican/objects/container_consumer_meta.py | kkutysllb/barbican | 7b14d983e0dce6dcffe9781b05c52335b8203fc7 | ["Apache-2.0"] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | barbican/objects/container_consumer_meta.py | kkutysllb/barbican | 7b14d983e0dce6dcffe9781b05c52335b8203fc7 | ["Apache-2.0"] | 87 | 2015-01-13T17:33:40.000Z | 2021-11-09T05:30:36.000Z |
# Copyright 2018 Fujitsu.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_db import exception as db_exc
from oslo_utils import timeutils
from oslo_versionedobjects import base as object_base
from barbican.common import utils
from barbican.model import models
from barbican.model import repositories as repos
from barbican.objects import base
from barbican.objects import fields
LOG = utils.getLogger(__name__)
@object_base.VersionedObjectRegistry.register
class ContainerConsumerMetadatum(base.BarbicanObject,
base.BarbicanPersistentObject,
object_base.VersionedObjectDictCompat):
fields = {
'container_id': fields.StringField(nullable=False),
'project_id': fields.StringField(nullable=True, default=None),
'name': fields.StringField(nullable=True, default=None),
'URL': fields.StringField(nullable=True, default=None),
'data_hash': fields.StringField(nullable=True, default=None)
}
db_model = models.ContainerConsumerMetadatum
db_repo = repos.get_container_consumer_repository()
@classmethod
def get_by_container_id(cls, container_id, offset_arg=None, limit_arg=None,
suppress_exception=False, session=None):
entities_db, offset, limit, total = \
cls.db_repo.get_by_container_id(
container_id, offset_arg, limit_arg,
suppress_exception, session)
entities = [cls()._from_db_object(entity_db) for entity_db in
entities_db]
return entities, offset, limit, total
@classmethod
def get_by_values(cls, container_id, name, URL, suppress_exception=False,
show_deleted=False, session=None):
consumer_db = cls.db_repo.get_by_values(container_id, name,
URL,
suppress_exception,
show_deleted,
session)
return cls()._from_db_object(consumer_db)
@classmethod
def create_or_update_from_model(cls, new_consumer,
container, session=None):
"""Create or update container
:param new_consumer: a instance of ContainerConsumerMetadatum model
:param container: a instance of Container OVO
:param session: a session to connect with database
:return: None
It is used during converting from model to OVO. It will be removed
after Container resource is implemented OVO.
"""
session = cls.get_session(session=session)
try:
container.updated_at = timeutils.utcnow()
container.save(session=session)
new_consumer.save(session=session)
except db_exc.DBDuplicateEntry:
session.rollback() # We know consumer already exists.
# This operation is idempotent, so log this and move on
LOG.debug("Consumer %s with URL %s already exists for "
"container %s, continuing...", new_consumer.name,
new_consumer.URL, new_consumer.container_id)
# Get the existing entry and reuse it by clearing the deleted flags
existing_consumer = cls.get_by_values(
new_consumer.container_id, new_consumer.name, new_consumer.URL,
show_deleted=True)
existing_consumer.deleted = False
existing_consumer.deleted_at = None
# We are not concerned about timing here -- set only, no reads
existing_consumer.save(session=session)
@classmethod
def create_or_update_from(cls, new_consumer, container, session=None):
"""Create or update container
:param new_consumer: a instance of ContainerConsumerMetadatum OVO
:param container: a instance of Container OVO
:param session: a session to connect with database
:return: None
"""
session = cls.get_session(session=session)
try:
container.updated_at = timeutils.utcnow()
container.consumers.append(new_consumer)
container.save(session=session)
except db_exc.DBDuplicateEntry:
session.rollback() # We know consumer already exists.
# This operation is idempotent, so log this and move on
LOG.debug("Consumer %s with URL %s already exists for "
"container %s, continuing...", new_consumer.name,
new_consumer.URL, new_consumer.container_id)
# Get the existing entry and reuse it by clearing the deleted flags
existing_consumer = cls.get_by_values(
new_consumer.container_id, new_consumer.name, new_consumer.URL,
show_deleted=True)
existing_consumer.deleted = False
existing_consumer.deleted_at = None
# We are not concerned about timing here -- set only, no reads
existing_consumer.save(session=session)
| 44.960317 | 79 | 0.642012 | 4,665 | 0.823477 | 0 | 0 | 4,711 | 0.831598 | 0 | 0 | 1,855 | 0.327449 |
478b0e311b234d21a0af4f46a1bbc9e444318807 | 527 | py | Python | Lab02_ifelse_and_loops/exercise-17.py | rodrigoc-silva/Python-course | 327b20738a4b383510faddc0ec26a54be1bbd717 | ["MIT"] | null | null | null | Lab02_ifelse_and_loops/exercise-17.py | rodrigoc-silva/Python-course | 327b20738a4b383510faddc0ec26a54be1bbd717 | ["MIT"] | null | null | null | Lab02_ifelse_and_loops/exercise-17.py | rodrigoc-silva/Python-course | 327b20738a4b383510faddc0ec26a54be1bbd717 | ["MIT"] | null | null | null |
#This program converts KPH to MPH.
#constant
CONVERT_FACTOR = 0.6214
#head output
print("KPH \t MPH")
print("_" * 20)
#loop
for kph_speed in range (60, 131, 10):
#calculation
mph_speed = kph_speed * CONVERT_FACTOR
#output
print(kph_speed, '\t', format(mph_speed, '.1f'))
input("\nPress any key to quit")
# Case 1
# KPH MPH
# ____________________
# 60 37.3
# 70 43.5
# 80 49.7
# 90 55.9
# 100 62.1
# 110 68.4
# 120 74.6
# 130 80.8
# Press any key to quit | 15.5 | 52 | 0.588235 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 315 | 0.597723 |
478d445e00859e47118663015c1dca1d382e8e84 | 3,841 | py | Python | tests/sentry/api/endpoints/test_project_rules_configuration.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | ["BSD-3-Clause"] | null | null | null | tests/sentry/api/endpoints/test_project_rules_configuration.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | ["BSD-3-Clause"] | null | null | null | tests/sentry/api/endpoints/test_project_rules_configuration.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | ["BSD-3-Clause"] | null | null | null |
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from mock import Mock, patch
from sentry.rules.registry import RuleRegistry
from sentry.testutils import APITestCase
class ProjectRuleConfigurationTest(APITestCase):
def setUp(self):
self.project.flags.has_issue_alerts_targeting = False
self.project.save()
def test_simple(self):
self.login_as(user=self.user)
team = self.create_team()
project1 = self.create_project(teams=[team], name="foo")
self.create_project(teams=[team], name="baz")
url = reverse(
"sentry-api-0-project-rules-configuration",
kwargs={"organization_slug": project1.organization.slug, "project_slug": project1.slug},
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert len(response.data["actions"]) == 4
assert len(response.data["conditions"]) == 9
@property
def rules(self):
rules = RuleRegistry()
rule = Mock()
rule.id = "sentry.mail.actions.NotifyEmailAction"
rule.rule_type = "action/lol"
node = rule.return_value
node.id = "sentry.mail.actions.NotifyEmailAction"
node.label = "hello"
node.prompt = "hello"
node.is_enabled.return_value = True
node.form_fields = {}
rules.add(rule)
return rules
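    # The mocked registry above exposes a single enabled NotifyEmailAction, so the
    # tests below can check whether the endpoint hides or shows that action
    # depending on the project's issue-alerts-targeting flag and query parameters.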
def run_mock_rules_test(self, expected_actions, querystring_params, rules=None):
if not rules:
rules = self.rules
self.login_as(user=self.user)
with patch("sentry.api.endpoints.project_rules_configuration.rules", rules):
url = reverse(
"sentry-api-0-project-rules-configuration",
kwargs={
"organization_slug": self.organization.slug,
"project_slug": self.project.slug,
},
)
response = self.client.get(url, querystring_params, format="json")
assert response.status_code == 200, response.content
assert len(response.data["actions"]) == expected_actions
assert len(response.data["conditions"]) == 0
def test_filter_out_notify_email_action(self):
self.run_mock_rules_test(0, {})
def test_filter_show_notify_email_action_migrated_project(self):
self.project.flags.has_issue_alerts_targeting = True
self.project.save()
self.run_mock_rules_test(1, {})
def test_filter_show_notify_email_action_override(self):
self.run_mock_rules_test(0, {"issue_alerts_targeting": "0"})
self.run_mock_rules_test(1, {"issue_alerts_targeting": "1"})
def test_show_notify_event_service_action(self):
rules = RuleRegistry()
rule = Mock()
rule.id = "sentry.rules.actions.notify_event_service.NotifyEventServiceAction"
rule.rule_type = "action/lol"
node = rule.return_value
node.id = rule.id
node.label = "hello"
node.prompt = "hello"
node.is_enabled.return_value = True
node.form_fields = {}
node.get_services.return_value = [Mock()]
rules.add(rule)
self.run_mock_rules_test(1, {}, rules=rules)
def test_hide_empty_notify_event_service_action(self):
rules = RuleRegistry()
rule = Mock()
rule.id = "sentry.rules.actions.notify_event_service.NotifyEventServiceAction"
rule.rule_type = "action/lol"
node = rule.return_value
node.id = rule.id
node.label = "hello"
node.prompt = "hello"
node.is_enabled.return_value = True
node.form_fields = {}
node.get_services.return_value = []
rules.add(rule)
self.run_mock_rules_test(0, {}, rules=rules)
| 36.235849 | 100 | 0.640198 | 3,635 | 0.946368 | 0 | 0 | 448 | 0.116636 | 0 | 0 | 616 | 0.160375 |
478df9b66c40c191cbb8cf8a0885ef1863ea295a | 3,297 | py | Python | train-script.py | praepunctis/chordtext | b58ea6fea7bf417e6e18daad6c4ce6ea878bd27b | ["MIT"] | 1 | 2020-12-02T10:04:08.000Z | 2020-12-02T10:04:08.000Z | train-script.py | praepunctis/chordtext | b58ea6fea7bf417e6e18daad6c4ce6ea878bd27b | ["MIT"] | null | null | null | train-script.py | praepunctis/chordtext | b58ea6fea7bf417e6e18daad6c4ce6ea878bd27b | ["MIT"] | null | null | null |
# train-script.py
# Grab data from movie_data.csv and train a ML model.
# Kelly Fesler (c) Nov 2020
# Modified from Soumya Gupta (c) Jan 2020
# STEP 1: import -------------------------------------------
# Import libraries
import urllib.request
import os
import pandas as pd
import numpy as np
import nltk
import sklearn
import joblib
from nltk.tokenize import RegexpTokenizer
from nltk.stem.porter import PorterStemmer
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
# STEP 2: read ---------------------------------------------
# Read in the large movie review dataset; display the first 3 lines
df = pd.read_csv('movie_data.csv', encoding='utf-8')
print("Loading data...\n")
data_top = df.head(3)
print(data_top)
# STEP 3: clean --------------------------------------------
# prepare tokenizer, stopwords, stemmer objects
tokenizer = RegexpTokenizer(r'\w+')
en_stopwords = set(stopwords.words('english'))
ps = PorterStemmer()
# set up helper function to clean data:
def getStemmedReview(review):
# turn to lowercase
review = review.lower()
review = review.replace("<br /><br />", " ")
# tokenize
tokens = tokenizer.tokenize(review)
new_tokens = [token for token in tokens if token not in en_stopwords]
# stem
stemmed_tokens = [ps.stem(token) for token in new_tokens]
clean_review = ' '.join(stemmed_tokens)
return clean_review
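# Illustrative example (not from the original script): getStemmedReview("This movie was GREAT!<br /><br />I loved it")
# lowercases the text, strips stopwords and stems what is left, returning roughly "movi great love".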
# tokenize & clean all reviews
print("")
print("Tokenizing & cleaning...")
df['review'] = df['review'].apply(getStemmedReview)
# STEP 4: split --------------------------------------------
print("Splitting...")
# split: 35k rows for training
X_train = df.loc[:35000, 'review'].values
Y_train = df.loc[:35000, 'sentiment'].values
# split: 15k rows for testing
X_test = df.loc[35000:, 'review'].values
Y_test = df.loc[35000:, 'sentiment'].values
# STEP 5: transform to feature vectors ---------------------
# set up vectorizer from sklearn
vectorizer = TfidfVectorizer(sublinear_tf=True, encoding='utf-8')
# train on the training data
print("Training...")
vectorizer.fit(X_train)
# after learning from training data, transform the test data
print("Transforming...")
X_train = vectorizer.transform(X_train)
X_test = vectorizer.transform(X_test)
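# Note: the vectorizer is fit on the training split only, so the test split is transformed
# with vocabulary/IDF statistics learned from the training data (avoids leakage).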
# STEP 6: create the ML model ------------------------------
print("Creating the model...")
model = LogisticRegression(solver='liblinear')
model.fit(X_train,Y_train)
print("ok!")
# print scores
print("")
print("Score on training data is: " + str(model.score(X_train,Y_train)))
print("Score on testing data is:" + str(model.score(X_test,Y_test)))
# STEP 7: test model output --------------------------------
print("")
print("Testing a negative review...")
# Sampling a negative review; let's compare expected & predicted values
print("Expected sentiment: 0")
print("Predicted sentiment: " + str(model.predict(X_test[0])))
print("Expected probabilities: ~0.788, ~0.211")
print("Predicted probabilities: " + str(model.predict_proba(X_test[0])))
# STEP 8: save & export the model --------------------------
print("")
print("Exporting to .pkl files...")
joblib.dump(en_stopwords,'stopwords.pkl')
joblib.dump(model,'model.pkl')
joblib.dump(vectorizer,'vectorizer.pkl')
print("done")
| 28.422414 | 73 | 0.668487 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,640 | 0.497422 |
478eeb89ce278e9a0867db0565808bfedbcc11dc | 10,947 | py | Python | roerich/algorithms.py | HSE-LAMBDA/roerich | 17e178292593d1ea6a821b99705620ba066abd2a | [
"BSD-2-Clause"
]
| 10 | 2020-12-01T13:58:27.000Z | 2022-01-17T12:01:31.000Z | roerich/algorithms.py | HSE-LAMBDA/roerich | 17e178292593d1ea6a821b99705620ba066abd2a | [
"BSD-2-Clause"
]
| 3 | 2021-03-07T14:06:22.000Z | 2022-01-18T14:23:16.000Z | roerich/algorithms.py | HSE-LAMBDA/roerich | 17e178292593d1ea6a821b99705620ba066abd2a | [
"BSD-2-Clause"
]
| 2 | 2020-12-01T14:04:36.000Z | 2022-03-24T12:52:32.000Z | from abc import ABCMeta, abstractmethod
from collections import defaultdict
from copy import deepcopy
from typing import Union, Type, Any, Tuple
import numpy as np
import torch
import torch.nn as nn
from scipy.signal import find_peaks_cwt
from .net import MyNN, MyNNRegressor
from .utils import autoregression_matrix, unified_score
from .metrics import KL_sym, KL, JSD, PE, PE_sym, Wasserstein
from .scaler import SmaScalerCache
from .helper import SMA
class ChangePointDetection(metaclass=ABCMeta):
def __init__(self, scaler: Any = "default", metric: str = "KL", window_size: int = 1, periods: int = 10,
lag_size: int = 0, step: int = 1, n_epochs: int = 100, lr: float = 0.01, lam: float = 0,
optimizer: str = "Adam", debug: int = 0):
"""
Parameters
----------
        scaler: A scaler object used to scale the input data. The default one is `SmaScalerCache`
        metric: Divergence used to turn the reference and test predictions into a change-point score. One of "KL_sym", "KL", "JSD", "PE", "PE_sym" or "W" (Wasserstein)
window_size: A size of a window when splitting input data into train and test arrays
periods: A number of previous data-points used when constructing autoregressive matrix
lag_size: A distance between train- and test- windows
step: Each `step`-th data-point is used when creating the input dataset
n_epochs: A number of epochs during training NN
lr: A learning rate at each step of optimizer
lam: A regularization rate
optimizer: One of Adam, SGD, RMSprop or ASGD optimizers
debug: default zero
"""
self.scaler = SmaScalerCache(window_size + lag_size) if scaler == "default" else scaler
self.metric = metric
self.window_size = window_size
self.periods = periods
self.lag_size = lag_size
self.step = step
self.n_epochs = n_epochs
self.lr = lr
self.lam = lam
self.debug = debug
self._time_shift = lag_size + window_size
self.avg_window = lag_size + window_size
self.peak_widths = [0.25 * (lag_size + window_size)]
self.optimizers = defaultdict(lambda: torch.optim.Adam)
self.optimizers["Adam"] = torch.optim.Adam
self.optimizers["SGD"] = torch.optim.SGD
self.optimizers["RMSprop"] = torch.optim.RMSprop
self.optimizers["ASGD"] = torch.optim.ASGD
self.optimizer = self.optimizers[optimizer]
self.metric_func = {"KL_sym": KL_sym,
"KL": KL,
"JSD": JSD,
"PE": PE,
"PE_sym": PE_sym,
"W": Wasserstein
}
@abstractmethod
def init_net(self, n_inputs: int) -> None:
"""
Initialize neural network based on `self.base_net` class
Parameters
----------
n_inputs: Number of inputs of neural network
-------
"""
pass
def predict(self, X: Union[np.ndarray, torch.Tensor]) -> Tuple[Any, Any]:
"""
Determines a CPD score for every data-point
Parameters
----------
X: An input data
Returns `avg_unified_score`: An averaged, unified and shifted CPD score for every data-point in X
`peaks` Locations of CPD points along all data-points
-------
"""
X_auto = autoregression_matrix(X, periods=self.periods, fill_value=0)
self.init_net(X_auto.shape[1])
T, reference, test = self.reference_test(X_auto)
scores = []
for i in range(len(reference)):
X_, y_ = self.preprocess(reference[i], test[i])
score = self.reference_test_predict(X_, y_)
scores.append(score)
T_scores = np.array([T[i] for i in range(len(reference))])
scores = np.array(scores)
# todo optimize memory
T_uni = np.arange(len(X))
T_scores = T_scores - self._time_shift
un_score = unified_score(T_uni, T_scores, scores)
avg_unified_score = SMA(un_score, self.avg_window)
peaks = self.find_peaks_cwt(avg_unified_score, widths=self.peak_widths)
return avg_unified_score, peaks
def reference_test(self, X: Union[torch.Tensor, np.ndarray]) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Creates reference and test datasets based on autoregressive matrix.
Parameters
----------
X: An autoregressive matrix
Returns tuple of numpy arrays: time-steps, reference and test datasets
-------
"""
N = self.lag_size
ws = self.window_size
T = []
reference = []
test = []
for i in range(2 * ws + N - 1, len(X), self.step):
T.append(i)
reference.append(X[i - 2 * ws - N + 1:i - ws - N + 1])
test.append(X[i - ws + 1:i + 1])
return np.array(T), np.array(reference), np.array(test)
def preprocess(self, X_ref: np.ndarray, X_test: np.ndarray) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Creates X and y datasets for training NN by stacking reference and test datasets.
        Also applies a scaling transformation to the resulting X dataset.
        Labels for reference data-points are 0s.
        Labels for test data-points are 1s.
Parameters
----------
X_ref: reference data-points
X_test: test data-points
Returns
-------
Tuple of training data
"""
y_ref = np.zeros(len(X_ref))
y_test = np.ones(len(X_test))
X = np.vstack((X_ref, X_test))
y = np.hstack((y_ref, y_test))
X = self.scaler.fit_transform(X)
X = torch.from_numpy(X).float()
y = torch.from_numpy(y).float()
return X, y
def find_peaks_cwt(self, vector, *args, **kwargs):
"""
Find peaks function based on scipy.signal package
Parameters
----------
vector: CPD scores array
args: see docs for https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks_cwt.html
kwargs
Returns
-------
Array with location of peaks
"""
peaks = find_peaks_cwt(vector, *args, **kwargs)
return peaks
@abstractmethod
def reference_test_predict(self, X: torch.Tensor, y: torch.Tensor):
"""
Training process of forward, backward and optimize steps.
Parameters
----------
X: train data
y: train labels
Returns
-------
None
"""
pass
class OnlineNNClassifier(ChangePointDetection):
def __init__(self, net: Union[Type[nn.Module], str] = "default", *args, **kwargs):
"""
Parameters
----------
net: Custom torch.nn.Module neural network or "default" one
args: see parent class
kwargs: see parent class
"""
super().__init__(*args, **kwargs)
self.criterion = nn.BCELoss()
self.base_net = MyNN if net == "default" else net
self.net = None
self.opt = None
def init_net(self, n_inputs):
self.net = self.base_net(n_inputs)
self.opt = self.optimizer(
self.net.parameters(),
lr=self.lr,
weight_decay=self.lam
)
def reference_test_predict(self, X, y):
self.net.train(False)
n_last = min(self.window_size, self.step)
ref_preds = self.net(X[y == 0][-n_last:]).detach().numpy()
test_preds = self.net(X[y == 1][-n_last:]).detach().numpy()
self.net.train(True)
for epoch in range(self.n_epochs): # loop over the dataset multiple times
# forward + backward + optimize
outputs = self.net(X)
loss = self.criterion(outputs.squeeze(), y)
# set gradients to zero
self.opt.zero_grad()
loss.backward()
self.opt.step()
score = self.metric_func[self.metric](ref_preds, test_preds)
return score
class OnlineNNRuLSIF(ChangePointDetection):
def __init__(self, alpha, net="default", *args, **kwargs):
"""
Parameters
----------
alpha: The `alpha` parameter in a loss function
net: Custom torch.nn.Module neural network or "default" one
args: see parent class
kwargs: see parent class
"""
super().__init__(*args, **kwargs)
self.alpha = alpha
self.base_net = MyNNRegressor if net == "default" else net
self.net1 = None
self.net2 = None
self.opt1 = None
self.opt2 = None
def init_net(self, n_inputs):
self.net1 = self.base_net(n_inputs)
self.opt1 = self.optimizer(
self.net1.parameters(),
lr=self.lr,
weight_decay=self.lam
)
self.net2 = deepcopy(self.net1)
self.opt2 = deepcopy(self.opt1)
def compute_loss(self, y_pred_batch_ref, y_pred_batch_test):
loss = 0.5 * (1 - self.alpha) * (y_pred_batch_ref ** 2).mean() + \
0.5 * self.alpha * (y_pred_batch_test ** 2).mean() - (y_pred_batch_test).mean()
return loss
def reference_test_predict(self, X, y):
n_last = min(self.window_size, self.step)
self.net1.train(False)
test_preds = self.net1(X[y == 1][-n_last:]).detach().numpy()
self.net2.train(False)
ref_preds = self.net2(X[y == 0][-n_last:]).detach().numpy()
self.net1.train(True)
self.net2.train(True)
for epoch in range(self.n_epochs): # loop over the dataset multiple times
# forward + backward + optimize
y_pred_batch = self.net1(X).squeeze()
y_pred_batch_ref = y_pred_batch[y == 0]
y_pred_batch_test = y_pred_batch[y == 1]
loss1 = self.compute_loss(y_pred_batch_ref, y_pred_batch_test)
# set gradients to zero
self.opt1.zero_grad()
loss1.backward()
self.opt1.step()
# forward + backward + optimize
y_pred_batch = self.net2(X).squeeze()
y_pred_batch_ref = y_pred_batch[y == 1]
y_pred_batch_test = y_pred_batch[y == 0]
loss2 = self.compute_loss(y_pred_batch_ref, y_pred_batch_test)
# set gradients to zero
self.opt2.zero_grad()
loss2.backward()
self.opt2.step()
score = (0.5 * np.mean(test_preds) - 0.5) + (0.5 * np.mean(ref_preds) - 0.5)
return score
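# Minimal usage sketch (illustrative; `signal` is assumed to be a (T, n_features) numpy array):
#   cpd = OnlineNNClassifier(metric="KL_sym", window_size=10, lag_size=10, periods=5)
#   score, peaks = cpd.predict(signal)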
| 34.863057 | 128 | 0.567279 | 10,483 | 0.957614 | 0 | 0 | 592 | 0.054079 | 0 | 0 | 3,451 | 0.315246 |
479004b6cfb780abc09aa80bc0894ff64e48376b | 182 | py | Python | extra_tests/snippets/stdlib_types.py | mainsail-org/RustPython | 5d2d87c24f1ff7201fcc8d4fcffadb0ec12dc127 | [
"CC-BY-4.0",
"MIT"
]
| 11,058 | 2018-05-29T07:40:06.000Z | 2022-03-31T11:38:42.000Z | extra_tests/snippets/stdlib_types.py | mainsail-org/RustPython | 5d2d87c24f1ff7201fcc8d4fcffadb0ec12dc127 | [
"CC-BY-4.0",
"MIT"
]
| 2,105 | 2018-06-01T10:07:16.000Z | 2022-03-31T14:56:42.000Z | extra_tests/snippets/stdlib_types.py | mainsail-org/RustPython | 5d2d87c24f1ff7201fcc8d4fcffadb0ec12dc127 | [
"CC-BY-4.0",
"MIT"
]
| 914 | 2018-07-27T09:36:14.000Z | 2022-03-31T19:56:34.000Z | import types
from testutils import assert_raises
ns = types.SimpleNamespace(a=2, b='Rust')
assert ns.a == 2
assert ns.b == "Rust"
with assert_raises(AttributeError):
_ = ns.c
| 16.545455 | 41 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.065934 |
4790692759f37d6994f35811b0b4dd07154a5efb | 1,664 | py | Python | 1018.py | idarlenearaujo/URI_Python | c4517f90f5310894347edcf58a28d3e569a89a2b | [
"MIT"
]
| null | null | null | 1018.py | idarlenearaujo/URI_Python | c4517f90f5310894347edcf58a28d3e569a89a2b | [
"MIT"
]
| null | null | null | 1018.py | idarlenearaujo/URI_Python | c4517f90f5310894347edcf58a28d3e569a89a2b | [
"MIT"
]
 | null | null | null | # input
value = int(input())
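# Worked example (illustrative): an input of 576 pays out 5 x R$100, 1 x R$50,
# 1 x R$20, 1 x R$5 and 1 x R$1 (500 + 50 + 20 + 5 + 1 = 576).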
# variables
cashier = True
valueI = value
n1 = 0
n2 = 0
n5 = 0
n10 = 0
n20 = 0
n50 = 0
n100 = 0
# loop; exits once cashier is set to False
while cashier == True:
    # R$ 100.00 notes
if value >= 100:
valueA = value // 100
n100 = valueA
valueB = valueA * 100
value = value - valueB
    # R$ 50.00 notes
elif value >= 50:
valueA = value // 50
n50 = valueA
valueB = valueA * 50
value = value - valueB
    # R$ 20.00 notes
elif value >= 20:
valueA = value // 20
n20 = valueA
valueB = valueA * 20
value = value - valueB
    # R$ 10.00 notes
elif value >= 10:
valueA = value // 10
n10 = valueA
valueB = valueA * 10
value = value - valueB
    # R$ 5.00 notes
elif value >= 5:
valueA = value // 5
n5 = valueA
valueB = valueA * 5
value = value - valueB
    # R$ 2.00 notes
elif value >= 2:
valueA = value // 2
n2 = valueA
valueB = valueA * 2
value = value - valueB
    # R$ 1.00 notes
elif value >= 1:
valueA = value // 1
n1 = valueA
valueB = valueA * 1
value = value - valueB
    # nothing left to pay out
elif value == 0:
        # exit condition
cashier = False
print(
'{}\n{} nota(s) de R$ 100,00\n{} nota(s) de R$ 50,00\n{} nota(s) de R$ 20,00\n{} nota(s) de R$ 10,00\n{} nota(s) de R$ 5,00\n{} nota(s) de R$ 2,00\n{} nota(s) de R$ 1,00'.format(
valueI, n100, n50, n20, n10, n5, n2, n1)) | 20.292683 | 183 | 0.485577 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 371 | 0.222555 |
4790afe07c99445354df0a84235383f75387a69b | 2,164 | py | Python | selene/common/delegation.py | KalinkinaMaria/selene | 859e1102c85740b52af8d0f08dd6b6490b4bd2ff | [
"MIT"
]
| null | null | null | selene/common/delegation.py | KalinkinaMaria/selene | 859e1102c85740b52af8d0f08dd6b6490b4bd2ff | [
"MIT"
]
| 1 | 2021-06-02T04:21:17.000Z | 2021-06-02T04:21:17.000Z | selene/common/delegation.py | vkarpenko/selene | 4776357430c940be38f38be9981006dd156f9730 | [
"MIT"
]
| null | null | null | from abc import ABCMeta
def _make_delegator_method(name):
def delegator(self, *args, **kwargs):
return getattr(self.__delegate__, name)(*args, **kwargs) # pragma: no cover
# todo: consider using __call__() instead of __delegate__
# in Python delegates are objects with __call__ method..
# so why not to use the following:
# return getattr(self(), name)(*args, **kwargs)
# ?
return delegator
# def _make_delegator_method_to_property(name):
# def delegator(self, *args, **kwargs):
# return getattr(self.__delegate__, name)
# return delegator
def _make_delegator_property(name):
return property(lambda self: getattr(self.__delegate__, name)) # pragma: no cover
def _is_property(name, cls):
return isinstance(getattr(cls, name, None), property)
class DelegatingMeta(ABCMeta):
def __new__(mcs, name, bases, dct):
abstract_property_names = frozenset.union(
*(frozenset(filter(lambda m: _is_property(m, base), base.__abstractmethods__))
for base in bases))
for base in bases:
base.__abstractmethods__ = frozenset(filter(lambda m: not _is_property(m, base), base.__abstractmethods__))
abstract_method_names = frozenset.union(*(base.__abstractmethods__
for base in bases))
for name in abstract_method_names:
if name not in dct:
dct[name] = _make_delegator_method(name)
# for name in abstract_property_names:
# if name not in dct:
# dct[name] = _make_delegator_method_to_property(name)
cls = super(DelegatingMeta, mcs).__new__(mcs, name, bases, dct)
for name in abstract_property_names:
if name not in dct:
setattr(cls, name, _make_delegator_property(name))
return cls
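# Minimal usage sketch (illustrative): a concrete class exposes a `__delegate__`
# property and uses this metaclass, e.g.
#     class Wrapper(SomeAbstractBase, metaclass=DelegatingMeta):
#         def __init__(self, delegate):
#             self._delegate = delegate
#         @property
#         def __delegate__(self):
#             return self._delegate
# every abstract method/property of SomeAbstractBase is then forwarded to `__delegate__`.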
# todo: finalize naming: Delegating, Delegate, actual_delegate, delegatee, delegator o_O ?
# We have the following players in this game:
# * MetaClass for Classes of Objects who delegates their implementation to aggregated object
# So who should be named how?
| 34.903226 | 119 | 0.657116 | 1,064 | 0.491682 | 0 | 0 | 0 | 0 | 0 | 0 | 779 | 0.359982 |
47928a4b5a14d921593fc3e938a7ef189730aea5 | 186 | py | Python | src/pymystem_example1.py | componavt/neural_synset | b58f03af90af6fe2ffe8253a1222c99c2b8907df | [
"Unlicense"
]
| null | null | null | src/pymystem_example1.py | componavt/neural_synset | b58f03af90af6fe2ffe8253a1222c99c2b8907df | [
"Unlicense"
]
| null | null | null | src/pymystem_example1.py | componavt/neural_synset | b58f03af90af6fe2ffe8253a1222c99c2b8907df | [
"Unlicense"
]
| null | null | null | # -*- coding: utf-8 -*-
from pymystem3 import Mystem
# text = "some good newses"
text = "Красивая мама красиво мыла раму"
m = Mystem()
lemmas = m.lemmatize(text)
print(''.join(lemmas))
| 20.666667 | 40 | 0.677419 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 112 | 0.525822 |
4793cb0e3e72768ba54484744717ebd0208905a2 | 795 | py | Python | backend/appengine/routes/desenhos/edit.py | faahbih/projetoolivarts | 3dfd955fe44d58a38b85b6643440a600b0bde81a | [
"MIT"
]
| null | null | null | backend/appengine/routes/desenhos/edit.py | faahbih/projetoolivarts | 3dfd955fe44d58a38b85b6643440a600b0bde81a | [
"MIT"
]
| null | null | null | backend/appengine/routes/desenhos/edit.py | faahbih/projetoolivarts | 3dfd955fe44d58a38b85b6643440a600b0bde81a | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from config.template_middleware import TemplateResponse
from desenho.desenho_model import Desenho, DesenhoForm
from gaecookie.decorator import no_csrf
#from pedido.pedido_model import Pedido, PedidoForm
from routes import desenhos
from tekton.gae.middleware.redirect import RedirectResponse
from tekton.router import to_path
@no_csrf
def index(desenho_id):
desenhos = Desenho.get_by_id(int(desenho_id))
ctx={'desenhos': desenhos,
'salvar_path':to_path(salvar)}
return TemplateResponse(ctx,'/desenhos/desenhos_form.html')
def salvar(desenho_id,**kwargs):
desenhos = Desenho.get_by_id(desenho_id)
desenho = DesenhoForm(**kwargs)
desenho.put()
return RedirectResponse(desenhos)
| 34.565217 | 63 | 0.783648 | 0 | 0 | 0 | 0 | 216 | 0.271698 | 0 | 0 | 127 | 0.159748 |
4794b8cc274b020b95e48eb002f538acf5d6b189 | 4,484 | py | Python | data/create_train_test_split.py | chmo2019/CUB-200-with-TFOD-API | 8f46f2f91085f3e35829f8c7ce0289771ebb0294 | [
"MIT"
]
| null | null | null | data/create_train_test_split.py | chmo2019/CUB-200-with-TFOD-API | 8f46f2f91085f3e35829f8c7ce0289771ebb0294 | [
"MIT"
]
| null | null | null | data/create_train_test_split.py | chmo2019/CUB-200-with-TFOD-API | 8f46f2f91085f3e35829f8c7ce0289771ebb0294 | [
"MIT"
]
| null | null | null | # import necessary libraries
import csv
from PIL import Image
import argparse
# create argument parser with PATH argument
ap = argparse.ArgumentParser()
ap.add_argument('-p', '--path', required=True,
help='''PATH to CUB_200_2011 folder i.e. folder with CUB 200 csv files
(make sure to include full path name for so other scripts can find the data file path(s))''')
args = ap.parse_args()
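# Example invocation (path is illustrative):
#   python create_train_test_split.py --path /home/user/CUB_200_2011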
def create_train_test_split(PATH):
# open CUB 200 .txt files
images = open(PATH + "/images.txt", "r")
image_class_labels = open(PATH + "/image_class_labels.txt", "r")
bounding_boxes = open(PATH + "/bounding_boxes.txt", "r")
split = open(PATH + "/train_test_split.txt", "r")
classes = open(PATH + "/classes.txt", "r")
# create csv readers for each .txt file
tsv = csv.reader(split, delimiter=" ")
tsv_images = csv.reader(images, delimiter=" ")
tsv_class_labels = csv.reader(image_class_labels, delimiter=" ")
tsv_bbox = csv.reader(bounding_boxes, delimiter=" ")
tsv_classes = csv.reader(classes, delimiter=" ")
# create dictionary to store data
train_test = {"0":
{"filename": [],
"id": [],
"width": [],
"height": [],
"class": [],
"x" : [],
"y": [],
"img_w": [],
"img_h": []},
"1":
{"filename": [],
"id": [],
"width": [],
"height": [],
"class": [],
"x" : [],
"y": [],
"img_w": [],
"img_h": []}} # '0' for test '1' for train
# write id into dictionary for create train test split
for row in tsv:
train_test["{}".format(row[1])]["id"].append(row[0])
split.close()
classes_list = {}
# append class names to dictionary
for row in tsv_classes:
classes_list["{}".format(row[0])] = row[1]
classes.close()
i = 0
j = 0
# add image sizes, labels, and bounding box coordinates to dictionary
for (image, label, bbox) in zip(tsv_images, tsv_class_labels, tsv_bbox):
if train_test["0"]["id"][i] == image[0]:
train_test["0"]["filename"].append(PATH + "/images/" + image[1])
im = Image.open(PATH + "/images/"+ image[1])
train_test["0"]["img_w"].append(im.size[0])
train_test["0"]["img_h"].append(im.size[1])
train_test["0"]["class"].append(classes_list["{}".format(label[1])])
train_test["0"]["x"].append(bbox[1])
train_test["0"]["y"].append(bbox[2])
train_test["0"]["width"].append(bbox[3])
train_test["0"]["height"].append(bbox[4])
i += 1
else:
train_test["1"]["filename"].append(PATH + "/images/" + image[1])
im = Image.open(PATH + "/images/"+ image[1])
train_test["1"]["img_w"].append(im.size[0])
train_test["1"]["img_h"].append(im.size[1])
train_test["1"]["class"].append(classes_list["{}".format(label[1])])
train_test["1"]["x"].append(bbox[1])
train_test["1"]["y"].append(bbox[2])
train_test["1"]["width"].append(bbox[3])
train_test["1"]["height"].append(bbox[4])
j += 1
images.close()
image_class_labels.close()
bounding_boxes.close()
# open csv files for coco-formatted data
f_train = open("./annotations/train.csv", "w")
f_test = open("./annotations/test.csv", "w")
# create coco csv header
f_test.write("{},{},{},{},{},{},{},{}\n".format("filename","width","height","class",
"xmin", "ymin", "xmax", "ymax"))
# write coco-formatted data into test split csv
for k in range(len(train_test["0"]["filename"])):
f_test.write("{},{},{},{},{},{},{},{}\n".format(train_test["0"]["filename"][k],
train_test["0"]["img_w"][k],
train_test["0"]["img_h"][k],
train_test["0"]["class"][k],
train_test["0"]["x"][k],
train_test["0"]["y"][k],
float(train_test["0"]["x"][k]) +
float(train_test["0"]["width"][k]),
float(train_test["0"]["y"][k]) +
float(train_test["0"]["height"][k])))
f_train.write("{},{},{},{},{},{},{},{}\n".format("filename","width","height","class",
"xmin", "ymin", "xmax", "ymax"))
# write coco-formatted data into train split csv
for k in range(len(train_test["1"]["filename"])):
f_train.write("{},{},{},{},{},{},{},{}\n".format(train_test["1"]["filename"][k],
train_test["1"]["img_w"][k],
train_test["1"]["img_h"][k],
train_test["1"]["class"][k],
train_test["1"]["x"][k],
train_test["1"]["y"][k],
float(train_test["1"]["x"][k]) +
float(train_test["1"]["width"][k]),
float(train_test["1"]["y"][k]) +
float(train_test["1"]["height"][k])))
f_test.close()
f_train.close()
if __name__ == "__main__":
# run with command line arguments
create_train_test_split(args.path) | 32.258993 | 94 | 0.594558 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,669 | 0.372212 |
4795e4597ca4cff70deec87c952fb0febb0256f0 | 3,487 | py | Python | homework/1-AE/losses.py | Penchekrak/DeepGenerativeModels | 7ee829682e8ed51bc637e2c6def0b9f810f384bc | [
"MIT"
]
| null | null | null | homework/1-AE/losses.py | Penchekrak/DeepGenerativeModels | 7ee829682e8ed51bc637e2c6def0b9f810f384bc | [
"MIT"
]
| null | null | null | homework/1-AE/losses.py | Penchekrak/DeepGenerativeModels | 7ee829682e8ed51bc637e2c6def0b9f810f384bc | [
"MIT"
]
| null | null | null | import torch
from torch import nn
class Criterion:
def __call__(self, model: nn.Module, batch_x: torch.Tensor, batch_y: torch.Tensor, *args, **kwargs):
pass
class Image2ImageMSELoss(Criterion):
def __init__(self):
self.mse_loss = nn.MSELoss()
def __repr__(self):
return repr(self.mse_loss)
def __call__(self, model: nn.Module, batch_x: torch.Tensor, batch_y: torch.Tensor, *args, **kwargs):
outs = model(batch_x)
loss = self.mse_loss(outs, batch_x)
return loss, outs
class Image2ImageBCELoss(Criterion):
def __init__(self, threshold=0.0):
self.bce_loss = nn.BCELoss()
self.threshold = threshold
def __repr__(self):
return str(repr(self.bce_loss)) + f"with {self.threshold} as threshold"
def __call__(self, model: nn.Module, batch_x: torch.Tensor, batch_y: torch.Tensor, *args, **kwargs):
outs = model(batch_x)
target = (batch_x > self.threshold).float()
loss = self.bce_loss(outs, target)
return loss, outs
class Image2ImageMixedLoss(Criterion):
def __init__(self, mse_weight=0.5, bce_weight=0.5, bce_threshold=0.5):
self.bce_loss = nn.BCELoss()
self.bce_weight = bce_weight
self.threshold = bce_threshold
self.mse_loss = nn.MSELoss()
self.mse_weight = mse_weight
def __call__(self, model: nn.Module, batch_x: torch.Tensor, batch_y: torch.Tensor, *args, **kwargs):
outs = model(batch_x)
mse_loss = self.mse_loss(outs, batch_x)
target = (batch_x > self.threshold).float()
bce_loss = self.bce_loss(outs, target)
return mse_loss * self.mse_weight + bce_loss * self.bce_weight, outs
def l1_loss(x):
return torch.mean(torch.sum(torch.abs(x), dim=1))
class Image2ImageMixedLossWithLasso(Criterion):
def __init__(self, mse_weight=0.5, bce_weight=0.5, bce_threshold=0.5, lasso_weight=0.001):
self.lasso_weight = lasso_weight
self.bce_loss = nn.BCELoss()
self.bce_weight = bce_weight
self.threshold = bce_threshold
self.mse_loss = nn.MSELoss()
self.mse_weight = mse_weight
def calculate_sparse_loss(self, model, image):
loss = 0
x = image
for block in model.encoder[:-1]:
x = block.conv(x)
loss += l1_loss(x)
x = block.act(block.norm(x))
x = model.encoder[-1](x)
loss += l1_loss(x)
for block in model.decoder[:-1]:
x = block.conv(x)
loss += l1_loss(x)
x = block.act(block.norm(x))
x = model.decoder[-1](x)
loss += l1_loss(x)
return loss
def __call__(self, model: nn.Module, batch_x: torch.Tensor, batch_y: torch.Tensor, *args, **kwargs):
outs = model(batch_x)
mse_loss = self.mse_loss(outs, batch_x)
target = (batch_x > self.threshold).float()
bce_loss = self.bce_loss(outs, target)
l1 = self.calculate_sparse_loss(model, batch_x)
return mse_loss * self.mse_weight + bce_loss * self.bce_weight + self.lasso_weight * l1, outs
class ClassificationCELoss(Criterion):
def __init__(self):
self.ce_loss = nn.CrossEntropyLoss()
def __repr__(self):
return repr(self.ce_loss)
def __call__(self, model: nn.Module, batch_x: torch.Tensor, batch_y: torch.Tensor, *args, **kwargs):
outs = model(batch_x)
loss = self.ce_loss(outs, batch_y)
return loss, outs
| 32.896226 | 104 | 0.634069 | 3,363 | 0.964439 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.010611 |
4795fb5c060e1fe6b6b35dd753478649d709249e | 160 | py | Python | src/app/services/users/__init__.py | dieisabel/cypherman | 06d8678b79b18aa256a79ec6967d68274f088dbc | [
"MIT"
]
| null | null | null | src/app/services/users/__init__.py | dieisabel/cypherman | 06d8678b79b18aa256a79ec6967d68274f088dbc | [
"MIT"
]
| 43 | 2021-12-02T21:26:01.000Z | 2022-02-21T08:51:06.000Z | src/app/services/users/__init__.py | dieisabel/cypherman | 06d8678b79b18aa256a79ec6967d68274f088dbc | [
"MIT"
]
| null | null | null | __all__ = [
'IUserService',
'UserService',
]
from services.users.iuser_service import IUserService
from services.users.user_service import UserService
| 20 | 53 | 0.775 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.16875 |
47981ad00b6fbe330d40b7fd3c56d0ca049b684c | 6,190 | py | Python | core/domain/wipeout_service_test.py | davehenton/oppia | 62a9e9ea8458632e39b8ab4cf15b0489ac1acad9 | [
"Apache-2.0"
]
| 1 | 2021-01-22T03:24:52.000Z | 2021-01-22T03:24:52.000Z | core/domain/wipeout_service_test.py | davehenton/oppia | 62a9e9ea8458632e39b8ab4cf15b0489ac1acad9 | [
"Apache-2.0"
]
| null | null | null | core/domain/wipeout_service_test.py | davehenton/oppia | 62a9e9ea8458632e39b8ab4cf15b0489ac1acad9 | [
"Apache-2.0"
]
| 1 | 2020-06-25T21:43:01.000Z | 2020-06-25T21:43:01.000Z | # Copyright 2019 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for wipeout service."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import rights_manager
from core.domain import topic_domain
from core.domain import topic_services
from core.domain import user_services
from core.domain import wipeout_service
from core.platform import models
from core.tests import test_utils
import feconf
(collection_models, exp_models, user_models,) = (
models.Registry.import_models([
models.NAMES.collection, models.NAMES.exploration, models.NAMES.user]))
class WipeoutServiceTests(test_utils.GenericTestBase):
"""Provides testing of the wipeout service."""
USER_1_EMAIL = '[email protected]'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = '[email protected]'
USER_2_USERNAME = 'username2'
def setUp(self):
super(WipeoutServiceTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
def test_pre_delete_user_email_subscriptions(self):
email_preferences = user_services.get_email_preferences(self.user_1_id)
self.assertEqual(
email_preferences.can_receive_email_updates,
feconf.DEFAULT_EMAIL_UPDATES_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_editor_role_email,
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_feedback_message_email,
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_subscription_email,
feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)
wipeout_service.pre_delete_user(self.user_1_id)
email_preferences = user_services.get_email_preferences(self.user_1_id)
self.assertFalse(email_preferences.can_receive_email_updates)
self.assertFalse(email_preferences.can_receive_editor_role_email)
self.assertFalse(email_preferences.can_receive_feedback_message_email)
self.assertFalse(email_preferences.can_receive_subscription_email)
def test_pre_delete_user_without_activities(self):
user_models.UserSubscriptionsModel(
id=self.user_1_id,
activity_ids=[],
collection_ids=[]
).put()
user_settings = user_services.get_user_settings(self.user_1_id)
self.assertFalse(user_settings.to_be_deleted)
wipeout_service.pre_delete_user(self.user_1_id)
user_settings = user_services.get_user_settings(self.user_1_id)
self.assertTrue(user_settings.to_be_deleted)
pending_deletion_model = (
user_models.PendingDeletionRequestModel.get_by_id(self.user_1_id))
self.assertEqual(pending_deletion_model.exploration_ids, [])
self.assertEqual(pending_deletion_model.collection_ids, [])
def test_pre_delete_user_with_activities(self):
self.save_new_valid_exploration('exp_id', self.user_1_id)
self.save_new_valid_collection(
'col_id', self.user_1_id, exploration_id='exp_id')
wipeout_service.pre_delete_user(self.user_1_id)
pending_deletion_model = (
user_models.PendingDeletionRequestModel.get_by_id(self.user_1_id))
self.assertEqual(
pending_deletion_model.exploration_ids, ['exp_id'])
self.assertEqual(pending_deletion_model.collection_ids, ['col_id'])
def test_pre_delete_user_with_activities_multiple_owners(self):
user_services.update_user_role(
self.user_1_id, feconf.ROLE_ID_COLLECTION_EDITOR)
user_1_actions = user_services.UserActionsInfo(self.user_1_id)
self.save_new_valid_exploration('exp_id', self.user_1_id)
rights_manager.assign_role_for_exploration(
user_1_actions, 'exp_id', self.user_2_id, rights_manager.ROLE_OWNER)
self.save_new_valid_collection(
'col_id', self.user_1_id, exploration_id='exp_id')
rights_manager.assign_role_for_collection(
user_1_actions, 'col_id', self.user_2_id, rights_manager.ROLE_OWNER)
wipeout_service.pre_delete_user(self.user_1_id)
pending_deletion_model = (
user_models.PendingDeletionRequestModel.get_by_id(self.user_1_id))
self.assertEqual(
pending_deletion_model.exploration_ids, [])
self.assertEqual(pending_deletion_model.collection_ids, [])
def test_pre_delete_user_collection_is_marked_deleted(self):
self.save_new_valid_collection(
'col_id', self.user_1_id)
collection_model = collection_models.CollectionModel.get_by_id('col_id')
self.assertFalse(collection_model.deleted)
wipeout_service.pre_delete_user(self.user_1_id)
collection_model = collection_models.CollectionModel.get_by_id('col_id')
self.assertTrue(collection_model.deleted)
def test_pre_delete_user_exploration_is_marked_deleted(self):
self.save_new_valid_exploration('exp_id', self.user_1_id)
exp_model = exp_models.ExplorationModel.get_by_id('exp_id')
self.assertFalse(exp_model.deleted)
wipeout_service.pre_delete_user(self.user_1_id)
exp_model = exp_models.ExplorationModel.get_by_id('exp_id')
self.assertTrue(exp_model.deleted)
| 42.108844 | 80 | 0.745073 | 4,949 | 0.799515 | 0 | 0 | 0 | 0 | 0 | 0 | 931 | 0.150404 |
4798901a83af76dc1807e8d5ec48f5223c016346 | 1,109 | py | Python | drf_to_s3/middleware.py | treyhunner/drf-to-s3 | 2384b7e277da0e795ab9e0241e829bcc4ca4dc77 | [
"MIT"
]
| 28 | 2015-01-15T18:31:24.000Z | 2018-11-08T07:33:42.000Z | drf_to_s3/middleware.py | treyhunner/drf-to-s3 | 2384b7e277da0e795ab9e0241e829bcc4ca4dc77 | [
"MIT"
]
| 10 | 2020-01-01T07:26:19.000Z | 2021-06-25T15:26:53.000Z | drf_to_s3/middleware.py | treyhunner/drf-to-s3 | 2384b7e277da0e795ab9e0241e829bcc4ca4dc77 | [
"MIT"
]
| 7 | 2015-01-29T20:59:29.000Z | 2017-04-24T16:05:48.000Z | class UploadPrefixMiddleware(object):
'''
Sets a cookie with the upload prefix.
To be agnostic about your method of user authentication, this is
handled using middleware. It can't be in a signal, since the signal
handler doesn't have access to the response.
In most applications, the client already has access to a
normalized username, so you probably don't need this at
all.
To use this, add it to your MIDDLEWARE_CLASSES:
MIDDLEWARE_CLASSES = (
...
'drf_to_s3.middleware.UploadPrefixMiddleware',
...
)
'''
def process_response(self, request, response):
from django.conf import settings
from rest_framework.exceptions import PermissionDenied
from .access_control import upload_prefix_for_request
cookie_name = getattr(settings, 'UPLOAD_PREFIX_COOKIE_NAME', 'upload_prefix')
try:
response.set_cookie(cookie_name, upload_prefix_for_request(request))
except PermissionDenied:
response.delete_cookie(cookie_name)
return response
| 32.617647 | 85 | 0.682597 | 1,108 | 0.999098 | 0 | 0 | 0 | 0 | 0 | 0 | 604 | 0.544635 |
479926439a3bedce30e4792e3973470d8b31f04e | 14,157 | py | Python | deepcomp/util/env_setup.py | CN-UPB/DeepCoMP | f9f64873184bb53b5687ae62f8ba2b84da423692 | [
"MIT"
]
| 19 | 2021-03-17T12:59:48.000Z | 2022-03-24T09:04:32.000Z | deepcomp/util/env_setup.py | CN-UPB/DeepCoMP | f9f64873184bb53b5687ae62f8ba2b84da423692 | [
"MIT"
]
| 1 | 2021-03-08T16:27:49.000Z | 2021-03-08T16:27:49.000Z | deepcomp/util/env_setup.py | CN-UPB/DeepCoMP | f9f64873184bb53b5687ae62f8ba2b84da423692 | [
"MIT"
]
| 6 | 2021-01-25T19:34:18.000Z | 2022-03-20T05:56:33.000Z | """Utility module for setting up different envs"""
import numpy as np
import structlog
from shapely.geometry import Point
from ray.rllib.agents.ppo import DEFAULT_CONFIG
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from deepcomp.util.constants import SUPPORTED_ENVS, SUPPORTED_AGENTS, SUPPORTED_SHARING, SUPPORTED_UE_ARRIVAL, \
SUPPORTED_UTILITIES
from deepcomp.env.single_ue.variants import RelNormEnv
from deepcomp.env.multi_ue.central import CentralRelNormEnv
from deepcomp.env.multi_ue.multi_agent import MultiAgentMobileEnv
from deepcomp.env.entities.user import User
from deepcomp.env.entities.station import Basestation
from deepcomp.env.entities.map import Map
from deepcomp.env.util.movement import RandomWaypoint
from deepcomp.util.callbacks import CustomMetricCallbacks
log = structlog.get_logger()
def get_env_class(env_type):
"""Return the env class corresponding to the string type (from CLI)"""
assert env_type in SUPPORTED_AGENTS, f"Environment type was {env_type} but has to be one of {SUPPORTED_AGENTS}."
if env_type == 'single':
# return DatarateMobileEnv
# return NormDrMobileEnv
return RelNormEnv
if env_type == 'central':
# return CentralDrEnv
# return CentralNormDrEnv
return CentralRelNormEnv
# return CentralMaxNormEnv
if env_type == 'multi':
return MultiAgentMobileEnv
def get_sharing_for_bs(sharing, bs_idx):
"""Return the sharing model for the given BS"""
# if it's not mixed, it's the same for all BS
if sharing != 'mixed':
assert sharing in SUPPORTED_SHARING
return sharing
# else loop through the available sharing models
sharing_list = ['resource-fair', 'rate-fair', 'proportional-fair']
return sharing_list[bs_idx % len(sharing_list)]
def create_small_map(sharing_model):
"""
Create small map and 2 BS
:returns: tuple (map, bs_list)
"""
map = Map(width=150, height=100)
bs1 = Basestation('A', Point(50, 50), get_sharing_for_bs(sharing_model, 0))
bs2 = Basestation('B', Point(100, 50), get_sharing_for_bs(sharing_model, 1))
bs_list = [bs1, bs2]
return map, bs_list
def create_dyn_small_map(sharing_model, bs_dist=100, dist_to_border=10):
"""Small env with 2 BS and dynamic distance in between"""
map = Map(width=2 * dist_to_border + bs_dist, height=2 * dist_to_border)
bs1 = Basestation('A', Point(dist_to_border, dist_to_border), sharing_model)
bs2 = Basestation('B', Point(dist_to_border + bs_dist, dist_to_border), sharing_model)
return map, [bs1, bs2]
def create_medium_map(sharing_model):
"""
Deprecated: Use dynamic medium env instead. Kept this to reproduce earlier results.
Same as large env, but with map restricted to areas with coverage.
Thus, optimal episode reward should be close to num_ues * eps_length * 10 (ie, all UEs are always connected)
"""
map = Map(width=205, height=85)
bs1 = Basestation('A', Point(45, 35), sharing_model)
bs2 = Basestation('B', Point(160, 35), sharing_model)
bs3 = Basestation('C', Point(100, 85), sharing_model)
bs_list = [bs1, bs2, bs3]
return map, bs_list
def create_dyn_medium_map(sharing_model, bs_dist=100, dist_to_border=10):
"""
Create map with 3 BS at equal distance. Distance can be varied dynamically. Map is sized automatically.
Keep the same layout as old medium env here: A, B on same horizontal axis. C above in the middle
"""
# calculate vertical distance from A, B to C using Pythagoras
y_dist = np.sqrt(bs_dist ** 2 - (bs_dist / 2) ** 2)
# derive map size from BS distance and distance to border
map_width = 2 * dist_to_border + bs_dist
map_height = 2 * dist_to_border + y_dist
map = Map(width=map_width, height=map_height)
# BS A is located at bottom left corner with specified distance to border
bs1 = Basestation('A', Point(dist_to_border, dist_to_border), get_sharing_for_bs(sharing_model, 0))
# other BS positions are derived accordingly
bs2 = Basestation('B', Point(dist_to_border + bs_dist, dist_to_border), get_sharing_for_bs(sharing_model, 1))
bs3 = Basestation('C', Point(dist_to_border + (bs_dist / 2), dist_to_border + y_dist), get_sharing_for_bs(sharing_model, 2))
return map, [bs1, bs2, bs3]
def create_large_map(sharing_model):
"""
Create larger map with 7 BS that are arranged in a typical hexagonal structure.
:returns: Tuple(map, bs_list)
"""
map = Map(width=230, height=260)
bs_list = [
# center
Basestation('A', Point(115, 130), get_sharing_for_bs(sharing_model, 0)),
# top left, counter-clockwise
Basestation('B', Point(30, 80), get_sharing_for_bs(sharing_model, 1)),
Basestation('C', Point(115, 30), get_sharing_for_bs(sharing_model, 2)),
Basestation('D', Point(200, 80), get_sharing_for_bs(sharing_model, 3)),
Basestation('E', Point(200, 180), get_sharing_for_bs(sharing_model, 4)),
Basestation('F', Point(115, 230), get_sharing_for_bs(sharing_model, 5)),
Basestation('G', Point(30, 180), get_sharing_for_bs(sharing_model, 6)),
]
return map, bs_list
def create_dyn_large_map(sharing_model, num_bs, dist_to_border=10):
assert 1 <= num_bs <= 7, "Only support 1-7 BS in large env"
_, bs_list = create_large_map(sharing_model)
# take only selected BS
bs_list = bs_list[:num_bs]
# create map with size according to BS positions
max_x, max_y = None, None
for bs in bs_list:
if max_x is None or bs.pos.x > max_x:
max_x = bs.pos.x
if max_y is None or bs.pos.y > max_y:
max_y = bs.pos.y
map = Map(width=max_x + dist_to_border, height=max_y + dist_to_border)
return map, bs_list
def create_ues(map, num_static_ues, num_slow_ues, num_fast_ues, util_func):
"""Create custom number of slow/fast UEs on the given map. Return UE list"""
ue_list = []
id = 1
for i in range(num_static_ues):
ue_list.append(User(str(id), map, pos_x='random', pos_y='random', movement=RandomWaypoint(map, velocity=0),
util_func=util_func))
id += 1
for i in range(num_slow_ues):
ue_list.append(User(str(id), map, pos_x='random', pos_y='random', movement=RandomWaypoint(map, velocity='slow'),
util_func=util_func))
id += 1
for i in range(num_fast_ues):
ue_list.append(User(str(id), map, pos_x='random', pos_y='random', movement=RandomWaypoint(map, velocity='fast'),
util_func=util_func))
id += 1
return ue_list
def create_custom_env(sharing_model):
"""Hand-created custom env. For demos or specific experiments."""
# map with 4 BS at distance of 100; distance 10 to border of map
map = Map(width=194, height=120)
bs_list = [
# left
Basestation('A', Point(10, 60), get_sharing_for_bs(sharing_model, 0)),
# counter-clockwise
Basestation('B', Point(97, 10), get_sharing_for_bs(sharing_model, 1)),
Basestation('C', Point(184, 60), get_sharing_for_bs(sharing_model, 2)),
Basestation('D', Point(97, 110), get_sharing_for_bs(sharing_model, 3)),
]
return map, bs_list
def get_env(map_size, bs_dist, num_static_ues, num_slow_ues, num_fast_ues, sharing_model, util_func, num_bs=None):
"""Create and return the environment corresponding to the given map_size"""
assert map_size in SUPPORTED_ENVS, f"Environment {map_size} is not one of {SUPPORTED_ENVS}."
assert util_func in SUPPORTED_UTILITIES, \
f"Utility function {util_func} not supported. Supported: {SUPPORTED_UTILITIES}"
# create map and BS list
map, bs_list = None, None
if map_size == 'small':
map, bs_list = create_small_map(sharing_model)
elif map_size == 'medium':
map, bs_list = create_dyn_medium_map(sharing_model, bs_dist=bs_dist)
elif map_size == 'large':
if num_bs is None:
map, bs_list = create_large_map(sharing_model)
else:
map, bs_list = create_dyn_large_map(sharing_model, num_bs)
elif map_size == 'custom':
map, bs_list = create_custom_env(sharing_model)
# create UEs
ue_list = create_ues(map, num_static_ues, num_slow_ues, num_fast_ues, util_func)
return map, ue_list, bs_list
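# Example call (illustrative argument values):
#   map, ue_list, bs_list = get_env('medium', bs_dist=100, num_static_ues=0,
#                                   num_slow_ues=3, num_fast_ues=0,
#                                   sharing_model='resource-fair', util_func='log')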
def get_ue_arrival(ue_arrival_name):
"""Get the dict defining UE arrival over time based on the name provided via CLI"""
assert ue_arrival_name in SUPPORTED_UE_ARRIVAL
if ue_arrival_name is None:
return None
if ue_arrival_name == "oneupdown":
return {10: 1, 30: -1}
if ue_arrival_name == "updownupdown":
return {10: 1, 20: -1, 30: 1, 40: -1}
if ue_arrival_name == "3up2down":
return {10: 3, 30: -2}
if ue_arrival_name == "updown":
return {10: 1, 15: 1, 20: 1, 40: 1, 50: -1, 60: -1}
if ue_arrival_name == "largeupdown":
return {
20: 1, 30: -1, 40: 1,
# large increase up to 12 (starting at 1)
45: 1, 50: 1, 55: 2, 60: 3, 65: 2, 70: 1,
# large decrease down to 1
75: -1, 80: -2, 85: -3, 90: -3, 95: -2
}
raise ValueError(f"Unknown UE arrival name: {ue_arrival_name}")
def create_env_config(cli_args):
"""
Create environment and RLlib config based on passed CLI args. Return config.
:param cli_args: Parsed CLI args
:return: The complete config for an RLlib agent, including the env & env_config
"""
env_class = get_env_class(cli_args.agent)
map, ue_list, bs_list = get_env(cli_args.env, cli_args.bs_dist, cli_args.static_ues, cli_args.slow_ues,
cli_args.fast_ues, cli_args.sharing, cli_args.util, num_bs=cli_args.num_bs)
# this is for DrEnv and step utility
# env_config = {
# 'episode_length': eps_length, 'seed': seed,
# 'map': map, 'bs_list': bs_list, 'ue_list': ue_list, 'dr_cutoff': 'auto', 'sub_req_dr': True,
# 'curr_dr_obs': False, 'ues_at_bs_obs': False, 'dist_obs': False, 'next_dist_obs': False
# }
# this is for the custom NormEnv and log utility
env_config = {
'episode_length': cli_args.eps_length, 'seed': cli_args.seed, 'map': map, 'bs_list': bs_list, 'ue_list': ue_list,
'rand_episodes': cli_args.rand_train, 'new_ue_interval': cli_args.new_ue_interval, 'reward': cli_args.reward,
'max_ues': cli_args.max_ues, 'ue_arrival': get_ue_arrival(cli_args.ue_arrival),
# if enabled log_metrics: log metrics even during training --> visible on tensorboard
# if disabled: log just during testing --> probably slightly faster training with less memory
'log_metrics': True,
# custom animation rendering
'dashboard': cli_args.dashboard, 'ue_details': cli_args.ue_details,
}
# convert ue_arrival sequence to str keys as required by RLlib: https://github.com/ray-project/ray/issues/16215
if env_config['ue_arrival'] is not None:
env_config['ue_arrival'] = {str(k): v for k, v in env_config['ue_arrival'].items()}
# create and return the config
config = DEFAULT_CONFIG.copy()
# discount factor (default 0.99)
# config['gamma'] = 0.5
# 0 = no workers/actors at all --> low overhead for short debugging; 2+ workers to accelerate long training
config['num_workers'] = cli_args.workers
config['seed'] = cli_args.seed
# write training stats to file under ~/ray_results (default: False)
config['monitor'] = True
config['train_batch_size'] = cli_args.batch_size # default: 4000; default in stable_baselines: 128
# auto normalize obserations by subtracting mean and dividing by std (default: "NoFilter")
# config['observation_filter'] = "MeanStdFilter"
# NN settings: https://docs.ray.io/en/latest/rllib-models.html#built-in-model-parameters
# configure the size of the neural network's hidden layers; default: [256, 256]
# config['model']['fcnet_hiddens'] = [512, 512, 512]
# LSTM settings
config['model']['use_lstm'] = cli_args.lstm
# config['model']['lstm_use_prev_action_reward'] = True
# config['log_level'] = 'INFO' # ray logging default: warning
# reset the env whenever the horizon/eps_length is reached
config['horizon'] = cli_args.eps_length
config['env'] = env_class
config['env_config'] = env_config
# callback for monitoring custom metrics
config['callbacks'] = CustomMetricCallbacks
config['log_level'] = 'ERROR'
# for multi-agent env: https://docs.ray.io/en/latest/rllib-env.html#multi-agent-and-hierarchical
if MultiAgentEnv in env_class.__mro__:
# instantiate env to access obs and action space and num diff UEs
env = env_class(env_config)
# use separate policies (and NNs) for each agent
if cli_args.separate_agent_nns:
num_diff_ues = env.get_num_diff_ues()
# create policies also for all future UEs
if num_diff_ues > env.num_ue:
log.warning("Varying num. UEs. Creating policy for all (future) UEs.",
curr_num_ue=env.num_ue, num_diff_ues=num_diff_ues, new_ue_interval=env.new_ue_interval,
ue_arrival=env.ue_arrival)
ue_ids = [str(i + 1) for i in range(num_diff_ues)]
else:
ue_ids = [ue.id for ue in ue_list]
config['multiagent'] = {
# attention: ue.id needs to be a string! just casting it to str() here doesn't work;
# needs to be consistent with obs keys --> easier, just use string IDs
'policies': {ue_id: (None, env.observation_space, env.action_space, {}) for ue_id in ue_ids},
'policy_mapping_fn': lambda agent_id: agent_id
}
# or: all UEs use the same policy and NN
else:
config['multiagent'] = {
'policies': {'ue': (None, env.observation_space, env.action_space, {})},
'policy_mapping_fn': lambda agent_id: 'ue'
}
return config
| 44.37931 | 128 | 0.67034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,216 | 0.36844 |
4799c59de86fb0c7aac927367ccd22e4581ad71b | 905 | py | Python | star_types/parameters.py | annalieNK/star-types | 194ffa6f03db175ae8ac6fbc912bb8a45c52cbfe | [
"MIT"
]
| null | null | null | star_types/parameters.py | annalieNK/star-types | 194ffa6f03db175ae8ac6fbc912bb8a45c52cbfe | [
"MIT"
]
| null | null | null | star_types/parameters.py | annalieNK/star-types | 194ffa6f03db175ae8ac6fbc912bb8a45c52cbfe | [
"MIT"
]
| null | null | null | def parameters(model_type):
if model_type == 'random forest':
param_grid = {
'classifier__estimator__n_estimators': [50, 100],
'classifier__estimator__max_features' :['sqrt', 'log2'],
'classifier__estimator__max_depth' : [4,6,8]
}
elif model_type == 'logistic regression':
param_grid = {
'classifier__estimator__C': [0.1, 1.0, 10]
}
elif model_type == 'support vector machine':
param_grid = {
'classifier__estimator__C': [0.1, 1.0, 10],
'classifier__estimator__kernel': ['linear'],
'classifier__estimator__probability': [True]
}
elif model_type == 'kneighbors':
param_grid = {
'classifier__estimator__n_neighbors': [1, 3, 5],
'classifier__estimator__weights': ['uniform', 'distance']
}
return param_grid | 36.2 | 69 | 0.581215 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 406 | 0.448619 |
479ba82b2ef03b26d431347ee226aa3e2162ff17 | 221 | py | Python | PYTHON/Exemplos/Aula 1/Ex004.py | B1linha/ADS---Mackenzie | 0dc53418ff7580836f6a64c5370e204e8841d1a9 | [
"MIT"
]
| null | null | null | PYTHON/Exemplos/Aula 1/Ex004.py | B1linha/ADS---Mackenzie | 0dc53418ff7580836f6a64c5370e204e8841d1a9 | [
"MIT"
]
| null | null | null | PYTHON/Exemplos/Aula 1/Ex004.py | B1linha/ADS---Mackenzie | 0dc53418ff7580836f6a64c5370e204e8841d1a9 | [
"MIT"
]
| null | null | null | """ Faça um programa que receba o salário de um funcionário, calcule e mostre o novo salário, sabende-se que este sofreu um aumento de 25%"""
sal = float(input('Salário:'))
nsal = sal*1.25
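# Example: a salary of 1000.00 becomes a new salary of 1250.00 after the 25% raise.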
print ('novo salário = ', nsal) | 55.25 | 142 | 0.710407 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 174 | 0.76652 |
479c044c04a29437935975e735d0a42de577c613 | 1,866 | py | Python | Square_Lattice/obs.py | suron-003/Dynamic-Structure-Factor | 908ebcd8e6db5c6bf19a3a782c3b0a4b97a35f8b | [
"MIT"
]
| 1 | 2022-01-25T16:36:28.000Z | 2022-01-25T16:36:28.000Z | Square_Lattice/obs.py | suron-003/Dynamic-Structure-Factor | 908ebcd8e6db5c6bf19a3a782c3b0a4b97a35f8b | [
"MIT"
]
| null | null | null | Square_Lattice/obs.py | suron-003/Dynamic-Structure-Factor | 908ebcd8e6db5c6bf19a3a782c3b0a4b97a35f8b | [
"MIT"
]
| null | null | null | import numpy as np
from numpy import random, linspace, cos, pi
import math
import random
import matplotlib.pyplot as plt
from scipy.fft import fft, fftfreq
from scipy.fft import rfft, rfftfreq
import copy
from mpl_toolkits.mplot3d import axes3d
from mpl_toolkits import mplot3d
from plotly import __version__
import pandas as pd
from scipy.optimize import fsolve
import cmath
from numba import jit
from numpy import linalg as LA
from scipy.linalg import expm, norm
from scipy.integrate import odeint
import time
import numba
from parameters import *
from lattice import *
import plotly.offline as pyo
import plotly.graph_objs as go
from plotly.offline import iplot
import plotly.figure_factory as ff
import plotly.express as px
@jit(nopython=True)
def ene(lat):
e = 0
for x in range(0,s):
for y in range(0,s):
x1 = int((x+1)%s)
x2 = int((x-1)%s)
y1 = int((y+1)%s)
y2 = int((y-1)%s)
ej = (J*(np.dot(lat[x,y],lat[x1,y]+lat[x2,y]+lat[x,y1]+lat[x,y2])))
exx1 = -1*d*np.dot(dxx1,np.cross(lat[x,y],lat[x1,y]))
exx2 = -1*d*np.dot(dxx2,np.cross(lat[x,y],lat[x2,y]))
eyy1 = -1*d*np.dot(dyy1,np.cross(lat[x,y],lat[x,y1]))
eyy2 = -1*d*np.dot(dyy2,np.cross(lat[x,y],lat[x,y2]))
edm = (exx1 + exx2 + eyy1 + eyy2)
eb = -1*b*np.dot(b_vec,lat[x,y])
e = e + (ej/2 + edm/2 + eb)
return e
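# Note: ene() accumulates the Heisenberg exchange J * (S_i . S_j), the Dzyaloshinskii-Moriya
# term -d * D_ij . (S_i x S_j) and the Zeeman term -b * (b_vec . S_i); the pair terms are
# halved (ej/2, edm/2) to avoid double counting each bond.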
@jit(nopython=True)
def H_eff(lat,x,y):
x1 = int((x+1)%s)
x2 = int((x-1)%s)
y1 = int((y+1)%s)
y2 = int((y-1)%s)
dxx1 = np.array([0.0,-1.0,0.0])
dxx2 = np.array([0.0,1.0,0.0])
dyy2 = np.array([1.0,0.0,0.0])
dyy1 = np.array([-1.0,0.0,0.0])
b_vec = np.array([0.0,0.0,1.0])
heff = (-1*b*gamma*b_vec) + (0.5*J*(lat[x1,y] + lat[x2,y] + lat[x,y1] + lat[x,y2])) - d*(0.5*(np.cross(dxx1,lat[x1,y]) + np.cross(dxx2,lat[x2,y]) + np.cross(dyy1,lat[x,y1]) + np.cross(dyy2,lat[x,y2])))
return heff | 30.096774 | 203 | 0.633976 | 0 | 0 | 0 | 0 | 1,134 | 0.607717 | 0 | 0 | 0 | 0 |
479c652f9d6be7f731af9a0eaf026a8198211ed7 | 1,947 | py | Python | src/test/test_cmdparse_qmp.py | dougpuob/qemu-tasker | 58a24090016abebcda8e95c382bceaef453ea981 | [
"MIT"
]
| null | null | null | src/test/test_cmdparse_qmp.py | dougpuob/qemu-tasker | 58a24090016abebcda8e95c382bceaef453ea981 | [
"MIT"
]
| null | null | null | src/test/test_cmdparse_qmp.py | dougpuob/qemu-tasker | 58a24090016abebcda8e95c382bceaef453ea981 | [
"MIT"
]
| null | null | null | import unittest
import sys
import os
import sys
import json
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
PROJECT_DIR = os.path.abspath(os.path.join(TEST_DIR, os.pardir))
sys.path.insert(0, PROJECT_DIR)
from module.cmdparse import cmdargs
class test_cmdparse(unittest.TestCase):
def __init__(self, methodName: str = ...) -> None:
super().__init__(methodName)
def test_qmp_no_arg(self):
sys.argv = ['qemu-tasker.py',
'qmp',
'--taskid', '10010',
'--execute', 'human-monitor-command']
args = cmdargs().get_parsed_args()
self.assertEqual(args.taskid, 10010)
self.assertEqual(args.command, "qmp")
self.assertEqual(args.execute, "human-monitor-command")
def test_qmp_argsjson1(self):
sys.argv = ['qemu-tasker.py',
'qmp',
'--taskid', '10010',
'--execute', 'human-monitor-command',
'--argsjson', '{"command-line" : "info version" }']
args = cmdargs().get_parsed_args()
self.assertEqual(args.taskid, 10010)
self.assertEqual(args.command, "qmp")
self.assertEqual(args.execute, "human-monitor-command")
self.assertEqual(args.argsjson, '{"command-line" : "info version" }')
def test_qmp_argsjson2(self):
argsjson = {"command-line" : "info version" }
sys.argv = ['qemu-tasker.py',
'qmp',
'--taskid', '10010',
'--execute', 'human-monitor-command',
'--argsjson', json.dumps(argsjson)]
args = cmdargs().get_parsed_args()
self.assertEqual(args.taskid, 10010)
self.assertEqual(args.command, "qmp")
self.assertEqual(args.execute, "human-monitor-command")
self.assertEqual(args.argsjson, json.dumps(argsjson))
if __name__ == '__main__':
unittest.main()
| 31.918033 | 77 | 0.582435 | 1,647 | 0.845917 | 0 | 0 | 0 | 0 | 0 | 0 | 434 | 0.222907 |
479ce552b6ca46a4ea68c0270c6749107ab46cb3 | 3,498 | py | Python | ebbp/ebb_fit_prior.py | uttiyamaji/ebbp | d1b7270b7741cab4b18a9f54b47060f90ac1fc2c | [
"MIT"
]
| null | null | null | ebbp/ebb_fit_prior.py | uttiyamaji/ebbp | d1b7270b7741cab4b18a9f54b47060f90ac1fc2c | [
"MIT"
]
| null | null | null | ebbp/ebb_fit_prior.py | uttiyamaji/ebbp | d1b7270b7741cab4b18a9f54b47060f90ac1fc2c | [
"MIT"
]
| null | null | null | """
ebb_fit_prior : fits a Beta prior by estimating the parameters from the data using
method of moments and MLE estimates
augment : given data and prior, computes the shrinked estimate, credible intervals and
augments those in the given dataframe
check_fit : plots the true average and the shrinked average
"""
import numpy as np
import pandas as pd
from scipy.stats import beta as beta_dist
from dataclasses import dataclass
import matplotlib.pyplot as plt
@dataclass
class Beta:
""" minimal Beta class, for prior and posterior objects"""
alpha: float
beta: float
def pdf(self, x):
return beta_dist.pdf(x, self.alpha, self.beta)
pass
def plot(self, x, n):
""" plots the prior pdf density over the data histogram"""
# add various plotting args
x_ax = np.linspace(0,1,1000)
rv = beta_dist(self.alpha, self.beta)
p = x/n
plt.hist(p, density = True)
plt.plot(x_ax, rv.pdf(x_ax))
plt.title(f'Beta({self.alpha.round(2)},{self.beta.round(2)})')
plt.show()
def ebb_fit_prior(x, n, method = 'mm', start = (0.5,0.5)):
p = x/n
if (method == 'mm'):
mu, sig = np.mean(p), np.var(p)
a = ((1-mu)/sig - 1/mu)*mu**2
b = a*(1/mu - 1)
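        # (method of moments: solve mean = a/(a+b) and var = a*b/((a+b)^2 * (a+b+1))
        #  for a and b, using the sample mean `mu` and variance `sig` of p = x/n)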
fitted_prior = Beta(a,b)
pass
elif (method == 'mle'):
# starting value
# if (np.isnan(start)):
# mm_est = ebb_fit_prior(x, n, 'mm')
# start = (mm_est.alpha, mm_est.beta)
# #print(start)
        # negative log-likelihood: f(a, b), to be minimised for the MLE fit
        def likelihood(pars):
            return -np.sum(beta_dist.logpdf(p, pars[0], pars[1]))
# optimization function: over a series of params, optimise likelihood
# outp = minimize(likelihood, x0 = start, method = 'BFGS')
# fitted_prior = Beta(outp.x[0], outp.x[1])
a,b,*ls = beta_dist.fit(p)
fitted_prior = Beta(a,b)
pass
else:
return ('Method should be MM or MLE')
return fitted_prior
pass
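# --- Added note (not part of the original module): a small self-contained check
# of the method-of-moments inversion used above. For a Beta(a, b) distribution
#   mean = a / (a + b),  var = a * b / ((a + b)**2 * (a + b + 1)),
# so a sample mean of 0.3 with variance 0.01 should invert to roughly a=6, b=14.
def _mm_inversion_example(mu=0.3, sig=0.01):
    """Illustrative only; mirrors the 'mm' branch of ebb_fit_prior above."""
    a = ((1 - mu) / sig - 1 / mu) * mu ** 2
    b = a * (1 / mu - 1)
    return a, b  # approximately (6.0, 14.0)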
def augment(prior, data, x, n):
# compute the estimates
post_alpha = prior.alpha + x
post_beta = prior.beta + n - x
eb_est = (x + prior.alpha)/(n + prior.alpha + prior.beta)
posterior = Beta(post_alpha, post_beta)
# compute the posterior credible intervals
post_l = beta_dist.ppf(0.025, posterior.alpha, posterior.beta)
post_u = beta_dist.ppf(0.975, posterior.alpha, posterior.beta)
# add the column
new_cols = pd.DataFrame({'alpha':post_alpha, 'beta': post_beta, 'eb_est': eb_est, 'cred_lower': post_l, 'cred_upper':post_u})
aug_df = pd.concat([data, new_cols], axis = 1)
return aug_df
pass
def check_fit(aug_df):
plt.plot(aug_df.est, aug_df.eb_est)
plt.show()
if __name__ == '__main__':
x = np.random.randint(0,50,20)
n = np.random.randint(50,100, 20)
p = x/n
dt = pd.DataFrame({'S':x, 'Tot':n, 'est':p})
est1 = ebb_fit_prior(x,n, 'mm')
print(est1)
est1.plot(x, n)
new_dt = augment(est1, dt, dt.S, dt.Tot)
print(new_dt.head(10))
check_fit(new_dt)
print('=============================')
est2 = ebb_fit_prior(x,n,'mle')
print(est2)
est2.plot(x,n)
new_dt = augment(est2, dt, dt.S, dt.Tot)
print(new_dt.head(10))
check_fit(new_dt)
| 26.104478 | 129 | 0.57004 | 607 | 0.173528 | 0 | 0 | 618 | 0.176672 | 0 | 0 | 1,079 | 0.308462 |
479df66e863ce32ea10d321a13e0395597727f6a | 1,554 | py | Python | shopping.py | scharlau/shopping_exercise_p | f6b59ba38408dcd9f66f79814ad6a7df167e8fa1 | [
"Unlicense"
]
| 1 | 2021-02-23T15:56:22.000Z | 2021-02-23T15:56:22.000Z | shopping.py | scharlau/shopping_exercise_p | f6b59ba38408dcd9f66f79814ad6a7df167e8fa1 | [
"Unlicense"
]
| null | null | null | shopping.py | scharlau/shopping_exercise_p | f6b59ba38408dcd9f66f79814ad6a7df167e8fa1 | [
"Unlicense"
]
| 3 | 2022-02-23T11:17:12.000Z | 2022-03-01T10:22:40.000Z | import sqlite3
from flask import Flask, render_template
app = Flask(__name__)
# database details - to remove some duplication
db_name = 'shopping_data.db'
@app.route('/')
def index():
return render_template('index.html')
@app.route('/customers')
def customers():
conn = sqlite3.connect(db_name)
conn.row_factory = sqlite3.Row
cur = conn.cursor()
# get results from customers
cur.execute("select * from customers")
rows = cur.fetchall()
conn.close()
return render_template('customers.html', rows=rows)
@app.route('/customer_details/<id>')
def customer_details(id):
conn = sqlite3.connect(db_name)
conn.row_factory = sqlite3.Row
cur = conn.cursor()
# get results from customers
cur.execute("select * from customers WHERE id=?", (id))
customer = cur.fetchall()
conn.close()
return render_template('customer_details.html', customer=customer)
@app.route('/orders')
def orders():
conn = sqlite3.connect(db_name)
conn.row_factory = sqlite3.Row
cur = conn.cursor()
# get results from orders
cur.execute("select * from orders")
rows = cur.fetchall()
conn.close()
return render_template('orders.html', rows=rows)
@app.route('/order_details/<id>')
def order_details(id):
conn = sqlite3.connect(db_name)
conn.row_factory = sqlite3.Row
cur = conn.cursor()
# get results from orders
cur.execute("select * from orders WHERE id=?", (id))
order = cur.fetchall()
conn.close()
return render_template('order_details.html', order=order) | 28.254545 | 70 | 0.684041 | 0 | 0 | 0 | 0 | 1,388 | 0.893179 | 0 | 0 | 440 | 0.28314 |
479e8f21283f73a7284a6977ee7935c8576954ca | 3,372 | py | Python | Month 01/Week 04/Day 03/a.py | KevinKnott/Coding-Review | 6a83cb798cc317d1e4357ac6b2b1fbf76fa034fb | [
"MIT"
]
| null | null | null | Month 01/Week 04/Day 03/a.py | KevinKnott/Coding-Review | 6a83cb798cc317d1e4357ac6b2b1fbf76fa034fb | [
"MIT"
]
| null | null | null | Month 01/Week 04/Day 03/a.py | KevinKnott/Coding-Review | 6a83cb798cc317d1e4357ac6b2b1fbf76fa034fb | [
"MIT"
]
| null | null | null | # Decode Ways: https://leetcode.com/problems/decode-ways/
# A message containing letters from A-Z can be encoded into numbers using the following mapping:
# 'A' -> "1"
# 'B' -> "2"
# ...
# 'Z' -> "26"
# To decode an encoded message, all the digits must be grouped then mapped back into letters using the reverse of the mapping above (there may be multiple ways). For example, "11106" can be mapped into:
# "AAJF" with the grouping (1 1 10 6)
# "KJF" with the grouping (11 10 6)
# Note that the grouping (1 11 06) is invalid because "06" cannot be mapped into 'F' since "6" is different from "06".
# Given a string s containing only digits, return the number of ways to decode it.
# The answer is guaranteed to fit in a 32-bit integer.
class Solution:
def numDecodings(self, s: str) -> int:
self.memo = {}
def dfs(index=0):
if index in self.memo:
return self.memo[index]
if index == len(s):
return 1
if s[index] == '0':
return 0
if index == len(s) - 1:
return 1
# Go one
count = dfs(index+1)
# Go two
if int(s[index:index+2]) <= 26:
count += dfs(index+2)
# cache
self.memo[index] = count
return count
return dfs()
# The above works and cuts out a lot of the problems that we have however this still runs in o(N) and o(N)
# Can we improve on this solution? I think so this is almost like the fibonaci sequence where we can keep track of the last
# two answers and create the new one thus moving up and using only o(1) space
def numDecodingsImproved(self, s):
if s[0] == '0':
return 0
# If the first number isn't 0 then we have a valid case
# where two back is 1 but we skip over it by starting range at 1
oneBack = 1
twoBack = 1
for i in range(1, len(s)):
# Get a temp variable for combining the two results
current = 0
            # the single-digit decode is only valid when s[i] isn't '0'
            # ('0' alone cannot be decoded); in that case it carries forward
            # everything already counted in oneBack
            if s[i] != '0':
                current = oneBack
twoDigit = int(s[i-1: i+1])
# Make sure that our new two digit is between 10-26 (we don't want 35)
if twoDigit >= 10 and twoDigit <= 26:
current += twoBack
# update the twoback and oneback to new values
twoBack = oneBack
oneBack = current
return oneBack
# So the above should work but it does so because it is like the fib sequence we only need two vals to create thrid 1 1 = 1 2
# so you keep the value that you need and discard outside of the range like a window
# Score Card
# Did I need hints? N
# Did you finish within 30 min? Y 25
# Was the solution optimal? I was able to create the optimal solution although I kind of skipped over the bottom up and tabulation that helps with
# creating the optimal solution as I have seen it before with the fib sequence
# Were there any bugs? I accidently pointed the second algo to current (because it is correct) but really I need to return oneBack because
# python can possibly clean up that val after the loop
# 5 5 5 3 = 4.5
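# --- Added usage sketch (not part of the original solution file): it simply
# exercises the two methods defined above on one example string.
if __name__ == "__main__":
    solution = Solution()
    print(solution.numDecodings("226"))          # expected 3: "BZ", "VF", "BBF"
    print(solution.numDecodingsImproved("226"))  # expected 3 as well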
| 34.762887 | 202 | 0.613582 | 1,898 | 0.562871 | 0 | 0 | 0 | 0 | 0 | 0 | 2,197 | 0.651542 |
479ebacd8680e1c9ee8fd4892014756c931c3f71 | 7,503 | py | Python | word2vec.py | online-behaviour/machine-learning | 2ff0e83905985ec644699ece44c75dd7422a7426 | [
"Apache-2.0"
]
| 2 | 2017-08-18T13:14:38.000Z | 2021-09-02T07:45:41.000Z | word2vec.py | online-behaviour/machine-learning | 2ff0e83905985ec644699ece44c75dd7422a7426 | [
"Apache-2.0"
]
| null | null | null | word2vec.py | online-behaviour/machine-learning | 2ff0e83905985ec644699ece44c75dd7422a7426 | [
"Apache-2.0"
]
| 3 | 2020-11-18T11:55:45.000Z | 2021-04-27T10:02:27.000Z | #!/usr/bin/python -W all
"""
word2vec.py: process tweets with word2vec vectors
usage: word2vec.py [-x] [-m model-file [-l word-vector-length]] -w word-vector-file -T train-file -t test-file
notes:
- optional model file is a text file from which the word vector file is built
- option x writes tokenized sentences to stdout
20170504 erikt(at)xs4all.nl
"""
# import modules & set up logging
import gensim
import getopt
import logging
import numpy
import naiveBayes
import os.path
import re
import sys
from scipy.sparse import csr_matrix
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import GaussianNB
from sklearn import svm
# constants
COMMAND = "word2vec.py"
TWEETCOLUMN = 4 # column tweet text in test data file dutch-2012.csv
CLASSCOLUMN = 9 # column tweeting behaviour (T3) in file dutch-2012.csv
IDCOLUMN = 0 # column with the id of the current tweet
PARENTCOLUMN = 5 # column of the id of the parent of the tweet if it is a retweet or reply (otherwise: None)
HASHEADING = True
MINCOUNT = 2
USAGE = "usage: "+COMMAND+" [-m model-file] -w word-vector-file -T train-file -t test-file\n"
# input file names
trainFile = ""
testFile = ""
wordvectorFile = ""
modelFile = ""
# length of word vectors
maxVector = 200
# exporting tokenized sentences
exportTokens = False
selectedTokens = {}
# check for command line options
def checkOptions():
global trainFile
global testFile
global wordvectorFile
global modelFile
global maxVector
global exportTokens
    try: options = getopt.getopt(sys.argv[1:],"T:t:w:m:l:x",[])
except: sys.exit(USAGE)
for option in options[0]:
if option[0] == "-T": trainFile = option[1]
elif option[0] == "-t": testFile = option[1]
elif option[0] == "-w": wordvectorFile = option[1]
elif option[0] == "-m": modelFile = option[1]
elif option[0] == "-l": maxVector = int(option[1])
elif option[0] == "-x": exportTokens = True
if trainFile == "" or testFile == "" or wordvectorFile == "":
sys.exit(USAGE)
# create data matrix (no sparse version needed)
def makeVectors(tokenizeResults,wordvecModel,selectedTokens):
tweetVectors = numpy.zeros((len(tokenizeResults),maxVector),dtype=numpy.float64)
# process all tweets
seen = {}
for i in range(0,len(tokenizeResults)):
# process all tokens in this tweet
for token in tokenizeResults[i]:
# if the token is present in the word vector model
if token in wordvecModel and token in selectedTokens and not token in seen:
# add (+) the word vector of this token to the tweet vector
tweetVectors[i] += wordvecModel[token]
seen[token] = True
# the result: a tweet vector which is the sum of its token vectors
return(tweetVectors)
# change the class vector into a binary vector
def makeBinary(vector):
outVector = []
for e in vector:
if e == naiveBayes.OTHER: outVector.append(0)
else: outVector.append(1)
return(outVector)
# read wordvector file from file in format of fasttext:
# first line: nbrOfVectors vectorLength; rest: token vector
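# for illustration (hypothetical content), a .vec file with two tokens and
# vector length 4 would look like:
#   2 4
#   hello 0.12 -0.05 0.33 0.08
#   world -0.02 0.17 0.01 -0.11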
def readFasttextModel(wordvectorFile):
global maxVector
try: inFile = open(wordvectorFile,"r")
except: sys.exit(COMMAND+": cannot read file "+wordvectorFile)
wordvectorModel = {}
lineCounter = 0
expectedLines = -1
for line in inFile:
line = line.rstrip()
fields = line.split()
lineCounter += 1
if lineCounter == 1:
if len(fields) != 2: sys.exit(COMMAND+": unexpected first line of file "+wordvectorFile+": "+line)
expectedLines = int(fields[0])
maxVector = int(fields[1])
else:
if len(fields) != 1+maxVector: sys.exit(COMMAND+": unexpected line in file "+wordvectorFile+": "+line)
token = fields.pop(0)
for i in range(0,len(fields)): fields[i] = float(fields[i])
wordvectorModel[token] = fields
inFile.close()
return(wordvectorModel)
# main function starts here
checkOptions()
# get target classes from training data file
targetClasses = naiveBayes.getTargetClasses(trainFile)
if len(targetClasses) == 0: sys.exit(COMMAND+": cannot find target classes\n")
# if required: train the word vector model and save it to file
if modelFile != "":
# read the model data
readDataResults = naiveBayes.readData(modelFile,targetClasses[0])
# tokenize the model data
tokenizeResults = naiveBayes.tokenize(readDataResults["text"])
# build the word vectors (test sg=1,window=10)
wordvecModel = gensim.models.Word2Vec(tokenizeResults, min_count=MINCOUNT, size=maxVector)
# save the word vectors
wordvecModel.save(wordvectorFile)
# load the word vector model from file
patternNameVec = re.compile("\.vec$")
if not patternNameVec.search(wordvectorFile):
print >> sys.stderr,"loading gensim vector model from file: %s" % (wordvectorFile)
# read standard file format from gensim
wordvecModel = gensim.models.Word2Vec.load(wordvectorFile)
else:
print >> sys.stderr,"loading fasttext vector model from file: %s" % (wordvectorFile)
# read file format from fasttext
wordvecModel = readFasttextModel(wordvectorFile)
# read training data, tokenize data, make vector matrix
readDataResults = naiveBayes.readData(trainFile,"")
tokenizeResults = naiveBayes.tokenize(readDataResults["text"])
# check if we need to export tokens
if exportTokens:
for i in range(0,len(tokenizeResults)):
sys.stdout.write("__label__"+readDataResults["classes"][i])
for j in range(0,len(tokenizeResults[i])):
sys.stdout.write(" ")
sys.stdout.write(unicode(tokenizeResults[i][j]).encode('utf8'))
sys.stdout.write("\n")
sys.exit()
# select tokens to be used in model, based on token frequency
selectedTokens = naiveBayes.selectFeatures(tokenizeResults,MINCOUNT)
makeVectorsResultsTrain = makeVectors(tokenizeResults,wordvecModel,selectedTokens)
# the matrix can be saved to file and reloaded in next runs but this does not gain much time
# read test data, tokenize data, make vector matrix
readDataResults = naiveBayes.readData(testFile,"")
tokenizeResults = naiveBayes.tokenize(readDataResults["text"])
makeVectorsResultsTest = makeVectors(tokenizeResults,wordvecModel,selectedTokens)
# run binary svm experiments: one for each target class
for targetClass in targetClasses:
# read the training and test file again to get the right class distribution for this target class
readDataResultsTrain = naiveBayes.readData(trainFile,targetClass)
readDataResultsTest = naiveBayes.readData(testFile,targetClass)
# get binary version of train classes
binTrainClasses = makeBinary(readDataResultsTrain["classes"])
# perform svm experiment: http://scikit-learn.org/stable/modules/svm.html (1.4.1.1)
clf = svm.SVC(decision_function_shape='ovo') # definition
clf.fit(makeVectorsResultsTrain,binTrainClasses) # training
outFile = open(testFile+".out."+targetClass,"w") # output file for test results
scores = clf.decision_function(makeVectorsResultsTest) # process all test items
for i in range(0,len(makeVectorsResultsTest)):
guess = "O"
if scores[i] >= 0: guess = targetClass
print >>outFile, "# %d: %s %s %0.3f" % (i,readDataResultsTest["classes"][i],guess,scores[i])
outFile.close()
| 40.33871 | 114 | 0.702786 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,675 | 0.356524 |
479f58c93d75ceaaa8c1e6a8d36c5a44e2d8377b | 307 | py | Python | methylprep/processing/__init__.py | WonyoungCho/methylprep | 4e34f62be969158453ba9b05b7629433f9bbba8b | [
"MIT"
]
| 5 | 2019-08-28T08:27:16.000Z | 2020-03-11T17:20:01.000Z | methylprep/processing/__init__.py | WonyoungCho/methylprep | 4e34f62be969158453ba9b05b7629433f9bbba8b | [
"MIT"
]
| 16 | 2021-04-08T22:02:58.000Z | 2022-03-18T17:30:50.000Z | methylprep/processing/__init__.py | WonyoungCho/methylprep | 4e34f62be969158453ba9b05b7629433f9bbba8b | [
"MIT"
]
| 3 | 2020-05-21T10:16:24.000Z | 2020-08-30T09:26:52.000Z | from .pipeline import SampleDataContainer, run_pipeline, make_pipeline
from .preprocess import preprocess_noob
from .postprocess import consolidate_values_for_sheet
__all__ = [
'SampleDataContainer',
'preprocess_noob',
'run_pipeline',
    'make_pipeline',
'consolidate_values_for_sheet'
]
| 25.583333 | 70 | 0.781759 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 98 | 0.319218 |
47a0a4fbe45ee5f24ba893991497ace327ecbebb | 36 | py | Python | lib/jnpr/eznc/runstat/__init__.py | cro/py-junos-eznc | 4c111476cc8eb7599462379ddf55743ae30bbf5c | [
"Apache-2.0",
"BSD-3-Clause"
]
| null | null | null | lib/jnpr/eznc/runstat/__init__.py | cro/py-junos-eznc | 4c111476cc8eb7599462379ddf55743ae30bbf5c | [
"Apache-2.0",
"BSD-3-Clause"
]
| null | null | null | lib/jnpr/eznc/runstat/__init__.py | cro/py-junos-eznc | 4c111476cc8eb7599462379ddf55743ae30bbf5c | [
"Apache-2.0",
"BSD-3-Clause"
]
| null | null | null | from .rsmaker import RunstatMaker
| 9 | 33 | 0.805556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
47a1196cb4630db570dad1f74a29081f22292d56 | 291 | py | Python | Day 4/Ex3: Treasure Map.py | Nishi-16-K/100DaysCodeChallenge-Python- | 96df953bbc60c2bf8802cf31ed6c593469521482 | [
"MIT"
]
| 1 | 2021-08-29T12:44:23.000Z | 2021-08-29T12:44:23.000Z | Day 4/Ex3: Treasure Map.py | Nishi-16-K/100DaysofCodeChallenge-Python | 96df953bbc60c2bf8802cf31ed6c593469521482 | [
"MIT"
]
| null | null | null | Day 4/Ex3: Treasure Map.py | Nishi-16-K/100DaysofCodeChallenge-Python | 96df953bbc60c2bf8802cf31ed6c593469521482 | [
"MIT"
]
| null | null | null | row1 = ["⬜️","⬜️","⬜️"]
row2 = ["⬜️","⬜️","⬜️"]
row3 = ["⬜️","⬜️","⬜️"]
map = [row1, row2, row3]
print(f"{row1}\n{row2}\n{row3}")
position = input("Where do you want to put the treasure? ")
col = int(position[0])
ro = int(position[1])
map[ro-1][col-1] = "X"
print(f"{row1}\n{row2}\n{row3}")
| 26.454545 | 59 | 0.515464 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 166 | 0.507645 |
47a573807a7fa2158676c40f6ae41faaea77de33 | 1,809 | py | Python | noto-emoji-extra/gen-couple-kiss.py | PoomSmart/EmojiFonts | 43f4f2bc6e8986cc2f5bc317e50381774f02316f | [
"MIT"
]
| 3 | 2022-02-19T07:29:41.000Z | 2022-03-03T07:24:14.000Z | noto-emoji-extra/gen-couple-kiss.py | PoomSmart/EmojiFonts | 43f4f2bc6e8986cc2f5bc317e50381774f02316f | [
"MIT"
]
| 1 | 2022-03-13T23:00:19.000Z | 2022-03-14T16:12:12.000Z | noto-emoji-extra/gen-couple-kiss.py | PoomSmart/EmojiFonts | 43f4f2bc6e8986cc2f5bc317e50381774f02316f | [
"MIT"
]
| 1 | 2022-02-19T13:32:11.000Z | 2022-02-19T13:32:11.000Z | import xml.etree.ElementTree as ET
from shared import *
# neutral
for skin in skins:
name = f'{font}/emoji_u1f48f.svg' if skin == 'none' else f'{font}/emoji_u1f48f_{skin}.svg'
left = ET.parse(name).getroot()
right = ET.parse(name).getroot()
remove(left, 2)
remove(left, 1)
remove(right, 0)
write_dual(left, right, '1f9d1', '1f9d1', skin, '1f48b')
# neutral silhouette
name = f'{font}/emoji_u1f48f.svg'
left = ET.parse(name).getroot()
right = ET.parse(name).getroot()
remove(left, 2)
remove(left, 1)
remove(right, 0)
find_set_color(left)
find_set_color(right)
left_out = ET.ElementTree(left)
left_out.write('svgs/silhouette_1f9d1_1f48b.l.svg', encoding='utf-8')
right_out = ET.ElementTree(right)
right_out.write('svgs/silhouette_1f9d1_1f48b.r.svg', encoding='utf-8')
# woman, man silhouette
for g in ['1f469', '1f468']:
name = f'{font}/emoji_u{g}_200d_2764_200d_1f48b_200d_{g}.svg'
left = ET.parse(name).getroot()
right = ET.parse(name).getroot()
remove(left, 2)
remove(left, 1)
remove(right, 0)
find_set_color(left)
find_set_color(right)
left_out = ET.ElementTree(left)
left_out.write(f'svgs/silhouette_{g}_1f48b.l.svg', encoding='utf-8')
right_out = ET.ElementTree(right)
right_out.write(f'svgs/silhouette_{g}_1f48b.r.svg', encoding='utf-8')
# dual woman, dual man
for g in ['1f469', '1f468']:
for skin in skins:
if skin == 'none':
name = f'{font}/emoji_u{g}_200d_2764_200d_1f48b_200d_{g}.svg'
else:
name = f'{font}/emoji_u{g}_{skin}_200d_2764_200d_1f48b_200d_{g}_{skin}.svg'
left = ET.parse(name).getroot()
right = ET.parse(name).getroot()
remove(left, 2)
remove(left, 1)
remove(right, 0)
write_dual(left, right, g, g, skin, '1f48b') | 32.303571 | 94 | 0.661139 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 569 | 0.314538 |
47a5912698e4015d5f8165df118a9d812fd9c116 | 488 | py | Python | wrong_settings.py | kutayg/gameQuest | 1730ea8810a54afff50fbb5eab8fb5290eed6222 | [
"MIT"
]
| null | null | null | wrong_settings.py | kutayg/gameQuest | 1730ea8810a54afff50fbb5eab8fb5290eed6222 | [
"MIT"
]
| null | null | null | wrong_settings.py | kutayg/gameQuest | 1730ea8810a54afff50fbb5eab8fb5290eed6222 | [
"MIT"
]
| null | null | null | # © 2019 KidsCanCode LLC / All rights reserved.
# Game options/settings
TITLE = "Jumpy!"
WIDTH = 480
HEIGHT = 600
FPS = 60
# Environment options
GRAVITY = 9.8
# Player properties
PLAYER_ACC = 0.5
PLAYER_FRICTION = -0.01
PLAYER_JUMPPOWER = 10
# Define colors
# I changed the screen color to aqua, the platform color to orange, and the player color to purple
WHITE = (255, 255, 255)
AQUA = (0, 255, 255)
RED = (255, 0, 0)
ORANGE = (255, 101, 0)
BLUE = (0, 0, 255)
PURPLE = (128, 0, 128) | 20.333333 | 98 | 0.684426 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 232 | 0.474438 |
47a75c9fd27c1c4e7e7447cf693a765246360655 | 1,446 | py | Python | yc164/518.py | c-yan/yukicoder | cdbbd65402177225dd989df7fe01f67908484a69 | [
"MIT"
]
| null | null | null | yc164/518.py | c-yan/yukicoder | cdbbd65402177225dd989df7fe01f67908484a69 | [
"MIT"
]
| null | null | null | yc164/518.py | c-yan/yukicoder | cdbbd65402177225dd989df7fe01f67908484a69 | [
"MIT"
]
| null | null | null | def r2i(s):
i = 0
result = 0
while i < len(s):
a = {'IV': 4, 'IX': 9, 'XL': 40, 'XC': 90, 'CD': 400, 'CM': 900}
if s[i:i + 2] in a:
result += a[s[i:i + 2]]
i += 2
continue
b = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
result += b[s[i]]
i += 1
return result
def i2r(i):
if i > 3999:
return 'ERROR'
result = ''
while i != 0:
if i >= 1000:
result += 'M'
i -= 1000
elif i >= 900:
result += 'CM'
i -= 900
elif i >= 500:
result += 'D'
i -= 500
elif i >= 400:
result += 'CD'
i -= 400
elif i >= 100:
result += 'C'
i -= 100
elif i >= 90:
result += 'XC'
i -= 90
elif i >= 50:
result += 'L'
i -= 50
elif i >= 40:
result += 'XL'
i -= 40
elif i >= 10:
result += 'X'
i -= 10
elif i >= 9:
result += 'IX'
i -= 9
elif i >= 5:
result += 'V'
i -= 5
elif i >= 4:
result += 'IV'
i -= 4
elif i >= 1:
result += 'I'
i -= 1
return result
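# --- Added illustrative checks (not part of the original submission); they only
# exercise the two helpers defined above and do not touch the I/O below.
assert r2i('XIV') == 14 and i2r(14) == 'XIV'
assert i2r(4000) == 'ERROR'  # anything above 3999 is rejected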
N = int(input())
R = input().split()
print(i2r(sum(r2i(r) for r in R)))
| 21.58209 | 77 | 0.310512 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 99 | 0.068465 |
47a795b1d3c2fd7990f98fee6d037bc7e104529b | 966 | py | Python | asynapplicationinsights/channel/abstractions.py | RobertoPrevato/aioapplicationinsights | c72721c6ed0e64b4e5bfecbcd3dde62f7c6ea120 | [
"MIT"
]
| 2 | 2018-08-13T14:26:31.000Z | 2019-12-01T01:03:10.000Z | asynapplicationinsights/channel/abstractions.py | RobertoPrevato/aioapplicationinsights | c72721c6ed0e64b4e5bfecbcd3dde62f7c6ea120 | [
"MIT"
]
| 4 | 2018-10-09T20:32:59.000Z | 2018-12-09T20:46:09.000Z | asynapplicationinsights/channel/abstractions.py | RobertoPrevato/aioapplicationinsights | c72721c6ed0e64b4e5bfecbcd3dde62f7c6ea120 | [
"MIT"
]
| null | null | null | from asyncio import Queue, QueueEmpty
from abc import ABC, abstractmethod
from typing import List
class TelemetryChannel(ABC):
def __init__(self):
self._queue = Queue()
self._max_length = 500
def get(self):
try:
return self._queue.get_nowait()
except QueueEmpty:
return None
async def put(self, item):
if not item:
return
await self._queue.put(item)
if self.should_flush():
await self.flush()
def should_flush(self) -> bool:
return self._max_length <= self._queue.qsize()
async def flush(self):
data = []
while True:
item = self.get()
if not item:
break
data.append(item)
if data:
await self.send(data)
@abstractmethod
async def send(self, data: List):
pass
@abstractmethod
async def dispose(self):
pass
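# --- Added sketch (not part of the original module): a minimal concrete channel,
# e.g. for tests, that keeps flushed batches in memory instead of sending them.
# The class and attribute names below are illustrative only.
class InMemoryTelemetryChannel(TelemetryChannel):

    def __init__(self):
        super().__init__()
        self.batches: List[List] = []

    async def send(self, data: List):
        # store the batch instead of transmitting it anywhere
        self.batches.append(data)

    async def dispose(self):
        # flush whatever is still queued before shutting down
        await self.flush()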
| 21 | 54 | 0.555901 | 865 | 0.895445 | 0 | 0 | 123 | 0.127329 | 467 | 0.483437 | 0 | 0 |
47a8d279cbfb373533f6a00a0322f66158d1d281 | 759 | py | Python | 2021/day3/a.py | vinnymaker18/adventofcode | 92d0a6f5a04e6601b6c82ee323565e7327be36f8 | [
"MIT"
]
| null | null | null | 2021/day3/a.py | vinnymaker18/adventofcode | 92d0a6f5a04e6601b6c82ee323565e7327be36f8 | [
"MIT"
]
| null | null | null | 2021/day3/a.py | vinnymaker18/adventofcode | 92d0a6f5a04e6601b6c82ee323565e7327be36f8 | [
"MIT"
]
| null | null | null | def processInput(inputFile='input.txt'):
return [l.strip() for l in open(inputFile).readlines()]
def filter(data, a=1):
setA = set(range(len(data)))
setB = set(range(len(data)))
def count(bit, value, dataset):
return set(d for d in dataset if data[d][bit] == value)
for bit in range(12):
onesA = count(bit, '1', setA)
zerosA = count(bit, '0', setA)
onesB = count(bit, '1', setB)
zerosB = count(bit, '0', setB)
setA = onesA if len(onesA) >= len(zerosA) else zerosA
setB = onesB if len(onesB) < len(zerosB) else zerosB
if not setB:
setB = onesB or zerosB
x, y = data[min(setA)], data[min(setB)]
print(int(x, 2) * int(y, 2))
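    # (Added note) setA converges to the most-common-bit filter (the oxygen
    # generator rating) and setB to the least-common-bit filter (the CO2
    # scrubber rating); the product printed above is the life support rating.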
filter(processInput())
| 25.3 | 63 | 0.571805 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.030303 |
47ab86e8093f2c1a626207ebde134e3527326fd9 | 659 | py | Python | mwasurveyweb/templatetags/template_filters.py | tjgalvin/SS18B-NHurleyWalker | 81b1bbb9e7131a92804b0dbddac795217b524160 | [
"MIT"
]
| 1 | 2020-07-23T08:48:50.000Z | 2020-07-23T08:48:50.000Z | mwasurveyweb/templatetags/template_filters.py | tjgalvin/SS18B-NHurleyWalker | 81b1bbb9e7131a92804b0dbddac795217b524160 | [
"MIT"
]
| 6 | 2020-02-11T23:38:13.000Z | 2022-02-10T07:28:07.000Z | mwasurveyweb/templatetags/template_filters.py | tjgalvin/SS18B-NHurleyWalker | 81b1bbb9e7131a92804b0dbddac795217b524160 | [
"MIT"
]
| 1 | 2020-07-23T09:01:29.000Z | 2020-07-23T09:01:29.000Z | """
Distributed under the MIT License. See LICENSE.txt for more info.
"""
from django import template
register = template.Library()
@register.filter
def get_item(dictionary, key):
"""
Returns the object for that key from a dictionary.
:param dictionary: A dictionary object
:param key: Key to search for
:return: Object that corresponds to the key in the dictionary
"""
return dictionary.get(key, None)
@register.filter
def field_type(field):
"""
Returns the field type of an input
:param field: input field
:return: string representing the class name
"""
return field.field.widget.__class__.__name__
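# --- Added usage note (illustrative): after loading this tag library in a
# template (e.g. `{% load template_filters %}`, assuming this file lives in a
# `templatetags` package), the filters are used as
#   {{ some_dict|get_item:some_key }}
#   {{ some_form.some_field|field_type }}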
| 22.724138 | 65 | 0.704097 | 0 | 0 | 0 | 0 | 519 | 0.787557 | 0 | 0 | 410 | 0.622155 |
47ab8c88580963ea081b14afd01ad6eaae957d96 | 1,091 | py | Python | Exercicios/Ex69.py | angeloridolfi/Python-CEV | fd11b7ea0725f83c84336b99304c50f183514245 | [
"MIT"
]
| null | null | null | Exercicios/Ex69.py | angeloridolfi/Python-CEV | fd11b7ea0725f83c84336b99304c50f183514245 | [
"MIT"
]
| null | null | null | Exercicios/Ex69.py | angeloridolfi/Python-CEV | fd11b7ea0725f83c84336b99304c50f183514245 | [
"MIT"
]
| null | null | null | contmaior = 0
contahomi = 0
contamuie = 0
while True:
print('CADASTRE UMA PESSOA')
print('=-' * 19)
idade = int(input('INFORME SUA IDADE: '))
if idade > 18:
contmaior += 1
sexo = str(input('INFORME SEU SEXO <<M/F>>: ')).upper().strip()[0]
if sexo not in 'MF':
while True:
sexo = str(input('OPÇÃO INVÁLIDA! INFORME SEU SEXO <<M/F>>: ')).upper().strip()[0]
if sexo in 'MF':
break
if sexo == 'M':
contahomi += 1
if sexo == 'F' and idade < 20:
contamuie += 1
continuacao = str(input('Quer continuar[S/N]: ')).upper().strip()[0]
print('=-' * 20)
if continuacao not in 'SN':
while True:
continuacao = str(input('OPÇÃO INVÁLIDA! Quer continuar[S/N]: ')).upper().strip()[0]
if continuacao in 'SN':
break
if continuacao == 'N':
break
print('=-' * 20)
print(f' -> {contmaior} pessoas são maiores de 18 anos;')
print(f' -> {contahomi} homens foram cadastrados;')
print(f' -> {contamuie} mulheres são menores de 20 anos.')
| 33.060606 | 96 | 0.542621 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 366 | 0.33303 |
47ac2e08152553ebc4a73e2f44181a3dfc25e059 | 165 | py | Python | testapp/another_urls.py | danigosa/django-simple-seo | 17610e50148c6672cb34e96654df1d3515b0444f | [
"BSD-3-Clause"
]
| 11 | 2015-01-02T15:44:31.000Z | 2021-07-27T06:54:35.000Z | testapp/another_urls.py | danigosa/django-simple-seo | 17610e50148c6672cb34e96654df1d3515b0444f | [
"BSD-3-Clause"
]
| 8 | 2016-02-03T07:07:04.000Z | 2022-01-13T00:42:32.000Z | testapp/another_urls.py | danigosa/django-simple-seo | 17610e50148c6672cb34e96654df1d3515b0444f | [
"BSD-3-Clause"
]
| 8 | 2015-02-20T13:51:51.000Z | 2021-06-24T19:11:30.000Z | from django.conf.urls import patterns, url
from .views import template_test
urlpatterns = patterns(
'',
url(r'^', template_test, name='template_test2'),
) | 18.333333 | 52 | 0.709091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.133333 |
47af52c1759d6ca7b81f36810e1191b6fa34e7eb | 11,920 | py | Python | non_local.py | yuxia201121/ADCTself-attention | 77d32034854f64a7aa24d45ae2c4e18f7616cf48 | [
"ECL-2.0",
"Apache-2.0"
]
| null | null | null | non_local.py | yuxia201121/ADCTself-attention | 77d32034854f64a7aa24d45ae2c4e18f7616cf48 | [
"ECL-2.0",
"Apache-2.0"
]
| null | null | null | non_local.py | yuxia201121/ADCTself-attention | 77d32034854f64a7aa24d45ae2c4e18f7616cf48 | [
"ECL-2.0",
"Apache-2.0"
]
| null | null | null | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import numpy as np
import ops
def conv1x1(input_, output_dim,
init=tf.contrib.layers.xavier_initializer(), name='conv1x1'):
k_h = 1
k_w = 1
d_h = 1
d_w = 1
with tf.variable_scope(name):
w = tf.get_variable(
'w', [k_h, k_w, input_.get_shape()[-1], output_dim],
initializer=init)
conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
return conv
def sn_conv1x1(input_, output_dim, update_collection,
init=tf.contrib.layers.xavier_initializer(), name='sn_conv1x1'):
with tf.variable_scope(name):
k_h = 1
k_w = 1
d_h = 1
d_w = 1
w = tf.get_variable(
'w', [k_h, k_w, input_.get_shape()[-1], output_dim],
initializer=init)
w_bar = ops.spectral_normed_weight(w, num_iters=1, update_collection=update_collection)
conv = tf.nn.conv2d(input_, w_bar, strides=[1, d_h, d_w, 1], padding='SAME')
return conv
def sn_non_local_block_sim(x, update_collection, name, init=tf.contrib.layers.xavier_initializer()):
with tf.variable_scope(name):
batch_size, h, w, num_channels = x.get_shape().as_list()
location_num = h * w
downsampled_num = location_num // 4
print("x=",x)
piexl_data = tf.transpose(x, [0, 3, 1, 2]) # shape=(64, 256, 32, 32)
print("piexl_data=",piexl_data)
piexl_zero_zero = piexl_data[:,:,::2,::2] # shape=(64, 256, 16, 16)
print("piexl_zero_zero=",piexl_zero_zero)
piexl_zero_one = piexl_data[:,:,::2,1::2] # shape=(64, 256, 16, 16)
print("piexl_zero_one=",piexl_zero_one)
piexl_one_zero = piexl_data[:,:,1::2,::2] # shape=(64, 256, 16, 16)
print("piexl_one_zero=",piexl_one_zero)
piexl_one_one = piexl_data[:,:,1::2,1::2] # shape=(64, 256, 16, 16)
print("piexl_one_one=",piexl_one_one)
dct_zero_zero = ((piexl_zero_zero + piexl_one_zero) + (piexl_zero_one + piexl_one_one))*2 # shape=(64, 256, 16, 16)
print("dct_zero_zero=",dct_zero_zero)
dct_zero_one = ((piexl_zero_zero + piexl_one_zero) - (piexl_zero_one + piexl_one_one))*2 # shape=(64, 256, 16, 16)
print("dct_zero_one=",dct_zero_one)
dct_one_zero = ((piexl_zero_zero - piexl_one_zero) + (piexl_zero_one - piexl_one_one))*2 # shape=(64, 256, 16, 16)
print("dct_one_zero=",dct_one_zero)
dct_one_one = ((piexl_zero_zero - piexl_one_zero) - (piexl_zero_one - piexl_one_one))*2 # shape=(64, 256, 16, 16)
print("dct_one_one=",dct_one_one)
#b00********************************************************************
x_zero_zero = tf.transpose(dct_zero_zero, [0, 2, 3, 1]) # shape=(64, 16, 16, 256)
print("x_zero_zero=",x_zero_zero)
# theta path
print("x=",x)
theta_00 = sn_conv1x1(x_zero_zero, num_channels //8 , update_collection, init, 'sn_conv_theta')
print('theta_00_sn_conv', theta_00)
#print(x.get_shape())
# theta = tf.reshape( # shape=(64, 256, 32)
# theta, [batch_size, location_num //4, num_channels // 8])
# print("theta_rehape=",theta)
# phi path
# phi_00 = sn_conv1x1(x_zero_zero, num_channels //8 , update_collection, init, 'sn_conv_phi') # shape=(64, 16, 16, 256)
# print("phi_00_sn_conv=",phi_00)
# phi = tf.layers.max_pooling2d(inputs=phi, pool_size=[2, 2], strides=2)
# print("phi_max_pool=",phi)
# phi = tf.reshape(
# phi, [batch_size, downsampled_num, num_channels])
# print("phi_rehape=",phi)
# attn_00 = tf.matmul(theta_00, phi_00, transpose_b=True)
# print("attn_00_matmul=",attn_00)
# attn_00 = tf.nn.softmax(attn_00) # shape=(64, 16, 16, 16)
# print(tf.reduce_sum(attn_00, axis=-1))
# print("attn_00_softmax=",attn_00)
############################################################################
#b01********************************************************************
x_zero_one = tf.transpose(dct_zero_one, [0, 2, 3, 1]) # shape=(64, 16, 16, 256)
print("x_zero_one=",x_zero_one)
# theta path
print("x=",x)
theta_01 = sn_conv1x1(x_zero_one, num_channels //8 , update_collection, init, 'sn_conv_theta')
print('theta_01_sn_conv', theta_01)
#print(x.get_shape())
# theta = tf.reshape( # shape=(64, 256, 32)
# theta, [batch_size, location_num //4, num_channels // 8])
# print("theta_rehape=",theta)
# phi path
# phi_01 = sn_conv1x1(x_zero_one, num_channels //8 , update_collection, init, 'sn_conv_phi') # shape=(64, 16, 16, 256)
# print("phi_01_sn_conv=",phi_01)
# phi = tf.layers.max_pooling2d(inputs=phi, pool_size=[2, 2], strides=2)
# print("phi_max_pool=",phi)
# phi = tf.reshape(
# phi, [batch_size, downsampled_num, num_channels])
# print("phi_rehape=",phi)
attn_01 = tf.matmul(theta_00, theta_01, transpose_b=True)
print("attn_01_matmul=",attn_01)
attn_01 = tf.nn.softmax(attn_01) # shape=(64, 16, 16, 16)
# print(tf.reduce_sum(attn_01, axis=-1))
# print("attn_01_softmax=",attn_01)
#b10********************************************************************
x_one_zero = tf.transpose(dct_one_zero, [0, 2, 3, 1]) # shape=(64, 16, 16, 256)
print("x_one_zero=",x_one_zero)
# theta path
print("x=",x)
theta_10 = sn_conv1x1(x_one_zero, num_channels //8 , update_collection, init, 'sn_conv_theta')
print('theta_10_sn_conv', theta_10)
#print(x.get_shape())
# theta = tf.reshape( # shape=(64, 256, 32)
# theta, [batch_size, location_num //4, num_channels // 8])
# print("theta_rehape=",theta)
# phi path
# phi_10 = sn_conv1x1(x_one_zero, num_channels //8 , update_collection, init, 'sn_conv_phi') # shape=(64, 16, 16, 256)
# print("phi_10_sn_conv=",phi_10)
# phi = tf.layers.max_pooling2d(inputs=phi, pool_size=[2, 2], strides=2)
# print("phi_max_pool=",phi)
# phi = tf.reshape(
# phi, [batch_size, downsampled_num, num_channels])
# print("phi_rehape=",phi)
# attn_10 = tf.matmul(theta_10, phi_10, transpose_b=True)
# print("attn_10_matmul=",attn_10)
# attn_10 = tf.nn.softmax(attn_10) # shape=(64, 16, 16, 16)
# print(tf.reduce_sum(attn_10, axis=-1))
# print("attn_10_softmax=",attn_10)
#b11********************************************************************
x_one_one = tf.transpose(dct_one_one, [0, 2, 3, 1]) # shape=(64, 16, 16, 256)
print("x_one_one=",x_one_one)
# theta path
print("x=",x)
theta_11 = sn_conv1x1(x_one_one, num_channels //8 , update_collection, init, 'sn_conv_theta')
print('theta_11_sn_conv', theta_11)
#print(x.get_shape())
# theta = tf.reshape( # shape=(64, 256, 32)
# theta, [batch_size, location_num //4, num_channels // 8])
# print("theta_rehape=",theta)
# phi path
# phi_11 = sn_conv1x1(x_one_one, num_channels //8 , update_collection, init, 'sn_conv_phi') # shape=(64, 16, 16, 256)
# print("phi_11_sn_conv=",phi_11)
# phi = tf.layers.max_pooling2d(inputs=phi, pool_size=[2, 2], strides=2)
# print("phi_max_pool=",phi)
# phi = tf.reshape(
# phi, [batch_size, downsampled_num, num_channels])
# print("phi_rehape=",phi)
attn_11 = tf.matmul(theta_10, theta_11, transpose_b=True)
print("attn_11_matmul=",attn_11)
attn_11 = tf.nn.softmax(attn_11) # shape=(64, 16, 16, 16)
# print(tf.reduce_sum(attn_11, axis=-1))
# print("attn_11_softmax=",attn_11)
##################################
# attn1=tf.matmul(attn_00, attn_01, transpose_b=True)
# attn2=tf.matmul(attn_10, attn_11, transpose_b=True)
# attn_dct=tf.matmul(attn1, attn2, transpose_b=True)
attn_dct=attn_01+attn_11
# attn_dct = tf.nn.softmax(attn_dct)
print("attn_dct=",attn_dct)
##################################
# pixel attention
# theta path
print("x=",x)
theta = sn_conv1x1(x, num_channels //8 , update_collection, init, 'sn_conv_theta')
print('theta_sn_conv', theta)
#print(x.get_shape())
# theta = tf.reshape( # shape=(64, 256, 32)
# theta, [batch_size, location_num //4, num_channels // 8])
# print("theta_rehape=",theta)
# phi path
phi = sn_conv1x1(x, num_channels //8 , update_collection, init, 'sn_conv_phi') # shape=(64, 16, 16, 256)
print("phi_sn_conv=",phi)
# phi = tf.layers.max_pooling2d(inputs=phi, pool_size=[2, 2], strides=2)
# print("phi_max_pool=",phi)
# phi = tf.reshape(
# phi, [batch_size, downsampled_num, num_channels])
# print("phi_rehape=",phi)
attn_pixel = tf.matmul(theta, phi, transpose_b=True)
print("attn_pixel_matmul=",attn_pixel)
attn_pixel = tf.nn.softmax(attn_pixel) # shape=(64, 32, 32, 32)
print(tf.reduce_sum(attn_pixel, axis=-1))
print("attn_pixel=",attn_pixel)
attn_pixel = tf.layers.max_pooling2d(inputs=attn_pixel, pool_size=[2, 2], strides=2)
print("attn_pixel_max_pool=",attn_pixel)
##################################
attn = tf.matmul(attn_dct, attn_pixel) # shape=(64, 16, 16, 32)
print("attn_matmul=",attn)
##################################
# g path
channels=attn.get_shape().as_list()[-1]
g = sn_conv1x1(x, channels, update_collection, init, 'sn_conv_g') # shape=(64, 32, 32, 128)
print("g_sn_conv=",g)
g = tf.layers.max_pooling2d(inputs=g, pool_size=[2, 2], strides=2) # shape=(64, 16, 16, 128)
print("g_max_pool=",g)
# g = tf.reshape(
# g, [batch_size, downsampled_num, num_channels // 2])
# print("g_reshape=",g)
attn_g = tf.matmul(attn, g, transpose_b=True)
print("attn_g_matmul=",attn_g)
# attn_g = tf.reshape(attn_g, [batch_size, h//2, w//2, num_channels // 2])
# print("attn_g_reshape=",attn_g)
attn_g = sn_conv1x1(attn_g, num_channels, update_collection, init, 'sn_conv_attn')
print("attn_g_sn_conv1x1",attn_g)
attn_g = ops.deconv2d(attn_g, [batch_size, h, w, num_channels], # num_channels
k_h=2, k_w=2, d_h=2, d_w=2, stddev=0.02,
name='attn_g_deconv2d', init_bias=0.)
print("attn_g_deconv2d=",attn_g)
print("x=",x)
sigma = tf.get_variable(
'sigma_ratio', [], initializer=tf.constant_initializer(0.0))
print("sigma=",sigma)
return x + sigma * attn_g
| 40.27027 | 127 | 0.560906 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,421 | 0.538674 |
47b3262789a56b500e24bd5503fa34b4ab5f6bca | 1,031 | py | Python | evaluation/eval_frames.py | suleymanaslan/generative-rainbow | 9f8daac5e06565ef099c0913186f5a1d801ca52c | [
"MIT"
]
| null | null | null | evaluation/eval_frames.py | suleymanaslan/generative-rainbow | 9f8daac5e06565ef099c0913186f5a1d801ca52c | [
"MIT"
]
| null | null | null | evaluation/eval_frames.py | suleymanaslan/generative-rainbow | 9f8daac5e06565ef099c0913186f5a1d801ca52c | [
"MIT"
]
| null | null | null | import numpy as np
import imageio
from evaluation.eval_utils import to_img_padded, format_img, init_evaluation
def save_frames(env, agent, pad, folder):
observation, ep_reward, done = env.reset(), 0, False
count = 0
while not done:
action, generated_next_observation = agent.act(observation, get_generated=True)
next_observation, reward, done, info = env.step(action)
imgs = [to_img_padded(observation, pad),
to_img_padded(generated_next_observation, pad),
to_img_padded(next_observation, pad)]
plot_img = np.concatenate(format_img(imgs), axis=1)
count += 1
imageio.imwrite(f"{folder}/{count:04d}.png", plot_img)
observation = next_observation
def main():
pad = [(5, 5), (5, 5), (0, 0)]
train_env, test_env, agent, agent_folder = init_evaluation(use_backgrounds=False)
save_frames(train_env, agent, pad, f"{agent_folder}/frames/train")
save_frames(test_env, agent, pad, f"{agent_folder}/frames/test")
main()
| 34.366667 | 87 | 0.681862 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 86 | 0.083414 |
47b4d5911e43771cc3188d71f2b90fe1b6287fdc | 22,950 | py | Python | acme/HttpServer.py | ankraft/ACME-oneM2M-CSE | 03c23ea19b35dd6e0aec752d9631e2a76778c61c | [
"BSD-3-Clause"
]
| 10 | 2020-09-25T08:49:19.000Z | 2022-03-30T01:29:22.000Z | acme/HttpServer.py | ankraft/ACME-oneM2M-CSE | 03c23ea19b35dd6e0aec752d9631e2a76778c61c | [
"BSD-3-Clause"
]
| 14 | 2020-05-22T08:00:32.000Z | 2020-12-24T23:38:05.000Z | acme/HttpServer.py | ankraft/ACME-oneM2M-CSE | 03c23ea19b35dd6e0aec752d9631e2a76778c61c | [
"BSD-3-Clause"
]
| 5 | 2020-05-22T03:43:20.000Z | 2021-05-25T06:54:59.000Z | #
# HttpServer.py
#
# (c) 2020 by Andreas Kraft
# License: BSD 3-Clause License. See the LICENSE file for further details.
#
# Server to implement the http part of the oneM2M Mcx communication interface.
#
from __future__ import annotations
import logging, sys, traceback, urllib3
from copy import deepcopy
from typing import Any, Callable, Tuple, cast
import flask
from flask import Flask, Request, make_response, request
from urllib3.exceptions import RequestError
from Configuration import Configuration
from Constants import Constants as C
from Types import ReqResp, ResourceTypes as T, Result, ResponseCode as RC, JSON, Conditions
from Types import Operation, CSERequest, RequestHeaders, ContentSerializationType, RequestHandler, Parameters, RequestArguments, FilterUsage, FilterOperation, DesiredIdentifierResultType, ResultContentType, ResponseType
import CSE, Utils
from Logging import Logging as L, LogLevel
from resources.Resource import Resource
from werkzeug.wrappers import Response
from werkzeug.serving import WSGIRequestHandler
from werkzeug.datastructures import MultiDict
from webUI import WebUI
from helpers.BackgroundWorker import *
#
# Types definitions for the http server
#
FlaskHandler = Callable[[str], Response]
""" Type definition for flask handler. """
class HttpServer(object):
def __init__(self) -> None:
# Initialize the http server
# Meaning defaults are automatically provided.
self.flaskApp = Flask(CSE.cseCsi)
self.rootPath = Configuration.get('http.root')
self.serverAddress = Configuration.get('http.address')
self.listenIF = Configuration.get('http.listenIF')
self.port = Configuration.get('http.port')
self.webuiRoot = Configuration.get('cse.webui.root')
self.webuiDirectory = f'{CSE.rootDirectory}/webui'
self.hfvRVI = Configuration.get('cse.releaseVersion')
self.isStopped = False
self.backgroundActor:BackgroundWorker = None
self.serverID = f'ACME {C.version}' # The server's ID for http response headers
self._responseHeaders = {'Server' : self.serverID} # Additional headers for other requests
L.isInfo and L.log(f'Registering http server root at: {self.rootPath}')
if CSE.security.useTLS:
L.isInfo and L.log('TLS enabled. HTTP server serves via https.')
# Add endpoints
# self.addEndpoint(self.rootPath + '/', handler=self.handleGET, methods=['GET'])
self.addEndpoint(self.rootPath + '/<path:path>', handler=self.handleGET, methods=['GET'])
# self.addEndpoint(self.rootPath + '/', handler=self.handlePOST, methods=['POST'])
self.addEndpoint(self.rootPath + '/<path:path>', handler=self.handlePOST, methods=['POST'])
# self.addEndpoint(self.rootPath + '/', handler=self.handlePUT, methods=['PUT'])
self.addEndpoint(self.rootPath + '/<path:path>', handler=self.handlePUT, methods=['PUT'])
# self.addEndpoint(self.rootPath + '/', handler=self.handleDELETE, methods=['DELETE'])
self.addEndpoint(self.rootPath + '/<path:path>', handler=self.handleDELETE, methods=['DELETE'])
# Register the endpoint for the web UI
# This is done by instancing the otherwise "external" web UI
self.webui = WebUI(self.flaskApp,
defaultRI=CSE.cseRi,
defaultOriginator=CSE.cseOriginator,
root=self.webuiRoot,
webuiDirectory=self.webuiDirectory,
version=C.version)
# Enable the config endpoint
if Configuration.get('http.enableRemoteConfiguration'):
configEndpoint = f'{self.rootPath}/__config__'
L.isInfo and L.log(f'Registering configuration endpoint at: {configEndpoint}')
self.addEndpoint(configEndpoint, handler=self.handleConfig, methods=['GET'], strictSlashes=False)
self.addEndpoint(f'{configEndpoint}/<path:path>', handler=self.handleConfig, methods=['GET', 'PUT'])
# Enable the config endpoint
if Configuration.get('http.enableStructureEndpoint'):
structureEndpoint = f'{self.rootPath}/__structure__'
L.isInfo and L.log(f'Registering structure endpoint at: {structureEndpoint}')
self.addEndpoint(structureEndpoint, handler=self.handleStructure, methods=['GET'], strictSlashes=False)
self.addEndpoint(f'{structureEndpoint}/<path:path>', handler=self.handleStructure, methods=['GET', 'PUT'])
# Enable the reset endpoint
if Configuration.get('http.enableResetEndpoint'):
resetEndPoint = f'{self.rootPath}/__reset__'
L.isInfo and L.log(f'Registering reset endpoint at: {resetEndPoint}')
self.addEndpoint(resetEndPoint, handler=self.handleReset, methods=['GET'], strictSlashes=False)
# Add mapping / macro endpoints
self.mappings = {}
if (mappings := Configuration.get('server.http.mappings')) is not None:
# mappings is a list of tuples
for (k, v) in mappings:
L.isInfo and L.log(f'Registering mapping: {self.rootPath}{k} -> {self.rootPath}{v}')
self.addEndpoint(self.rootPath + k, handler=self.requestRedirect, methods=['GET', 'POST', 'PUT', 'DELETE'])
self.mappings = dict(mappings)
# Disable most logs from requests and urllib3 library
logging.getLogger("requests").setLevel(LogLevel.WARNING)
logging.getLogger("urllib3").setLevel(LogLevel.WARNING)
		if not CSE.security.verifyCertificate:	# only disable the warnings when we do not verify certificates
urllib3.disable_warnings()
L.isInfo and L.log('HTTP Server initialized')
def run(self) -> None:
""" Run the http server in a separate thread.
"""
self.httpActor = BackgroundWorkerPool.newActor(self._run, name='HTTPServer')
self.httpActor.start()
def shutdown(self) -> bool:
""" Shutting down the http server.
"""
L.isInfo and L.log('HttpServer shut down')
self.isStopped = True
return True
def _run(self) -> None:
WSGIRequestHandler.protocol_version = "HTTP/1.1"
# Run the http server. This runs forever.
# The server can run single-threadedly since some of the underlying
# components (e.g. TinyDB) may run into problems otherwise.
if self.flaskApp is not None:
# Disable the flask banner messages
cli = sys.modules['flask.cli']
cli.show_server_banner = lambda *x: None # type: ignore
# Start the server
try:
self.flaskApp.run(host=self.listenIF,
port=self.port,
threaded=Configuration.get('http.multiThread'),
request_handler=ACMERequestHandler,
ssl_context=CSE.security.getSSLContext(),
debug=False)
except Exception as e:
# No logging for headless, nevertheless print the reason what happened
if CSE.isHeadless:
L.console(str(e), isError=True)
L.logErr(str(e))
CSE.shutdown() # exit the CSE. Cleanup happens in the CSE atexit() handler
def addEndpoint(self, endpoint:str=None, endpoint_name:str=None, handler:FlaskHandler=None, methods:list[str]=None, strictSlashes:bool=True) -> None:
self.flaskApp.add_url_rule(endpoint, endpoint_name, handler, methods=methods, strict_slashes=strictSlashes)
def _handleRequest(self, path:str, operation:Operation) -> Response:
""" Get and check all the necessary information from the request and
			build the internal structures. Then, depending on the operation,
call the associated request handler.
"""
L.isDebug and L.logDebug(f'==> HTTP-{operation.name}: /{path}') # path = request.path w/o the root
L.isDebug and L.logDebug(f'Headers: \n{str(request.headers)}')
dissectResult = self._dissectHttpRequest(request, operation, path)
if self.isStopped:
responseResult = Result(rsc=RC.internalServerError, dbg='http server not running', status=False)
else:
try:
if dissectResult.status:
if operation in [ Operation.CREATE, Operation.UPDATE ]:
if dissectResult.request.ct == ContentSerializationType.CBOR:
L.isDebug and L.logDebug(f'Body: \n{Utils.toHex(cast(bytes, dissectResult.request.data))}\n=>\n{dissectResult.request.dict}')
else:
L.isDebug and L.logDebug(f'Body: \n{str(dissectResult.request.data)}')
responseResult = CSE.request.handleRequest(dissectResult.request)
else:
responseResult = dissectResult
except Exception as e:
responseResult = Utils.exceptionToResult(e)
responseResult.request = dissectResult.request
return self._prepareResponse(responseResult)
def handleGET(self, path:str=None) -> Response:
Utils.renameCurrentThread()
CSE.event.httpRetrieve() # type: ignore
return self._handleRequest(path, Operation.RETRIEVE)
def handlePOST(self, path:str=None) -> Response:
Utils.renameCurrentThread()
CSE.event.httpCreate() # type: ignore
return self._handleRequest(path, Operation.CREATE)
def handlePUT(self, path:str=None) -> Response:
Utils.renameCurrentThread()
CSE.event.httpUpdate() # type: ignore
return self._handleRequest(path, Operation.UPDATE)
def handleDELETE(self, path:str=None) -> Response:
Utils.renameCurrentThread()
CSE.event.httpDelete() # type: ignore
return self._handleRequest(path, Operation.DELETE)
#########################################################################
# Handle requests to mapped paths
def requestRedirect(self, path:str=None) -> Response:
path = request.path[len(self.rootPath):] if request.path.startswith(self.rootPath) else request.path
if path in self.mappings:
L.isDebug and L.logDebug(f'==> Redirecting to: /{path}')
CSE.event.httpRedirect() # type: ignore
return flask.redirect(self.mappings[path], code=307)
return Response('', status=404)
#########################################################################
#
# Various handlers
#
# Redirect request to / to webui
def redirectRoot(self) -> Response:
""" Redirect a request to the webroot to the web UI.
"""
return flask.redirect(self.webuiRoot, code=302)
def getVersion(self) -> Response:
""" Handle a GET request to return the CSE version.
"""
return Response(C.version, headers=self._responseHeaders)
def handleConfig(self, path:str=None) -> Response:
""" Handle a configuration request. This can either be e GET request to query a
configuration value, or a PUT request to set a new value to a configuration setting.
Note, that only a few of configuration settings are supported.
"""
def _r(r:str) -> Response: # just construct a response. Trying to reduce the clutter here
return Response(r, headers=self._responseHeaders)
if request.method == 'GET':
if path == None or len(path) == 0:
return _r(Configuration.print())
if Configuration.has(path):
return _r(str(Configuration.get(path)))
return _r('')
elif request.method =='PUT':
data = request.data.decode('utf-8').rstrip()
try:
L.isDebug and L.logDebug(f'New remote configuration: {path} = {data}')
if path == 'cse.checkExpirationsInterval':
if (d := int(data)) < 1:
return _r('nak')
Configuration.set(path, d)
CSE.registration.stopExpirationMonitor()
CSE.registration.startExpirationMonitor()
return _r('ack')
elif path in [ 'cse.req.minet', 'cse.req.maxnet' ]:
if (d := int(data)) < 1:
return _r('nak')
Configuration.set(path, d)
return _r('ack')
except:
return _r('nak')
return _r('nak')
return _r('unsupported')
def handleStructure(self, path:str='puml') -> Response:
""" Handle a structure request. Return a description of the CSE's current resource
and registrar / registree deployment.
An optional parameter 'lvl=<int>' can limit the generated resource tree's depth.
"""
lvl = request.args.get('lvl', default=0, type=int)
if path == 'puml':
return Response(response=CSE.statistics.getStructurePuml(lvl), headers=self._responseHeaders)
if path == 'text':
return Response(response=CSE.console.getResourceTreeText(lvl), headers=self._responseHeaders)
return Response(response='unsupported', status=422, headers=self._responseHeaders)
def handleReset(self, path:str=None) -> Response:
""" Handle a CSE reset request.
"""
CSE.resetCSE()
return Response(response='', status=200)
#########################################################################
#
# Send HTTP requests
#
def _prepContent(self, content:bytes|str|Any, ct:ContentSerializationType) -> str:
if content is None: return ''
if isinstance(content, str): return content
return content.decode('utf-8') if ct == ContentSerializationType.JSON else Utils.toHex(content)
def sendHttpRequest(self, method:Callable, url:str, originator:str, ty:T=None, data:Any=None, parameters:Parameters=None, ct:ContentSerializationType=None, targetResource:Resource=None) -> Result: # type: ignore[type-arg]
ct = CSE.defaultSerialization if ct is None else ct
# Set basic headers
hty = f';ty={int(ty):d}' if ty is not None else ''
hds = { 'User-Agent' : self.serverID,
'Content-Type' : f'{ct.toHeader()}{hty}',
'Accept' : ct.toHeader(),
C.hfOrigin : originator,
C.hfRI : Utils.uniqueRI(),
C.hfRVI : self.hfvRVI, # TODO this actually depends in the originator
}
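		# For illustration only (the values are made up; the X-M2M-* names assume the
		# usual oneM2M HTTP binding also used elsewhere in this file): a CREATE of an
		# <AE> resource would carry e.g.
		#   Content-Type: application/json;ty=2
		#   Accept:       application/json
		#   X-M2M-Origin: CAdmin
		#   X-M2M-RI:     <a unique request id>
		#   X-M2M-RVI:    3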
# Add additional headers
if parameters is not None:
if C.hfcEC in parameters: # Event Category
hds[C.hfEC] = parameters[C.hfcEC]
# serialize data (only if dictionary, pass on non-dict data)
content = Utils.serializeData(data, ct) if isinstance(data, dict) else data
# ! Don't forget: requests are done through the request library, not flask.
# ! The attribute names are different
try:
L.isDebug and L.logDebug(f'Sending request: {method.__name__.upper()} {url}')
if ct == ContentSerializationType.CBOR:
L.isDebug and L.logDebug(f'HTTP-Request ==>:\nHeaders: {hds}\nBody: \n{self._prepContent(content, ct)}\n=>\n{str(data) if data is not None else ""}\n')
else:
L.isDebug and L.logDebug(f'HTTP-Request ==>:\nHeaders: {hds}\nBody: \n{self._prepContent(content, ct)}\n')
# Actual sending the request
r = method(url, data=content, headers=hds, verify=CSE.security.verifyCertificate)
responseCt = ContentSerializationType.getType(r.headers['Content-Type']) if 'Content-Type' in r.headers else ct
rc = RC(int(r.headers['X-M2M-RSC'])) if 'X-M2M-RSC' in r.headers else RC.internalServerError
L.isDebug and L.logDebug(f'HTTP-Response <== ({str(r.status_code)}):\nHeaders: {str(r.headers)}\nBody: \n{self._prepContent(r.content, responseCt)}\n')
except Exception as e:
L.isDebug and L.logWarn(f'Failed to send request: {str(e)}')
return Result(rsc=RC.targetNotReachable, dbg='target not reachable')
return Result(dict=Utils.deserializeData(r.content, responseCt), rsc=rc)
#########################################################################
def _prepareResponse(self, result:Result) -> Response:
content:str|bytes|JSON = ''
# Build the headers
headers = {}
headers['Server'] = self.serverID # set server field
headers['X-M2M-RSC'] = f'{result.rsc}' # set the response status code
if result.request.headers.requestIdentifier is not None:
headers['X-M2M-RI'] = result.request.headers.requestIdentifier
if result.request.headers.releaseVersionIndicator is not None:
headers['X-M2M-RVI'] = result.request.headers.releaseVersionIndicator
if result.request.headers.vendorInformation is not None:
headers['X-M2M-VSI'] = result.request.headers.vendorInformation
# HTTP status code
statusCode = result.rsc.httpStatusCode()
#
		# Determine the accept type and encode the content accordingly
#
# Look whether there is a accept header in the original request
if result.request.headers.accept is not None and len(result.request.headers.accept) > 0:
ct = ContentSerializationType.getType(result.request.headers.accept[0])
# No accept, check originator
elif len(csz := CSE.request.getSerializationFromOriginator(result.request.headers.originator)) > 0:
ct = csz[0]
# Default: configured CSE's default
else:
ct = CSE.defaultSerialization
# Assign and encode content accordingly
headers['Content-Type'] = (cts := ct.toHeader())
content = result.toData(ct)
# Build and return the response
if isinstance(content, bytes):
L.isDebug and L.logDebug(f'<== HTTP-Response (RSC: {result.rsc:d}):\nHeaders: {str(headers)}\nBody: \n{Utils.toHex(content)}\n=>\n{str(result.toData())}')
else:
L.isDebug and L.logDebug(f'<== HTTP-Response (RSC: {result.rsc:d}):\nHeaders: {str(headers)}\nBody: {str(content)}\n')
return Response(response=content, status=statusCode, content_type=cts, headers=headers)
# def _prepareException(self, e:Exception) -> Result:
# tb = traceback.format_exc()
# L.logErr(tb, exc=e)
# tbs = tb.replace('"', '\\"').replace('\n', '\\n')
# return Result(rsc=RC.internalServerError, dbg=f'encountered exception: {tbs}')
#########################################################################
#
# HTTP request helper functions
#
#def _dissectHttpRequest(self, request:Request, operation:Operation, _id:Tuple[str, str, str]) -> Result:
def _dissectHttpRequest(self, request:Request, operation:Operation, to:str) -> Result:
""" Dissect an HTTP request. Combine headers and contents into a single structure. Result is returned in Result.request.
"""
# def extractMultipleArgs(args:MultiDict, argName:str, validate:bool=True) -> Tuple[bool, str]:
# """ Get multi-arguments. Remove the found arguments from the original list, but add the new list again with the argument name.
# """
# lst = []
# for e in args.getlist(argName):
# for es in (t := e.split()): # check for number
# if validate:
# if not CSE.validator.validateRequestArgument(argName, es).status:
# return False, f'error validating "{argName}" argument(s)'
# lst.extend(t)
# args.poplist(argName) # type: ignore [no-untyped-call] # perhaps even multiple times
# if len(lst) > 0:
# args[argName] = lst
# return True, None
def extractMultipleArgs(args:MultiDict, argName:str) -> None:
""" Get multi-arguments. Remove the found arguments from the original list, but add the new list again with the argument name.
"""
lst = [ t for sublist in args.getlist(argName) for t in sublist.split() ]
args.poplist(argName) # type: ignore [no-untyped-call] # perhaps even multiple times
if len(lst) > 0:
args[argName] = lst
def requestHeaderField(request:Request, field:str) -> str:
""" Return the value of a specific Request header, or `None` if not found.
"""
if not request.headers.has_key(field):
return None
return request.headers.get(field)
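
		# Assemble the oneM2M request primitive step by step: operation and target first, then the
		# relevant X-M2M-* headers, the Content-Type / Accept headers, the query arguments (as
		# filter criteria), and finally the deserialized and validated body.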
cseRequest = CSERequest()
req:ReqResp = {}
cseRequest.data = request.data # get the data first. This marks the request as consumed, just in case that we have to return early
cseRequest.op = operation
req['op'] = operation.value # Needed later for validation
req['to'] = to
# Copy and parse the original request headers
if (f := requestHeaderField(request, C.hfOrigin)) is not None:
req['fr'] = f
if (f := requestHeaderField(request, C.hfRI)) is not None:
req['rqi'] = f
if (f := requestHeaderField(request, C.hfRET)) is not None:
req['rqet'] = f
if (f := requestHeaderField(request, C.hfRST)) is not None:
req['rset'] = f
if (f := requestHeaderField(request, C.hfOET)) is not None:
req['oet'] = f
if (f := requestHeaderField(request, C.hfRVI)) is not None:
req['rvi'] = f
if (rtu := requestHeaderField(request, C.hfRTU)) is not None: # handle rtu as a list
req['rtu'] = rtu.split('&')
if (f := requestHeaderField(request, C.hfVSI)) is not None:
req['vsi'] = f
# parse and extract content-type header
# cseRequest.headers.contentType = request.content_type
if (ct := request.content_type) is not None:
if not ct.startswith(tuple(C.supportedContentHeaderFormat)):
ct = None
else:
p = ct.partition(';') # always returns a 3-tuple
ct = p[0] # only the content-type without the resource type
t = p[2].partition('=')[2]
if len(t) > 0:
req['ty'] = t # Here we found the type for CREATE requests
cseRequest.headers.contentType = ct
# parse accept header
cseRequest.headers.accept = request.headers.getlist('accept')
cseRequest.headers.accept = [ a for a in cseRequest.headers.accept if a != '*/*' ]
cseRequest.originalArgs = deepcopy(request.args) # Keep the original args
# copy request arguments for greedy attributes checking
args = request.args.copy() # type: ignore [no-untyped-call]
# Do some special handling for those arguments that could occur multiple
# times in the args MultiDict. They are collected together in a single list
# and added again to args.
		extractMultipleArgs(args, 'ty')		# conversion to int happens later in fillAndValidateCSERequest()
extractMultipleArgs(args, 'cty')
extractMultipleArgs(args, 'lbl')
# Handle rcn differently.
		# rcn is not a filter criterion like the other attributes, but a request attribute of its own
if (rcn := args.get('rcn')) is not None:
req['rcn'] = rcn
del args['rcn']
# Extract further request arguments from the http request
# add all the args to the filterCriteria
filterCriteria:ReqResp = {}
for k,v in args.items():
filterCriteria[k] = v
req['fc'] = filterCriteria
# De-Serialize the content
if not (contentResult := CSE.request.deserializeContent(cseRequest.data, cseRequest.headers.contentType)).status:
return Result(rsc=contentResult.rsc, request=cseRequest, dbg=contentResult.dbg, status=False)
# Remove 'None' fields *before* adding the pc, because the pc may contain 'None' fields that need to be preserved
req = Utils.removeNoneValuesFromDict(req)
		# Add the primitive content and the content serialization type
		req['pc'] = contentResult.data[0]		# The actual content
		cseRequest.ct = contentResult.data[1]	# The content serialization type
cseRequest.req = req # finally store the oneM2M request object in the cseRequest
# do validation and copying of attributes of the whole request
try:
# L.logWarn(str(cseRequest))
if not (res := CSE.request.fillAndValidateCSERequest(cseRequest)).status:
return res
except Exception as e:
return Result(rsc=RC.badRequest, request=cseRequest, dbg=f'invalid arguments/attributes ({str(e)})', status=False)
# Here, if everything went okay so far, we have a request to the CSE
return Result(request=cseRequest, status=True)


##############################################################################
#
#	Own request handler.
#	Actually only to redirect some logging of the http server.
#	This handler does NOT handle requests.
#

class ACMERequestHandler(WSGIRequestHandler):

	# Just like WSGIRequestHandler, but without "- -"
	def log(self, type, message, *args):	# type: ignore
		L.isDebug and L.logDebug(message % args)
		return
		# L.isDebug and L.log(f'{self.address_string()} {message % args}\n')


	# Just like WSGIRequestHandler, but without "code"
	def log_request(self, code='-', size='-'):	# type: ignore
		L.isDebug and L.logDebug(f'"{self.requestline}" {size} {code}')
		return


	def log_message(self, format, *args):	# type: ignore
		L.isDebug and L.logDebug(format % args)
		return
| 39.43299 | 223 | 0.699695 | 21,449 | 0.934597 | 0 | 0 | 0 | 0 | 0 | 0 | 9,463 | 0.412331 |
47b6367947784f5f8c60ac4a630ae41b0271c546 | 2,855 | py | Python | tests.py | dave-shawley/sprockets.mixins.statsd | 98dcce37d275a3ab96ef618b4756d7c4618a550a | ["BSD-3-Clause"] | 1 | 2016-04-18T14:43:28.000Z | 2016-04-18T14:43:28.000Z | tests.py | dave-shawley/sprockets.mixins.statsd | 98dcce37d275a3ab96ef618b4756d7c4618a550a | ["BSD-3-Clause"] | 1 | 2015-03-19T20:09:31.000Z | 2015-03-19T20:56:13.000Z | tests.py | dave-shawley/sprockets.mixins.statsd | 98dcce37d275a3ab96ef618b4756d7c4618a550a | ["BSD-3-Clause"] | 1 | 2021-07-21T16:45:20.000Z | 2021-07-21T16:45:20.000Z |
"""
Tests for the sprockets.mixins.statsd package
"""
import mock
import socket
try:
import unittest2 as unittest
except ImportError:
import unittest
from tornado import httputil
from tornado import web
from sprockets.mixins import statsd as statsd


class StatsdRequestHandler(statsd.RequestMetricsMixin,
                           web.RequestHandler):
    pass
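

# Minimal stand-ins for a Tornado connection context, used so the tests can build
# an HTTPServerRequest without opening a real socket.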
class Context(object):
    remote_ip = '127.0.0.1'
    protocol = 'http'


class Connection(object):
    context = Context()

    def set_close_callback(self, callback):
        pass


def setUp(self):
self.application = web.Application()
self.request = httputil.HTTPServerRequest('GET',
uri='http://test/foo',
connection=Connection(),
host='127.0.0.1')
self.handler = StatsdRequestHandler(self.application, self.request)
self.handler._status_code = 200
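
    # The timing tests pin the request duration to exactly one second, so
    # add_timing is expected to be called with value=1000 (milliseconds).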
def test_on_finish_calls_statsd_add_timing(self):
self.handler.statsd_use_hostname = True
self.request._finish_time = self.request._start_time + 1
self.duration = self.request._finish_time - self.request._start_time
with mock.patch('sprockets.clients.statsd.add_timing') as add_timing:
self.handler.on_finish()
add_timing.assert_called_once_with(
'timers.' + socket.gethostname(), 'tests',
'StatsdRequestHandler', 'GET', '200',
value=self.duration * 1000)
def test_on_finish_calls_statsd_incr(self):
self.handler.statsd_use_hostname = True
with mock.patch('sprockets.clients.statsd.incr') as incr:
self.handler.on_finish()
incr.assert_called_once_with(
'counters.' + socket.gethostname(), 'tests',
'StatsdRequestHandler', 'GET', '200')
def test_on_finish_calls_statsd_add_timing_without_hostname(self):
self.handler.statsd_use_hostname = False
self.request._finish_time = self.request._start_time + 1
self.duration = self.request._finish_time - self.request._start_time
with mock.patch('sprockets.clients.statsd.add_timing') as add_timing:
self.handler.on_finish()
add_timing.assert_called_once_with(
'timers', 'tests', 'StatsdRequestHandler', 'GET', '200',
value=self.duration * 1000)
def test_on_finish_calls_statsd_incr_without_hostname(self):
self.handler.statsd_use_hostname = False
with mock.patch('sprockets.clients.statsd.incr') as incr:
self.handler.on_finish()
incr.assert_called_once_with(
'counters', 'tests', 'StatsdRequestHandler', 'GET', '200')
| 34.817073 | 77 | 0.633275 | 2,582 | 0.904378 | 0 | 0 | 0 | 0 | 0 | 0 | 434 | 0.152014 |