Dataset schema (one row per Python source file):

| column | dtype | range / length |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |

In each record below, the trailing pipe-separated line after the file content carries the columns avg_line_length through score_documentation in the order listed above.
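The columns above describe a code-corpus table: repository metadata (stars, issues, forks, licenses), the raw file `content`, and per-file statistics such as `alphanum_fraction` and `score_documentation`. As a rough illustration only, the sketch below shows how a table with this schema could be loaded and filtered with the Hugging Face `datasets` library; the local parquet path and the filter thresholds are assumptions, not something specified by this dump.

```python
# Minimal sketch, assuming the records below are stored locally as parquet files
# (the path "python_corpus/*.parquet" is hypothetical).
from datasets import load_dataset

ds = load_dataset("parquet", data_files="python_corpus/*.parquet", split="train")

# Keep files from repos with at least one star and some documentation.
# max_stars_count is nullable, so treat missing values as zero.
filtered = ds.filter(
    lambda row: (row["max_stars_count"] or 0) >= 1
    and row["score_documentation"] > 0.05  # threshold chosen for illustration
)

for row in filtered.select(range(3)):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```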
5b24576277ff90503d0b77ea45447ed2cd207807 | 3,443 | py | Python | add_label.py | Mause/pull_requests | 6c3aa3feb8ec775c184eaa70d09b944ba753125b | ["MIT"] | stars: null | issues: 39 (2021-02-10T05:59:09.000Z to 2022-03-18T07:21:29.000Z) | forks: null
from asyncio import get_event_loop
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Union
from aiohttp import ClientSession
from pydantic import BaseModel
from sgqlc.endpoint.base import BaseEndpoint
from sgqlc.operation import Operation
from sgqlc_schemas.github.schema import (
AddLabelsToLabelableInput,
AddLabelsToLabelablePayload,
MergePullRequestInput,
Mutation,
Query,
Repository,
)
class Shared(BaseModel):
class Config:
arbitrary_types_allowed = True
class Location(Shared):
column: int
line: int
class Error(Shared):
locations: List[Location]
message: str
path: Optional[List[str]] = None
class DataWithErrors(Shared):
data: Union[Query, Mutation]
errors: List[Error]
@dataclass
class AsyncHttpEndpoint(BaseEndpoint):
url: str
headers: Dict[str, str] = field(default_factory=dict)
async def __call__(self, query) -> DataWithErrors:
async with ClientSession() as session:
res = await session.post(
self.url,
headers={**self.headers, 'Content-Type': 'application/json'},
json={'query': bytes(query).decode()},
)
try:
data = await res.json()
except Exception as e:
self._log_json_error(await res.text(), e)
data.setdefault('errors', [])
if data['errors']:
self._log_graphql_error(query, data)
if not (data['errors'] or data.get('data')):
data['errors'] = [{'message': data['message'], 'locations': []}]
return DataWithErrors(data=query + data, errors=data['errors'])
async def add_labels_to_labelable(
endpoint: BaseEndpoint, repository_id: str, labelable_id: str, label: str
) -> AddLabelsToLabelablePayload:
query = Operation(Query)
query.node(id=repository_id).__as__(Repository).labels(first=50).nodes().__fields__(
'name', 'id'
)
labels = {
repo_label.name: repo_label.id
for repo_label in (await endpoint(query)).node.labels.nodes
}
mutation = Operation(Mutation)
mutation.add_labels_to_labelable(
input=AddLabelsToLabelableInput(
labelable_id=labelable_id, label_ids=[labels[label]]
)
)
return (await endpoint(mutation)).add_labels_to_labelable
async def build_endpoint(token: str) -> AsyncHttpEndpoint:
return AsyncHttpEndpoint(
'https://api.github.com/graphql',
{'Authorization': 'Bearer ' + token},
)
async def main():
endpoint = await build_endpoint(open('token.txt').read())
qu = Operation(Query)
repo = qu.repository(owner='Mause', name='media')
repo.id()
repo.pull_requests(first=1).nodes().__fields__('title', 'id')
res = (await endpoint(qu)).repository
await add_labels_to_labelable(
endpoint, res.id, res.pull_requests.nodes[0].id, 'automerge'
)
op = Operation(Mutation)
op = build_merge([res.pull_requests.nodes[0].id])
res = await endpoint(op)
print(res)
def build_merge(ids: List[str]):
op = Operation(Mutation)
for i, ident in enumerate(ids):
op.merge_pull_request(
input=MergePullRequestInput(pull_request_id=ident), __alias__=f'merge_{i}'
).pull_request.title()
return op
if __name__ == "__main__":
get_event_loop().run_until_complete(main())
| 27.544 | 88 | 0.652338 | 1,239 | 0.359861 | 0 | 0 | 926 | 0.268951 | 2,179 | 0.632878 | 249 | 0.072321 |
5b24e7eb961669dcd20e501b760778d98a071d8b | 851 | py | Python | DataEngineering/Chapter7/7.6/financialdata/financialdata/scheduler.py | yz830620/FinMindBook | 1ffda3541eb73e6d4cb47798bf9d28b66a49939b | ["MIT"] | stars: 5 (2021-12-13T12:03:22.000Z to 2022-03-30T08:51:19.000Z) | issues: 1 (2022-01-26T05:42:56.000Z to 2022-03-12T08:24:57.000Z) | forks: 6 (2021-12-14T04:32:01.000Z to 2022-03-31T17:15:11.000Z)
import time
import datetime
from apscheduler.schedulers.background import BackgroundScheduler
from financialdata.producer import Update
from loguru import logger
def sent_crawler_task():
    # Replace this block with the code that actually dispatches the task
# logger.info(f"sent_crawler_task {dataset}")
today = datetime.datetime.today().date().strftime("%Y-%m-%d")
Update(dataset="taiwan_stock_price", start_date=today, end_date=today)
def main():
scheduler = BackgroundScheduler(timezone="Asia/Taipei")
    # Similar to crontab: set when to run via hour/minute/second parameters; the * wildcard means any value
scheduler.add_job(
id="sent_crawler_task",
func=sent_crawler_task,
trigger="cron",
hour="15",
minute="0",
day_of_week="mon-fri",
)
logger.info("sent_crawler_task")
scheduler.start()
if __name__ == "__main__":
main()
while True:
time.sleep(600)
| 24.314286 | 74 | 0.679201 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 308 | 0.327311 |
d289828efb378099de1d3d6011a5a3e50df04330 | 2,692 | py | Python | openmc_plasma_source/plotting/plot_tokamak_source.py | mdfaisal98/openmc-plasma-source | e55d61ce6d641f4d382ce298b6f6335cd46bc507 | ["MIT"] | stars: null | issues: null | forks: null
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
def scatter_tokamak_source(source, quantity=None, **kwargs):
"""Create a 2D scatter plot of the tokamak source.
See https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.scatter.html
for more arguments.
Args:
source (ops.TokamakSource): the plasma source
quantity ("str", optional): value by which the lines should be
coloured. Defaults to None.
Raises:
ValueError: If the quantity is unknown
"""
quantity_to_attribute = {
"ion_temperature": source.temperatures,
"neutron_source_density": source.strengths
}
if quantity in quantity_to_attribute:
colours = quantity_to_attribute[quantity]
elif quantity is None:
colours = None
else:
raise ValueError("Unknown quantity")
plt.gca().set_aspect("equal")
return plt.scatter(source.RZ[0], source.RZ[1], c=colours, **kwargs)
def plot_tokamak_source_3D(source, quantity=None, angles=[0, 1/2*np.pi], colorbar="viridis", **kwargs):
"""Creates a 3D plot of the tokamak source.
See https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.plot.html#matplotlib.pyplot.plot
for more arguments.
Args:
source (ops.TokamakSource): the plasma source
quantity ("str", optional): value by which the lines should be
coloured. Defaults to None.
angles (list, optional): iterable of two floats defining the coverage.
Defaults to [0, 1/2*np.pi].
colorbar (str, optional): colorbar used if quantity is not None.
Defaults to "viridis".
Raises:
ValueError: If the quantity is unknown
"""
quantity_to_attribute = {
"ion_temperature": source.temperatures,
"neutron_source_density": source.strengths
}
if quantity in quantity_to_attribute:
values = quantity_to_attribute[quantity]
elif quantity is None:
values = None
else:
raise ValueError("Unknown quantity")
colorbar = cm.get_cmap(colorbar)
axes = plt.axes(projection="3d")
theta = np.linspace(*angles, 100)
for i in range(source.sample_size):
if values is not None:
colour = colorbar(values[i]/max(values))
else:
colour = None
x = source.RZ[0][i] * np.sin(theta)
y = source.RZ[0][i] * np.cos(theta)
z = source.RZ[1][i]
plt.plot(x, y, z, color=colour, **kwargs)
axes.set_xlim(-source.major_radius, source.major_radius)
axes.set_ylim(-source.major_radius, source.major_radius)
axes.set_zlim(-source.major_radius, source.major_radius)
| 33.65 | 103 | 0.658247 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,176 | 0.43685 |
d28a47045a9d4366365cea9cca22f372e578a38f | 620 | py | Python | Exercício feitos pela primeira vez/ex046.py | Claayton/pythonExerciciosLinux | 696cdb16983638418bd0d0d4fe44dc72662b9c97 | ["MIT"] | stars: 1 (2021-01-23T15:43:34.000Z to 2021-01-23T15:43:34.000Z) | issues: null | forks: null
#Exercício046
from time import sleep
import emoji
print('\033[32mCONTAGEM REGRESSIVA PARA O ANO NOVO:\033[m')
sleep(1)
for c in range(10, 0 - 1, -1):  # repeats the numbers from 10 down to 0
print(c)
sleep(1)
print(emoji.emojize("\033[31m:boom::boom::boom:KABUM:boom::boom::boom:", use_aliases=True))
print(emoji.emojize("\033[32m:boom::boom::boom:FOGUETE:boom::boom::boom:", use_aliases=True))
print(emoji.emojize("\033[33m:boom::boom::boom:FOGOS E MAIS FOGOS:boom::boom::boom:", use_aliases=True))
print(emoji.emojize("\033[34m:boom::boom::boom:GUANAGARA VIADO:boom::boom::boom:", use_aliases=True))
print('\033[32mxD')
| 47.692308 | 104 | 0.720968 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 341 | 0.547352 |
d28ad97667405531526925b2fe6abf6f466b39ff | 10,989 | py | Python | bmds/bmds2/logic/rules.py | shapiromatron/bmds | 57562858f3c45e9b9ec23e1c229a8a1de0ea4a70 | ["MIT"] | stars: 2 (2017-05-01T20:00:26.000Z to 2019-07-09T16:42:25.000Z) | issues: 20 (2016-11-23T21:30:22.000Z to 2022-02-28T15:42:36.000Z) | forks: 2 (2016-06-28T20:32:00.000Z to 2017-02-23T20:30:24.000Z)
import abc
import math
from ... import constants
class Rule(abc.ABC):
def __init__(self, failure_bin, **kwargs):
self.failure_bin = failure_bin
self.enabled = kwargs.get("enabled", True)
self.threshold = kwargs.get("threshold", float("nan"))
self.rule_name = kwargs.get("rule_name", self.default_rule_name)
self.kwargs = kwargs
def __unicode__(self):
enabled = "✓" if self.enabled else "✕"
threshold = "" if math.isnan(self.threshold) else ", threshold={}".format(self.threshold)
return "{0} {1} [bin={2}{3}]".format(enabled, self.rule_name, self.binmoji, threshold)
def check(self, dataset, output):
if self.enabled:
return self.apply_rule(dataset, output)
else:
return self.return_pass()
@property
def binmoji(self):
return constants.BIN_ICON[self.failure_bin]
@property
def bin_text(self):
return constants.BIN_TEXT[self.failure_bin]
def as_row(self):
return [self.rule_name, self.enabled, self.bin_text, self.threshold]
def return_pass(self):
return constants.BIN_NO_CHANGE, None
@abc.abstractmethod
def apply_rule(self, dataset, output):
"""return tuple of (bin, notes) associated with rule or None"""
...
def get_failure_message(self, *args) -> str:
return "An error occurred"
def _is_valid_number(self, val):
# Ensure number is an int or float, not equal to special case -999.
return val is not None and val != -999 and (isinstance(val, int) or isinstance(val, float))
class NumericValueExists(Rule):
# Test succeeds if value is numeric and not -999
field_name = None
field_name_verbose = None
def apply_rule(self, dataset, output):
val = output.get(self.field_name)
if self._is_valid_number(val):
return self.return_pass()
else:
return self.failure_bin, self.get_failure_message()
def get_failure_message(self):
name = getattr(self, "field_name_verbose")
if name is None:
name = self.field_name
return "{} does not exist".format(name)
class BmdExists(NumericValueExists):
default_rule_name = "BMD exists"
field_name = "BMD"
class BmdlExists(NumericValueExists):
default_rule_name = "BMDL exists"
field_name = "BMDL"
class BmduExists(NumericValueExists):
default_rule_name = "BMDU exists"
field_name = "BMDU"
class AicExists(NumericValueExists):
default_rule_name = "AIC exists"
field_name = "AIC"
class RoiExists(NumericValueExists):
default_rule_name = "Residual of interest exists"
field_name = "residual_of_interest"
field_name_verbose = "Residual of Interest"
class ShouldBeGreaterThan(Rule):
# Test fails if value is less-than threshold.
field_name = ""
field_name_verbose = ""
def apply_rule(self, dataset, output):
val = output.get(self.field_name)
threshold = self.threshold
if not self._is_valid_number(val) or val >= threshold:
return self.return_pass()
else:
return self.failure_bin, self.get_failure_message(val, threshold)
def get_failure_message(self, val, threshold):
name = self.field_name_verbose
return "{} is less than threshold ({:.3} < {})".format(name, float(val), threshold)
class GlobalFit(ShouldBeGreaterThan):
default_rule_name = "GGOF"
field_name = "p_value4"
field_name_verbose = "Goodness of fit p-value"
class ShouldBeLessThan(Rule, abc.ABC):
# Test fails if value is greater-than threshold.
msg = "" # w/ arguments for value and threshold
@abc.abstractmethod
def get_value(self, dataset, output):
...
def apply_rule(self, dataset, output):
val = self.get_value(dataset, output)
threshold = self.threshold
if not self._is_valid_number(val) or val <= threshold:
return self.return_pass()
else:
return self.failure_bin, self.get_failure_message(val, threshold)
def get_failure_message(self, val, threshold):
name = self.field_name_verbose
return "{} is greater than threshold ({:.3} > {})".format(name, float(val), threshold)
class BmdBmdlRatio(ShouldBeLessThan):
default_rule_name = "BMD to BMDL ratio"
field_name_verbose = "BMD/BMDL ratio"
def get_value(self, dataset, output):
bmd = output.get("BMD")
bmdl = output.get("BMDL")
if self._is_valid_number(bmd) and self._is_valid_number(bmdl) and bmdl != 0:
return bmd / bmdl
class RoiFit(ShouldBeLessThan):
default_rule_name = "Residual of interest"
field_name_verbose = "Residual of interest"
def get_value(self, dataset, output):
return output.get("residual_of_interest")
class HighBmd(ShouldBeLessThan):
default_rule_name = "High BMD"
field_name_verbose = "BMD/high dose ratio"
def get_value(self, dataset, output):
max_dose = max(dataset.doses)
bmd = output.get("BMD")
if self._is_valid_number(max_dose) and self._is_valid_number(bmd) and bmd != 0:
return bmd / float(max_dose)
class HighBmdl(ShouldBeLessThan):
default_rule_name = "High BMDL"
field_name_verbose = "BMDL/high dose ratio"
def get_value(self, dataset, output):
max_dose = max(dataset.doses)
bmdl = output.get("BMDL")
if self._is_valid_number(max_dose) and self._is_valid_number(bmdl) and max_dose > 0:
return bmdl / float(max_dose)
class LowBmd(ShouldBeLessThan):
default_rule_name = "Low BMD"
field_name_verbose = "minimum dose/BMD ratio"
def get_value(self, dataset, output):
min_dose = min([d for d in dataset.doses if d > 0])
bmd = output.get("BMD")
if self._is_valid_number(min_dose) and self._is_valid_number(bmd) and bmd > 0:
return min_dose / float(bmd)
class LowBmdl(ShouldBeLessThan):
default_rule_name = "Low BMDL"
field_name_verbose = "minimum dose/BMDL ratio"
def get_value(self, dataset, output):
min_dose = min([d for d in dataset.doses if d > 0])
bmdl = output.get("BMDL")
if self._is_valid_number(min_dose) and self._is_valid_number(bmdl) and bmdl > 0:
return min_dose / float(bmdl)
class ControlResidual(ShouldBeLessThan):
default_rule_name = "Control residual"
field_name_verbose = "Residual at lowest dose"
def get_value(self, dataset, output):
if output.get("fit_residuals") and len(output["fit_residuals"]) > 0:
try:
return abs(output["fit_residuals"][0])
except TypeError:
return float("nan")
class ControlStdevResiduals(ShouldBeLessThan):
default_rule_name = "Control stdev"
field_name_verbose = "Ratio of modeled to actual stdev. at control"
def get_value(self, dataset, output):
if (
output.get("fit_est_stdev")
and output.get("fit_stdev")
and len(output["fit_est_stdev"]) > 0
and len(output["fit_stdev"]) > 0
):
try:
modeled = abs(output["fit_est_stdev"][0])
actual = abs(output["fit_stdev"][0])
except TypeError:
return float("nan")
if (
self._is_valid_number(modeled)
and self._is_valid_number(actual)
and modeled > 0
and actual > 0
):
return abs(modeled / actual)
class CorrectVarianceModel(Rule):
# Check variance model (continuous datasets-only)
default_rule_name = "Variance type"
def apply_rule(self, dataset, output):
if "parameters" not in output:
return self.return_pass()
# 0 = non-homogeneous modeled variance => Var(i) = alpha*mean(i)^rho
# 1 = constant variance => Var(i) = alpha*mean(i)
# if rho is a parameter, then variance model 0 is applied
rho = output["parameters"].get("rho")
constant_variance = 0 if rho else 1
p_value2 = output.get("p_value2")
if p_value2 == "<0.0001":
p_value2 = 0.0001
msg = None
if self._is_valid_number(p_value2):
if constant_variance == 1 and p_value2 < 0.1:
msg = "Incorrect variance model (p-value 2 = {}), constant variance selected".format(
p_value2
)
elif constant_variance == 0 and p_value2 > 0.1:
msg = "Incorrect variance model (p-value 2 = {}), modeled variance selected".format(
p_value2
)
else:
msg = "Correct variance model cannot be determined (p-value 2 = {})".format(p_value2)
if msg:
return self.failure_bin, msg
else:
return self.return_pass()
class VarianceModelFit(Rule):
default_rule_name = "Variance fit"
def apply_rule(self, dataset, output):
if "parameters" not in output:
return self.return_pass()
# 0 = non-homogeneous modeled variance => Var(i) = alpha*mean(i)^rho
# 1 = constant variance => Var(i) = alpha*mean(i)
# if rho is a parameter, then variance model 0 is applied
rho = output["parameters"].get("rho")
constant_variance = 0 if rho else 1
p_value2 = output.get("p_value2")
if p_value2 == "<0.0001":
p_value2 = 0.0001
p_value3 = output.get("p_value3")
if p_value3 == "<0.0001":
p_value3 = 0.0001
msg = None
if self._is_valid_number(p_value2) and constant_variance == 1 and p_value2 < 0.1:
msg = "Variance model poorly fits dataset (p-value 2 = {})".format(p_value2)
if self._is_valid_number(p_value3) and constant_variance == 0 and p_value3 < 0.1:
msg = "Variance model poorly fits dataset (p-value 3 = {})".format(p_value3)
if msg:
return self.failure_bin, msg
else:
return self.return_pass()
class NoDegreesOfFreedom(Rule):
"""
Check to ensure at least one degree of freedom exist to prevent recommendation of an
overfit model.
"""
default_rule_name = "Degrees of freedom"
def apply_rule(self, dataset, output):
df = output.get("df", 1)
if df == 0:
return self.failure_bin, "Zero degrees of freedom; saturated model"
return self.return_pass()
class Warnings(Rule):
# Test fails if any warnings exist.
default_rule_name = "Warnings"
def get_failure_message(self, warnings):
return "Warning(s): {}".format("; ".join(warnings))
def apply_rule(self, dataset, output):
warnings = output.get("warnings", [])
if len(warnings) > 0:
return self.failure_bin, self.get_failure_message(warnings)
else:
return self.return_pass()
| 31.760116 | 101 | 0.628629 | 10,877 | 0.989448 | 0 | 0 | 388 | 0.035295 | 0 | 0 | 2,341 | 0.212954 |
d28b5d6c386f989e7b581b7ea7ba92a93a7470b3 | 1,959 | py | Python | nets/static/conv_rnn_convT.py | MaximilienLC/nevo | c701a1202bc18d89a622472918733bf78ba5e304 | ["Apache-2.0"] | stars: null | issues: null | forks: 1 (2022-03-31T20:44:09.000Z to 2022-03-31T20:44:09.000Z)
# Copyright 2022 Maximilien Le Clei.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from nets.static.base import StaticNetBase
class Net(StaticNetBase):
def __init__(self, transpose):
super().__init__()
self.transpose = transpose
self.conv1 = nn.Conv2d( 1, 16, 4, 2, 1)
self.conv2 = nn.Conv2d(16, 32, 4, 2, 1)
self.conv3 = nn.Conv2d(32, 64, 4)
self.rnn1 = nn.RNN(64, 64)
if not self.transpose:
self.fc1 = nn.Linear(64, 1)
else:
self.convT1 = nn.ConvTranspose2d(64, 32, 4)
self.convT2 = nn.ConvTranspose2d(32, 16, 4, 2, 1)
self.convT3 = nn.ConvTranspose2d(16, 1, 4, 2, 1)
self.h = torch.zeros(1, 1, 64)
def reset(self):
self.h = torch.zeros(1, 1, 64).to(self.device)
def pre_setup_to_run(self):
self.h.to(self.device)
def pre_setup_to_save(self):
self.h.to('cpu')
def forward(self, x):
x = torch.relu(self.conv1(x))
x = torch.relu(self.conv2(x))
x = torch.relu(self.conv3(x))
x, self.h = self.rnn1(x[None, :, :, 0, 0], self.h)
if not self.transpose:
x = torch.relu(self.fc1(x[0, :, :]))
else:
x = torch.relu(self.convT1(x[0, :, :, None, None]))
x = torch.relu(self.convT2(x))
x = torch.relu(self.convT3(x))
        return x
| 26.472973 | 74 | 0.598264 | 1,294 | 0.660541 | 0 | 0 | 0 | 0 | 0 | 0 | 575 | 0.293517 |
d28b646d833333371908e74411b14fa7d1f681ca | 3,306 | py | Python | ors2bryton.py | andbue/ors2bryton | 7a843cbf2e4d1fc4ca85497cb23919431d8d3843 | ["Unlicense"] | stars: null | issues: 1 (2021-02-02T13:11:23.000Z to 2021-09-10T16:38:16.000Z) | forks: null
from sys import argv
from os.path import splitext
from lxml import etree
from struct import pack
def main():
print(argv)
gpx = argv[1]
"""
bryton:
1: go ahead
2: right
3: left
4: slight right
5: slight left
6: close right
7: close left
8: exit right
9: exit left
10: continue straight
11: uturn right
12: uturn left
13++: go ahead
openrouteservice:
(https://github.com/GIScience/openrouteservice/blob/master/openrouteservice/src/main/java/org/heigit/ors/routing/instructions/InstructionType.java)
TURN_LEFT, /*0*/
TURN_RIGHT, /*1*/
TURN_SHARP_LEFT, /*2*/
TURN_SHARP_RIGHT, /*3*/
TURN_SLIGHT_LEFT, /*4*/
TURN_SLIGHT_RIGHT, /*5*/
CONTINUE, /*6*/
ENTER_ROUNDABOUT, /*7*/
EXIT_ROUNDABOUT, /*8*/
UTURN, /*9*/
FINISH, /*10*/
DEPART, /*11*/
KEEP_LEFT, /*12*/
KEEP_RIGHT, /*13*/
UNKNOWN /*14*/;
"""
orst2brt = {
0: 3,
1: 2,
2: 7,
3: 6,
4: 5,
5: 4,
6: 1,
7: 10,
8: 8,
9: 12,
10: 1,
11: 1,
12: 9,
13: 8,
14: 1
}
fname = splitext(gpx)[0]
r = etree.parse(gpx).getroot()
ns = r.nsmap[None]
rte = r.find(f'./{{{ns}}}rte')
rtepts = rte.findall(f'./{{{ns}}}rtept')
unit = r.find(f'./{{{ns}}}extensions/{{{ns}}}distance-units').text
uf = 10e2 if unit == "km" else 1
ext = rte.find(f'./{{{ns}}}extensions')
dist = int(float(ext.find(f'./{{{ns}}}distance').text) * uf)
bnds = ext.find(f'./{{{ns}}}bounds')
bnds = {k: int(float(v) * 10e5) for k, v in bnds.attrib.items()}
bnds = (bnds['maxLat'], bnds['minLat'], bnds['maxLon'], bnds['minLon'])
print(f'{fname}.smy: {len(rtepts)} waypoints, distance {dist} meters.')
with open(fname + '.smy', 'wb') as smy:
smy.write(pack('<HHIIIII36x', 1, len(rtepts), *bnds, dist))
with open(fname + '.tinfo', 'wb') as tinfo,\
open(fname + '.track', 'wb') as track:
step = None
for n, p in enumerate(rtepts):
lat = int(float(p.attrib.get('lat')) * 10e5)
lon = int(float(p.attrib.get('lon')) * 10e5)
track.write(pack('<II8x', lat, lon))
thisstep = int(p.find(f'./{{{ns}}}extensions/{{{ns}}}step').text)
if thisstep != step:
name = p.find(f'./{{{ns}}}name').text
name = name.encode() if name != None else "".encode()
dist = int(float(p.find(f'./{{{ns}}}extensions/{{{ns}}}distance').text) * uf)
dur = int(float(p.find(f'./{{{ns}}}extensions/{{{ns}}}duration').text))
t = int(p.find(f'./{{{ns}}}extensions/{{{ns}}}type').text)
d = orst2brt[t]
tinfo.write(pack('<HBxHxxHxx32s', n, d, dist, dur, name))
step = thisstep
print(f'{fname}.tinfo, {fname}.track: Finished writing.')
if __name__ == "__main__":
main()
| 31.485714 | 151 | 0.468845 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,624 | 0.491228 |
d28b98aeee69dc1cdd515a34f7751e391f42ef74 | 5,022 | py | Python | src/main/python/smart/smartplots3_run.py | cday97/beam | 7e1ab50eecaefafd04daab360f8b12bc7cab559b | ["BSD-3-Clause-LBNL"] | stars: 123 (2017-04-06T20:17:19.000Z to 2022-03-02T13:42:15.000Z) | issues: 2,676 (2017-04-26T20:27:27.000Z to 2022-03-31T16:39:53.000Z) | forks: 60 (2017-04-06T20:14:32.000Z to 2022-03-30T20:10:53.000Z)
import pandas as pd
import smartplots3_setup
def createSetup(name,expansion_factor,percapita_factor,plot_size,settings):
plt_setup_smart={
'name': name,
'expansion_factor':expansion_factor,
'percapita_factor':percapita_factor,
'scenarios_itr': [],
'scenarios_id':[],
'scenarios_year':[],
'plot_size': plot_size,
'bottom_labels': [],
'top_labels': [],
'plots_folder': "makeplots3"
}
plt_setup_smart['name']=name
plt_setup_smart['expansion_factor']=expansion_factor
plt_setup_smart['plot_size']=plot_size
plt_setup_smart['scenarios_year']=[]
plt_setup_smart['scenarios_id']=[]
plt_setup_smart['scenarios_itr']=[]
plt_setup_smart['top_labels']=[]
for (scenarios_year,scenarios_id,scenarios_itr,bottom_label,top_label) in settings:
plt_setup_smart['scenarios_year'].append(scenarios_year)
plt_setup_smart['scenarios_id'].append(scenarios_id)
plt_setup_smart['scenarios_itr'].append(scenarios_itr)
plt_setup_smart['top_labels'].append(top_label)
plt_setup_smart['bottom_labels'].append(bottom_label)
return plt_setup_smart
def createSettingRow(scenarios_year,scenarios_id,scenarios_itr,bottom_label,top_label):
return (scenarios_year,scenarios_id,scenarios_itr,bottom_label,top_label)
scenarios_lables = {
"Base_CL_CT": "Base0",
"Base_STL_STT_BAU": "Base2",
"Base_STL_STT_VTO": "Base3",
"Base_LTL_LTT_BAU": "Base5",
"Base_LTL_LTT_VTO": "Base6",
"A_STL_STT_BAU": "A2",
"A_STL_STT_VTO": "A3",
"B_LTL_LTT_BAU": "B5",
"B_LTL_LTT_VTO": "B6",
"C_LTL_LTT_BAU": "C5",
"C_LTL_LTT_VTO": "C6"
}
output_folder = "/home/ubuntu/git/jupyter/data/28thOct2019"
# Base_CL_CT
# A_STL_STT_BAU
settings=[]
settings.append(createSettingRow(2010,1,15,scenarios_lables["Base_CL_CT"], ""))
settings.append(createSettingRow(2025,6,15,scenarios_lables["A_STL_STT_BAU"], ""))
settings.append(createSettingRow(2025,7,15,scenarios_lables["A_STL_STT_VTO"], ""))
settings.append(createSettingRow(2040,8,15,scenarios_lables["B_LTL_LTT_BAU"], ""))
settings.append(createSettingRow(2040,9,15,scenarios_lables["B_LTL_LTT_VTO"], ""))
settings.append(createSettingRow(2040,10,15,scenarios_lables["C_LTL_LTT_BAU"], ""))
settings.append(createSettingRow(2040,11,15,scenarios_lables["C_LTL_LTT_VTO"], ""))
plt_setup_smart3 = createSetup('7scenarios', (7.75/0.315) * 27.0 / 21.3, 27.0/21.3, (8, 4.5), settings)
#smartplots3_setup.pltRealizedModeSplitByTrips(plt_setup_smart3, output_folder)
#smartplots3_setup.pltModeSplitInPMTPerCapita(plt_setup_smart3, output_folder)
#smartplots3_setup.pltAveragePersonSpeed_allModes(plt_setup_smart3, output_folder)
#smartplots3_setup.pltAveragePersonSpeed_car(plt_setup_smart3, output_folder)
#smartplots3_setup.pltModeSplitInVMT(plt_setup_smart3, output_folder)
#smartplots3_setup.pltRHEmptyPooled(plt_setup_smart3, output_folder)
#smartplots3_setup.pltRHWaitTime(plt_setup_smart3, output_folder)
#smartplots3_setup.pltLdvTechnologySplitInVMT(plt_setup_smart3, output_folder)
settings=[]
settings.append(createSettingRow(2010,1,15,scenarios_lables["Base_CL_CT"], ""))
settings.append(createSettingRow(2025,2,15,scenarios_lables["Base_STL_STT_BAU"], ""))
settings.append(createSettingRow(2025,3,15,scenarios_lables["Base_STL_STT_VTO"], ""))
settings.append(createSettingRow(2040,4,15,scenarios_lables["Base_LTL_LTT_BAU"], ""))
settings.append(createSettingRow(2040,5,15,scenarios_lables["Base_LTL_LTT_VTO"], ""))
settings.append(createSettingRow(2025,6,15,scenarios_lables["A_STL_STT_BAU"], ""))
settings.append(createSettingRow(2025,7,15,scenarios_lables["A_STL_STT_VTO"], ""))
settings.append(createSettingRow(2040,8,15,scenarios_lables["B_LTL_LTT_BAU"], ""))
settings.append(createSettingRow(2040,9,15,scenarios_lables["B_LTL_LTT_VTO"], ""))
settings.append(createSettingRow(2040,10,15,scenarios_lables["C_LTL_LTT_BAU"], ""))
settings.append(createSettingRow(2040,11,15,scenarios_lables["C_LTL_LTT_VTO"], ""))
plt_setup_smart3_base = createSetup('11scenarios', (7.75/0.315) * 27.0 / 21.3, 27.0/21.3, (10, 4.5), settings)
smartplots3_setup.pltEnergyPerCapita(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltRealizedModeSplitByTrips(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltModeSplitInPMTPerCapita(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltAveragePersonSpeed_allModes(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltAveragePersonSpeed_car(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltModeSplitInVMT(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltRHEmptyPooled(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltRHWaitTime(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltLdvTechnologySplitInVMT(plt_setup_smart3_base, output_folder)
#smartplots3_setup.pltMEP(plt_setup_smart3, output_folder, [15071,21151,22872,29014,27541,36325,45267])
smartplots3_setup.tableSummary(plt_setup_smart3_base, output_folder)
| 50.727273 | 110 | 0.788331 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,654 | 0.329351 |
d28c4ad642d7e25e12003d4150c60dd4429d8299 | 50 | py | Python | genrl/deep/agents/sac/__init__.py | ajaysub110/JigglypuffRL | 083fd26d05b7eac018e6db7d32c4be4587461766 | ["MIT"] | stars: null | issues: null | forks: null
from genrl.deep.agents.sac.sac import SAC # noqa
| 25 | 49 | 0.76 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.12 |
d28c64bd9262b8b74070c47f2ceb3b8061a39ebe | 238 | py | Python | contrib/libs/cxxsupp/libsan/generate_symbolizer.py | HeyLey/catboost | f472aed90604ebe727537d9d4a37147985e10ec2 | ["Apache-2.0"] | stars: 6,989 (2017-07-18T06:23:18.000Z to 2022-03-31T15:58:36.000Z) | issues: 1,978 (2017-07-18T09:17:58.000Z to 2022-03-31T14:28:43.000Z) | forks: 1,228 (2017-07-18T09:03:13.000Z to 2022-03-29T05:57:40.000Z)
import os
import sys
def main():
print 'const char* ya_get_symbolizer_gen() {'
print ' return "{}";'.format(os.path.join(os.path.dirname(sys.argv[1]), 'llvm-symbolizer'))
print '}'
if __name__ == '__main__':
main()
| 18.307692 | 98 | 0.621849 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 87 | 0.365546 |
d28c678a957ea394e636e4d4799124a81070a2a0 | 775 | py | Python | scripts/scheduler/scheduler.py | OCHA-DAP/hdx-scraper-unosat-flood-portal | 80b0bcd404993e4bd1dae442f794c9f86b6d5328 | ["MIT"] | stars: 1 (2016-07-22T13:32:54.000Z to 2016-07-22T13:32:54.000Z) | issues: 21 (2015-07-08T21:30:32.000Z to 2015-08-27T17:52:24.000Z) | forks: null (forks repo name: OCHA-DAP/hdxscraper-unosat-flood-portal)
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import time
import schedule
dir = os.path.split(os.path.split(os.path.realpath(__file__))[0])[0]
sys.path.append(dir)
from utilities.prompt_format import item
from unosat_flood_portal_collect import collect as Collect
def Wrapper(patch=False):
'''Wrapper for main program.'''
#
# Collect data.
#
Collect.Main(patch=True)
#
# Setting-up schedule.
#
schedule.every(1).day.do(Wrapper)
def Main(verbose=True):
'''Wrapper to run all the scheduled tasks.'''
if verbose:
print '%s Running scheduler.' % item('prompt_bullet')
try:
while True:
schedule.run_pending()
time.sleep(1)
except Exception as e:
print e
return False
if __name__ == '__main__':
Main()
| 16.145833 | 68 | 0.68129 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 205 | 0.264516 |
d28c6e3b8a94c187af7ae1ba6acb241b56167d9b | 1,916 | py | Python | grAdapt/sampling/initializer/Vertices.py | mkduong-ai/grAdapt | 94c2659b0f6ff9a2984a9dc58e3c83213313bf90 | ["Apache-2.0"] | stars: 25 (2020-11-13T05:57:01.000Z to 2021-06-18T11:16:03.000Z) | issues: null | forks: 1 (2022-03-31T20:44:09.000Z to 2022-03-31T20:44:09.000Z)
# python
# import warnings
# Third party imports
import numpy as np
# grAdapt
from .base import Initial
from grAdapt.utils.sampling import sample_corner_bounds
class Vertices(Initial):
"""
Samples vertices if n_evals >= 2 ** len(bounds).
Else low discrepancy sequences are sampled.
"""
def __init__(self, sampling_method):
"""
Parameters
----------
sampling_method : grAdapt.sampling.equidistributed Object
Sample low discrepancy sequences when initial point method is not feasible
"""
super().__init__(sampling_method)
def sample(self, bounds, n_evals):
"""Returns a numpy array of sampled points.
Does not include corner points of the hypercube/search space.
Parameters
----------
bounds : list of tuples or list of grAdapt.space.datatype.base
Each tuple in the list defines the bounds for the corresponding variable
Example: [(1, 2), (2, 3), (-1, 4)...]
n_evals : int
number of initial points sampled by method
Returns
-------
(self.n_evals, len(self.bounds)) numpy array
"""
super().sample(bounds, n_evals)
if 2 ** len(self.bounds) > self.n_evals:
return self.sampling_method.sample(bounds=bounds, n=n_evals)
else:
corner_points = sample_corner_bounds(self.bounds)
num_corner_points = corner_points.shape[0]
if self.n_evals > 2 ** len(self.bounds):
random_points = self.sampling_method.sample(bounds=self.bounds,
n=(self.n_evals - num_corner_points),
x_history=corner_points)
return np.vstack((corner_points, random_points))
else:
return corner_points
| 34.214286 | 97 | 0.581942 | 1,751 | 0.913883 | 0 | 0 | 0 | 0 | 0 | 0 | 906 | 0.47286 |
d28e9a15ec55f39d2fbe7a6ba1ac7924e04991a1 | 6,456 | py | Python | thirdweb/modules/base.py | princetonwong/python-sdk | f35181d97620e29d055498fca75f3702f3bb2449 | ["Apache-2.0"] | stars: 1 (2022-02-18T16:59:12.000Z to 2022-02-18T16:59:12.000Z) | issues: null | forks: null
"""Base Module."""
from abc import ABC, abstractmethod
from typing import Callable, Dict, List, Optional, Union, cast
from eth_account.account import LocalAccount
from thirdweb_web3 import Web3
from thirdweb_web3.types import TxReceipt
from zero_ex.contract_wrappers import TxParams
import json
from ..abi.coin import Coin
from ..abi.erc165 import ERC165
from ..abi.market import Market
from ..abi.nft import SignatureMint721 as NFT
from ..abi.nft_collection import NFTCollection as NFTBundle
from ..abi.pack import Pack
from ..constants.erc_interfaces import InterfaceIdErc721, InterfaceIdErc1155
from ..errors import NoSignerException
import io
from ..options import SdkOptions
from ..storage import IpfsStorage
from ..types.role import Role
ModuleTypes = Union[NFT, Market, Pack, NFTBundle, Coin]
class BaseModule(ABC):
"""
Base module for all modules.
"""
get_client: Optional[Callable[[], Web3]]
""" Returns the client object. """
get_storage: Optional[Callable[[], IpfsStorage]]
""" Returns the storage object. """
get_signer_address: Optional[Callable[[], str]]
""" Returns the signer address. """
get_private_key: Optional[Callable[[], str]]
""" Returns the private key. """
get_transact_opts: Optional[Callable[[], TxParams]]
""" Returns the transaction options. """
get_account: Optional[Callable[[], LocalAccount]]
""" Returns the account object. """
get_options: Optional[Callable[[], SdkOptions]]
""" Returns the options object. """
def __init__(self):
self.get_client = None
self.get_storage = None
self.get_signer_address = None
self.get_private_key = None
self.get_transact_opts = None
self.get_account = None
self.get_options = None
def execute_tx(self, tx) -> TxReceipt:
"""
Execute a transaction and return the receipt.
"""
client = self.get_client()
nonce = client.eth.get_transaction_count(self.get_signer_address())
tx['nonce'] = nonce
del tx['from']
signed_tx = self.__sign_tx(tx)
tx_hash = client.eth.send_raw_transaction(signed_tx.rawTransaction)
return cast(
TxReceipt,
client.eth.wait_for_transaction_receipt(
tx_hash, timeout=self.get_options().tx_timeout_in_seconds)
)
def __sign_tx(self, tx):
"""
Sign a transaction.
"""
signed_tx = self.get_account().sign_transaction(tx)
return signed_tx
def grant_role(self, role: Role, address: str):
"""
Grants the given role to the given address
"""
role_hash = role.get_hash()
tx = self.__abi_module.grant_role.build_transaction(
role_hash, address,
self.get_transact_opts()
)
self.execute_tx(tx)
@abstractmethod
def get_abi_module(self) -> ModuleTypes:
pass
def grant_role(self, role: Role, address: str):
"""
Grants the given role to the given address
"""
role_hash = role.get_hash()
tx = self.get_abi_module().grant_role.build_transaction(
role_hash, address,
self.get_transact_opts()
)
self.execute_tx(tx)
def upload_metadata(self, data: Union[Dict, str]) -> str:
"""
Uploads the metadata to IPFS and returns the uri.
"""
storage = self.get_storage()
if isinstance(data, str) and data.startswith("ipfs://"):
return data
if 'image_uri' in data and data["image"] == "":
data["image"] = data["image_uri"]
if 'image' in data:
if isinstance(data["image"], bytes) or isinstance(data["image"], bytearray):
data["image"] = storage.upload(
data["image"], self.address, self.get_signer_address())
return storage.upload(json.dumps(data), self.address, self.get_signer_address())
def revoke_role(self, role: Role, address: str):
"""
Revokes the given role from the given address
"""
role_hash = role.get_hash()
try:
signer_address = self.get_signer_address()
if signer_address.lower() == address.lower():
self.execute_tx(self.get_abi_module().renounce_role.build_transaction(
role_hash, address, self.get_transact_opts()
))
return
except NoSignerException:
pass
self.execute_tx(self.get_abi_module().revoke_role.build_transaction(
role_hash, address, self.get_transact_opts()
))
def get_role_member_count(self, role: Role):
"""
Returns the number of members in the given role
"""
return self.get_abi_module().get_role_member_count.call(role.get_hash())
def get_role_members(self, role: Role) -> List[str]:
"""
Returns the members of the given role
"""
return [self.get_role_member(role, x) for x in range(self.get_role_member_count(role))]
def get_role_member(self, role: Role, index: int) -> str:
"""
Returns the member at the given index of the given role
"""
return self.get_abi_module().get_role_member.call(role.get_hash(), index)
def get_all_role_members(self) -> Dict[str, List[str]]:
"""
Returns all the members of all the roles
"""
return {
Role.admin.name: self.get_role_members(Role.admin),
Role.minter.name: self.get_role_members(Role.minter),
Role.transfer.name: self.get_role_members(Role.transfer),
Role.pauser.name: self.get_role_members(Role.pauser)
}
def is_erc721(self, address: str) -> bool:
erc165 = ERC165(self.get_client(), address)
return erc165.supports_interface.call(InterfaceIdErc721)
def is_erc1155(self, address: str) -> bool:
erc165 = ERC165(self.get_client(), address)
return erc165.supports_interface.call(InterfaceIdErc1155)
def __get_token_uri(self, token_id: int) -> ModuleTypes:
module = self.get_abi_module()
uri = ""
try:
uri = module.token_uri(token_id)
except:
pass
if uri != "":
return uri
try:
uri = module.uri(token_id)
except:
pass
return uri
| 33.278351 | 95 | 0.623296 | 5,650 | 0.875155 | 0 | 0 | 73 | 0.011307 | 0 | 0 | 1,068 | 0.165428 |
d291c41a3b15e20796ea46ca106a1298d83274c2 | 17,356 | py | Python | data_util.py | shiyu-wangbyte/leadopt | ef289ab349a19ba1f8aa581638ef7e8e3810cb41 | ["Apache-2.0"] | stars: null | issues: null | forks: null
# Copyright 2021 Jacob Durrant
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Contains utility code for reading packed data files.
"""
import os
import torch
from torch.utils.data import DataLoader, Dataset
import numpy as np
import h5py
import tqdm
# Atom typing
#
# Atom typing is the process of figuring out which layer each atom should be
# written to. For ease of testing, the packed data file contains a lot of
# potentially useful atomic information which can be distilled during the
# data loading process.
#
# Atom typing is implemented by map functions of the type:
# (atom descriptor) -> (layer index)
#
# If the layer index is -1, the atom is ignored.
class AtomTyper(object):
def __init__(self, fn, num_layers):
"""Initialize an atom typer.
Args:
fn: a function of type:
(atomic_num, aro, hdon, hacc, pcharge) -> (mask)
num_layers: number of output layers (<=32)
"""
self._fn = fn
self._num_layers = num_layers
def size(self):
return self._num_layers
def apply(self, *args):
return self._fn(*args)
class CondAtomTyper(AtomTyper):
def __init__(self, cond_func):
assert len(cond_func) <= 16
def _fn(*args):
v = 0
for k in range(len(cond_func)):
if cond_func[k](*args):
v |= 1 << k
return v
super(CondAtomTyper, self).__init__(_fn, len(cond_func))
REC_TYPER = {
# 1 channel, no hydrogen
'single': CondAtomTyper([
lambda num, aro, hdon, hacc, pcharge: num not in [0,1]
]),
# 1 channel, including hydrogen
'single_h': CondAtomTyper([
lambda num, aro, hdon, hacc, pcharge: num != 0
]),
# (C,N,O,S,*)
'simple': CondAtomTyper([
lambda num, aro, hdon, hacc, pcharge: num == 6,
lambda num, aro, hdon, hacc, pcharge: num == 7,
lambda num, aro, hdon, hacc, pcharge: num == 8,
lambda num, aro, hdon, hacc, pcharge: num == 16,
lambda num, aro, hdon, hacc, pcharge: num not in [0,1,6,7,8,16],
]),
# (H,C,N,O,S,*)
'simple_h': CondAtomTyper([
lambda num, aro, hdon, hacc, pcharge: num == 1,
lambda num, aro, hdon, hacc, pcharge: num == 6,
lambda num, aro, hdon, hacc, pcharge: num == 7,
lambda num, aro, hdon, hacc, pcharge: num == 8,
lambda num, aro, hdon, hacc, pcharge: num == 16,
lambda num, aro, hdon, hacc, pcharge: num not in [0,1,6,7,8,16],
]),
# (aro, hdon, hacc, positive, negative, occ)
'meta': CondAtomTyper([
lambda num, aro, hdon, hacc, pcharge: bool(aro), # aromatic
lambda num, aro, hdon, hacc, pcharge: bool(hdon), # hydrogen donor
lambda num, aro, hdon, hacc, pcharge: bool(hacc), # hydrogen acceptor
lambda num, aro, hdon, hacc, pcharge: pcharge >= 128, # partial positive
lambda num, aro, hdon, hacc, pcharge: pcharge < 128, # partial negative
lambda num, aro, hdon, hacc, pcharge: num != 0, # occupancy
]),
# (aro, hdon, hacc, positive, negative, occ)
'meta_mix': CondAtomTyper([
lambda num, aro, hdon, hacc, pcharge: bool(aro), # aromatic
lambda num, aro, hdon, hacc, pcharge: bool(hdon), # hydrogen donor
lambda num, aro, hdon, hacc, pcharge: bool(hacc), # hydrogen acceptor
lambda num, aro, hdon, hacc, pcharge: pcharge >= 128, # partial positive
lambda num, aro, hdon, hacc, pcharge: pcharge < 128, # partial negative
lambda num, aro, hdon, hacc, pcharge: num != 0, # occupancy
lambda num, aro, hdon, hacc, pcharge: num == 1, # hydrogen
lambda num, aro, hdon, hacc, pcharge: num == 6, # carbon
lambda num, aro, hdon, hacc, pcharge: num == 7, # nitrogen
lambda num, aro, hdon, hacc, pcharge: num == 8, # oxygen
lambda num, aro, hdon, hacc, pcharge: num == 16, # sulfur
])
}
LIG_TYPER = {
# 1 channel, no hydrogen
'single': CondAtomTyper([
lambda num: num not in [0,1]
]),
# 1 channel, including hydrogen
'single_h': CondAtomTyper([
lambda num: num != 0
]),
'simple': CondAtomTyper([
lambda num: num == 6, # carbon
lambda num: num == 7, # nitrogen
lambda num: num == 8, # oxygen
lambda num: num not in [0,1,6,7,8] # extra
]),
'simple_h': CondAtomTyper([
lambda num: num == 1, # hydrogen
lambda num: num == 6, # carbon
lambda num: num == 7, # nitrogen
lambda num: num == 8, # oxygen
lambda num: num not in [0,1,6,7,8] # extra
])
}
class FragmentDataset(Dataset):
"""Utility class to work with the packed fragments.h5 format."""
def __init__(self, fragment_file, rec_typer=REC_TYPER['simple'],
lig_typer=LIG_TYPER['simple'], filter_rec=None, filter_smi=None,
fdist_min=None, fdist_max=None, fmass_min=None, fmass_max=None,
verbose=False, lazy_loading=True):
"""Initializes the fragment dataset.
Args:
fragment_file: path to fragments.h5
rec_typer: AtomTyper for receptor
lig_typer: AtomTyper for ligand
filter_rec: list of receptor ids to use (or None to use all)
skip_remap: if True, don't prepare atom type information
(filtering options):
fdist_min: minimum fragment distance
fdist_max: maximum fragment distance
fmass_min: minimum fragment mass (Da)
fmass_max: maximum fragment mass (Da)
"""
self._rec_typer = rec_typer
self._lig_typer = lig_typer
self.verbose = verbose
self._lazy_loading = lazy_loading
self.rec = self._load_rec(fragment_file, rec_typer)
self.frag = self._load_fragments(fragment_file, lig_typer)
self.valid_idx = self._get_valid_examples(
filter_rec, filter_smi, fdist_min, fdist_max, fmass_min, fmass_max, verbose)
def _load_rec(self, fragment_file, rec_typer):
"""Loads receptor information."""
f = h5py.File(fragment_file, 'r')
rec_coords = f['rec_coords'][()]
rec_types = f['rec_types'][()]
rec_lookup = f['rec_lookup'][()]
r = range(len(rec_types))
if self.verbose:
r = tqdm.tqdm(r, desc='Remap receptor atoms')
rec_remapped = np.zeros(len(rec_types), dtype=np.uint16)
if not self._lazy_loading:
for i in r:
rec_remapped[i] = rec_typer.apply(*rec_types[i])
rec_loaded = np.zeros(len(rec_lookup)).astype(np.bool)
# create rec mapping
rec_mapping = {}
for i in range(len(rec_lookup)):
rec_mapping[rec_lookup[i][0].decode('ascii')] = i
rec = {
'rec_coords': rec_coords,
'rec_types': rec_types,
'rec_remapped': rec_remapped,
'rec_lookup': rec_lookup,
'rec_mapping': rec_mapping,
'rec_loaded': rec_loaded
}
f.close()
return rec
def _load_fragments(self, fragment_file, lig_typer):
"""Loads fragment information."""
f = h5py.File(fragment_file, 'r')
frag_data = f['frag_data'][()]
frag_lookup = f['frag_lookup'][()]
frag_smiles = f['frag_smiles'][()]
frag_mass = f['frag_mass'][()]
frag_dist = f['frag_dist'][()]
frag_lig_smi = None
frag_lig_idx = None
if 'frag_lig_smi' in f.keys():
frag_lig_smi = f['frag_lig_smi'][()]
frag_lig_idx = f['frag_lig_idx'][()]
# unpack frag data into separate structures
frag_coords = frag_data[:,:3].astype(np.float32)
frag_types = frag_data[:,3].astype(np.uint8)
frag_remapped = np.zeros(len(frag_types), dtype=np.uint16)
if not self._lazy_loading:
for i in range(len(frag_types)):
frag_remapped[i] = lig_typer.apply(frag_types[i])
frag_loaded = np.zeros(len(frag_lookup)).astype(np.bool)
# find and save connection point
r = range(len(frag_lookup))
if self.verbose:
r = tqdm.tqdm(r, desc='Frag connection point')
frag_conn = np.zeros((len(frag_lookup), 3))
for i in r:
_,f_start,f_end,_,_ = frag_lookup[i]
fdat = frag_data[f_start:f_end]
found = False
for j in range(len(fdat)):
if fdat[j][3] == 0:
frag_conn[i,:] = tuple(fdat[j])[:3]
found = True
break
assert found, "missing fragment connection point at %d" % i
frag = {
'frag_coords': frag_coords, # d_idx -> (x,y,z)
'frag_types': frag_types, # d_idx -> (type)
'frag_remapped': frag_remapped, # d_idx -> (layer)
'frag_lookup': frag_lookup, # f_idx -> (rec_id, fstart, fend, pstart, pend)
'frag_conn': frag_conn, # f_idx -> (x,y,z)
'frag_smiles': frag_smiles, # f_idx -> smiles
'frag_mass': frag_mass, # f_idx -> mass
'frag_dist': frag_dist, # f_idx -> dist
'frag_lig_smi': frag_lig_smi,
'frag_lig_idx': frag_lig_idx,
'frag_loaded': frag_loaded
}
f.close()
return frag
def _get_valid_examples(self, filter_rec, filter_smi, fdist_min, fdist_max, fmass_min,
fmass_max, verbose):
"""Returns an array of valid fragment indexes.
"Valid" in this context means the fragment belongs to a receptor in
filter_rec and the fragment abides by the optional mass/distance
constraints.
"""
# keep track of valid examples
valid_mask = np.ones(self.frag['frag_lookup'].shape[0]).astype(np.bool)
num_frags = self.frag['frag_lookup'].shape[0]
# filter by receptor id
if filter_rec is not None:
valid_rec = np.zeros(num_frags, dtype=np.bool)
r = range(num_frags)
if verbose:
r = tqdm.tqdm(r, desc='filter rec')
for i in r:
rec = self.frag['frag_lookup'][i][0].decode('ascii')
if rec in filter_rec:
valid_rec[i] = 1
valid_mask *= valid_rec
# filter by ligand smiles string
if filter_smi is not None:
valid_lig = np.zeros(num_frags, dtype=np.bool)
r = range(num_frags)
if verbose:
r = tqdm.tqdm(r, desc='filter lig')
for i in r:
smi = self.frag['frag_lig_smi'][self.frag['frag_lig_idx'][i]]
smi = smi.decode('ascii')
if smi in filter_smi:
valid_lig[i] = 1
valid_mask *= valid_lig
# filter by fragment distance
if fdist_min is not None:
valid_mask[self.frag['frag_dist'] < fdist_min] = 0
if fdist_max is not None:
valid_mask[self.frag['frag_dist'] > fdist_max] = 0
# filter by fragment mass
if fmass_min is not None:
valid_mask[self.frag['frag_mass'] < fmass_min] = 0
if fmass_max is not None:
valid_mask[self.frag['frag_mass'] > fmass_max] = 0
# convert to a list of indexes
valid_idx = np.where(valid_mask)[0]
return valid_idx
def __len__(self):
"""Returns the number of valid fragment examples."""
return self.valid_idx.shape[0]
def __getitem__(self, idx):
"""Returns the Nth example.
Returns a dict with:
f_coords: fragment coordinates (Fx3)
f_types: fragment layers (Fx1)
p_coords: parent coordinates (Px3)
p_types: parent layers (Px1)
r_coords: receptor coordinates (Rx3)
r_types: receptor layers (Rx1)
conn: fragment connection point in the parent molecule (x,y,z)
smiles: fragment smiles string
"""
# convert to fragment index
frag_idx = self.valid_idx[idx]
return self.get_raw(frag_idx)
def get_raw(self, frag_idx):
# lookup fragment
rec_id, f_start, f_end, p_start, p_end = self.frag['frag_lookup'][frag_idx]
smiles = self.frag['frag_smiles'][frag_idx].decode('ascii')
conn = self.frag['frag_conn'][frag_idx]
# lookup receptor
rec_idx = self.rec['rec_mapping'][rec_id.decode('ascii')]
_, r_start, r_end = self.rec['rec_lookup'][rec_idx]
# fetch data
# f_coords = self.frag['frag_coords'][f_start:f_end]
# f_types = self.frag['frag_types'][f_start:f_end]
p_coords = self.frag['frag_coords'][p_start:p_end]
r_coords = self.rec['rec_coords'][r_start:r_end]
if self._lazy_loading and self.frag['frag_loaded'][frag_idx] == 0:
frag_types = self.frag['frag_types']
frag_remapped = self.frag['frag_remapped']
# load parent
for i in range(p_start, p_end):
frag_remapped[i] = self._lig_typer.apply(frag_types[i])
self.frag['frag_loaded'][frag_idx] = 1
if self._lazy_loading and self.rec['rec_loaded'][rec_idx] == 0:
rec_types = self.rec['rec_types']
rec_remapped = self.rec['rec_remapped']
# load receptor
for i in range(r_start, r_end):
rec_remapped[i] = self._rec_typer.apply(*rec_types[i])
self.rec['rec_loaded'][rec_idx] = 1
p_mask = self.frag['frag_remapped'][p_start:p_end]
r_mask = self.rec['rec_remapped'][r_start:r_end]
return {
# 'f_coords': f_coords,
# 'f_types': f_types,
'p_coords': p_coords,
'p_types': p_mask,
'r_coords': r_coords,
'r_types': r_mask,
'conn': conn,
'smiles': smiles
}
def get_valid_smiles(self):
"""Returns a list of all valid smiles fragments."""
valid_smiles = set()
for idx in self.valid_idx:
smiles = self.frag['frag_smiles'][idx].decode('ascii')
valid_smiles.add(smiles)
return list(valid_smiles)
def lig_layers(self):
return self._lig_typer.size()
def rec_layers(self):
return self._rec_typer.size()
class SharedFragmentDataset(object):
def __init__(self, dat, filter_rec=None, filter_smi=None, fdist_min=None,
fdist_max=None, fmass_min=None, fmass_max=None):
self._dat = dat
self.valid_idx = self._dat._get_valid_examples(
filter_rec, filter_smi, fdist_min, fdist_max, fmass_min, fmass_max, verbose=True)
def __len__(self):
return self.valid_idx.shape[0]
def __getitem__(self, idx):
frag_idx = self.valid_idx[idx]
return self._dat.get_raw(frag_idx)
def get_valid_smiles(self):
"""Returns a list of all valid smiles fragments."""
valid_smiles = set()
for idx in self.valid_idx:
smiles = self._dat.frag['frag_smiles'][idx].decode('ascii')
valid_smiles.add(smiles)
return list(valid_smiles)
def lig_layers(self):
return self._dat.lig_layers()
def rec_layers(self):
return self._dat.rec_layers()
class FingerprintDataset(Dataset):
def __init__(self, fingerprint_file):
"""Initializes a fingerprint dataset.
Args:
fingerprint_file: path to a fingerprint .h5 file
"""
self.fingerprints = self._load_fingerprints(fingerprint_file)
def _load_fingerprints(self, fingerprint_file):
"""Loads fingerprint information."""
f = h5py.File(fingerprint_file, 'r')
fingerprint_data = f['fingerprints'][()]
fingerprint_smiles = f['smiles'][()]
# create smiles->idx mapping
fingerprint_mapping = {}
for i in range(len(fingerprint_smiles)):
sm = fingerprint_smiles[i].decode('ascii')
fingerprint_mapping[sm] = i
fingerprints = {
'fingerprint_data': fingerprint_data,
'fingerprint_mapping': fingerprint_mapping,
'fingerprint_smiles': fingerprint_smiles,
}
f.close()
return fingerprints
def for_smiles(self, smiles):
"""Return a Tensor of fingerprints for a list of smiles.
Args:
smiles: size N list of smiles strings (as str not bytes)
"""
fp = np.zeros((len(smiles), self.fingerprints['fingerprint_data'].shape[1]))
for i in range(len(smiles)):
fp_idx = self.fingerprints['fingerprint_mapping'][smiles[i]]
fp[i] = self.fingerprints['fingerprint_data'][fp_idx]
return torch.Tensor(fp)
| 33.441233 | 93 | 0.589191 | 13,031 | 0.750807 | 0 | 0 | 0 | 0 | 0 | 0 | 5,499 | 0.316836 |
d291cc8632d543ebd26c04ae26559da840755d11 | 4,181 | py | Python | add_socket_response_event.py | Kur0den/kur0bot | d36722617bb4094bdf636779b20a799f9bd3b419 | ["MIT"] | stars: 1 (2021-09-09T11:17:17.000Z to 2021-09-09T11:17:17.000Z) | issues: 1 (2021-09-18T15:46:59.000Z to 2021-09-18T15:46:59.000Z) | forks: 1 (2021-09-09T02:34:17.000Z to 2021-09-09T02:34:17.000Z)
from discord.gateway import DiscordWebSocket, utils, _log, KeepAliveHandler, ReconnectWebSocket
async def received_message(self, msg, /):
if type(msg) is bytes:
self._buffer.extend(msg)
if len(msg) < 4 or msg[-4:] != b'\x00\x00\xff\xff':
return
msg = self._zlib.decompress(self._buffer)
msg = msg.decode('utf-8')
self._buffer = bytearray()
self.log_receive(msg)
msg = utils._from_json(msg)
_log.debug('For Shard ID %s: WebSocket Event: %s', self.shard_id, msg)
# add dispatch
self._dispatch('socket_response', msg)
event = msg.get('t')
if event:
self._dispatch('socket_event_type', event)
op = msg.get('op')
data = msg.get('d')
seq = msg.get('s')
if seq is not None:
self.sequence = seq
if self._keep_alive:
self._keep_alive.tick()
if op != self.DISPATCH:
if op == self.RECONNECT:
# "reconnect" can only be handled by the Client
# so we terminate our connection and raise an
# internal exception signalling to reconnect.
_log.debug('Received RECONNECT opcode.')
await self.close()
raise ReconnectWebSocket(self.shard_id)
if op == self.HEARTBEAT_ACK:
if self._keep_alive:
self._keep_alive.ack()
return
if op == self.HEARTBEAT:
if self._keep_alive:
beat = self._keep_alive.get_payload()
await self.send_as_json(beat)
return
if op == self.HELLO:
interval = data['heartbeat_interval'] / 1000.0
self._keep_alive = KeepAliveHandler(ws=self, interval=interval, shard_id=self.shard_id)
# send a heartbeat immediately
await self.send_as_json(self._keep_alive.get_payload())
self._keep_alive.start()
return
if op == self.INVALIDATE_SESSION:
if data is True:
await self.close()
raise ReconnectWebSocket(self.shard_id)
self.sequence = None
self.session_id = None
_log.info('Shard ID %s session has been invalidated.', self.shard_id)
await self.close(code=1000)
raise ReconnectWebSocket(self.shard_id, resume=False)
_log.warning('Unknown OP code %s.', op)
return
if event == 'READY':
self._trace = trace = data.get('_trace', [])
self.sequence = msg['s']
self.session_id = data['session_id']
# pass back shard ID to ready handler
data['__shard_id__'] = self.shard_id
_log.info('Shard ID %s has connected to Gateway: %s (Session ID: %s).',
self.shard_id, ', '.join(trace), self.session_id)
elif event == 'RESUMED':
self._trace = trace = data.get('_trace', [])
# pass back the shard ID to the resumed handler
data['__shard_id__'] = self.shard_id
_log.info('Shard ID %s has successfully RESUMED session %s under trace %s.',
self.shard_id, self.session_id, ', '.join(trace))
try:
func = self._discord_parsers[event]
except KeyError:
_log.debug('Unknown event %s.', event)
else:
func(data)
# remove the dispatched listeners
removed = []
for index, entry in enumerate(self._dispatch_listeners):
if entry.event != event:
continue
future = entry.future
if future.cancelled():
removed.append(index)
continue
try:
valid = entry.predicate(data)
except Exception as exc:
future.set_exception(exc)
removed.append(index)
else:
if valid:
ret = data if entry.result is None else entry.result(data)
future.set_result(ret)
removed.append(index)
for index in reversed(removed):
del self._dispatch_listeners[index]
DiscordWebSocket.received_message = received_message
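
# --- Usage sketch (not part of the original patch) ---
# With the patch applied, discord.py again dispatches a 'socket_response' event
# for every decoded gateway payload, so a client subclass can listen to it.
# The class below is illustrative only and is never instantiated here.
import discord


class RawGatewayLogger(discord.Client):
    async def on_socket_response(self, msg):
        # `msg` is the raw payload dict dispatched by received_message() above
        print('gateway event:', msg.get('t'), 'op:', msg.get('op'))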
| 32.664063 | 100 | 0.566611 | 0 | 0 | 0 | 0 | 0 | 0 | 4,022 | 0.961971 | 758 | 0.181296 |
d2936347651280722332cf187a2ad771feb61ab8 | 2,207 | py | Python | Image_detection_codes/Keras_training/test2.py | pasadyash/CitizenServiceApp | 01a0389d70624f04f6df25c1eb842b3bbce652da | [
"MIT"
]
| null | null | null | Image_detection_codes/Keras_training/test2.py | pasadyash/CitizenServiceApp | 01a0389d70624f04f6df25c1eb842b3bbce652da | [
"MIT"
]
| null | null | null | Image_detection_codes/Keras_training/test2.py | pasadyash/CitizenServiceApp | 01a0389d70624f04f6df25c1eb842b3bbce652da | [
"MIT"
]
| null | null | null | import numpy as np
np.random.seed(123) # for reproducibility
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from dataset_pothole import pothole
from keras.models import model_from_json
# 4. Load pre-shuffled MNIST data into train and test sets
(X_train, y_train), (X_test, y_test) = pothole.load_data()
print(X_train.shape)
print()
print (y_train.shape)
print()
# 5. Preprocess input data
X_train = X_train.reshape(X_train.shape[0], 200, 200, 1)
X_test = X_test.reshape(X_test.shape[0], 200, 200, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 3380
X_test /= 3380
# 6. Preprocess class labels
Y_train = np_utils.to_categorical(y_train, 4)
Y_test = np_utils.to_categorical(y_test, 4)
# 7. Define model architecture
nb_classes = 4
# number of epochs to train
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
nb_conv = 3
model = Sequential()
model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
border_mode='valid',
input_shape=(200, 200, 1)))
convout1 = Activation('relu')
model.add(convout1)
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
convout2 = Activation('relu')
model.add(convout2)
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])  # metrics needed so evaluate() returns [loss, accuracy]
# 9. Fit model on training data
model.fit(X_train, Y_train,
batch_size=32, nb_epoch=2, verbose=1)
# 10. Evaluate model on test data
score = model.evaluate(X_test, Y_test, verbose=0)
# serialize model to JSON
model_json = model.to_json()
with open("model.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("model.h5")
print("Saved model to disk")
print('Test loss: ', score[0])
print('Test accuracy: ', score[1])
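
# --- Reload sketch (not part of the original script) ---
# Shows how the model.json / model.h5 files written above can be restored for
# later inference; assumes they exist in the working directory.
with open("model.json", "r") as json_file:
    loaded_model = model_from_json(json_file.read())
loaded_model.load_weights("model.h5")
loaded_model.compile(loss='categorical_crossentropy', optimizer='adadelta',
                     metrics=['accuracy'])
print('Reloaded test accuracy: ', loaded_model.evaluate(X_test, Y_test, verbose=0)[1])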
| 27.5875 | 68 | 0.7372 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 573 | 0.259628 |
d2945eb56ca24287c1bd0834d603839aee1fedac | 2,094 | py | Python | platform/web/api/device/models.py | JMSHDev/regent.dev | e4cedf04dd241ad00012735b543ee3447a8da8a2 | [
"Apache-2.0"
]
| 1 | 2021-12-23T14:06:08.000Z | 2021-12-23T14:06:08.000Z | platform/web/api/device/models.py | JMSHDev/regent.dev | e4cedf04dd241ad00012735b543ee3447a8da8a2 | [
"Apache-2.0"
]
| null | null | null | platform/web/api/device/models.py | JMSHDev/regent.dev | e4cedf04dd241ad00012735b543ee3447a8da8a2 | [
"Apache-2.0"
]
| 1 | 2021-06-28T22:17:28.000Z | 2021-06-28T22:17:28.000Z | import hashlib
import random
import string
import logging
from django.db import models
LOG = logging.getLogger(__name__)
class Device(models.Model):
name = models.CharField(max_length=50, unique=True)
customer = models.CharField(max_length=50)
agent_status = models.CharField(max_length=10, default="offline")
program_status = models.CharField(max_length=10, default="down")
last_updated = models.DateTimeField(auto_now=True)
def delete_mqtt_credentials(self):
self.auth.all().delete()
self.acl.all().delete()
class MqttAuth(models.Model):
username = models.CharField(max_length=100, unique=True)
password = models.CharField(max_length=100)
salt = models.CharField(max_length=10)
activated = models.BooleanField(default=False)
device = models.ForeignKey(
Device, on_delete=models.CASCADE, related_name="auth", related_query_name="auth", null=True
)
def __str__(self):
return "activated" if self.activated else "not activated"
@classmethod
def create(cls, username, password, activated, device=None):
salt = "".join(random.choice(string.ascii_letters) for _ in range(10))
password = hashlib.sha256((password + salt).encode("utf-8")).hexdigest()
return MqttAuth(username=username, password=password, salt=salt, activated=activated, device=device)
class MqttAcl(models.Model):
allow = models.SmallIntegerField()
ipaddr = models.CharField(max_length=60, null=True)
username = models.CharField(max_length=100, null=True)
clientid = models.CharField(max_length=100, null=True)
access = models.SmallIntegerField()
topic = models.CharField(max_length=100)
device = models.ForeignKey(
Device, on_delete=models.CASCADE, related_name="acl", related_query_name="acl", null=True
)
class Telemetry(models.Model):
device = models.ForeignKey(
Device, on_delete=models.CASCADE, related_name="telemetry", related_query_name="telemetry"
)
created_on = models.DateTimeField(auto_now_add=True)
state = models.JSONField()
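
# --- Usage sketch (not part of the original models) ---
# Illustrates how the salted SHA-256 scheme used by MqttAuth.create() can be
# verified when a device presents its credentials; `verify_mqtt_password` is a
# hypothetical helper, not an existing model method.
def verify_mqtt_password(auth, candidate):
    digest = hashlib.sha256((candidate + auth.salt).encode("utf-8")).hexdigest()
    return digest == auth.password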
| 34.327869 | 108 | 0.722063 | 1,958 | 0.935053 | 0 | 0 | 346 | 0.165234 | 0 | 0 | 94 | 0.04489 |
d294cefa293f8d84c96bacb7467d9cfe88246372 | 147 | py | Python | armageddon/__init__.py | acse-ns1321/asteroid-impact-simulator | 986c12ff1276e5d0547a4f760e1d2cb90fe4ba11 | [
"MIT"
]
| null | null | null | armageddon/__init__.py | acse-ns1321/asteroid-impact-simulator | 986c12ff1276e5d0547a4f760e1d2cb90fe4ba11 | [
"MIT"
]
| null | null | null | armageddon/__init__.py | acse-ns1321/asteroid-impact-simulator | 986c12ff1276e5d0547a4f760e1d2cb90fe4ba11 | [
"MIT"
]
| null | null | null | # flake8:NOQA
"""Python asteroid airburst calculator"""
from .solver import *
from .damage import *
from .locator import *
from .mapping import *
| 18.375 | 41 | 0.734694 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.367347 |
d294d257d8cdf140b519b1d91dd4b68639347768 | 8,235 | py | Python | proxy_server/backend_services.py | lmanzurv/django_proxy_server | 20304829ef1ddcbb281e1373d308e5fa826fcd39 | [
"Apache-2.0"
]
| 11 | 2015-07-18T02:23:43.000Z | 2021-11-15T11:43:21.000Z | proxy_server/backend_services.py | lmanzurv/django_proxy_server | 20304829ef1ddcbb281e1373d308e5fa826fcd39 | [
"Apache-2.0"
]
| null | null | null | proxy_server/backend_services.py | lmanzurv/django_proxy_server | 20304829ef1ddcbb281e1373d308e5fa826fcd39 | [
"Apache-2.0"
]
| 5 | 2015-02-24T15:37:36.000Z | 2021-10-10T16:42:22.000Z | from django.contrib.auth import SESSION_KEY
from django.core.cache import cache
from django.conf import settings
from django.http import HttpResponse, HttpResponseServerError
from proxy_server.response import AJAX_REQUEST
import httplib, json, proxy_server
def invoke_backend_service(method, function_path, json_data=dict(), request=None, response_token=True, public=False, secure=False):
error_message = None
try:
if public is False and request is None:
error_message = 'A private web service must receive Django\'s request'
raise Exception
if response_token is True and request is None:
error_message = 'A web service cannot expect a response token and not receive Django\'s request'
raise Exception
if not hasattr(settings, 'BACKEND_HOST'):
error_message = 'No backend host and/or port specified'
raise Exception
if secure:
if hasattr(settings, 'BACKEND_PORT'):
conn = httplib.HTTPSConnection(settings.BACKEND_HOST, settings.BACKEND_PORT)
else:
conn = httplib.HTTPSConnection(settings.BACKEND_HOST)
else:
if hasattr(settings, 'BACKEND_PORT'):
conn = httplib.HTTPConnection(settings.BACKEND_HOST, settings.BACKEND_PORT)
else:
conn = httplib.HTTPConnection(settings.BACKEND_HOST)
        headers = dict(proxy_server.RESTFUL_HEADER)  # copy so the shared module-level dict is not mutated per request
headers[proxy_server.API_KEY] = settings.SECRET_KEY
if request is not None:
pk = cache.get(AJAX_REQUEST, None)
if pk:
request.user.pk = pk
cache.delete(AJAX_REQUEST)
headers[proxy_server.USER_TOKEN] = request.user.pk
headers[proxy_server.CLIENT_IP] = request.META.get(proxy_server.HTTP_FROM)
try:
conn.request(method, function_path, json.dumps(json_data), headers)
except:
error_message = 'Could not connect to service'
raise Exception
response = conn.getresponse()
response_data = response.read()
conn.close()
if response.status == 403:
return 403, None
if response.status == 204:
if response_token is True:
error_message = 'Backend server didn\'t respond with a token'
raise Exception
return 204, None
else:
try:
response_json = json.loads(response_data)
except:
error_message = 'Unknown response format'
raise Exception
if response_token is True:
user_dict = None
if SESSION_KEY in request.session:
user_dict = cache.get(request.session[SESSION_KEY])
cache.delete(request.session[SESSION_KEY])
request.session[SESSION_KEY] = response_json[proxy_server.USER_TOKEN]
request.user.pk = response_json[proxy_server.USER_TOKEN]
request.session[proxy_server.EXPIRATION_DATE] = response_json[proxy_server.EXPIRATION_DATE]
if user_dict:
user_dict['pk'] = request.user.pk
cache.set(request.session[SESSION_KEY], user_dict)
if response.status == 200:
if response_token is True and proxy_server.USER_TOKEN not in response_json:
error_message = 'Server expected user token in response'
raise Exception
result = None
if proxy_server.RESPONSE in response_json:
result = response_json[proxy_server.RESPONSE]
return 200, result
else:
code = response.status
if proxy_server.ERROR in response_json:
error_message = response_json[proxy_server.ERROR][proxy_server.MESSAGE]
raise Exception(code)
else:
error_message = response.reason
raise Exception(code)
except Exception as e:
if error_message is None:
error_message = 'Unknown error in service invocation'
        code = int(str(e)) if e is not None and str(e).isdigit() else 500
error = {
'error': {
'code': code,
'type': 'ProxyServerError',
'message': error_message
}
}
return code, error
def invoke_backend_service_as_proxy(request, method, function_path, json_data=dict(), response_token=True, secure=False):
error_message = None
try:
if not hasattr(settings, 'BACKEND_HOST'):
error_message = 'No backend host and/or port specified'
raise Exception
if secure:
if hasattr(settings, 'BACKEND_PORT'):
conn = httplib.HTTPSConnection(settings.BACKEND_HOST, settings.BACKEND_PORT)
else:
conn = httplib.HTTPSConnection(settings.BACKEND_HOST)
else:
if hasattr(settings, 'BACKEND_PORT'):
conn = httplib.HTTPConnection(settings.BACKEND_HOST, settings.BACKEND_PORT)
else:
conn = httplib.HTTPConnection(settings.BACKEND_HOST)
        headers = dict(proxy_server.RESTFUL_HEADER)  # copy so the shared module-level dict is not mutated per request
headers[proxy_server.USER_TOKEN] = request.META.get(proxy_server.HTTP_USER_TOKEN)
headers[proxy_server.CLIENT_IP] = request.META.get(proxy_server.HTTP_FROM)
headers[proxy_server.API_KEY] = request.META.get(proxy_server.HTTP_API_KEY)
try:
conn.request(method, function_path, json.dumps(json_data), headers)
except:
error_message = 'Could not connect to service'
raise Exception
response = conn.getresponse()
response_data = response.read()
conn.close()
if response.status == 403:
resp = HttpResponse(status=response.status, reason=response.reason)
for header, value in response.getheaders():
resp[header] = value
for header in proxy_server.HOP_BY_HOP:
del resp[header]
resp[proxy_server.HEADER_SERVER] = proxy_server.VALUE_SERVER
return resp
if response.status == 204:
if response_token is True:
error_message = 'Backend server didn\'t respond with a token'
raise Exception
resp = HttpResponse(status=response.status, content_type='application/json', reason=response.reason)
for header, value in response.getheaders():
resp[header] = value
for header in proxy_server.HOP_BY_HOP:
del resp[header]
resp[proxy_server.HEADER_SERVER] = proxy_server.VALUE_SERVER
return resp
else:
try:
response_json = json.loads(response_data)
except:
error_message = 'Unknown response format'
raise Exception
if response.status == 200:
if response_token is True and proxy_server.USER_TOKEN not in response_json:
error_message = 'Server expected user token in response'
raise Exception
resp = HttpResponse(response_data, status=response.status, content_type='application/json', reason=response.reason)
for header, value in response.getheaders():
resp[header] = value
for header in proxy_server.HOP_BY_HOP:
del resp[header]
resp[proxy_server.HEADER_SERVER] = proxy_server.VALUE_SERVER
return resp
except Exception as e:
if error_message is None:
error_message = 'Unknown error in service invocation'
        code = int(str(e)) if e is not None and str(e).isdigit() else 500
error = {
'error': {
'code': code,
'type': 'ProxyServerError',
'message': error_message
}
}
return HttpResponseServerError(json.dumps(error), content_type='application/json')
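
# --- Usage sketch (not part of the original module) ---
# Calling a private backend endpoint from a Django view with the helper above;
# the '/api/users/profile' path and the view name are illustrative only.
def example_profile_view(request):
    status, result = invoke_backend_service('GET', '/api/users/profile', request=request)
    if status == 200:
        return HttpResponse(json.dumps(result), content_type='application/json')
    return HttpResponseServerError(json.dumps(result), content_type='application/json')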
| 37.094595 | 131 | 0.600364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 0.097146 |
d294ed611a40faaaff54b7db50b237d6a8c768e7 | 1,645 | py | Python | py/trawl_analyzer/TrawlSensorsDB_model.py | nwfsc-fram/pyFieldSoftware | 477ba162b66ede2263693cda8c5a51d27eaa3b89 | [
"MIT"
]
| null | null | null | py/trawl_analyzer/TrawlSensorsDB_model.py | nwfsc-fram/pyFieldSoftware | 477ba162b66ede2263693cda8c5a51d27eaa3b89 | [
"MIT"
]
| 176 | 2019-11-22T17:44:55.000Z | 2021-10-20T23:40:03.000Z | py/trawl_analyzer/TrawlSensorsDB_model.py | nwfsc-fram/pyFieldSoftware | 477ba162b66ede2263693cda8c5a51d27eaa3b89 | [
"MIT"
]
| 1 | 2021-05-07T01:06:32.000Z | 2021-05-07T01:06:32.000Z | # from peewee import *
from playhouse.apsw_ext import TextField, IntegerField, PrimaryKeyField
from py.trawl_analyzer.Settings import SensorsModel as BaseModel
# database = SqliteDatabase('data\clean_sensors.db', **{})
class UnknownField(object):
def __init__(self, *_, **__): pass
class EnviroNetRawFiles(BaseModel):
activation_datetime = TextField(db_column='ACTIVATION_DATETIME', null=True)
deactivation_datetime = TextField(db_column='DEACTIVATION_DATETIME', null=True)
deployed_equipment = IntegerField(db_column='DEPLOYED_EQUIPMENT_ID', null=True)
enviro_net_raw_files = PrimaryKeyField(db_column='ENVIRO_NET_RAW_FILES_ID')
haul = TextField(db_column='HAUL_ID', null=True)
raw_file = TextField(db_column='RAW_FILE', null=True)
class Meta:
db_table = 'ENVIRO_NET_RAW_FILES'
class EnviroNetRawStrings(BaseModel):
date_time = TextField(db_column='DATE_TIME', index=True, null=True)
deployed_equipment = IntegerField(db_column='DEPLOYED_EQUIPMENT_ID', null=True)
enviro_net_raw_strings = PrimaryKeyField(db_column='ENVIRO_NET_RAW_STRINGS_ID')
haul = TextField(db_column='HAUL_ID', null=True)
raw_strings = TextField(db_column='RAW_STRINGS', null=True)
class Meta:
db_table = 'ENVIRO_NET_RAW_STRINGS'
class RawSentences(BaseModel):
date_time = TextField(db_column='DATE_TIME', null=True)
deployed_equipment = IntegerField(db_column='DEPLOYED_EQUIPMENT_ID', null=True)
raw_sentence = TextField(db_column='RAW_SENTENCE', null=True)
raw_sentence_id = PrimaryKeyField(db_column='RAW_SENTENCE_ID')
class Meta:
db_table = 'RAW_SENTENCES'
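
# --- Query sketch (not part of the original module) ---
# Fetches the raw sensor strings recorded for one haul, ordered by their
# timestamp string; the haul identifier value is an assumption.
def raw_strings_for_haul(haul_id):
    return (EnviroNetRawStrings
            .select()
            .where(EnviroNetRawStrings.haul == haul_id)
            .order_by(EnviroNetRawStrings.date_time))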
| 38.255814 | 83 | 0.764134 | 1,413 | 0.858967 | 0 | 0 | 0 | 0 | 0 | 0 | 400 | 0.243161 |
d29572229651c45d1ad6870cb96992f7e8dc3c59 | 9,754 | py | Python | src/statemachine.py | CEOAI-ABM/SIR-Modelling | 02ab89d64040b09ddce820a1ecbbc0cfc9b13f29 | [
"MIT"
]
| 1 | 2021-06-13T11:50:08.000Z | 2021-06-13T11:50:08.000Z | src/statemachine.py | CEOAI-ABM/SIR-Modelling | 02ab89d64040b09ddce820a1ecbbc0cfc9b13f29 | [
"MIT"
]
| null | null | null | src/statemachine.py | CEOAI-ABM/SIR-Modelling | 02ab89d64040b09ddce820a1ecbbc0cfc9b13f29 | [
"MIT"
]
| null | null | null | import transitions
from functools import partial
# from transitions import transitions.Machine
# TODO: whenever there is a state chage store the following
# (DAY,function_called) -> Stored for every person for agent status, state and Testing state
class AgentStatusA(object):
"""The Statemachine of the agent"""
status = ['Free','Quarentined','Out_of_city','Hospitalized','ICU','Isolation']
def __init__(self):
"""Agent Status class is responsible for figuring out the Mobility of the agent, the agent mobility can be
'Free','Quarentined','Out_of_city','Hospitalized','ICU','Isolation'
"""
super(AgentStatusA, self).__init__()
self.ADDED_BIT = True
self.TruthStatus = None
self.Last_Added_Placeholder = None
self.buffer = []
self.Status = self.status[0]
# def log_update(self,message):
def update_objects(self,TruthStatus):
"""Update object of Virusmodel
Args:
TruthStatus (object): Truth State object to update
"""
self.TruthStatus = TruthStatus
def __remove_from_transport__(self):
if self.useTN == True:
self.City.TravellingCitizens.remove(self)
#print('Person {} removed from travelling list of City {}. New length = {}'.format(self.IntID, self.City.Name, len(self.City.TravellingCitizens)))
def _remove_(self):
"""Remove from workplace and transport list
"""
if self.ADDED_BIT:
obj = self.get_workplace_obj()
if obj !=None:
self.buffer.append('_remove_')
obj.Working.remove(self)
self.ADDED_BIT = False
self.__remove_from_transport__()
def _add_(self):
"""Add to workplace and transport list
"""
        if not self.ADDED_BIT:  # logical 'not'; bitwise '~' on a bool is always truthy
obj = self.get_workplace_obj()
if obj != None:
if obj.Working!=None:
self.buffer.append('_add_')
obj.Working.add(self)
self.ADDED_BIT = True
if self.useTN == True:
self.City.TravellingCitizens.add(self)
def _left_(self):
"""Leave city, calls remove
"""
self._remove_()
def _entered_(self):
"""Come back to city
"""
self._add_()
def __remove_from_placeholder__(self):
"""Remove the person from the Truth Status Placeholders
Returns:
bool: Whether Removed or not
"""
try:
if self.Last_Added_Placeholder == 0: # If he is AFreeP
self.TruthStatus.AFreeP.remove(self)
return True
elif self.Last_Added_Placeholder == 1: # If he was Quarentined
self.TruthStatus.AQuarentinedP.remove(self)
return True
elif self.Last_Added_Placeholder == 2: # If he was Isolated
self.TruthStatus.SIsolatedP.remove(self)
return True
elif self.Last_Added_Placeholder == 3: # If he was Hospitalized
self.TruthStatus.SHospitalizedP.remove(self)
return True
elif self.Last_Added_Placeholder == 4: # If he was Icu
self.TruthStatus.SIcuP.remove(self)
return True
else:
return False
except:
self.about()
raise
def leave_city(self):
acceptable_states = [self.status[0]]
try:
assert self.Status in acceptable_states
except:
print('##########', self.Status)
raise
self.Status = self.status[2]
self._left_()
self.__remove_from_placeholder__()
self.Last_Added_Placeholder = None
def enter_city(self):
acceptable_states = [self.status[2]]
try:
assert self.Status in acceptable_states
except:
print('##########', self.Status)
raise
self.Status = self.status[0]
self._entered_()
if self.is_Asymptomatic():
self.TruthStatus.AFreeP.add(self)
self.Last_Added_Placeholder = 0
def quarentined(self,DAY):
acceptable_states = [self.status[0],self.status[1],self.status[2]]
assert self.Status in acceptable_states
if self.Last_Added_Placeholder != 1:
self.__remove_from_placeholder__()
if self.is_Free(): # If free add to quarentined placeholders
self.TruthStatus.AQuarentinedP.add(self)
self.Last_Added_Placeholder = 1
self.Status = self.status[1]
self._remove_()
def hospitalized(self,DAY):
acceptable_states = [self.status[0],self.status[1]]
assert self.Status in acceptable_states
self.Status = self.status[3]
self._remove_()
self.show_symptoms(DAY)
if self.__remove_from_placeholder__(): #If person is in city and removal is successful
self.TruthStatus.SHospitalizedP.add(self)
self.Last_Added_Placeholder = 3
def admit_icu(self,DAY):
acceptable_states = [self.status[0],self.status[1],self.status[3]]
assert self.Status in acceptable_states
self.Status = self.status[4]
self._remove_()
self.show_symptoms(DAY)
if self.__remove_from_placeholder__(): #If person is in city and removal is successful
self.TruthStatus.SIcuP.add(self)
self.Last_Added_Placeholder = 4
def isolate(self,Today):
acceptable_states = [self.status[0],self.status[1],self.status[3],self.status[4],self.status[5]]
assert self.Status in acceptable_states
if self.Status == self.status[0] or self.Status == self.status[1]:
self.show_symptoms(Today)
if self.Last_Added_Placeholder != 2:
if self.__remove_from_placeholder__(): #If person is in city and removal is successful
self.TruthStatus.SIsolatedP.add(self)
self.Last_Added_Placeholder = 2
self.Status = self.status[5]
self._remove_()
def is_Free(self):
return self.Status == self.status[0]
def is_Quarentined(self):
return self.Status == self.status[1]
def is_Out_of_City(self):
return self.Status == self.status[2]
def is_Hospitalized(self):
return self.Status == self.status[3]
def is_ICU(self):
return self.Status == self.status[4]
def is_Isolation(self):
return self.Status == self.status[5]
class AgentStateA(AgentStatusA):
states = ['Healthy','Asymptomatic','Symptomatic','Recovered','Died']
def __init__(self):
"""Agent status is the status of person with respect ot the virus
"""
super(AgentStateA, self).__init__()
#self = person
self.State = self.states[0]
self.TruthStatus = None
def infected(self,DAY):
acceptable_states = [self.states[0]]
assert self.State in acceptable_states
self.State = self.states[1]
self.TruthStatus.AFreeP.add(self)
self.Last_Added_Placeholder = 0
self.History["Infected"] = DAY
def show_symptoms(self,DAY):
acceptable_states = [self.states[1],self.states[2]]
assert self.State in acceptable_states
self.State = self.states[2]
self.History["Symptomatic"] = DAY
def recover(self,DAY):
acceptable_states = [self.states[2]]
assert self.State in acceptable_states
self.State = self.states[3]
self.Status = self.status[5]
if self.__remove_from_placeholder__(): #Removal is succesful, mtlb seher me h
self.TruthStatus.RRecoveredP.add(self)
self.Last_Added_Placeholder =5
self.History["Recovered"] = DAY
self.History["Died"] = -1
def die(self,DAY):
acceptable_states = [self.states[2]]
assert self.State in acceptable_states
self.State = self.states[4]
self.Status = self.status[5]
if self.__remove_from_placeholder__(): #Removal is succesful, mtlb seher me h
self.TruthStatus.RDiedP.add(self)
self.Last_Added_Placeholder = 6
self.History["Recovered"] = -1
self.History["Died"] = DAY
def is_Healthy(self):
return self.State == self.states[0]
def is_Asymptomatic(self):
return self.State == self.states[1]
def is_Symptomatic(self):
return self.State == self.states[2]
def is_Recovered(self):
return self.State == self.states[3]
def is_Died(self):
return self.State == self.states[4]
class TestingState(object):
"""Summary
Attributes:
in_stack (bool): Description
machine (TYPE): Description
state (str): Description
tested (bool): Description
"""
machine = transitions.Machine(model=None, states=['Not_tested', 'Awaiting_Testing', 'Tested_Positive','Tested_Negative'], initial='Not_tested',
transitions=[
{'trigger': 'awaiting_test', 'source': ['Not_tested','Awaiting_Testing','Tested_Negative'], 'dest': 'Awaiting_Testing','before':'add_to_TestingQueue'},
{'trigger': 'tested_positive', 'source': 'Awaiting_Testing', 'dest': 'Tested_Positive','before':'tested_positive_func'},
{'trigger': 'tested_negative', 'source': 'Awaiting_Testing', 'dest': 'Tested_Negative','before':'tested_negative_func'},
])
def __init__(self):
"""This is responsible for updating testing state of the person
Deleted Parameters:
person (object): Home object
VM (object): Virusmodel object
"""
super().__init__()
self.state = 'Not_tested'
def __remove_from_testing_list__(self):
self.City.TestingQueue.remove(self)
def add_to_TestingQueue(self, PrivateTest=False):
"""Summary
"""
# This function is for the City to add citizens into testingQueue
if PrivateTest == False:
if self.state != 'Awaiting_Testing' :
self.City.TestingQueue.append(self)
if self.state == 'Tested_Negative':
self.City.TestedP['Negative'].remove(self)
#print('City {} added person {}'.format(self.City.Name, self.IntID))
#pass type of test
def tested_positive_func(self,Today, PrivateTest=False):
"""Summary
"""
self.City.TestedP['Positive'].add(self)
self.City.NumTestedPositive += 1
if PrivateTest == False:
self.__remove_from_testing_list__()
if self.is_Quarentined():
self.isolate(Today)
def tested_negative_func(self, PrivateTest=False):
"""Summary
"""
self.City.TestedP['Negative'].add(self)
if PrivateTest == False:
self.__remove_from_testing_list__()
def __getattribute__(self, item):
"""Summary
Args:
item (TYPE): Description
Returns:
TYPE: Description
"""
try:
return super(TestingState, self).__getattribute__(item)
except AttributeError:
if item in self.machine.events:
return partial(self.machine.events[item].trigger, self)
raise
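
# --- Usage sketch (not part of the original module) ---
# Intended call order for one agent over an infection; `agent` is assumed to be
# a concrete person class mixing in AgentStateA and TestingState and providing
# the City/History attributes the mix-ins expect. Nothing here is module API.
def _example_disease_course(agent, day):
    agent.infected(day)            # Healthy -> Asymptomatic
    agent.awaiting_test()          # queue the agent for testing in their city
    agent.tested_positive(day)     # Awaiting_Testing -> Tested_Positive
    agent.show_symptoms(day + 2)   # Asymptomatic -> Symptomatic
    agent.recover(day + 14)        # Symptomatic -> Recovered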
| 28.190751 | 159 | 0.705454 | 9,491 | 0.973037 | 0 | 0 | 0 | 0 | 0 | 0 | 2,720 | 0.27886 |
d295e921737512140cabce35cb8da35469a21633 | 304 | py | Python | hard-gists/5898352/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
]
| 21 | 2019-07-08T08:26:45.000Z | 2022-01-24T23:53:25.000Z | hard-gists/5898352/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
]
| 5 | 2019-06-15T14:47:47.000Z | 2022-02-26T05:02:56.000Z | hard-gists/5898352/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
]
| 17 | 2019-05-16T03:50:34.000Z | 2021-01-14T14:35:12.000Z | import os
import scipy.io.wavfile as wav
# install lame
# install bleeding edge scipy (needs new cython)
fname = 'XC135672-Red-winged\ Blackbird1301.mp3'
oname = 'temp.wav'
cmd = 'lame --decode {0} {1}'.format( fname,oname )
os.system(cmd)
data = wav.read(oname)
# your code goes here
print len(data[1])
| 25.333333 | 51 | 0.720395 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 156 | 0.513158 |
d29646348f53744d285a4ab6a2096da4edb810a8 | 2,612 | py | Python | examples/home-assistant/custom_components/evacalor/config_flow.py | fredericvl/pyevacalor | 37a3d96f867efffdec4457f11119977e6e887b8a | [
"Apache-2.0"
]
| 2 | 2020-10-25T15:42:03.000Z | 2021-01-06T10:25:58.000Z | examples/home-assistant/custom_components/evacalor/config_flow.py | fredericvl/pyevacalor | 37a3d96f867efffdec4457f11119977e6e887b8a | [
"Apache-2.0"
]
| 2 | 2021-01-06T09:24:58.000Z | 2021-02-13T21:12:02.000Z | examples/home-assistant/custom_components/evacalor/config_flow.py | fredericvl/pyevacalor | 37a3d96f867efffdec4457f11119977e6e887b8a | [
"Apache-2.0"
]
| null | null | null | """Config flow for Eva Calor."""
from collections import OrderedDict
import logging
import uuid
from pyevacalor import ( # pylint: disable=redefined-builtin
ConnectionError,
Error as EvaCalorError,
UnauthorizedError,
evacalor,
)
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_EMAIL, CONF_PASSWORD
from .const import CONF_UUID, DOMAIN
_LOGGER = logging.getLogger(__name__)
def conf_entries(hass):
"""Return the email tuples for the domain."""
return set(
entry.data[CONF_EMAIL] for entry in hass.config_entries.async_entries(DOMAIN)
)
class EvaCalorConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Eva Calor Config Flow handler."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
def _entry_in_configuration_exists(self, user_input) -> bool:
"""Return True if config already exists in configuration."""
email = user_input[CONF_EMAIL]
if email in conf_entries(self.hass):
return True
return False
async def async_step_user(self, user_input=None):
"""User initiated integration."""
errors = {}
if user_input is not None:
# Validate user input
email = user_input[CONF_EMAIL]
password = user_input[CONF_PASSWORD]
if self._entry_in_configuration_exists(user_input):
return self.async_abort(reason="device_already_configured")
try:
gen_uuid = str(uuid.uuid1())
evacalor(email, password, gen_uuid)
except UnauthorizedError:
errors["base"] = "unauthorized"
except ConnectionError:
errors["base"] = "connection_error"
except EvaCalorError:
errors["base"] = "unknown_error"
if "base" not in errors:
return self.async_create_entry(
title=DOMAIN,
data={
CONF_EMAIL: email,
CONF_PASSWORD: password,
CONF_UUID: gen_uuid,
},
)
else:
user_input = {}
data_schema = OrderedDict()
data_schema[vol.Required(CONF_EMAIL, default=user_input.get(CONF_EMAIL))] = str
data_schema[
vol.Required(CONF_PASSWORD, default=user_input.get(CONF_PASSWORD))
] = str
return self.async_show_form(
step_id="user", data_schema=vol.Schema(data_schema), errors=errors
)
| 31.095238 | 87 | 0.616003 | 1,976 | 0.756508 | 0 | 0 | 0 | 0 | 1,520 | 0.58193 | 366 | 0.140123 |
d2965c42b4aa6f52d9c6e78125bcdb00950f4d9f | 6,608 | py | Python | library_samples/Python3/ocs_sample_library_preview/Dataview/Dataview.py | osi-awoodall/OSI-Samples-OCS | 1995ccda20e4fe2ae66f3b67afbc1127d638a6fc | [
"Apache-2.0"
]
| null | null | null | library_samples/Python3/ocs_sample_library_preview/Dataview/Dataview.py | osi-awoodall/OSI-Samples-OCS | 1995ccda20e4fe2ae66f3b67afbc1127d638a6fc | [
"Apache-2.0"
]
| null | null | null | library_samples/Python3/ocs_sample_library_preview/Dataview/Dataview.py | osi-awoodall/OSI-Samples-OCS | 1995ccda20e4fe2ae66f3b67afbc1127d638a6fc | [
"Apache-2.0"
]
| null | null | null | # Dataview.py
#
import json
from .DataviewQuery import DataviewQuery
from .DataviewMapping import DataviewMapping
from .DataviewIndexConfig import DataviewIndexConfig
from .DataviewGroupRule import DataviewGroupRule
class Dataview(object):
"""
Dataview definition
"""
def __init__(
self,
id=None,
name=None,
description=None,
queries=None,
mappings=None,
indexConfig=None,
indexDataType=None,
groupRules=[],
):
"""
:param id: required
:param name: not required
:param description: not required
        :param queries: DataviewQuery instance; required
:param mappings: array of Dataviewmapping not required
:param indexConfig: DataviewindexConfig not required
:param indexDataType: Currently limited to "DateTime" required
:param groupRules: Array of DataviewGroupRule not required
"""
self.__id = id
self.__name = name
self.__description = description
if queries:
self.__queries = queries
else:
self.__queries = DataviewQuery()
        if mappings:
            self.__mappings = mappings
        else:
            self.__mappings = None
self.__indexConfig = indexConfig
self.__indexDataType = indexDataType
self.__groupRules = groupRules
@property
def Id(self):
"""
Get the id required
:return:
"""
return self.__id
@Id.setter
def Id(self, id):
"""
Set the id required
:param id:
:return:
"""
self.__id = id
@property
def Name(self):
"""
Name can be duplicated in a namespace not required
:return:
"""
return self.__name
@Name.setter
def Name(self, name):
"""
Name can be duplicated in a namespace not required
:param name:
:return:
"""
self.__name = name
@property
def Description(self):
"""
        Add an easy-to-understand description; not required
:return:
"""
return self.__description
@Description.setter
def Description(self, description):
"""
        Add an easy-to-understand description; not required
:param description:
:return:
"""
self.__description = description
@property
def Queries(self):
"""
        DataviewQuery instance; required
:return:
"""
return self.__queries
@Queries.setter
def Queries(self, queries):
"""
        DataviewQuery instance; required
:param queries:
:return:
"""
self.__queries = queries
@property
def Mappings(self):
"""
array of Dataviewmapping not required
:return:
"""
return self.__mappings
@Mappings.setter
def Mappings(self, mappings):
"""
array of Dataviewmapping not required
:param mappings:
:return:
"""
self.__mappings = mappings
@property
def IndexConfig(self):
"""
DataviewindexConfig not required
:return:
"""
return self.__indexConfig
@IndexConfig.setter
def IndexConfig(self, indexConfig):
"""
DataviewindexConfig not required
:param indexConfig:
:return:
"""
self.__indexConfig = indexConfig
@property
def IndexDataType(self):
"""
Currently limited to "DateTime" required
:return:
"""
return self.__indexDataType
@IndexDataType.setter
def IndexDataType(self, indexDataType):
"""
Currently limited to "DateTime" required
:param indexDataType:
:return:
"""
self.__indexDataType = indexDataType
@property
def GroupRules(self):
"""
Array of DataviewGroupRule not required
:return:
"""
return self.__groupRules
@GroupRules.setter
def GroupRules(self, groupRules):
"""
Array of DataviewGroupRule not required
:param groupRules:
:return:
"""
self.__groupRules = groupRules
def toJson(self):
return json.dumps(self.toDictionary())
def toDictionary(self):
# required properties
dictionary = {"Id": self.Id}
dictionary["Queries"] = self.Queries.toDictionary()
# optional properties
if hasattr(self, "Name"):
dictionary["Name"] = self.Name
if hasattr(self, "Description"):
dictionary["Description"] = self.Description
if hasattr(self, "Mappings") and self.Mappings is not None:
dictionary["Mappings"] = self.Mappings.toDictionary()
if hasattr(self, "IndexConfig") and self.IndexConfig is not None:
dictionary["IndexConfig"] = self.IndexConfig.toDictionary()
if hasattr(self, "IndexDataType"):
dictionary["IndexDataType"] = self.IndexDataType
if hasattr(self, "GroupRules"):
dictionary["GroupRules"] = []
for value in self.GroupRules:
dictionary["GroupRules"].append(value.toDictionary())
return dictionary
@staticmethod
def fromJson(jsonObj):
return Dataview.fromDictionary(jsonObj)
@staticmethod
def fromDictionary(content):
dataview = Dataview()
if not content:
return dataview
if "Id" in content:
dataview.Id = content["Id"]
if "Name" in content:
dataview.Name = content["Name"]
if "Description" in content:
dataview.Description = content["Description"]
if "Queries" in content:
dataview.Queries = DataviewQuery.fromDictionary(content["Queries"])
if "Mappings" in content:
dataview.Mappings = DataviewMapping.fromDictionary(content["Mappings"])
if "IndexConfig" in content:
dataview.IndexConfig = DataviewIndexConfig.fromDictionary(
content["IndexConfig"]
)
if "IndexDataType" in content:
dataview.IndexDataType = content["IndexDataType"]
if "GroupRules" in content:
groupRules = content["GroupRules"]
if groupRules is not None and len(groupRules) > 0:
dataview.GroupRules = []
for value in groupRules:
dataview.GroupRules.append(DataviewGroupRule.fromDictionary(value))
return dataview | 25.513514 | 87 | 0.575969 | 6,389 | 0.966858 | 0 | 0 | 4,111 | 0.622125 | 0 | 0 | 2,322 | 0.351392 |
d296cec19b3a1e77f406394741a977e6895ca59f | 392 | py | Python | PYTHON_Code/TestGUI.py | ROBO-BEV/BARISTO | 0e87d79966efc111cc38c1a1cf22e2d8ee18c350 | [
"CC-BY-3.0",
"MIT"
]
| 8 | 2018-03-12T04:52:28.000Z | 2021-05-19T19:37:01.000Z | PYTHON_Code/TestGUI.py | ROBO-BEV/BARISTO | 0e87d79966efc111cc38c1a1cf22e2d8ee18c350 | [
"CC-BY-3.0",
"MIT"
]
| null | null | null | PYTHON_Code/TestGUI.py | ROBO-BEV/BARISTO | 0e87d79966efc111cc38c1a1cf22e2d8ee18c350 | [
"CC-BY-3.0",
"MIT"
]
| 1 | 2018-01-30T09:43:36.000Z | 2018-01-30T09:43:36.000Z | from tkinter import *
window0 = Tk()
window0.geometry('960x540')
#tk.iconbitmap(default='ROBO_BEV_LOGO.ico')
window0.title("BARISTO")
photo = PhotoImage(file="Page1.png")
widget = Label(window0, image=photo)
widget.photo = photo
widget = Label(window0, text="10", fg="white", font=("Source Sans Pro",50))
#widget = Label(window0, text="9", fg="white")
widget.pack()
window0.mainloop()
| 19.6 | 75 | 0.709184 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 146 | 0.372449 |
d297adc463629ff967a82e11d0f42bb013364af4 | 2,354 | py | Python | handlers/play.py | AftahBagas/AlphaMusik | c8c3804a26ad393b6f666fecd4d3464727ce2544 | [
"MIT"
]
| null | null | null | handlers/play.py | AftahBagas/AlphaMusik | c8c3804a26ad393b6f666fecd4d3464727ce2544 | [
"MIT"
]
| null | null | null | handlers/play.py | AftahBagas/AlphaMusik | c8c3804a26ad393b6f666fecd4d3464727ce2544 | [
"MIT"
]
| 1 | 2021-06-22T08:08:43.000Z | 2021-06-22T08:08:43.000Z | from os import path
from pyrogram import Client  # the handler below uses Pyrogram's Client.on_message decorator and filter API
from pyrogram.types import Message, Voice
from callsmusic import callsmusic, queues
import converter
from downloaders import youtube
from config import BOT_NAME as bn, DURATION_LIMIT
from helpers.filters import command, other_filters
from helpers.decorators import errors
from helpers.errors import DurationLimitError
from helpers.gets import get_url, get_file_name
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup
@Client.on_message(command("lplay") & other_filters)
@errors
async def play(_, message: Message):
lel = await message.reply("🔄 **Processing** sounds...")
sender_id = message.from_user.id
sender_name = message.from_user.first_name
keyboard = InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
text="🔊 Group Support",
url="https://t.me/VcgMusicGroup")
]
]
)
audio = (message.reply_to_message.audio or message.reply_to_message.voice) if message.reply_to_message else None
url = get_url(message)
if audio:
if round(audio.duration / 60) > DURATION_LIMIT:
raise DurationLimitError(
f"❌ Videos longer than {DURATION_LIMIT} minute(s) aren't allowed to play!"
)
file_name = get_file_name(audio)
file_path = await converter.convert(
(await message.reply_to_message.download(file_name))
if not path.isfile(path.join("downloads", file_name)) else file_name
)
elif url:
file_path = await converter.convert(youtube.download(url))
else:
return await lel.edit_text("❗ You did not give me anything to play!")
if message.chat.id in callsmusic.pytgcalls.active_calls:
position = await queues.put(message.chat.id, file=file_path)
await lel.edit(f"#⃣ **Queued** at position {position}!")
else:
callsmusic.pytgcalls.join_group_call(message.chat.id, file_path)
await message.reply_photo(
photo="https://telegra.ph/file/a4fa687ed647cfef52402.jpg",
reply_markup=keyboard,
caption="▶️ **Playing** here the song requested by {}!".format(
message.from_user.mention()
),
)
return await lel.delete()
| 34.115942 | 116 | 0.658454 | 0 | 0 | 0 | 0 | 1,880 | 0.793249 | 1,819 | 0.767511 | 360 | 0.151899 |
d2992c7176a1b65595e782d6603b030801317e72 | 2,662 | py | Python | Sindri/Properties.py | mrcsbrn/TCC_software | 17a5335aed17d4740c3bbd0ef828b0fc5dcea1da | [
"MIT"
]
| 11 | 2019-10-17T02:01:51.000Z | 2022-03-17T17:39:34.000Z | Sindri/Properties.py | mrcsbrn/TCC_software | 17a5335aed17d4740c3bbd0ef828b0fc5dcea1da | [
"MIT"
]
| 2 | 2019-07-25T22:16:16.000Z | 2020-03-28T01:59:59.000Z | Sindri/Properties.py | mrcsbrn/TCC_software | 17a5335aed17d4740c3bbd0ef828b0fc5dcea1da | [
"MIT"
]
| 5 | 2019-07-15T18:19:36.000Z | 2021-12-24T08:06:24.000Z | from __future__ import annotations
from constants import DBL_EPSILON
class DeltaProp(object):
def __init__(self, cp: float, h: float, s: float, g: float, u: float, a: float):
self.Cp = cp
self.H = h
self.S = s
self.G = g
self.U = u
self.A = a
def subtract(self, dp2: DeltaProp) -> DeltaProp:
cp = self.Cp - dp2.Cp
h = self.H - dp2.H
s = self.S - dp2.S
g = self.G - dp2.G
u = self.U - dp2.U
a = self.A - dp2.A
return DeltaProp(cp, h, s, g, u, a)
def isEqual(self, dp2: DeltaProp, tol=1e-5) -> bool:
if (
self._relAbsErr(self.Cp, dp2.Cp) < tol
and self._relAbsErr(self.H, dp2.H) < tol
and self._relAbsErr(self.S, dp2.S) < tol
and self._relAbsErr(self.G, dp2.G) < tol
and self._relAbsErr(self.U, dp2.U) < tol
and self._relAbsErr(self.A, dp2.A) < tol
):
return True
return False
def _relAbsErr(self, x: float, y: float) -> float:
if abs(x) < DBL_EPSILON:
return abs(x - y)
return abs((x - y) / x)
class VaporPressure(object):
"""
Class containing information about the vapor pressure of a single substance system.
"""
def __init__(self):
self.EOS = 0
self.AW = 0
self.LK = 0
self.Antoine = 0
self.AntonieLog = 0
def setEOS(self, v: float):
self.EOS = v
def setAW(self, v: float):
self.AW = v
def setLK(self, v: float):
self.LK = v
def setAntoine(self, v: float, log=""):
self.Antoine = v
self.AntonieLog = log
def getAWerr(self) -> float:
return self._relError(self.EOS, self.AW)
def getLKerr(self) -> float:
return self._relError(self.EOS, self.LK)
def getAntoineerr(self) -> float:
return self._relError(self.EOS, self.Antoine)
def _relError(self, _x: float, _y: float) -> float:
if abs(_x) < DBL_EPSILON:
return _x - _y
return (_x - _y) / _x
class Props(object):
def __init__(self):
self.P = 0
self.T = 0
self.Z = 0
self.V = 0
self.rho = 0
self.Pvp = 0
self.Fugacity = 0
self.Props = 0
self.IGProps = 0
self.log = ""
def setRho(self, v: float):
self.rho = v
def setPvp(self, v: VaporPressure):
self.Pvp = v
def setProps(self, v: DeltaProp):
self.Props = v
def setIGProps(self, v: DeltaProp):
self.IGProps = v
    def setFugacity(self, v: float):
self.Fugacity = v
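
# --- Usage sketch (not part of the original module) ---
# The numbers are arbitrary and only demonstrate how DeltaProp differences and
# comparisons behave.
def _example_delta_prop():
    ideal = DeltaProp(cp=29.1, h=1000.0, s=3.0, g=100.0, u=900.0, a=50.0)
    real = DeltaProp(cp=30.0, h=1100.0, s=3.2, g=120.0, u=950.0, a=60.0)
    residual = real.subtract(ideal)                 # departure from the ideal values
    return residual.isEqual(real.subtract(ideal))   # True: identical differences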
| 24.422018 | 87 | 0.531555 | 2,583 | 0.970323 | 0 | 0 | 0 | 0 | 0 | 0 | 103 | 0.038693 |
d29a434df89a3b05d94919b3e887c98d5f6aef26 | 8,240 | py | Python | algorithms/randcommuns.py | eXascaleInfolab/clubmark | 5c329a5308a39d53f77db790a31d621245a7c693 | [
"Apache-2.0"
]
| 14 | 2018-11-20T08:32:30.000Z | 2022-03-14T02:46:35.000Z | algorithms/randcommuns.py | eXascaleInfolab/clubmark | 5c329a5308a39d53f77db790a31d621245a7c693 | [
"Apache-2.0"
]
| null | null | null | algorithms/randcommuns.py | eXascaleInfolab/clubmark | 5c329a5308a39d53f77db790a31d621245a7c693 | [
"Apache-2.0"
]
| 1 | 2019-05-22T08:39:00.000Z | 2019-05-22T08:39:00.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:Brief: Produces rand disjoint communities (clusters) for the given network with sizes similar in the ground truth.
:Description:
Takes number of the resulting communities and their sizes from the specified groundtruth (actually any sample
of the community structure, the real ground truth is not required) and fills stubs of the clusters with
randomly selected nodes from the input network with all their neighbors.
Note: Produced result is a random disjoint partitioning, so if the 'ground truth' had overlapping clusters, then
the number of nodes in the last cluster will be less than in the sample.
:Authors: Artem Lutov <[email protected]>
:Organizations: eXascale lab <http://exascale.info/>, ScienceWise <http://sciencewise.info/>,
Lumais <http://www.lumais.com/>
:Date: 2015-07
"""
from __future__ import print_function, division # Required for stderr output, must be the first import
import sys
import os # Pathes processing
#import igraph as ig
import random as rand
try:
# ATTENTION: Python3 newer treats imports as realtive and results in error here unlike Python2
from utils.parser_nsl import asymnet, loadNsl #pylint: disable=E0611,E0401
except ImportError:
# Note: this case should be the second because explicit relative imports cause various errors
# under Python2 and Python3, which complicates thier handling
from .utils.parser_nsl import asymnet, loadNsl #pylint: disable=E0611,E0401
# Default number of the resulting clusterings (partitions, i.e files that contain disjoint clusters)
_RESNUM = 1
class Params(object):
"""Input parameters (arguments)"""
def __init__(self):
"""Parameters:
groundtruth - flile name of the ground truth clustering
network - flile name of the input network
dirnet - whether the input network is directed
outnum - number of the resulting clusterings
randseed - seed for the clustering generation (automatically generated if not specified)
        outpseed - whether to output the seed (automatically set to True if the seed is generated automatically)
outdir - output directory
outname - base name of the output file based on the network name
        outext - extension of the output files based on the groundtruth extension
"""
self.groundtruth = None
self.network = None
self.dirnet = False
self.outnum = _RESNUM
self.randseed = None
self.outpseed = False
self.outdir = None
self.outname = None
self.outext = ''
def parseParams(args):
"""Parse user-specified parameters
returns - parsed input arguments, Params()
"""
assert isinstance(args, (tuple, list)) and args, 'Input arguments must be specified'
prm = Params()
for arg in args:
# Validate input format
preflen = 3
if arg[0] != '-' or len(arg) <= preflen:
raise ValueError('Unexpected argument: ' + arg)
if arg[1] == 'g':
prm.groundtruth = arg[preflen:]
prm.outext = os.path.splitext(prm.groundtruth)[1]
elif arg[1] == 'i':
pos = arg.find('=', 2)
if pos == -1 or arg[2] not in 'ud=' or len(arg) == pos + 1:
raise ValueError('Unexpected argument: ' + arg)
pos += 1
prm.network = arg[pos:]
prm.outname, netext = os.path.splitext(os.path.split(prm.network)[1])
prm.dirnet = asymnet(netext.lower(), arg[2] == 'd')
if not prm.outname:
raise ValueError('Invalid network name (is a directory): ' + prm.network)
elif arg[1] == 'n':
prm.outnum = int(arg[preflen:])
assert prm.outnum >= 1, 'outnum must be a natural number'
elif arg[1] == 'r':
prm.randseed = arg[preflen:]
elif arg[1] == 'o':
prm.outdir = arg[preflen:]
else:
raise ValueError('Unexpected argument: ' + arg)
if not (prm.groundtruth and prm.network):
raise ValueError('Input network and groundtruth file names must be specified')
if not prm.outdir:
prm.outdir = os.path.split(prm.network)[0]
if not prm.outdir:
prm.outdir = '.'
if not prm.randseed:
try:
prm.randseed = ''.join(str(ord(c)) for c in os.urandom(8))
except NotImplementedError:
prm.randseed = str(rand.random())
prm.outpseed = True
return prm
def randcommuns(*args):
"""Generate random clusterings for the specified network"""
prm = parseParams(args)
print('Starting randcommuns clustering:'
'\n\tgroundtruth: {}'
'\n\t{} network: {}'
'\n\t{} cls of {} in {} with randseed: {}'
.format(prm.groundtruth, 'directed' if prm.dirnet else 'undirected', prm.network
, prm.outnum, prm.outname + prm.outext, prm.outdir, prm.randseed))
# Load Data from simple real-world networks
graph = loadNsl(prm.network, prm.dirnet) # ig.Graph.Read_Ncol(network, directed=dirnet) # , weights=False
# Load statistics from the ground thruth
groundstat = []
with open(prm.groundtruth, 'r') as fground:
for line in fground:
# Skip empty lines and comments (possible header)
if not line or line[0] == '#':
continue
groundstat.append(len(line.split()))
# Create outpdir if required
if prm.outdir and not os.path.exists(prm.outdir):
os.makedirs(prm.outdir)
# Geneate rand clsuterings
rand.seed(prm.randseed)
while prm.outnum > 0:
prm.outnum -= 1
# Active (remained) nodes indices of the input network
actnodes = set(graph.vs.indices) #pylint: disable=E1101
clusters = [] # Forming clusters
# Reference size of the ground truth clusters (they migh have overlaps unlike the current partitioning)
for clmarg in groundstat:
nodes = [] # Content of the current cluster
# Check whether all nodes of the initial network are mapped
if not actnodes:
break
# Select subsequent rand node
ind = rand.sample(actnodes, 1)[0]
actnodes.remove(ind)
nodes.append(ind)
inode = 0 # Index of the node in the current cluster
# Select neighbors of the selected nodes to fill the clusters
while len(nodes) < clmarg and actnodes:
for nd in graph.vs[nodes[inode]].neighbors(): #pylint: disable=E1136
if nd.index not in actnodes:
continue
actnodes.remove(nd.index)
nodes.append(nd.index)
if len(nodes) >= clmarg or not actnodes:
break
inode += 1
if inode >= len(nodes) and len(nodes) < clmarg and actnodes:
ind = rand.sample(actnodes, 1)[0]
actnodes.remove(ind)
nodes.append(ind)
# Use original labels of the nodes
clusters.append(graph.vs[ind]['name'] for ind in nodes) #pylint: disable=E1136
# Output resulting clusters
with open('/'.join((prm.outdir, ''.join((prm.outname, '_', str(prm.outnum), prm.outext)))), 'w') as fout:
for cl in clusters:
# Note: print() unlike fout.write() appends the newline
print(' '.join(cl), file=fout)
# Output randseed used for the generated clusterings
# Output to the dir above if possible to not mix cluster levels with rand seed
if prm.outpseed:
with open('/'.join((prm.outdir, (os.path.splitext(prm.outname)[0] + '.seed'))), 'w') as fout:
# Note: print() unlike fout.write() appends the newline
print(prm.randseed, file=fout)
print('Random clusterings are successfully generated')
if __name__ == '__main__':
if len(sys.argv) > 2:
randcommuns(*sys.argv[1:])
else:
print('\n'.join(('Produces random disjoint partitioning (clusters are formed with rand nodes and their neighbors)'
        ' for the input network specified in the NSL format (generalization of NCOL, SNAP, etc.)\n',
'Usage: {app} -g=<ground_truth> -i[{{u, d}}]=<input_network> [-n=<res_num>] [-r=<rand_seed>] [-o=<outp_dir>]',
'',
' -g=<ground_truth> - ground truth clustering as a template for sizes of the resulting communities',
' -i[X]=<input_network> - file of the input network in the format: <src_id> <dst_id> [<weight>]',
' Xu - undirected input network (<src_id> <dst_id> implies also <dst_id> <src_id>). Default',
' Xd - directed input network (both <src_id> <dst_id> and <dst_id> <src_id> are specified)',
' NOTE: (un)directed flag is considered only for the networks with non-NSL file extension',
' -n=<res_num> - number of the resulting clusterings to generate. Default: {resnum}',
' -r=<rand_seed> - random seed, string. Default: value from the system rand source (otherwise current time)',
' -o=<output_communities> - . Default: ./<input_network>/'
)).format(app=sys.argv[0], resnum=_RESNUM))
| 40.392157 | 116 | 0.701942 | 884 | 0.107282 | 0 | 0 | 0 | 0 | 0 | 0 | 4,703 | 0.570752 |
d29d169f662bf82cfbfb0172089e264d38e0b3c3 | 17,578 | py | Python | utils/save_atten.py | xiaomengyc/SPG | 0006659c5be4c3451f8c9a188f1e91e9ff682fa9 | [
"MIT"
]
| 152 | 2018-07-25T01:55:33.000Z | 2022-02-02T15:16:09.000Z | utils/save_atten.py | xiaomengyc/SPG | 0006659c5be4c3451f8c9a188f1e91e9ff682fa9 | [
"MIT"
]
| 15 | 2018-09-13T06:35:16.000Z | 2021-08-05T06:23:16.000Z | utils/save_atten.py | xiaomengyc/SPG | 0006659c5be4c3451f8c9a188f1e91e9ff682fa9 | [
"MIT"
]
| 27 | 2018-07-26T03:47:55.000Z | 2021-04-05T08:06:41.000Z | import numpy as np
import cv2
import os
import torch
import os
import time
from torchvision import models, transforms
from torch.utils.data import DataLoader
from torch.optim import SGD
from torch.autograd import Variable
idx2catename = {'voc20': ['aeroplane','bicycle','bird','boat','bottle','bus','car','cat','chair','cow','diningtable','dog','horse',
'motorbike','person','pottedplant','sheep','sofa','train','tvmonitor'],
'coco80': ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck',
'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench',
'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet',
'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven',
'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
'hair drier', 'toothbrush']}
class SAVE_ATTEN(object):
def __init__(self, save_dir='save_bins', dataset=None):
# type: (object, object) -> object
self.save_dir = save_dir
if dataset is not None:
self.idx2cate = self._get_idx2cate_dict(datasetname=dataset)
else:
self.idx2cate = None
if not os.path.exists(self.save_dir):
os.makedirs(self.save_dir)
def save_top_5_pred_labels(self, preds, org_paths, global_step):
img_num = np.shape(preds)[0]
for idx in xrange(img_num):
img_name = org_paths[idx].strip().split('/')[-1]
if '.JPEG' in img_name:
img_id = img_name[:-5]
elif '.png' in img_name or '.jpg' in img_name:
img_id = img_name[:-4]
out = img_id + ' ' + ' '.join(map(str, preds[idx,:])) + '\n'
out_file = os.path.join(self.save_dir, 'pred_labels.txt')
if global_step == 0 and idx==0 and os.path.exists(out_file):
os.remove(out_file)
with open(out_file, 'a') as f:
f.write(out)
def save_masked_img_batch(self, path_batch, atten_batch, label_batch):
#img_num = np.shape(atten_batch)[0]
img_num = atten_batch.size()[0]
# fid = open('imagenet_val_shape.txt', 'a')
# print(np.shape(img_batch), np.shape(label_batch), np.shape(org_size_batch), np.shape(atten_batch))
for idx in xrange(img_num):
atten = atten_batch[idx]
atten = atten.cpu().data.numpy()
label = label_batch[idx]
label = int(label)
self._save_masked_img(path_batch[idx], atten,label)
def _get_idx2cate_dict(self, datasetname=None):
if datasetname not in idx2catename.keys():
print 'The given %s dataset category names are not available. The supported are: %s'\
%(str(datasetname),','.join(idx2catename.keys()))
return None
else:
return {idx:cate_name for idx, cate_name in enumerate(idx2catename[datasetname])}
def _save_masked_img(self, img_path, atten, label):
'''
save masked images with only one ground truth label
:param path:
:param img:
:param atten:
:param org_size:
:param label:
:param scores:
:param step:
:param args:
:return:
'''
if not os.path.isfile(img_path):
            raise IOError('Image does not exist: %s' % (img_path))
img = cv2.imread(img_path)
org_size = np.shape(img)
w = org_size[0]
h = org_size[1]
attention_map = atten[label,:,:]
atten_norm = attention_map
print(np.shape(attention_map), 'Max:', np.max(attention_map), 'Min:',np.min(attention_map))
# min_val = np.min(attention_map)
# max_val = np.max(attention_map)
# atten_norm = (attention_map - min_val)/(max_val - min_val)
atten_norm = cv2.resize(atten_norm, dsize=(h,w))
atten_norm = atten_norm* 255
heat_map = cv2.applyColorMap(atten_norm.astype(np.uint8), cv2.COLORMAP_JET)
img = cv2.addWeighted(img.astype(np.uint8), 0.5, heat_map.astype(np.uint8), 0.5, 0)
img_id = img_path.strip().split('/')[-1]
img_id = img_id.strip().split('.')[0]
save_dir = os.path.join(self.save_dir, img_id+'.png')
cv2.imwrite(save_dir, img)
def get_img_id(self, path):
img_id = path.strip().split('/')[-1]
return img_id.strip().split('.')[0]
def save_top_5_atten_maps(self, atten_fuse_batch, top_indices_batch, org_paths, topk=5):
'''
Save top-5 localization maps for generating bboxes
:param atten_fuse_batch: normalized last layer feature maps of size (batch_size, C, W, H), type: numpy array
:param top_indices_batch: ranked predicted labels of size (batch_size, C), type: numpy array
:param org_paths:
:param args:
:return:
'''
img_num = np.shape(atten_fuse_batch)[0]
for idx in xrange(img_num):
img_id = org_paths[idx].strip().split('/')[-1][:-4]
for k in range(topk):
atten_pos = top_indices_batch[idx, k]
atten_map = atten_fuse_batch[idx, atten_pos,:,:]
heat_map = cv2.resize(atten_map, dsize=(224, 224))
# heat_map = cv2.resize(atten_map, dsize=(img_shape[1], img_shape[0]))
heat_map = heat_map* 255
save_path = os.path.join(self.save_dir, 'heat_maps', 'top%d'%(k+1))
if not os.path.exists(save_path):
os.makedirs(save_path)
save_path = os.path.join(save_path,img_id+'.png')
cv2.imwrite(save_path, heat_map)
# def save_heatmap_segmentation(self, img_path, atten, gt_label, save_dir=None, size=(224,224), maskedimg=False):
# assert np.ndim(atten) == 4
#
# labels_idx = np.where(gt_label[0]==1)[0] if np.ndim(gt_label)==2 else np.where(gt_label==1)[0]
#
# if save_dir is None:
# save_dir = self.save_dir
# if not os.path.exists(save_dir):
# os.mkdir(save_dir)
#
# if isinstance(img_path, list) or isinstance(img_path, tuple):
# batch_size = len(img_path)
# for i in range(batch_size):
# img, size = self.read_img(img_path[i], size=size)
# atten_img = atten[i] #get attention maps for the i-th img of the batch
# img_name = self.get_img_id(img_path[i])
# img_dir = os.path.join(save_dir, img_name)
# if not os.path.exists(img_dir):
# os.mkdir(img_dir)
# for k in labels_idx:
# atten_map_k = atten_img[k,:,:]
# atten_map_k = cv2.resize(atten_map_k, dsize=size)
# if maskedimg:
# img_to_save = self._add_msk2img(img, atten_map_k)
# else:
# img_to_save = self.normalize_map(atten_map_k)*255.0
#
# save_path = os.path.join(img_dir, '%d.png'%(k))
# cv2.imwrite(save_path, img_to_save)
def normalize_map(self, atten_map):
min_val = np.min(atten_map)
max_val = np.max(atten_map)
atten_norm = (atten_map - min_val)/(max_val - min_val)
return atten_norm
def _add_msk2img(self, img, msk, isnorm=True):
if np.ndim(img) == 3:
assert np.shape(img)[0:2] == np.shape(msk)
else:
assert np.shape(img) == np.shape(msk)
if isnorm:
min_val = np.min(msk)
max_val = np.max(msk)
atten_norm = (msk - min_val)/(max_val - min_val)
atten_norm = atten_norm* 255
heat_map = cv2.applyColorMap(atten_norm.astype(np.uint8), cv2.COLORMAP_JET)
w_img = cv2.addWeighted(img.astype(np.uint8), 0.5, heat_map.astype(np.uint8), 0.5, 0)
return w_img
def _draw_text(self, pic, txt, pos='topleft'):
font = cv2.FONT_HERSHEY_SIMPLEX #multiple line
txt = txt.strip().split('\n')
stat_y = 30
for t in txt:
pic = cv2.putText(pic,t,(10,stat_y), font, 0.8,(255,255,255),2,cv2.LINE_AA)
stat_y += 30
return pic
def _mark_score_on_picture(self, pic, score_vec, label_idx):
score = score_vec[label_idx]
txt = '%.3f'%(score)
pic = self._draw_text(pic, txt, pos='topleft')
return pic
def get_heatmap_idxes(self, gt_label):
labels_idx = []
if np.ndim(gt_label) == 1:
labels_idx = np.expand_dims(gt_label, axis=1).astype(np.int)
elif np.ndim(gt_label) == 2:
for row in gt_label:
idxes = np.where(row[0]==1)[0] if np.ndim(row)==2 else np.where(row==1)[0]
labels_idx.append(idxes.tolist())
else:
labels_idx = None
return labels_idx
def get_map_k(self, atten, k, size=(224,224)):
atten_map_k = atten[k,:,:]
# print np.max(atten_map_k), np.min(atten_map_k)
atten_map_k = cv2.resize(atten_map_k, dsize=size)
return atten_map_k
def read_img(self, img_path, size=(224,224)):
img = cv2.imread(img_path)
if img is None:
print "Image does not exist. %s" %(img_path)
exit(0)
if size == (0,0):
size = np.shape(img)[:2]
else:
img = cv2.resize(img, size)
return img, size[::-1]
def get_masked_img(self, img_path, atten, gt_label,
size=(224,224), maps_in_dir=False, save_dir=None, only_map=False):
assert np.ndim(atten) == 4
save_dir = save_dir if save_dir is not None else self.save_dir
if isinstance(img_path, list) or isinstance(img_path, tuple):
batch_size = len(img_path)
label_indexes = self.get_heatmap_idxes(gt_label)
for i in range(batch_size):
img, size = self.read_img(img_path[i], size)
img_name = img_path[i].split('/')[-1]
img_name = img_name.strip().split('.')[0]
if maps_in_dir:
img_save_dir = os.path.join(save_dir, img_name)
os.mkdir(img_save_dir)
for k in label_indexes[i]:
atten_map_k = self.get_map_k(atten[i], k , size)
msked_img = self._add_msk2img(img, atten_map_k)
suffix = str(k+1)
if only_map:
save_img = (self.normalize_map(atten_map_k)*255).astype(np.int)
else:
save_img = msked_img
if maps_in_dir:
cv2.imwrite(os.path.join(img_save_dir, suffix + '.png'), save_img)
else:
cv2.imwrite(os.path.join(save_dir, img_name + '_' + suffix + '.png'), save_img)
# if score_vec is not None and labels_idx is not None:
# msked_img = self._mark_score_on_picture(msked_img, score_vec, labels_idx[k])
# if labels_idx is not None:
# suffix = self.idx2cate.get(labels_idx[k], k)
# def get_masked_img_ml(self, img_path, atten, save_dir=None, size=(224,224),
# gt_label=None, score_vec=None):
# assert np.ndim(atten) == 4
#
# if gt_label is not None and self.idx2cate is not None:
# labels_idx = np.where(gt_label[0]==1)[0] if np.ndim(gt_label)==2 else np.where(gt_label==1)[0]
# else:
# labels_idx = None
#
#
# if save_dir is not None:
# self.save_dir = save_dir
# if isinstance(img_path, list) or isinstance(img_path, tuple):
# batch_size = len(img_path)
# for i in range(batch_size):
# img = cv2.imread(img_path[i])
# if img is None:
# print "Image does not exist. %s" %(img_path[i])
# exit(0)
#
# else:
# atten_img = atten[i] #get attention maps for the i-th img
# img_name = img_path[i].split('/')[-1]
# for k in range(np.shape(atten_img)[0]):
# if size == (0,0):
# w, h, _ = np.shape(img)
# # h, w, _ = np.shape(img)
# else:
# h, w = size
# img = cv2.resize(img, dsize=(h, w))
# atten_map_k = atten_img[k,:,:]
# # print np.max(atten_map_k), np.min(atten_map_k)
# atten_map_k = cv2.resize(atten_map_k, dsize=(h,w))
# msked_img = self._add_msk2img(img, atten_map_k)
# if score_vec is not None and labels_idx is not None:
# msked_img = self._mark_score_on_picture(msked_img, score_vec, labels_idx[k])
# if labels_idx is not None:
# suffix = self.idx2cate.get(labels_idx[k], k)
# else:
# suffix = str(k)
# if '.' in img_name:
# img_name = img_name.strip().split('.')[0]
# cv2.imwrite(os.path.join(self.save_dir, img_name + '_' + suffix + '.png'), msked_img)
#
#
# def get_masked_img(self, img_path, atten, save_dir=None, size=(224,224), combine=True):
# '''
#
# :param img_path:
# :param atten:
# :param size: if it is (0,0) use original image size, otherwise use the specified size.
# :param combine:
# :return:
# '''
#
# if save_dir is not None:
# self.save_dir = save_dir
# if isinstance(img_path, list) or isinstance(img_path, tuple):
# batch_size = len(img_path)
#
# for i in range(batch_size):
# atten_norm = atten[i]
# min_val = np.min(atten_norm)
# max_val = np.max(atten_norm)
# atten_norm = (atten_norm - min_val)/(max_val - min_val)
# # print np.max(atten_norm), np.min(atten_norm)
# img = cv2.imread(img_path[i])
# if img is None:
# print "Image does not exist. %s" %(img_path[i])
# exit(0)
#
# if size == (0,0):
# w, h, _ = np.shape(img)
# # h, w, _ = np.shape(img)
# else:
# h, w = size
# img = cv2.resize(img, dsize=(h, w))
#
# atten_norm = cv2.resize(atten_norm, dsize=(h,w))
# # atten_norm = cv2.resize(atten_norm, dsize=(w,h))
# atten_norm = atten_norm* 255
# heat_map = cv2.applyColorMap(atten_norm.astype(np.uint8), cv2.COLORMAP_JET)
# img = cv2.addWeighted(img.astype(np.uint8), 0.5, heat_map.astype(np.uint8), 0.5, 0)
#
#
# # font = cv2.FONT_HERSHEY_SIMPLEX
# # cv2.putText(img,'OpenCV \n hello',(10,500), font, 4,(255,255,255),2,cv2.LINE_AA)
#
# img_name = img_path[i].split('/')[-1]
# print os.path.join(self.save_dir, img_name)
# cv2.imwrite(os.path.join(self.save_dir, img_name), img)
def get_atten_map(self, img_path, atten, save_dir=None, size=(321,321)):
'''
:param img_path:
:param atten:
:param size: if it is (0,0) use original image size, otherwise use the specified size.
:param combine:
:return:
'''
if save_dir is not None:
self.save_dir = save_dir
if isinstance(img_path, list) or isinstance(img_path, tuple):
batch_size = len(img_path)
for i in range(batch_size):
atten_norm = atten[i]
min_val = np.min(atten_norm)
max_val = np.max(atten_norm)
atten_norm = (atten_norm - min_val)/(max_val - min_val)
# print np.max(atten_norm), np.min(atten_norm)
h, w = size
atten_norm = cv2.resize(atten_norm, dsize=(h,w))
# atten_norm = cv2.resize(atten_norm, dsize=(w,h))
atten_norm = atten_norm* 255
img_name = img_path[i].split('/')[-1]
img_name = img_name.replace('jpg', 'png')
cv2.imwrite(os.path.join(self.save_dir, img_name), atten_norm)
class DRAW(object):
def __init__(self):
pass
def draw_text(self, img, text):
if isinstance(text, dict):
pass
| 42.458937 | 131 | 0.529867 | 15,973 | 0.908693 | 0 | 0 | 0 | 0 | 0 | 0 | 7,965 | 0.453123 |
d29d26d475e134ec64d93b0a0c67aac73b58249e | 453 | py | Python | integration/config/service_names.py | hawflau/serverless-application-model | d2cf4b7e23d26cdf677c564d53bb58e6a5b6cac2 | [
"Apache-2.0"
]
| null | null | null | integration/config/service_names.py | hawflau/serverless-application-model | d2cf4b7e23d26cdf677c564d53bb58e6a5b6cac2 | [
"Apache-2.0"
]
| 1 | 2020-03-03T01:46:46.000Z | 2020-03-03T01:46:46.000Z | integration/config/service_names.py | hawflau/serverless-application-model | d2cf4b7e23d26cdf677c564d53bb58e6a5b6cac2 | [
"Apache-2.0"
]
| null | null | null | COGNITO = "Cognito"
SERVERLESS_REPO = "ServerlessRepo"
MODE = "Mode"
XRAY = "XRay"
LAYERS = "Layers"
HTTP_API = "HttpApi"
IOT = "IoT"
CODE_DEPLOY = "CodeDeploy"
ARM = "ARM"
GATEWAY_RESPONSES = "GatewayResponses"
MSK = "MSK"
KMS = "KMS"
CWE_CWS_DLQ = "CweCwsDlq"
CODE_SIGN = "CodeSign"
MQ = "MQ"
USAGE_PLANS = "UsagePlans"
SCHEDULE_EVENT = "ScheduleEvent"
DYNAMO_DB = "DynamoDB"
KINESIS = "Kinesis"
SNS = "SNS"
SQS = "SQS"
CUSTOM_DOMAIN = "CustomDomain"
| 19.695652 | 38 | 0.708609 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 199 | 0.439294 |
d29e1b642a0cdbe5b86c0d36bda20ce0cce1d92a | 2,373 | py | Python | tools/onnx_utilis/export_vfe_weight.py | neolixcn/OpenPCDet | 32bae37db13711a4fb35ad2980068470bb6cee1c | [
"Apache-2.0"
]
| null | null | null | tools/onnx_utilis/export_vfe_weight.py | neolixcn/OpenPCDet | 32bae37db13711a4fb35ad2980068470bb6cee1c | [
"Apache-2.0"
]
| null | null | null | tools/onnx_utilis/export_vfe_weight.py | neolixcn/OpenPCDet | 32bae37db13711a4fb35ad2980068470bb6cee1c | [
"Apache-2.0"
]
| null | null | null | import onnx
import onnxruntime
import torch
import onnx.numpy_helper
# added by huxi, load rpn config
from pcdet.pointpillar_quantize_config import load_rpn_config_json
# ========================================
config_dict = load_rpn_config_json.get_config()
onnx_model_file = config_dict["vfe_onnx_file"]
onnx_model = onnx.load(onnx_model_file)
# check model
onnx.checker.check_model(onnx_model)
def to_numpy(tensor):
return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
#[tensor_mat_weight] = [t for t in onnx_model.graph.initializer if t.name == "linear.weight"]
[tensor_mat_weight] = [t for t in onnx_model.graph.initializer if t.name == "14"]
[tensor_bn_gamma] = [t for t in onnx_model.graph.initializer if t.name == "norm.weight"]
[tensor_bn_beta] = [t for t in onnx_model.graph.initializer if t.name == "norm.bias"]
[tensor_bn_mean] = [t for t in onnx_model.graph.initializer if t.name == "norm.running_mean"]
[tensor_bn_var] = [t for t in onnx_model.graph.initializer if t.name == "norm.running_var"]
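# convert the ONNX initializers to numpy; the linear weight is transposed and every
# tensor is flattened row-major before being dumped as plain text below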
mat_w = onnx.numpy_helper.to_array(tensor_mat_weight)
mat_w = mat_w.transpose()
mat_w_list = list(mat_w.flatten())
bn_gamma_w = onnx.numpy_helper.to_array(tensor_bn_gamma)
bn_gamma_w_list = list(bn_gamma_w.flatten())
bn_beta_w = onnx.numpy_helper.to_array(tensor_bn_beta)
bn_beta_w_list = list(bn_beta_w.flatten())
bn_mean_w = onnx.numpy_helper.to_array(tensor_bn_mean)
bn_mean_w_list = list(bn_mean_w.flatten())
bn_var_w = onnx.numpy_helper.to_array(tensor_bn_var)
bn_var_w_list = list(bn_var_w.flatten())
result_line = ""
exported_vfe_weight_file = config_dict["vfe_exported_weight_file"]
with open(exported_vfe_weight_file, 'w') as f:
for idx,val in enumerate(mat_w_list):
result_line += str(val)
result_line += " "
result_line += "\n"
for idx,val in enumerate(bn_gamma_w_list):
result_line += str(val)
result_line += " "
result_line += "\n"
for idx,val in enumerate(bn_beta_w_list):
result_line += str(val)
result_line += " "
result_line += "\n"
for idx,val in enumerate(bn_mean_w_list):
result_line += str(val)
result_line += " "
result_line += "\n"
for idx,val in enumerate(bn_var_w_list):
result_line += str(val)
result_line += " "
f.write(result_line)
| 28.25 | 93 | 0.702908 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 321 | 0.135272 |
d29e58f5104bd6d4a19025c66f8dbd6cd3fc3f1a | 1,825 | py | Python | color_extractor/cluster.py | hcoura/color-extractor | a69fc4a9a8b7c90d292f954d289c84a38323eda6 | [
"MIT"
]
| 276 | 2016-07-25T10:00:06.000Z | 2022-03-10T16:56:26.000Z | color_extractor/cluster.py | hcoura/color-extractor | a69fc4a9a8b7c90d292f954d289c84a38323eda6 | [
"MIT"
]
| 13 | 2017-05-25T12:45:30.000Z | 2022-03-11T23:16:30.000Z | color_extractor/cluster.py | hcoura/color-extractor | a69fc4a9a8b7c90d292f954d289c84a38323eda6 | [
"MIT"
]
| 74 | 2016-12-14T07:31:18.000Z | 2022-03-12T18:36:57.000Z | from sklearn.cluster import KMeans
from .exceptions import KMeansException
from .task import Task
class Cluster(Task):
"""
Use the K-Means algorithm to group pixels by clusters. The algorithm tries
to determine the optimal number of clusters for the given pixels.
"""
def __init__(self, settings=None):
if settings is None:
settings = {}
super(Cluster, self).__init__(settings)
self._kmeans_args = {
'max_iter': 50,
'tol': 1.0,
}
def get(self, img):
a = self._settings['algorithm']
if a == 'kmeans':
return self._jump(img)
else:
raise ValueError('Unknown algorithm {}'.format(a))
def _kmeans(self, img, k):
kmeans = KMeans(n_clusters=k, **self._kmeans_args)
try:
kmeans.fit(img)
except:
raise KMeansException()
return kmeans.inertia_, kmeans.labels_, kmeans.cluster_centers_
def _jump(self, img):
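        # choose k by the largest difference between consecutive power-transformed
        # distortions (a variant of the "jump method" for picking the number of clusters)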
npixels = img.size
best = None
prev_distorsion = 0
largest_diff = float('-inf')
for k in range(self._settings['min_k'], self._settings['max_k']):
compact, labels, centers = self._kmeans(img, k)
distorsion = Cluster._square_distorsion(npixels, compact, 1.5)
diff = prev_distorsion - distorsion
prev_distorsion = distorsion
if diff > largest_diff:
largest_diff = diff
best = k, labels, centers
return best
@staticmethod
def _default_settings():
return {
'min_k': 2,
'max_k': 7,
'algorithm': 'kmeans',
}
@staticmethod
def _square_distorsion(npixels, compact, y):
return pow(compact / npixels, -y)
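# Usage sketch (assumptions: `pixels` is an (n_pixels, n_channels) float array and the
# Task base class accepts the settings dict used above):
#   cluster = Cluster({'algorithm': 'kmeans', 'min_k': 2, 'max_k': 7})
#   k, labels, centers = cluster.get(pixels)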
| 26.838235 | 78 | 0.572603 | 1,723 | 0.94411 | 0 | 0 | 256 | 0.140274 | 0 | 0 | 269 | 0.147397 |
d29e853085f1e22d6f5c45806ff223b5999daf1d | 315 | py | Python | notebooks/datasets.py | jweill-aws/jupyterlab-data-explorer | 3db8eed9562f35d2b0e44370cf22f32ac9ffbc4d | [
"BSD-3-Clause"
]
| 173 | 2019-01-04T05:18:08.000Z | 2022-03-28T11:15:30.000Z | notebooks/datasets.py | jweill-aws/jupyterlab-data-explorer | 3db8eed9562f35d2b0e44370cf22f32ac9ffbc4d | [
"BSD-3-Clause"
]
| 115 | 2019-01-04T01:09:41.000Z | 2022-03-24T01:07:00.000Z | notebooks/datasets.py | jweill-aws/jupyterlab-data-explorer | 3db8eed9562f35d2b0e44370cf22f32ac9ffbc4d | [
"BSD-3-Clause"
]
| 34 | 2019-06-12T16:46:53.000Z | 2022-02-01T08:41:40.000Z | #
# @license BSD-3-Clause
#
# Copyright (c) 2019 Project Jupyter Contributors.
# Distributed under the terms of the 3-Clause BSD License.
import IPython.display
import pandas
def output_url(url):
IPython.display.publish_display_data(
{"application/x.jupyter.relative-dataset-urls+json": [url]}
)
| 21 | 67 | 0.730159 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 183 | 0.580952 |
d29f3df5f35ab4781444eaf48243bf8b792bb433 | 1,154 | py | Python | django_india/conf.py | k-mullapudi/django-india | 662a5fb363ac4360b573f5864df65619f2794dc8 | [
"MIT"
]
| null | null | null | django_india/conf.py | k-mullapudi/django-india | 662a5fb363ac4360b573f5864df65619f2794dc8 | [
"MIT"
]
| null | null | null | django_india/conf.py | k-mullapudi/django-india | 662a5fb363ac4360b573f5864df65619f2794dc8 | [
"MIT"
]
| null | null | null | import django.conf
url_bases = {
'geonames': {
'dump': 'http://download.geonames.org/export/dump/',
'zip': 'http://download.geonames.org/export/zip/',
},
}
india_country_code = 'IN'
files = {
'state': {
'filename': '',
'urls': [
url_bases['geonames']['dump'] + '{filename}',
],
'fields': [
]
},
'district': {
'filename': '',
'urls': [
url_bases['geonames']['dump'] + '{filename}',
],
'fields': [
]
},
'city': {
'filename': '',
'urls': [
url_bases['geonames']['dump'] + '{filename}',
],
'fields': [
]
}
}
LANGUAGE_DATA = {
}
class AppSettings(object):
"""
A holder for app-specific default settings that allows overriding via
the project's settings.
"""
def __getattribute__(self, attr):
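        # uppercase attribute names are treated as settings: look them up on
        # django.conf.settings first, falling back to the value defined on this object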
if attr == attr.upper():
try:
return getattr(django.conf.settings, attr)
except AttributeError:
pass
return super(AppSettings, self).__getattribute__(attr)
| 19.233333 | 73 | 0.47747 | 411 | 0.356153 | 0 | 0 | 0 | 0 | 0 | 0 | 408 | 0.353553 |
d29f77fa5fac3eb65fe044b9f6c664cd6a9d69a3 | 1,588 | py | Python | src/dao/evaluation_dao.py | Asconius/trading-bot | df544f058d12c5378a0f8c110e28d49d983e0393 | [
"Apache-2.0"
]
| 2 | 2021-06-04T11:27:02.000Z | 2021-12-19T03:24:51.000Z | src/dao/evaluation_dao.py | Asconius/trading-bot | df544f058d12c5378a0f8c110e28d49d983e0393 | [
"Apache-2.0"
]
| 22 | 2020-08-24T05:16:11.000Z | 2021-12-13T20:51:25.000Z | src/dao/evaluation_dao.py | Asconius/trading-bot | df544f058d12c5378a0f8c110e28d49d983e0393 | [
"Apache-2.0"
]
| null | null | null | from decimal import Decimal
from typing import List
from src.dao.dao import DAO
from src.dto.attempt_dto import AttemptDTO
from src.entity.evaluation_entity import EvaluationEntity
from src.utils.utils import Utils
class EvaluationDAO:
@staticmethod
def create(summation: Decimal, funds: str, attempt: AttemptDTO) -> None:
evaluation: EvaluationEntity = EvaluationEntity()
evaluation.timestamp = Utils.now()
evaluation.sum = str(summation)
evaluation.funds = funds
Utils.set_attributes(evaluation, amount_buy=str(attempt.amount_buy), distance_buy=str(attempt.distance_buy),
delta_buy=str(attempt.delta_buy), amount_sell=str(attempt.amount_sell),
distance_sell=str(attempt.distance_sell), delta_sell=str(attempt.delta_sell))
DAO.persist(evaluation)
@staticmethod
def read_order_by_sum() -> EvaluationEntity:
return EvaluationEntity.query.order_by(EvaluationEntity.sum.desc()).first()
@staticmethod
def read_attempt(attempt: AttemptDTO) -> EvaluationEntity:
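        # return the first stored evaluation whose attempt parameters match exactly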
return EvaluationEntity.query.filter_by(
amount_buy=attempt.amount_buy).filter_by(
distance_buy=attempt.distance_buy).filter_by(
delta_buy=attempt.delta_buy).filter_by(
amount_sell=attempt.amount_sell).filter_by(
distance_sell=attempt.distance_sell).filter_by(
delta_sell=attempt.delta_sell).first()
@staticmethod
def read_all() -> List[EvaluationEntity]:
return EvaluationEntity.query.all()
| 40.717949 | 116 | 0.706549 | 1,369 | 0.862091 | 0 | 0 | 1,326 | 0.835013 | 0 | 0 | 0 | 0 |
d29fef12d764089bdcfe8679c802e9724d8f9325 | 1,031 | py | Python | src/lib/others/info_gathering/finder/finding_comment.py | nahuelhm17/vault_scanner | 574da226db5d274794d751d9d7959cd785bc9990 | [
"MIT"
]
| 230 | 2019-01-10T07:43:01.000Z | 2022-03-25T03:16:07.000Z | src/lib/others/info_gathering/finder/finding_comment.py | nahuelhm17/vault_scanner | 574da226db5d274794d751d9d7959cd785bc9990 | [
"MIT"
]
| 65 | 2018-11-18T12:48:27.000Z | 2019-01-05T22:40:07.000Z | src/lib/others/info_gathering/finder/finding_comment.py | nahuelhm17/vault_scanner | 574da226db5d274794d751d9d7959cd785bc9990 | [
"MIT"
]
| 64 | 2019-01-16T11:56:18.000Z | 2022-01-12T17:28:37.000Z | #! /usr/bin/python
import requests
import re
from bs4 import BeautifulSoup
import colors
class FindingComments(object):
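    # fetches a page and collects every HTML comment matching the patterns in comment_list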
def __init__(self, url):
self.url = url
self.comment_list = ['<!--(.*)-->']
self.found_comments = {}
def get_soure_code(self):
resp_text = requests.get(self.url).text
return resp_text
def find_comment(self):
source_code = self.get_soure_code()
for comment in self.comment_list:
comments = re.findall(comment, source_code)
self.found_comments[comment] = comments
def parse_comments(self):
self.find_comment()
comment_dict = {}
if len(self.found_comments) > 0:
for comment_code, comment in self.found_comments.items():
colors.success('Found for {} : {}'
.format(comment_code, comment))
comment_dict[comment_code] = comment
else:
colors.error('No comment found')
return comment_dict
| 27.131579 | 69 | 0.596508 | 938 | 0.909796 | 0 | 0 | 0 | 0 | 0 | 0 | 68 | 0.065955 |
d2a2c147c06d327188733c71e9a83b70f75131b1 | 27 | py | Python | micro-benchmark-key-errs/snippets/dicts/type_coercion/main.py | WenJinfeng/PyCG | b45e8e04fe697d8301cf27222a8f37646d69f168 | [
"Apache-2.0"
]
| 121 | 2020-12-16T20:31:37.000Z | 2022-03-21T20:32:43.000Z | micro-benchmark-key-errs/snippets/dicts/type_coercion/main.py | WenJinfeng/PyCG | b45e8e04fe697d8301cf27222a8f37646d69f168 | [
"Apache-2.0"
]
| 24 | 2021-03-13T00:04:00.000Z | 2022-03-21T17:28:11.000Z | micro-benchmark-key-errs/snippets/dicts/type_coercion/main.py | WenJinfeng/PyCG | b45e8e04fe697d8301cf27222a8f37646d69f168 | [
"Apache-2.0"
]
| 19 | 2021-03-23T10:58:47.000Z | 2022-03-24T19:46:50.000Z | d = {"1": "a"}
d[1]
d["1"]
| 6.75 | 14 | 0.259259 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.333333 |
d2a35e41a7de7ed1c211d10b17e2843c3afc87ce | 2,753 | py | Python | scripts/link_assignment.py | metagenomics/antibio | ac79c64417c749ed40263fc97d22498097f2e9b9 | [
"MIT"
]
| 4 | 2015-11-03T22:00:33.000Z | 2017-10-21T06:57:35.000Z | scripts/link_assignment.py | metagenomics/antibio | ac79c64417c749ed40263fc97d22498097f2e9b9 | [
"MIT"
]
| 49 | 2015-09-28T11:32:38.000Z | 2016-04-11T14:05:00.000Z | scripts/link_assignment.py | metagenomics/antibio | ac79c64417c749ed40263fc97d22498097f2e9b9 | [
"MIT"
]
| 2 | 2018-08-27T15:15:45.000Z | 2020-03-31T01:50:48.000Z | #!/usr/bin/python
# This program revises the existing overview file.
# If a keyword is found in an Abstract of an accession of a gene, the url of the abstract is added to the overview file
# The revised overview.txt is created in the same directory of the old one and named overview_new.txt
"""
Usage: link_assignment.py -o <overview> -pub <pubhits>
-h --help Please enter the files overview.txt and the pubhits.
"""
from docopt import docopt
from sys import argv
import csv
import os
import util
def load_pubhits_in_dict(pubhits_path):
with open(pubhits_path, 'r') as pubhits_file:
pubhits_reader = csv.reader(pubhits_file, delimiter='\t', )
return dict((row[util.PUBHITS_GENE_ID_INDEX].strip(), row) for row in pubhits_reader)
def build_overview_link(pubhits_dict, gene_id, links):
"""
builds the pubhits link out of the gene id and the pubhits dict
:param pubhits_dict: pubhits dictionary
:param gene_id: gene id
:param links: existsing links
:return: links
"""
pubhits_acc = pubhits_dict[gene_id][util.PUBHITS_ACC_INDEX]
pubhits_link = pubhits_dict[gene_id][util.PUBHITS_LINK_INDEX]
if links.strip() == util.NO_LINK:
new_links = [pubhits_acc + ":" + pubhits_link]
else:
new_links = [links, pubhits_acc + ":" + pubhits_link]
overview_link = ','.join(new_links)
if not overview_link or overview_link == util.TODO:
overview_link = util.NO_KEYWORDS
return overview_link
def set_link_in_row(old_row, pubhits_dict):
"""
set link in existing overview row (dictionary)
:param old_row: overview row
:param pubhits_dict: pubhits dictionary
:return: revised overview row
"""
gene_id = old_row[util.GENE_ID]
if (gene_id in pubhits_dict):
old_row[util.LINKS] = build_overview_link(pubhits_dict, gene_id, old_row[util.LINKS])
return old_row
def main():
args = docopt(__doc__, argv[1:])
overview_path = args['<overview>']
pubhits = args['<pubhits>']
new_overview_path = os.path.splitext(overview_path)[0] + "_new.txt"
pubhits_dict = load_pubhits_in_dict(pubhits)
with open(overview_path, 'r') as overview, open(new_overview_path, 'w') as new_overview:
overview_reader = csv.DictReader(overview, delimiter='\t')
overview_writer = csv.DictWriter(new_overview, delimiter='\t', extrasaction='ignore',
fieldnames=overview.readline().rstrip('\n').split("\t"))
overview.seek(0)
overview_writer.writeheader()
for overview_row in overview_reader:
overview_row = set_link_in_row(overview_row, pubhits_dict)
overview_writer.writerow(overview_row)
if __name__ == '__main__':
main()
| 35.753247 | 119 | 0.694878 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 884 | 0.321104 |
d2a6ca53031a949367ecbf3f9d3bfdb61563f697 | 5,421 | py | Python | app/views.py | LauretteMongina/Instagram-clone | 617135bcebcf6b73f2de7af73a66c177718d338c | [
"MIT"
]
| null | null | null | app/views.py | LauretteMongina/Instagram-clone | 617135bcebcf6b73f2de7af73a66c177718d338c | [
"MIT"
]
| null | null | null | app/views.py | LauretteMongina/Instagram-clone | 617135bcebcf6b73f2de7af73a66c177718d338c | [
"MIT"
]
| null | null | null | from django.shortcuts import render,redirect,get_object_or_404
from django.contrib.auth.decorators import login_required
from .models import *
import cloudinary
import cloudinary.uploader
import cloudinary.api
from django.http import HttpResponseRedirect, JsonResponse
from .forms import RegistrationForm, UpdateUserForm, UpdateUserProfileForm, ImageForm, CommentForm
from django.contrib.auth import login, authenticate
from .models import Image, Comment, Profile
from django.contrib.auth.models import User
from django.template.loader import render_to_string
from django.views.generic import RedirectView
from .email import send_welcome_email
# Create your views here.
def registration(request):
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
raw_password = form.cleaned_data.get('password1')
user = authenticate(username=username, password=raw_password)
email = form.cleaned_data.get('email')
login(request, user)
# recipient = Profile(user = user,email =email)
# recipient.save()
# send_welcome_email(user,email)
return redirect('index')
else:
form = RegistrationForm()
return render(request, 'registration/registration.html', {'form': form})
@login_required(login_url='login')
def index(request):
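    # home feed: list all images and handle new image uploads submitted from the same page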
images = Image.objects.all()
users = User.objects.exclude(id=request.user.id)
if request.method == 'POST':
form = ImageForm(request.POST, request.FILES)
if form.is_valid():
image = form.save(commit=False)
# image.user = request.user.profile
image.save()
return HttpResponseRedirect(request.path_info)
else:
form = ImageForm()
params = {
'images': images,
'form': form,
'users': users,
}
return render(request, 'insta/index.html', params)
@login_required(login_url='login')
def profile(request, username):
images = request.user.profile.images.all()
print(images)
if request.method == 'POST':
user_form = UpdateUserForm(request.POST, instance=request.user)
profile_form = UpdateUserProfileForm(request.POST, request.FILES)
if user_form.is_valid() and profile_form.is_valid():
user_form.save()
profile_form.save()
return HttpResponseRedirect(request.path_info)
else:
user_form = UpdateUserForm(instance=request.user)
profile_form = UpdateUserProfileForm(instance=request.user)
params = {
'profile_form': profile_form,
'user_form': user_form,
'images': images,
}
return render(request, 'insta/profile.html', params)
@login_required(login_url='/accounts/login/')
def user_profile(request, username):
user_prof = get_object_or_404(User, username=username)
if request.user == user_prof:
return redirect('profile', username=request.user.username)
user_posts = user_prof.profile.images.all()
params = {
'user_prof': user_prof,
'user_posts': user_posts,}
return render(request, 'insta/user.html', params)
@login_required(login_url='/accounts/login/')
def like_image(request, id):
    # look up the current user's like for this image so only their own like is removed
    likes = Likes.objects.filter(image_id=id, user_id=request.user.id).first()
    if likes is not None:
        likes.delete()
image = Image.objects.get(id=id)
if image.like_count == 0:
image.like_count = 0
image.save()
else:
image.like_count -= 1
image.save()
return redirect('/')
else:
likes = Likes(image_id=id, user_id=request.user.id)
likes.save()
# increase the number of likes by 1 for the image
image = Image.objects.get(id=id)
image.like_count = image.like_count + 1
image.save()
return redirect('/')
@login_required(login_url='login')
def search(request):
if 'search_user' in request.GET and request.GET['search_user']:
search_term = request.GET.get('search_user')
results = Profile.search(search_term)
print(results)
message = f'{search_term}'
title = message
        return render(request, 'insta/search.html', {'success': message})
    else:
        message = "You haven't searched for any term"
return render(request, 'insta/search.html', {'danger': message})
@login_required(login_url='login')
def comment(request, id):
image = get_object_or_404(Image, pk=id)
comments = image.comments.all()
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comments = form.cleaned_data['comment']
comment.image = image
comment.user = request.user.profile
image.save()
comment.save()
return HttpResponseRedirect(request.path_info)
else:
form = CommentForm()
params = {
'image': image,
'form': form,
'comments':comments,
}
# image = Image.objects.get(id=id)
# image.comments_count = image.comments_count + 1
image.save()
return render(request, 'insta/single.html', params) | 34.310127 | 98 | 0.642317 | 0 | 0 | 0 | 0 | 4,009 | 0.739531 | 0 | 0 | 786 | 0.144992 |
d2a7333fba6a0b271b7f3ddd6746591c934cb750 | 1,557 | py | Python | at_export_config.py | Fmstrat/FreeCAD-ArchTextures | e3af6198ea5e07848602a3b8ba01ebab2335d6b1 | [
"MIT"
]
| 21 | 2018-11-16T05:56:31.000Z | 2021-11-09T13:21:53.000Z | at_export_config.py | Fmstrat/FreeCAD-ArchTextures | e3af6198ea5e07848602a3b8ba01ebab2335d6b1 | [
"MIT"
]
| 39 | 2018-10-02T18:16:18.000Z | 2022-02-11T13:45:50.000Z | at_export_config.py | Fmstrat/FreeCAD-ArchTextures | e3af6198ea5e07848602a3b8ba01ebab2335d6b1 | [
"MIT"
]
| 10 | 2019-07-15T16:34:51.000Z | 2022-01-25T23:57:03.000Z | import FreeCAD, FreeCADGui
from arch_texture_utils.resource_utils import iconPath
import arch_texture_utils.qtutils as qtutils
from arch_texture_utils.selection_utils import findSelectedTextureConfig
class ExportTextureConfigCommand:
toolbarName = 'ArchTexture_Tools'
commandName = 'Export_Config'
def GetResources(self):
return {'MenuText': "Export Texture Config",
'ToolTip' : "Exports the configuration stored inside a TextureConfig object to a file",
'Pixmap': iconPath('ExportConfig.svg')
}
def Activated(self):
textureConfig = findSelectedTextureConfig()
if textureConfig is None:
qtutils.showInfo("No TextureConfig selected", "Select exactly one TextureConfig object to export its content")
return
selectedFile = qtutils.userSelectedFile('Export Location', qtutils.JSON_FILES, False)
if selectedFile is None:
return
fileObject = open(selectedFile, 'w')
textureConfig.export(fileObject)
def IsActive(self):
"""If there is no active document we can't do anything."""
return not FreeCAD.ActiveDocument is None
if __name__ == "__main__":
command = ExportTextureConfigCommand();
if command.IsActive():
command.Activated()
else:
qtutils.showInfo("No open Document", "There is no open document")
else:
import archtexture_toolbars
archtexture_toolbars.toolbarManager.registerCommand(ExportTextureConfigCommand()) | 33.12766 | 122 | 0.689788 | 1,014 | 0.651252 | 0 | 0 | 0 | 0 | 0 | 0 | 399 | 0.256262 |
d2a75f44feb7064f817bce0160b3db28ad77852c | 597 | py | Python | barcode/charsets/ean.py | Azd325/python-barcode | b41b1d5d479fb0ad3290a0a6235a8d3203d34ee9 | [
"MIT"
]
| null | null | null | barcode/charsets/ean.py | Azd325/python-barcode | b41b1d5d479fb0ad3290a0a6235a8d3203d34ee9 | [
"MIT"
]
| null | null | null | barcode/charsets/ean.py | Azd325/python-barcode | b41b1d5d479fb0ad3290a0a6235a8d3203d34ee9 | [
"MIT"
]
| null | null | null | EDGE = '101'
MIDDLE = '01010'
CODES = {
'A': (
'0001101', '0011001', '0010011', '0111101', '0100011', '0110001',
'0101111', '0111011', '0110111', '0001011'
),
'B': (
'0100111', '0110011', '0011011', '0100001', '0011101', '0111001',
'0000101', '0010001', '0001001', '0010111'
),
'C': (
'1110010', '1100110', '1101100', '1000010', '1011100', '1001110',
'1010000', '1000100', '1001000', '1110100'
),
}
LEFT_PATTERN = (
'AAAAAA', 'AABABB', 'AABBAB', 'AABBBA', 'ABAABB', 'ABBAAB', 'ABBBAA',
'ABABAB', 'ABABBA', 'ABBABA'
)
| 28.428571 | 73 | 0.515913 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 371 | 0.621441 |
d2a835bc55a30790d6234339c5e466df15a50aed | 2,787 | py | Python | Sushant_Boosting/code.py | sushant-bahekar/ga-learner-dsmp-repo | 1087bec60382c2b3156f26cb87629a3b931fc41f | [
"MIT"
]
| null | null | null | Sushant_Boosting/code.py | sushant-bahekar/ga-learner-dsmp-repo | 1087bec60382c2b3156f26cb87629a3b931fc41f | [
"MIT"
]
| null | null | null | Sushant_Boosting/code.py | sushant-bahekar/ga-learner-dsmp-repo | 1087bec60382c2b3156f26cb87629a3b931fc41f | [
"MIT"
]
| null | null | null | # --------------
import pandas as pd
from sklearn.model_selection import train_test_split
#path - Path of file
# Code starts here
df = pd.read_csv(path)
df.head(5)
X = df.drop(['customerID','Churn'],1)
y = df['Churn']
X_train,X_test,y_train,y_test = train_test_split(X, y, test_size = 0.3, random_state = 0)
# --------------
import numpy as np
from sklearn.preprocessing import LabelEncoder
# Code starts here
#Replacing spaces with 'NaN' in train dataset
X_train['TotalCharges'].replace(' ',np.NaN, inplace=True)
#Replacing spaces with 'NaN' in test dataset
X_test['TotalCharges'].replace(' ',np.NaN, inplace=True)
#Converting the type of column from X_train to float
X_train['TotalCharges'] = X_train['TotalCharges'].astype(float)
#Converting the type of column from X_test to float
X_test['TotalCharges'] = X_test['TotalCharges'].astype(float)
#Filling missing values
X_train['TotalCharges'].fillna(X_train['TotalCharges'].mean(),inplace=True)
X_test['TotalCharges'].fillna(X_train['TotalCharges'].mean(), inplace=True)
#Check value counts
print(X_train.isnull().sum())
cat_cols = X_train.select_dtypes(include='O').columns.tolist()
#Label encoding train data
for x in cat_cols:
le = LabelEncoder()
X_train[x] = le.fit_transform(X_train[x])
#Label encoding test data
for x in cat_cols:
le = LabelEncoder()
X_test[x] = le.fit_transform(X_test[x])
#Encoding train data target
y_train = y_train.replace({'No':0, 'Yes':1})
#Encoding test data target
y_test = y_test.replace({'No':0, 'Yes':1})
# --------------
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score,classification_report,confusion_matrix
# Code starts here
print(X_train, X_test, y_train, y_test)
ada_model = AdaBoostClassifier(random_state = 0)
ada_model.fit(X_train, y_train)
y_pred = ada_model.predict(X_test)
ada_score = accuracy_score(y_test, y_pred)
ada_score
ada_cm = confusion_matrix(y_test, y_pred)
ada_cm
# --------------
from xgboost import XGBClassifier
from sklearn.model_selection import GridSearchCV
#Parameter list
parameters={'learning_rate':[0.1,0.15,0.2,0.25,0.3],
'max_depth':range(1,3)}
# Code starts here
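# fit a baseline XGBoost classifier, then tune learning_rate and max_depth with GridSearchCV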
xgb_model = XGBClassifier(random_state=0)
xgb_model.fit(X_train, y_train)
y_pred = xgb_model.predict(X_test)
xgb_score = accuracy_score(y_test, y_pred)
xgb_cm = confusion_matrix(y_test, y_pred)
xgb_cr = classification_report(y_test, y_pred)
clf_model = GridSearchCV(estimator=xgb_model,param_grid=parameters)
clf_model.fit(X_train, y_train)
y_pred = clf_model.predict(X_test)
clf_score = accuracy_score(y_test, y_pred)
clf_cm = confusion_matrix(y_test, y_pred)
clf_cr = classification_report(y_test, y_pred)
print(xgb_score, clf_score)
print(xgb_cm, clf_cm)
print(xgb_cr, xgb_cr)
| 24.663717 | 89 | 0.742375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 737 | 0.264442 |
d2a9213337ceeb22964f6608d3d20eb1d939ae74 | 16,566 | py | Python | slsgd.py | xcgoner/ecml2019-slsgd | e4856b2015d4c7c39e28743dab2222ef8e0131fa | [
"MIT"
]
| 3 | 2019-09-10T15:46:04.000Z | 2020-09-21T17:53:10.000Z | slsgd.py | xcgoner/ecml2019-slsgd | e4856b2015d4c7c39e28743dab2222ef8e0131fa | [
"MIT"
]
| null | null | null | slsgd.py | xcgoner/ecml2019-slsgd | e4856b2015d4c7c39e28743dab2222ef8e0131fa | [
"MIT"
]
| null | null | null | import argparse, time, logging, os, math, random
os.environ["MXNET_USE_OPERATOR_TUNING"] = "0"
import numpy as np
from scipy import stats
import mxnet as mx
from mxnet import gluon, nd
from mxnet import autograd as ag
from mxnet.gluon import nn
from mxnet.gluon.data.vision import transforms
from gluoncv.model_zoo import get_model
from gluoncv.utils import makedirs, LRScheduler
from os import listdir
import os.path
import argparse
import pickle
from mpi4py import MPI
mpi_comm = MPI.COMM_WORLD
mpi_size = mpi_comm.Get_size()
mpi_rank = mpi_comm.Get_rank()
# print('rank: %d' % (mpi_rank), flush=True)
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dir", type=str, help="dir of the data", required=True)
parser.add_argument("--valdir", type=str, help="dir of the val data", required=True)
parser.add_argument("--batchsize", type=int, help="batchsize", default=8)
parser.add_argument("--epochs", type=int, help="epochs", default=100)
parser.add_argument("--interval", type=int, help="log interval", default=10)
parser.add_argument("--nsplit", type=int, help="number of split", default=40)
parser.add_argument("--lr", type=float, help="learning rate", default=0.001)
parser.add_argument("--alpha", type=float, help="moving average", default=1.0)
parser.add_argument("--alpha-decay", type=float, help="decay factor of alpha", default=0.5)
parser.add_argument("--alpha-decay-epoch", type=str, help="epoch of alpha decay", default='800')
parser.add_argument("--log", type=str, help="dir of the log file", default='train_cifar100.log')
parser.add_argument("--classes", type=int, help="number of classes", default=20)
parser.add_argument("--iterations", type=int, help="number of local epochs", default=50)
parser.add_argument("--aggregation", type=str, help="aggregation method", default='mean')
parser.add_argument("--nbyz", type=int, help="number of Byzantine workers", default=0)
parser.add_argument("--trim", type=int, help="number of trimmed workers on one side", default=0)
# parser.add_argument("--lr-decay", type=float, help="lr decay rate", default=0.1)
# parser.add_argument("--lr-decay-epoch", type=str, help="lr decay epoch", default='400')
parser.add_argument("--iid", type=int, help="IID setting", default=0)
parser.add_argument("--model", type=str, help="model", default='mobilenetv2_1.0')
parser.add_argument("--save", type=int, help="save", default=0)
parser.add_argument("--start-epoch", type=int, help="epoch start from", default=-1)
parser.add_argument("--seed", type=int, help="random seed", default=733)
args = parser.parse_args()
# print(args, flush=True)
filehandler = logging.FileHandler(args.log)
streamhandler = logging.StreamHandler()
if mpi_rank == 0:
logger = logging.getLogger('')
logger.setLevel(logging.INFO)
logger.addHandler(filehandler)
logger.addHandler(streamhandler)
mx.random.seed(args.seed + mpi_rank)
random.seed(args.seed + mpi_rank)
np.random.seed(args.seed + mpi_rank)
data_dir = os.path.join(args.dir, 'dataset_split_{}'.format(args.nsplit))
train_dir = os.path.join(data_dir, 'train')
# val_dir = os.path.join(data_dir, 'val')
val_train_dir = os.path.join(args.valdir, 'train')
val_val_dir = os.path.join(args.valdir, 'val')
training_files = []
for filename in sorted(listdir(train_dir)):
absolute_filename = os.path.join(train_dir, filename)
training_files.append(absolute_filename)
context = mx.cpu()
classes = args.classes
def get_train_batch(train_filename):
with open(train_filename, "rb") as f:
B, L = pickle.load(f)
# return nd.transpose(nd.array(B.astype('float32') / 255.0), (0, 3, 1, 2)), nd.array(L)
return nd.transpose(nd.array(B), (0, 3, 1, 2)), nd.array(L)
def get_train_batch_byz(train_filename):
with open(train_filename, "rb") as f:
B, L = pickle.load(f)
# return nd.transpose(nd.array(B.astype('float32') / 255.0), (0, 3, 1, 2)), nd.array(classes - 1 - L)
return nd.transpose(nd.array(B), (0, 3, 1, 2)), nd.array(classes - 1 - L)
def get_val_train_batch(data_dir):
test_filename = os.path.join(data_dir, 'train_data_%03d.pkl' % mpi_rank)
with open(test_filename, "rb") as f:
B, L = pickle.load(f)
# return nd.transpose(nd.array(B.astype('float32') / 255.0), (0, 3, 1, 2)), nd.array(L)
return nd.transpose(nd.array(B), (0, 3, 1, 2)), nd.array(L)
def get_val_val_batch(data_dir):
test_filename = os.path.join(data_dir, 'val_data_%03d.pkl' % mpi_rank)
with open(test_filename, "rb") as f:
B, L = pickle.load(f)
# return nd.transpose(nd.array(B.astype('float32') / 255.0), (0, 3, 1, 2)), nd.array(L)
return nd.transpose(nd.array(B), (0, 3, 1, 2)), nd.array(L)
train_data_list = []
for training_file in training_files:
[train_X, train_Y] = get_train_batch(training_file)
train_dataset = mx.gluon.data.dataset.ArrayDataset(train_X, train_Y)
train_data = gluon.data.DataLoader(train_dataset, batch_size=args.batchsize, shuffle=True, last_batch='rollover', num_workers=1)
train_data_list.append(train_data)
[val_train_X, val_train_Y] = get_val_train_batch(val_train_dir)
val_train_dataset = mx.gluon.data.dataset.ArrayDataset(val_train_X, val_train_Y)
val_train_data = gluon.data.DataLoader(val_train_dataset, batch_size=1000, shuffle=False, last_batch='keep', num_workers=1)
[val_val_X, val_val_Y] = get_val_val_batch(val_val_dir)
val_val_dataset = mx.gluon.data.dataset.ArrayDataset(val_val_X, val_val_Y)
val_val_data = gluon.data.DataLoader(val_val_dataset, batch_size=1000, shuffle=False, last_batch='keep', num_workers=1)
model_name = args.model
if model_name == 'default':
net = gluon.nn.Sequential()
with net.name_scope():
# First convolutional layer
net.add(gluon.nn.Conv2D(channels=64, kernel_size=3, padding=(1,1), activation='relu'))
net.add(gluon.nn.BatchNorm())
net.add(gluon.nn.Conv2D(channels=64, kernel_size=3, padding=(1,1), activation='relu'))
net.add(gluon.nn.BatchNorm())
net.add(gluon.nn.MaxPool2D(pool_size=2, strides=2))
net.add(gluon.nn.Dropout(rate=0.25))
# Second convolutional layer
# net.add(gluon.nn.MaxPool2D(pool_size=2, strides=2))
# Third convolutional layer
net.add(gluon.nn.Conv2D(channels=128, kernel_size=3, padding=(1,1), activation='relu'))
net.add(gluon.nn.BatchNorm())
net.add(gluon.nn.Conv2D(channels=128, kernel_size=3, padding=(1,1), activation='relu'))
net.add(gluon.nn.BatchNorm())
net.add(gluon.nn.MaxPool2D(pool_size=2, strides=2))
net.add(gluon.nn.Dropout(rate=0.25))
# net.add(gluon.nn.Conv2D(channels=64, kernel_size=3, padding=(1,1), activation='relu'))
# net.add(gluon.nn.Conv2D(channels=64, kernel_size=3, padding=(1,1), activation='relu'))
# net.add(gluon.nn.Conv2D(channels=64, kernel_size=3, padding=(1,1), activation='relu'))
# net.add(gluon.nn.MaxPool2D(pool_size=2, strides=2))
# Flatten and apply fullly connected layers
net.add(gluon.nn.Flatten())
# net.add(gluon.nn.Dense(512, activation="relu"))
# net.add(gluon.nn.Dense(512, activation="relu"))
net.add(gluon.nn.Dense(512, activation="relu"))
net.add(gluon.nn.Dropout(rate=0.25))
net.add(gluon.nn.Dense(classes))
else:
model_kwargs = {'ctx': context, 'pretrained': False, 'classes': classes}
net = get_model(model_name, **model_kwargs)
if model_name.startswith('cifar') or model_name == 'default':
net.initialize(mx.init.Xavier(), ctx=context)
else:
net.initialize(mx.init.MSRAPrelu(), ctx=context)
# # no weight decay
# for k, v in net.collect_params('.*beta|.*gamma|.*bias').items():
# v.wd_mult = 0.0
optimizer = 'sgd'
lr = args.lr
# optimizer_params = {'momentum': 0.9, 'learning_rate': lr, 'wd': 0.0001}
optimizer_params = {'momentum': 0.0, 'learning_rate': lr, 'wd': 0.0}
# lr_decay_epoch = [int(i) for i in args.lr_decay_epoch.split(',')]
alpha_decay_epoch = [int(i) for i in args.alpha_decay_epoch.split(',')]
trainer = gluon.Trainer(net.collect_params(), optimizer, optimizer_params)
loss_func = gluon.loss.SoftmaxCrossEntropyLoss()
train_metric = mx.metric.Accuracy()
acc_top1 = mx.metric.Accuracy()
acc_top5 = mx.metric.TopKAccuracy(5)
train_cross_entropy = mx.metric.CrossEntropy()
# warmup
# print('warm up', flush=True)
trainer.set_learning_rate(0.01)
# train_data = random.choice(train_data_list)
train_data = train_data_list[90]
for local_epoch in range(5):
for i, (data, label) in enumerate(train_data):
with ag.record():
outputs = net(data)
loss = loss_func(outputs, label)
loss.backward()
trainer.step(args.batchsize)
if args.start_epoch > 0:
break
if args.start_epoch > 0:
break
# # force initialization
# train_data = random.choice(train_data_list)
# for i, (data, label) in enumerate(train_data):
# outputs = net(data)
if mpi_rank == 0:
params_prev = [param.data().copy() for param in net.collect_params().values()]
else:
params_prev = None
nd.waitall()
# broadcast
params_prev = mpi_comm.bcast(params_prev, root=0)
for param, param_prev in zip(net.collect_params().values(), params_prev):
param.set_data(param_prev)
if mpi_rank == 0:
worker_list = list(range(mpi_size))
training_file_index_list = [i for i in range(len(training_files))]
alpha = args.alpha
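# pre-compute a shuffled schedule that assigns one of the args.nsplit data partitions
# to every worker in each epoch (used for the non-IID setting below)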
randperm_choice_list = []
randperm_list = [i for i in range(args.nsplit)]
for i in range(int(math.ceil(args.epochs * mpi_size / args.nsplit))):
random.shuffle(randperm_list)
randperm_choice_list = randperm_choice_list + randperm_list
if args.start_epoch > 0:
[dirname, postfix] = os.path.splitext(args.log)
filename = dirname + ("_%04d.params" % (args.start_epoch))
net.load_parameters(filename, ctx=context)
acc_top1.reset()
acc_top5.reset()
train_cross_entropy.reset()
for i, (data, label) in enumerate(val_val_data):
outputs = net(data)
acc_top1.update(label, outputs)
acc_top5.update(label, outputs)
for i, (data, label) in enumerate(val_train_data):
outputs = net(data)
train_cross_entropy.update(label, nd.softmax(outputs))
_, top1 = acc_top1.get()
_, top5 = acc_top5.get()
_, crossentropy = train_cross_entropy.get()
top1_list = mpi_comm.gather(top1, root=0)
top5_list = mpi_comm.gather(top5, root=0)
crossentropy_list = mpi_comm.gather(crossentropy, root=0)
if mpi_rank == 0:
top1_list = np.array(top1_list)
top5_list = np.array(top5_list)
crossentropy_list = np.array(crossentropy_list)
logger.info('[Epoch %d] validation: acc-top1=%f acc-top5=%f, loss=%f, lr=%f, alpha=%f'%(args.start_epoch, top1_list.mean(), top5_list.mean(), crossentropy_list.mean(), trainer.learning_rate, alpha))
nd.waitall()
time_0 = time.time()
for epoch in range(args.start_epoch+1, args.epochs):
# train_metric.reset()
# if epoch in lr_decay_epoch:
# lr = lr * args.lr_decay
if epoch in alpha_decay_epoch:
alpha = alpha * args.alpha_decay
tic = time.time()
if args.iid == 0:
if mpi_rank == 0:
training_file_index_sublist = randperm_choice_list[(mpi_size * epoch):(mpi_size * epoch + mpi_size)]
# logger.info(training_file_index_sublist)
else:
training_file_index_sublist = None
training_file_index = mpi_comm.scatter(training_file_index_sublist, root=0)
train_data = train_data_list[training_file_index]
trainer = gluon.Trainer(net.collect_params(), optimizer, optimizer_params)
trainer.set_learning_rate(lr)
if alpha < 1:
for param, param_prev in zip(net.collect_params().values(), params_prev):
if param.grad_req != 'null':
param_prev[:] = param.data() * (1-alpha)
# select byz workers
if args.nbyz > 0:
if mpi_rank == 0:
random.shuffle(worker_list)
byz_worker_list = worker_list[0:args.nbyz]
else:
byz_worker_list = None
byz_worker_list = mpi_comm.bcast(byz_worker_list, root=0)
else:
byz_worker_list = []
if mpi_rank in byz_worker_list:
# byz worker
[byz_train_X, byz_train_Y] = get_train_batch_byz(random.choice(training_files))
byz_train_dataset = mx.gluon.data.dataset.ArrayDataset(byz_train_X, byz_train_Y)
byz_train_data = gluon.data.DataLoader(byz_train_dataset, batch_size=args.batchsize, shuffle=True, last_batch='rollover', num_workers=1)
net.initialize(mx.init.MSRAPrelu(), ctx=context, force_reinit=True)
for local_epoch in range(args.iterations):
for i, (data, label) in enumerate(byz_train_data):
with ag.record():
outputs = net(data)
loss = loss_func(outputs, label)
loss.backward()
trainer.step(args.batchsize)
else:
# train
# local epoch
for local_epoch in range(args.iterations):
if args.iid == 1:
train_data = random.choice(train_data_list)
for i, (data, label) in enumerate(train_data):
with ag.record():
outputs = net(data)
loss = loss_func(outputs, label)
loss.backward()
trainer.step(args.batchsize)
# aggregation
nd.waitall()
params_np = [param.data().copy().asnumpy() for param in net.collect_params().values()]
params_np_list = mpi_comm.gather(params_np, root=0)
if mpi_rank == 0:
n_params = len(params_np)
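        # aggregate the workers' parameters coordinate-wise: trimmed mean (robust to
        # Byzantine workers) if trimming is requested, plain mean otherwise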
if args.aggregation == "trim" or args.trim > 0:
params_np = [ ( stats.trim_mean( np.stack( [params[j] for params in params_np_list], axis=0), args.trim/mpi_size, axis=0 ) ) for j in range(n_params) ]
else:
params_np = [ ( np.mean( np.stack( [params[j] for params in params_np_list], axis=0), axis=0 ) ) for j in range(n_params) ]
else:
params_np = None
params_np = mpi_comm.bcast(params_np, root=0)
params_nd = [ nd.array(param_np) for param_np in params_np ]
for param, param_nd in zip(net.collect_params().values(), params_nd):
param.set_data(param_nd)
if alpha < 1:
# moving average
for param, param_prev in zip(net.collect_params().values(), params_prev):
if param.grad_req != 'null':
weight = param.data()
weight[:] = weight * alpha + param_prev
# test
nd.waitall()
toc = time.time()
if ( epoch % args.interval == 0 or epoch == args.epochs-1 ) :
acc_top1.reset()
acc_top5.reset()
train_cross_entropy.reset()
for i, (data, label) in enumerate(val_val_data):
outputs = net(data)
acc_top1.update(label, outputs)
acc_top5.update(label, outputs)
for i, (data, label) in enumerate(val_train_data):
outputs = net(data)
train_cross_entropy.update(label, nd.softmax(outputs))
_, top1 = acc_top1.get()
_, top5 = acc_top5.get()
_, crossentropy = train_cross_entropy.get()
top1_list = mpi_comm.gather(top1, root=0)
top5_list = mpi_comm.gather(top5, root=0)
crossentropy_list = mpi_comm.gather(crossentropy, root=0)
if mpi_rank == 0:
top1_list = np.array(top1_list)
top5_list = np.array(top5_list)
crossentropy_list = np.array(crossentropy_list)
logger.info('[Epoch %d] validation: acc-top1=%f acc-top5=%f, loss=%f, lr=%f, alpha=%f, time=%f, elapsed=%f'%(epoch, top1_list.mean(), top5_list.mean(), crossentropy_list.mean(), trainer.learning_rate, alpha, toc-tic, time.time()-time_0))
# logger.info('[Epoch %d] validation: acc-top1=%f acc-top5=%f'%(epoch, top1, top5))
if args.save == 1:
[dirname, postfix] = os.path.splitext(args.log)
filename = dirname + ("_%04d.params" % (epoch))
net.save_parameters(filename)
nd.waitall()
| 40.306569 | 253 | 0.650247 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,150 | 0.190148 |
d2a9e60639815c6fa23b7d5054d4eac994971146 | 59,644 | py | Python | predictor.py | MIC-DKFZ/DetectionAndRegression | 40f3cb92ec6447767bd85b62a015b0d50e32ad26 | [
"Apache-2.0"
]
| 40 | 2019-09-24T08:11:35.000Z | 2022-02-23T13:49:01.000Z | predictor.py | MIC-DKFZ/MedicalDetectionRegression | 40f3cb92ec6447767bd85b62a015b0d50e32ad26 | [
"Apache-2.0"
]
| 13 | 2019-11-04T10:52:40.000Z | 2022-03-11T23:57:14.000Z | predictor.py | MIC-DKFZ/MedicalDetectionRegression | 40f3cb92ec6447767bd85b62a015b0d50e32ad26 | [
"Apache-2.0"
]
| 22 | 2019-08-28T15:32:25.000Z | 2022-02-18T11:27:30.000Z | #!/usr/bin/env python
# Copyright 2019 Division of Medical Image Computing, German Cancer Research Center (DKFZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from multiprocessing import Pool
import pickle
import time
import numpy as np
import torch
from scipy.stats import norm
from collections import OrderedDict
import plotting as plg
import utils.model_utils as mutils
import utils.exp_utils as utils
def get_mirrored_patch_crops(patch_crops, org_img_shape):
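    # map the patch-crop coordinates into the frame of the mirrored image for the three
    # test-time mirrorings (flip along the first spatial axis, the second, and both)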
mirrored_patch_crops = []
mirrored_patch_crops.append([[org_img_shape[2] - ii[1], org_img_shape[2] - ii[0], ii[2], ii[3]]
if len(ii) == 4 else [org_img_shape[2] - ii[1], org_img_shape[2] - ii[0], ii[2],
ii[3], ii[4], ii[5]]
for ii in patch_crops])
mirrored_patch_crops.append([[ii[0], ii[1], org_img_shape[3] - ii[3], org_img_shape[3] - ii[2]]
if len(ii) == 4 else [ii[0], ii[1], org_img_shape[3] - ii[3],
org_img_shape[3] - ii[2], ii[4], ii[5]]
for ii in patch_crops])
mirrored_patch_crops.append([[org_img_shape[2] - ii[1],
org_img_shape[2] - ii[0],
org_img_shape[3] - ii[3],
org_img_shape[3] - ii[2]]
if len(ii) == 4 else
[org_img_shape[2] - ii[1],
org_img_shape[2] - ii[0],
org_img_shape[3] - ii[3],
org_img_shape[3] - ii[2], ii[4], ii[5]]
for ii in patch_crops])
return mirrored_patch_crops
def get_mirrored_patch_crops_ax_dep(patch_crops, org_img_shape, mirror_axes):
mirrored_patch_crops = []
for ax_ix, axes in enumerate(mirror_axes):
if isinstance(axes, (int, float)) and int(axes) == 0:
mirrored_patch_crops.append([[org_img_shape[2] - ii[1], org_img_shape[2] - ii[0], ii[2], ii[3]]
if len(ii) == 4 else [org_img_shape[2] - ii[1], org_img_shape[2] - ii[0],
ii[2], ii[3], ii[4], ii[5]]
for ii in patch_crops])
elif isinstance(axes, (int, float)) and int(axes) == 1:
mirrored_patch_crops.append([[ii[0], ii[1], org_img_shape[3] - ii[3], org_img_shape[3] - ii[2]]
if len(ii) == 4 else [ii[0], ii[1], org_img_shape[3] - ii[3],
org_img_shape[3] - ii[2], ii[4], ii[5]]
for ii in patch_crops])
elif hasattr(axes, "__iter__") and (tuple(axes) == (0, 1) or tuple(axes) == (1, 0)):
mirrored_patch_crops.append([[org_img_shape[2] - ii[1],
org_img_shape[2] - ii[0],
org_img_shape[3] - ii[3],
org_img_shape[3] - ii[2]]
if len(ii) == 4 else
[org_img_shape[2] - ii[1],
org_img_shape[2] - ii[0],
org_img_shape[3] - ii[3],
org_img_shape[3] - ii[2], ii[4], ii[5]]
for ii in patch_crops])
else:
raise Exception("invalid mirror axes {} in get mirrored patch crops".format(axes))
return mirrored_patch_crops
def apply_wbc_to_patient(inputs):
"""
wrapper around prediction box consolidation: weighted box clustering (wbc). processes a single patient.
loops over batch elements in patient results (1 in 3D, slices in 2D) and foreground classes,
aggregates and stores results in new list.
:return. patient_results_list: list over batch elements. each element is a list over boxes, where each box is
one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D
predictions, and a dummy batch dimension of 1 for 3D predictions.
:return. pid: string. patient id.
"""
regress_flag, in_patient_results_list, pid, class_dict, clustering_iou, n_ens = inputs
out_patient_results_list = [[] for _ in range(len(in_patient_results_list))]
for bix, b in enumerate(in_patient_results_list):
for cl in list(class_dict.keys()):
boxes = [(ix, box) for ix, box in enumerate(b) if
(box['box_type'] == 'det' and box['box_pred_class_id'] == cl)]
box_coords = np.array([b[1]['box_coords'] for b in boxes])
box_scores = np.array([b[1]['box_score'] for b in boxes])
box_center_factor = np.array([b[1]['box_patch_center_factor'] for b in boxes])
box_n_overlaps = np.array([b[1]['box_n_overlaps'] for b in boxes])
try:
box_patch_id = np.array([b[1]['patch_id'] for b in boxes])
except KeyError: #backward compatibility for already saved pred results ... omg
box_patch_id = np.array([b[1]['ens_ix'] for b in boxes])
box_regressions = np.array([b[1]['regression'] for b in boxes]) if regress_flag else None
box_rg_bins = np.array([b[1]['rg_bin'] if 'rg_bin' in b[1].keys() else float('NaN') for b in boxes])
box_rg_uncs = np.array([b[1]['rg_uncertainty'] if 'rg_uncertainty' in b[1].keys() else float('NaN') for b in boxes])
if 0 not in box_scores.shape:
keep_scores, keep_coords, keep_n_missing, keep_regressions, keep_rg_bins, keep_rg_uncs = \
weighted_box_clustering(box_coords, box_scores, box_center_factor, box_n_overlaps, box_rg_bins, box_rg_uncs,
box_regressions, box_patch_id, clustering_iou, n_ens)
for boxix in range(len(keep_scores)):
clustered_box = {'box_type': 'det', 'box_coords': keep_coords[boxix],
'box_score': keep_scores[boxix], 'cluster_n_missing': keep_n_missing[boxix],
'box_pred_class_id': cl}
if regress_flag:
clustered_box.update({'regression': keep_regressions[boxix],
'rg_uncertainty': keep_rg_uncs[boxix],
'rg_bin': keep_rg_bins[boxix]})
out_patient_results_list[bix].append(clustered_box)
# add gt boxes back to new output list.
out_patient_results_list[bix].extend([box for box in b if box['box_type'] == 'gt'])
return [out_patient_results_list, pid]
def weighted_box_clustering(box_coords, scores, box_pc_facts, box_n_ovs, box_rg_bins, box_rg_uncs,
box_regress, box_patch_id, thresh, n_ens):
"""Consolidates overlapping predictions resulting from patch overlaps, test data augmentations and temporal ensembling.
clusters predictions together with iou > thresh (like in NMS). Output score and coordinate for one cluster are the
average weighted by individual patch center factors (how trustworthy is this candidate measured by how centered
its position within the patch is) and the size of the corresponding box.
The number of expected predictions at a position is n_data_aug * n_temp_ens * n_overlaps_at_position
(1 prediction per unique patch). Missing predictions at a cluster position are defined as the number of unique
patches in the cluster, which did not contribute any predict any boxes.
:param dets: (n_dets, (y1, x1, y2, x2, (z1), (z2), scores, box_pc_facts, box_n_ovs).
:param box_coords: y1, x1, y2, x2, (z1), (z2).
:param scores: confidence scores.
:param box_pc_facts: patch-center factors from position on patch tiles.
:param box_n_ovs: number of patch overlaps at box position.
:param box_rg_bins: regression bin predictions.
:param box_rg_uncs: (n_dets,) regression uncertainties (from model mrcnn_aleatoric).
:param box_regress: (n_dets, n_regression_features).
:param box_patch_id: ensemble index.
:param thresh: threshold for iou_matching.
:param n_ens: number of models, that are ensembled. (-> number of expected predictions per position).
:return: keep_scores: (n_keep) new scores of boxes to be kept.
:return: keep_coords: (n_keep, (y1, x1, y2, x2, (z1), (z2)) new coordinates of boxes to be kept.
"""
dim = 2 if box_coords.shape[1] == 4 else 3
y1 = box_coords[:,0]
x1 = box_coords[:,1]
y2 = box_coords[:,2]
x2 = box_coords[:,3]
areas = (y2 - y1 + 1) * (x2 - x1 + 1)
if dim == 3:
z1 = box_coords[:, 4]
z2 = box_coords[:, 5]
areas *= (z2 - z1 + 1)
# order is the sorted index. maps order to index o[1] = 24 (rank1, ix 24)
order = scores.argsort()[::-1]
keep_scores = []
keep_coords = []
keep_n_missing = []
keep_regress = []
keep_rg_bins = []
keep_rg_uncs = []
while order.size > 0:
i = order[0] # highest scoring element
yy1 = np.maximum(y1[i], y1[order])
xx1 = np.maximum(x1[i], x1[order])
yy2 = np.minimum(y2[i], y2[order])
xx2 = np.minimum(x2[i], x2[order])
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
inter = w * h
if dim == 3:
zz1 = np.maximum(z1[i], z1[order])
zz2 = np.minimum(z2[i], z2[order])
d = np.maximum(0, zz2 - zz1 + 1)
inter *= d
# overlap between currently highest scoring box and all boxes.
ovr = inter / (areas[i] + areas[order] - inter)
ovr_fl = inter.astype('float64') / (areas[i] + areas[order] - inter.astype('float64'))
assert np.all(ovr==ovr_fl), "ovr {}\n ovr_float {}".format(ovr, ovr_fl)
# get all the predictions that match the current box to build one cluster.
matches = np.nonzero(ovr > thresh)[0]
match_n_ovs = box_n_ovs[order[matches]]
match_pc_facts = box_pc_facts[order[matches]]
match_patch_id = box_patch_id[order[matches]]
match_ov_facts = ovr[matches]
match_areas = areas[order[matches]]
match_scores = scores[order[matches]]
# weight all scores in cluster by patch factors, and size.
match_score_weights = match_ov_facts * match_areas * match_pc_facts
match_scores *= match_score_weights
# for the weighted average, scores have to be divided by the number of total expected preds at the position
# of the current cluster. 1 Prediction per patch is expected. therefore, the number of ensembled models is
# multiplied by the mean overlaps of patches at this position (boxes of the cluster might partly be
# in areas of different overlaps).
n_expected_preds = n_ens * np.mean(match_n_ovs)
# the number of missing predictions is obtained as the number of patches,
# which did not contribute any prediction to the current cluster.
n_missing_preds = np.max((0, n_expected_preds - np.unique(match_patch_id).shape[0]))
# missing preds are given the mean weighting
# (expected prediction is the mean over all predictions in cluster).
denom = np.sum(match_score_weights) + n_missing_preds * np.mean(match_score_weights)
# compute weighted average score for the cluster
avg_score = np.sum(match_scores) / denom
# compute weighted average of coordinates for the cluster. now only take existing
# predictions into account.
avg_coords = [np.sum(y1[order[matches]] * match_scores) / np.sum(match_scores),
np.sum(x1[order[matches]] * match_scores) / np.sum(match_scores),
np.sum(y2[order[matches]] * match_scores) / np.sum(match_scores),
np.sum(x2[order[matches]] * match_scores) / np.sum(match_scores)]
if dim == 3:
avg_coords.append(np.sum(z1[order[matches]] * match_scores) / np.sum(match_scores))
avg_coords.append(np.sum(z2[order[matches]] * match_scores) / np.sum(match_scores))
if box_regress is not None:
# compute wt. avg. of regression vectors (component-wise average)
avg_regress = np.sum(box_regress[order[matches]] * match_scores[:, np.newaxis], axis=0) / np.sum(
match_scores)
avg_rg_bins = np.round(np.sum(box_rg_bins[order[matches]] * match_scores) / np.sum(match_scores))
avg_rg_uncs = np.sum(box_rg_uncs[order[matches]] * match_scores) / np.sum(match_scores)
else:
avg_regress = np.array(float('NaN'))
avg_rg_bins = np.array(float('NaN'))
avg_rg_uncs = np.array(float('NaN'))
# some clusters might have very low scores due to high amounts of missing predictions.
            # filter them out with a conservative threshold to speed up evaluation.
if avg_score > 0.01:
keep_scores.append(avg_score)
keep_coords.append(avg_coords)
keep_n_missing.append((n_missing_preds / n_expected_preds * 100)) # relative
keep_regress.append(avg_regress)
keep_rg_uncs.append(avg_rg_uncs)
keep_rg_bins.append(avg_rg_bins)
# get index of all elements that were not matched and discard all others.
inds = np.nonzero(ovr <= thresh)[0]
inds_where = np.where(ovr<=thresh)[0]
assert np.all(inds == inds_where), "inds_nonzero {} \ninds_where {}".format(inds, inds_where)
order = order[inds]
return keep_scores, keep_coords, keep_n_missing, keep_regress, keep_rg_bins, keep_rg_uncs
def apply_nms_to_patient(inputs):
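    """
    wrapper around standard non-maximum suppression. processes a single patient: per batch element (slices in
    2D, 1 in 3D) and foreground class, applies NMS to the detection boxes and keeps ground-truth boxes as they are.
    :return. out_patient_results_list and pid, analogous to apply_wbc_to_patient.
    """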
in_patient_results_list, pid, class_dict, iou_thresh = inputs
out_patient_results_list = []
# collect box predictions over batch dimension (slices) and store slice info as slice_ids.
for batch in in_patient_results_list:
batch_el_boxes = []
for cl in list(class_dict.keys()):
det_boxes = [box for box in batch if (box['box_type'] == 'det' and box['box_pred_class_id'] == cl)]
box_coords = np.array([box['box_coords'] for box in det_boxes])
box_scores = np.array([box['box_score'] for box in det_boxes])
if 0 not in box_scores.shape:
keep_ix = mutils.nms_numpy(box_coords, box_scores, iou_thresh)
else:
keep_ix = []
batch_el_boxes += [det_boxes[ix] for ix in keep_ix]
batch_el_boxes += [box for box in batch if box['box_type'] == 'gt']
out_patient_results_list.append(batch_el_boxes)
assert len(in_patient_results_list) == len(out_patient_results_list), "batch dim needs to be maintained, in: {}, out {}".format(len(in_patient_results_list), len(out_patient_results_list))
return [out_patient_results_list, pid]
def nms_2to3D(dets, thresh):
"""
Merges 2D boxes to 3D cubes. For this purpose, boxes of all slices are regarded as lying in one slice.
An adaptation of Non-maximum suppression is applied where clusters are found (like in NMS) with the extra constraint
that suppressed boxes have to have 'connected' z coordinates w.r.t the core slice (cluster center, highest
scoring box, the prevailing box). 'connected' z-coordinates are determined
as the z-coordinates with predictions until the first coordinate for which no prediction is found.
example: a cluster of predictions was found overlap > iou thresh in xy (like NMS). The z-coordinate of the highest
scoring box is 50. Other predictions have 23, 46, 48, 49, 51, 52, 53, 56, 57.
Only the coordinates connected with 50 are clustered to one cube: 48, 49, 51, 52, 53. (46 not because nothing was
found in 47, so 47 is a 'hole', which interrupts the connection). Only the boxes corresponding to these coordinates
are suppressed. All others are kept for building of further clusters.
This algorithm works better with a certain min_confidence of predictions, because low confidence (e.g. noisy/cluttery)
predictions can break the relatively strong assumption of defining cubes' z-boundaries at the first 'hole' in the cluster.
    :param dets: (n_detections, (y1, x1, y2, x2, scores, slice_id)).
    :param thresh: iou matching threshold (like in NMS).
:return: keep: (n_keep,) 1D tensor of indices to be kept.
:return: keep_z: (n_keep, [z1, z2]) z-coordinates to be added to boxes, which are kept in order to form cubes.
"""
y1 = dets[:, 0]
x1 = dets[:, 1]
y2 = dets[:, 2]
x2 = dets[:, 3]
    assert np.all(y1 <= y2) and np.all(x1 <= x2), """the definition of the coordinates is crucially important here:
        where maximum is taken needs to be the lower coordinate"""
scores = dets[:, -2]
slice_id = dets[:, -1]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
keep_z = []
while order.size > 0: # order is the sorted index. maps order to index: order[1] = 24 means (rank1, ix 24)
i = order[0] # highest scoring element
        yy1 = np.maximum(y1[i], y1[order]) # the highest scoring element is still in >order< and is compared to itself (iou 1), which is intended
xx1 = np.maximum(x1[i], x1[order])
yy2 = np.minimum(y2[i], y2[order])
xx2 = np.minimum(x2[i], x2[order])
h = np.maximum(0.0, yy2 - yy1 + 1)
w = np.maximum(0.0, xx2 - xx1 + 1)
inter = h * w
iou = inter / (areas[i] + areas[order] - inter)
matches = np.argwhere(
iou > thresh) # get all the elements that match the current box and have a lower score
slice_ids = slice_id[order[matches]]
core_slice = slice_id[int(i)]
upper_holes = [ii for ii in np.arange(core_slice, np.max(slice_ids)) if ii not in slice_ids]
lower_holes = [ii for ii in np.arange(np.min(slice_ids), core_slice) if ii not in slice_ids]
max_valid_slice_id = np.min(upper_holes) if len(upper_holes) > 0 else np.max(slice_ids)
min_valid_slice_id = np.max(lower_holes) if len(lower_holes) > 0 else np.min(slice_ids)
z_matches = matches[(slice_ids <= max_valid_slice_id) & (slice_ids >= min_valid_slice_id)]
# expand by one z voxel since box content is surrounded w/o overlap, i.e., z-content computed as z2-z1
z1 = np.min(slice_id[order[z_matches]]) - 1
z2 = np.max(slice_id[order[z_matches]]) + 1
keep.append(i)
keep_z.append([z1, z2])
order = np.delete(order, z_matches, axis=0)
return keep, keep_z
def apply_2d_3d_merging_to_patient(inputs):
"""
wrapper around 2Dto3D merging operation. Processes a single patient. Takes 2D patient results (slices in batch dimension)
    and returns 3D patient results (dummy batch dimension of 1). Applies an adaptation of Non-Maximum Suppression
    (detailed methodology is described in nms_2to3D).
:return. results_dict_boxes: list over batch elements (1 in 3D). each element is a list over boxes, where each box is
one dictionary: [[box_0, ...], [box_n,...]].
:return. pid: string. patient id.
"""
in_patient_results_list, pid, class_dict, merge_3D_iou = inputs
out_patient_results_list = []
for cl in list(class_dict.keys()):
det_boxes, slice_ids = [], []
# collect box predictions over batch dimension (slices) and store slice info as slice_ids.
for batch_ix, batch in enumerate(in_patient_results_list):
batch_element_det_boxes = [(ix, box) for ix, box in enumerate(batch) if
(box['box_type'] == 'det' and box['box_pred_class_id'] == cl)]
det_boxes += batch_element_det_boxes
slice_ids += [batch_ix] * len(batch_element_det_boxes)
        box_coords = np.array([det[1]['box_coords'] for det in det_boxes])
        box_scores = np.array([det[1]['box_score'] for det in det_boxes])
slice_ids = np.array(slice_ids)
if 0 not in box_scores.shape:
keep_ix, keep_z = nms_2to3D(
np.concatenate((box_coords, box_scores[:, None], slice_ids[:, None]), axis=1), merge_3D_iou)
else:
keep_ix, keep_z = [], []
# store kept predictions in new results list and add corresponding z-dimension info to coordinates.
for kix, kz in zip(keep_ix, keep_z):
keep_box = det_boxes[kix][1]
keep_box['box_coords'] = list(keep_box['box_coords']) + kz
out_patient_results_list.append(keep_box)
gt_boxes = [box for b in in_patient_results_list for box in b if box['box_type'] == 'gt']
if len(gt_boxes) > 0:
assert np.all([len(box["box_coords"]) == 6 for box in gt_boxes]), "expanded preds to 3D but GT is 2D."
out_patient_results_list += gt_boxes
return [[out_patient_results_list], pid] # additional list wrapping is extra batch dim.
class Predictor:
"""
Prediction pipeline:
- receives a patched patient image (n_patches, c, y, x, (z)) from patient data loader.
- forwards patches through model in chunks of batch_size. (method: batch_tiling_forward)
- unmolds predictions (boxes and segmentations) to original patient coordinates. (method: spatial_tiling_forward)
Ensembling (mode == 'test'):
        - for inference, forwards mirrored versions of the image through the model (according to cf.test_aug_axes)
          and unmolds predictions afterwards accordingly (method: data_aug_forward)
        - for inference, loads multiple parameter-sets of the trained model corresponding to different epochs. for each
          parameter-set loops over entire test set, runs prediction pipeline for each patient. (method: predict_test_set)
    Consolidation of predictions:
        - consolidates a patient's predictions (boxes, segmentations) collected over patches, data_aug- and temporal ensembling,
          performs clustering and weighted averaging (external function: apply_wbc_to_patient) to obtain consistent outputs.
        - for 2D networks, consolidates box predictions to 3D cubes via clustering (adaptation of non-maximum suppression).
          (external function: apply_2d_3d_merging_to_patient)
    Ground truth handling:
        - dismisses any ground truth boxes returned by the model (happens in validation mode, patch-based ground truth)
- if provided by data loader, adds patient-wise ground truth to the final predictions to be passed to the evaluator.
"""
def __init__(self, cf, net, logger, mode):
self.cf = cf
self.batch_size = cf.batch_size
self.logger = logger
self.mode = mode
self.net = net
self.n_ens = 1
self.rank_ix = '0'
self.regress_flag = any(['regression' in task for task in self.cf.prediction_tasks])
if self.cf.merge_2D_to_3D_preds:
assert self.cf.dim == 2, "Merge 2Dto3D only valid for 2D preds, but current dim is {}.".format(self.cf.dim)
if self.mode == 'test':
last_state_path = os.path.join(self.cf.fold_dir, 'last_state.pth')
try:
self.model_index = torch.load(last_state_path)["model_index"]
self.model_index = self.model_index[self.model_index["rank"] <= self.cf.test_n_epochs]
except FileNotFoundError:
raise FileNotFoundError('no last_state/model_index file in fold directory. '
'seems like you are trying to run testing without prior training...')
self.n_ens = cf.test_n_epochs
if self.cf.test_aug_axes is not None:
self.n_ens *= (len(self.cf.test_aug_axes)+1)
self.example_plot_dir = os.path.join(cf.test_dir, "example_plots")
os.makedirs(self.example_plot_dir, exist_ok=True)
def batch_tiling_forward(self, batch):
"""
        calls the actual network forward method. in patch-based prediction, the batch dimension might be overloaded
with n_patches >> batch_size, which would exceed gpu memory. In this case, batches are processed in chunks of
batch_size. validation mode calls the train method to monitor losses (returned ground truth objects are discarded).
test mode calls the test forward method, no ground truth required / involved.
:return. results_dict: stores the results for one patient. dictionary with keys:
- 'boxes': list over batch elements. each element is a list over boxes, where each box is
one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions,
and a dummy batch dimension of 1 for 3D predictions.
- 'seg_preds': pixel-wise predictions. (b, 1, y, x, (z))
- loss / class_loss (only in validation mode)
"""
img = batch['data']
if img.shape[0] <= self.batch_size:
if self.mode == 'val':
# call training method to monitor losses
results_dict = self.net.train_forward(batch, is_validation=True)
# discard returned ground-truth boxes (also training info boxes).
results_dict['boxes'] = [[box for box in b if box['box_type'] == 'det'] for b in results_dict['boxes']]
elif self.mode == 'test':
results_dict = self.net.test_forward(batch, return_masks=self.cf.return_masks_in_test)
else: # needs batch tiling
split_ixs = np.split(np.arange(img.shape[0]), np.arange(img.shape[0])[::self.batch_size])
chunk_dicts = []
for chunk_ixs in split_ixs[1:]: # first split is elements before 0, so empty
b = {k: batch[k][chunk_ixs] for k in batch.keys()
if (isinstance(batch[k], np.ndarray) and batch[k].shape[0] == img.shape[0])}
if self.mode == 'val':
chunk_dicts += [self.net.train_forward(b, is_validation=True)]
else:
chunk_dicts += [self.net.test_forward(b, return_masks=self.cf.return_masks_in_test)]
results_dict = {}
# flatten out batch elements from chunks ([chunk, chunk] -> [b, b, b, b, ...])
results_dict['boxes'] = [item for d in chunk_dicts for item in d['boxes']]
results_dict['seg_preds'] = np.array([item for d in chunk_dicts for item in d['seg_preds']])
if self.mode == 'val':
# if hasattr(self.cf, "losses_to_monitor"):
# loss_names = self.cf.losses_to_monitor
# else:
# loss_names = {name for dic in chunk_dicts for name in dic if 'loss' in name}
# estimate patient loss by mean over batch_chunks. Most similar to training loss.
results_dict['torch_loss'] = torch.mean(torch.cat([d['torch_loss'] for d in chunk_dicts]))
results_dict['class_loss'] = np.mean([d['class_loss'] for d in chunk_dicts])
# discard returned ground-truth boxes (also training info boxes).
results_dict['boxes'] = [[box for box in b if box['box_type'] == 'det'] for b in results_dict['boxes']]
return results_dict
def spatial_tiling_forward(self, batch, patch_crops = None, n_aug='0'):
"""
forwards batch to batch_tiling_forward method and receives and returns a dictionary with results.
if patch-based prediction, the results received from batch_tiling_forward will be on a per-patch-basis.
this method uses the provided patch_crops to re-transform all predictions to whole-image coordinates.
Patch-origin information of all box-predictions will be needed for consolidation, hence it is stored as
'patch_id', which is a unique string for each patch (also takes current data aug and temporal epoch instances
        into account). all box predictions get additional information about the amount of overlapping patches at the
respective position (used for consolidation).
:return. results_dict: stores the results for one patient. dictionary with keys:
- 'boxes': list over batch elements. each element is a list over boxes, where each box is
one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions,
and a dummy batch dimension of 1 for 3D predictions.
- 'seg_preds': pixel-wise predictions. (b, 1, y, x, (z))
- monitor_values (only in validation mode)
returned dict is a flattened version with 1 batch instance (3D) or slices (2D)
"""
if patch_crops is not None:
#print("patch_crops not None, applying patch center factor")
patches_dict = self.batch_tiling_forward(batch)
results_dict = {'boxes': [[] for _ in range(batch['original_img_shape'][0])]}
            # because of one-hot encoding, the channel dim of the segmentation has size num_classes
out_seg_shape = list(batch['original_img_shape'])
out_seg_shape[1] = patches_dict["seg_preds"].shape[1]
out_seg_preds = np.zeros(out_seg_shape, dtype=np.float16)
patch_overlap_map = np.zeros_like(out_seg_preds, dtype='uint8')
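            # patch_overlap_map counts, per voxel, how many patches cover that position; it is used to average
            # the accumulated seg predictions and to compute 'box_n_overlaps' for the box consolidation step.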
for pix, pc in enumerate(patch_crops):
if self.cf.dim == 3:
out_seg_preds[:, :, pc[0]:pc[1], pc[2]:pc[3], pc[4]:pc[5]] += patches_dict['seg_preds'][pix]
patch_overlap_map[:, :, pc[0]:pc[1], pc[2]:pc[3], pc[4]:pc[5]] += 1
elif self.cf.dim == 2:
out_seg_preds[pc[4]:pc[5], :, pc[0]:pc[1], pc[2]:pc[3], ] += patches_dict['seg_preds'][pix]
patch_overlap_map[pc[4]:pc[5], :, pc[0]:pc[1], pc[2]:pc[3], ] += 1
out_seg_preds[patch_overlap_map > 0] /= patch_overlap_map[patch_overlap_map > 0]
results_dict['seg_preds'] = out_seg_preds
for pix, pc in enumerate(patch_crops):
patch_boxes = patches_dict['boxes'][pix]
for box in patch_boxes:
# add unique patch id for consolidation of predictions.
box['patch_id'] = self.rank_ix + '_' + n_aug + '_' + str(pix)
                    # boxes from the edges of a patch have a lower prediction quality than the ones at patch-centers.
# hence they will be down-weighted for consolidation, using the 'box_patch_center_factor', which is
# obtained by a gaussian distribution over positions in the patch and average over spatial dimensions.
# Also the info 'box_n_overlaps' is stored for consolidation, which represents the amount of
# overlapping patches at the box's position.
c = box['box_coords']
#box_centers = np.array([(c[ii] + c[ii+2])/2 for ii in range(len(c)//2)])
box_centers = [(c[ii] + c[ii + 2]) / 2 for ii in range(2)]
if self.cf.dim == 3:
box_centers.append((c[4] + c[5]) / 2)
box['box_patch_center_factor'] = np.mean(
[norm.pdf(bc, loc=pc, scale=pc * 0.8) * np.sqrt(2 * np.pi) * pc * 0.8 for bc, pc in
zip(box_centers, np.array(self.cf.patch_size) / 2)])
if self.cf.dim == 3:
c += np.array([pc[0], pc[2], pc[0], pc[2], pc[4], pc[4]])
int_c = [int(np.floor(ii)) if ix%2 == 0 else int(np.ceil(ii)) for ix, ii in enumerate(c)]
box['box_n_overlaps'] = np.mean(patch_overlap_map[:, :, int_c[1]:int_c[3], int_c[0]:int_c[2], int_c[4]:int_c[5]])
results_dict['boxes'][0].append(box)
else:
c += np.array([pc[0], pc[2], pc[0], pc[2]])
int_c = [int(np.floor(ii)) if ix % 2 == 0 else int(np.ceil(ii)) for ix, ii in enumerate(c)]
box['box_n_overlaps'] = np.mean(
patch_overlap_map[pc[4], :, int_c[1]:int_c[3], int_c[0]:int_c[2]])
results_dict['boxes'][pc[4]].append(box)
if self.mode == 'val':
results_dict['torch_loss'] = patches_dict['torch_loss']
results_dict['class_loss'] = patches_dict['class_loss']
else:
results_dict = self.batch_tiling_forward(batch)
for b in results_dict['boxes']:
for box in b:
box['box_patch_center_factor'] = 1
box['box_n_overlaps'] = 1
box['patch_id'] = self.rank_ix + '_' + n_aug
return results_dict
def data_aug_forward(self, batch):
"""
in val_mode: passes batch through to spatial_tiling method without data_aug.
        in test_mode: if cf.test_aug_axes is set in configs, creates mirrored versions of the input image,
passes all of them to the next processing step (spatial_tiling method) and re-transforms returned predictions
to original image version.
:return. results_dict: stores the results for one patient. dictionary with keys:
- 'boxes': list over batch elements. each element is a list over boxes, where each box is
one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions,
and a dummy batch dimension of 1 for 3D predictions.
- 'seg_preds': pixel-wise predictions. (b, 1, y, x, (z))
- loss / class_loss (only in validation mode)
"""
patch_crops = batch['patch_crop_coords'] if self.patched_patient else None
results_list = [self.spatial_tiling_forward(batch, patch_crops)]
org_img_shape = batch['original_img_shape']
if self.mode == 'test' and self.cf.test_aug_axes is not None:
if isinstance(self.cf.test_aug_axes, (int, float)):
self.cf.test_aug_axes = (self.cf.test_aug_axes,)
#assert np.all(np.array(self.cf.test_aug_axes)<self.cf.dim), "test axes {} need to be spatial axes".format(self.cf.test_aug_axes)
if self.patched_patient:
# apply mirror transformations to patch-crop coordinates, for correct tiling in spatial_tiling method.
mirrored_patch_crops = get_mirrored_patch_crops_ax_dep(patch_crops, batch['original_img_shape'],
self.cf.test_aug_axes)
self.logger.info("mirrored patch crop coords for patched patient in test augs!")
else:
mirrored_patch_crops = [None] * 3
img = np.copy(batch['data'])
for n_aug, sp_axis in enumerate(self.cf.test_aug_axes):
#sp_axis = np.array(axis) #-2 #spatial axis index
axis = np.array(sp_axis)+2
if isinstance(sp_axis, (int, float)):
# mirroring along one axis at a time
batch['data'] = np.flip(img, axis=axis).copy()
chunk_dict = self.spatial_tiling_forward(batch, mirrored_patch_crops[n_aug], n_aug=str(n_aug))
# re-transform coordinates.
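                    # mirroring along a spatial axis of length L maps an interval [lo, hi] to [L - hi, L - lo],
                    # which is why the two coordinates of that axis are swapped below.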
for ix in range(len(chunk_dict['boxes'])):
for boxix in range(len(chunk_dict['boxes'][ix])):
coords = chunk_dict['boxes'][ix][boxix]['box_coords'].copy()
coords[sp_axis] = org_img_shape[axis] - chunk_dict['boxes'][ix][boxix]['box_coords'][sp_axis+2]
coords[sp_axis+2] = org_img_shape[axis] - chunk_dict['boxes'][ix][boxix]['box_coords'][sp_axis]
assert coords[2] >= coords[0], [coords, chunk_dict['boxes'][ix][boxix]['box_coords']]
assert coords[3] >= coords[1], [coords, chunk_dict['boxes'][ix][boxix]['box_coords']]
chunk_dict['boxes'][ix][boxix]['box_coords'] = coords
# re-transform segmentation predictions.
chunk_dict['seg_preds'] = np.flip(chunk_dict['seg_preds'], axis=axis)
                elif hasattr(sp_axis, "__iter__") and (tuple(sp_axis) == (0, 1) or tuple(sp_axis) == (1, 0)):
#NEED: mirrored patch crops are given as [(y-axis), (x-axis), (y-,x-axis)], obey this order!
# mirroring along two axes at same time
batch['data'] = np.flip(np.flip(img, axis=axis[0]), axis=axis[1]).copy()
chunk_dict = self.spatial_tiling_forward(batch, mirrored_patch_crops[n_aug], n_aug=str(n_aug))
# re-transform coordinates.
for ix in range(len(chunk_dict['boxes'])):
for boxix in range(len(chunk_dict['boxes'][ix])):
coords = chunk_dict['boxes'][ix][boxix]['box_coords'].copy()
coords[sp_axis[0]] = org_img_shape[axis[0]] - chunk_dict['boxes'][ix][boxix]['box_coords'][sp_axis[0]+2]
coords[sp_axis[0]+2] = org_img_shape[axis[0]] - chunk_dict['boxes'][ix][boxix]['box_coords'][sp_axis[0]]
coords[sp_axis[1]] = org_img_shape[axis[1]] - chunk_dict['boxes'][ix][boxix]['box_coords'][sp_axis[1]+2]
coords[sp_axis[1]+2] = org_img_shape[axis[1]] - chunk_dict['boxes'][ix][boxix]['box_coords'][sp_axis[1]]
assert coords[2] >= coords[0], [coords, chunk_dict['boxes'][ix][boxix]['box_coords']]
assert coords[3] >= coords[1], [coords, chunk_dict['boxes'][ix][boxix]['box_coords']]
chunk_dict['boxes'][ix][boxix]['box_coords'] = coords
# re-transform segmentation predictions.
chunk_dict['seg_preds'] = np.flip(np.flip(chunk_dict['seg_preds'], axis=axis[0]), axis=axis[1]).copy()
else:
raise Exception("Invalid axis type {} in test augs".format(type(axis)))
results_list.append(chunk_dict)
batch['data'] = img
# aggregate all boxes/seg_preds per batch element from data_aug predictions.
results_dict = {}
results_dict['boxes'] = [[item for d in results_list for item in d['boxes'][batch_instance]]
for batch_instance in range(org_img_shape[0])]
# results_dict['seg_preds'] = np.array([[item for d in results_list for item in d['seg_preds'][batch_instance]]
# for batch_instance in range(org_img_shape[0])])
results_dict['seg_preds'] = np.stack([dic['seg_preds'] for dic in results_list], axis=1)
# needs segs probs in seg_preds entry:
results_dict['seg_preds'] = np.sum(results_dict['seg_preds'], axis=1) #add up seg probs from different augs per class
if self.mode == 'val':
results_dict['torch_loss'] = results_list[0]['torch_loss']
results_dict['class_loss'] = results_list[0]['class_loss']
return results_dict
def load_saved_predictions(self):
"""loads raw predictions saved by self.predict_test_set. aggregates and/or merges 2D boxes to 3D cubes for
evaluation (if model predicts 2D but evaluation is run in 3D), according to settings config.
:return: list_of_results_per_patient: list over patient results. each entry is a dict with keys:
- 'boxes': list over batch elements. each element is a list over boxes, where each box is
one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions
(if not merged to 3D), and a dummy batch dimension of 1 for 3D predictions.
- 'batch_dices': dice scores as recorded in raw prediction results.
- 'seg_preds': not implemented yet. could replace dices by seg preds to have raw seg info available, however
          would consume a critically large amount of memory. todo: evaluation of instance/semantic segmentation.
"""
results_file = 'pred_results.pkl' if not self.cf.hold_out_test_set else 'pred_results_held_out.pkl'
if not self.cf.hold_out_test_set or not self.cf.ensemble_folds:
self.logger.info("loading saved predictions of fold {}".format(self.cf.fold))
with open(os.path.join(self.cf.fold_dir, results_file), 'rb') as handle:
results_list = pickle.load(handle)
box_results_list = [(res_dict["boxes"], pid) for res_dict, pid in results_list]
da_factor = len(self.cf.test_aug_axes)+1 if self.cf.test_aug_axes is not None else 1
self.n_ens = self.cf.test_n_epochs * da_factor
self.logger.info('loaded raw test set predictions with n_patients = {} and n_ens = {}'.format(
len(results_list), self.n_ens))
else:
self.logger.info("loading saved predictions of hold-out test set")
fold_dirs = sorted([os.path.join(self.cf.exp_dir, f) for f in os.listdir(self.cf.exp_dir) if
os.path.isdir(os.path.join(self.cf.exp_dir, f)) and f.startswith("fold")])
results_list = []
folds_loaded = 0
for fold in range(self.cf.n_cv_splits):
fold_dir = os.path.join(self.cf.exp_dir, 'fold_{}'.format(fold))
if fold_dir in fold_dirs:
with open(os.path.join(fold_dir, results_file), 'rb') as handle:
fold_list = pickle.load(handle)
results_list += fold_list
folds_loaded += 1
else:
self.logger.info("Skipping fold {} since no saved predictions found.".format(fold))
box_results_list = []
for res_dict, pid in results_list: #without filtering gt out:
box_results_list.append((res_dict['boxes'], pid))
                # note: filtering out ground-truth boxes at this point is usually not desired.
da_factor = len(self.cf.test_aug_axes)+1 if self.cf.test_aug_axes is not None else 1
self.n_ens = self.cf.test_n_epochs * da_factor * folds_loaded
# -------------- aggregation of boxes via clustering -----------------
if self.cf.clustering == "wbc":
self.logger.info('applying WBC to test-set predictions with iou {} and n_ens {} over {} patients'.format(
self.cf.clustering_iou, self.n_ens, len(box_results_list)))
mp_inputs = [[self.regress_flag, ii[0], ii[1], self.cf.class_dict, self.cf.clustering_iou, self.n_ens] for ii
in box_results_list]
del box_results_list
pool = Pool(processes=self.cf.n_workers)
box_results_list = pool.map(apply_wbc_to_patient, mp_inputs, chunksize=1)
pool.close()
pool.join()
del mp_inputs
elif self.cf.clustering == "nms":
self.logger.info('applying standard NMS to test-set predictions with iou {} over {} patients.'.format(
self.cf.clustering_iou, len(box_results_list)))
pool = Pool(processes=self.cf.n_workers)
mp_inputs = [[ii[0], ii[1], self.cf.class_dict, self.cf.clustering_iou] for ii in box_results_list]
del box_results_list
box_results_list = pool.map(apply_nms_to_patient, mp_inputs, chunksize=1)
pool.close()
pool.join()
del mp_inputs
if self.cf.merge_2D_to_3D_preds:
self.logger.info('applying 2Dto3D merging to test-set predictions with iou = {}.'.format(self.cf.merge_3D_iou))
pool = Pool(processes=self.cf.n_workers)
mp_inputs = [[ii[0], ii[1], self.cf.class_dict, self.cf.merge_3D_iou] for ii in box_results_list]
box_results_list = pool.map(apply_2d_3d_merging_to_patient, mp_inputs, chunksize=1)
pool.close()
pool.join()
del mp_inputs
for ix in range(len(results_list)):
assert np.all(results_list[ix][1] == box_results_list[ix][1]), "pid mismatch between loaded and aggregated results"
results_list[ix][0]["boxes"] = box_results_list[ix][0]
return results_list # holds (results_dict, pid)
def predict_patient(self, batch):
"""
predicts one patient.
called either directly via loop over validation set in exec.py (mode=='val')
        or from self.predict_test_set (mode=='test').
in val mode: adds 3D ground truth info to predictions and runs consolidation and 2Dto3D merging of predictions.
in test mode: returns raw predictions (ground truth addition, consolidation, 2D to 3D merging are
done in self.predict_test_set, because patient predictions across several epochs might be needed
to be collected first, in case of temporal ensembling).
:return. results_dict: stores the results for one patient. dictionary with keys:
- 'boxes': list over batch elements. each element is a list over boxes, where each box is
one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions
(if not merged to 3D), and a dummy batch dimension of 1 for 3D predictions.
- 'seg_preds': pixel-wise predictions. (b, 1, y, x, (z))
- loss / class_loss (only in validation mode)
"""
#if self.mode=="test":
# self.logger.info('predicting patient {} for fold {} '.format(np.unique(batch['pid']), self.cf.fold))
# True if patient is provided in patches and predictions need to be tiled.
self.patched_patient = 'patch_crop_coords' in list(batch.keys())
# forward batch through prediction pipeline.
results_dict = self.data_aug_forward(batch)
#has seg probs in entry 'seg_preds'
if self.mode == 'val':
for b in range(batch['patient_bb_target'].shape[0]):
for t in range(len(batch['patient_bb_target'][b])):
gt_box = {'box_type': 'gt', 'box_coords': batch['patient_bb_target'][b][t],
'class_targets': batch['patient_class_targets'][b][t]}
for name in self.cf.roi_items:
gt_box.update({name : batch['patient_'+name][b][t]})
results_dict['boxes'][b].append(gt_box)
if 'dice' in self.cf.metrics:
if self.patched_patient:
assert 'patient_seg' in batch.keys(), "Results_dict preds are in original patient shape."
results_dict['batch_dices'] = mutils.dice_per_batch_and_class(
results_dict['seg_preds'], batch["patient_seg"] if self.patched_patient else batch['seg'],
self.cf.num_seg_classes, convert_to_ohe=True)
if self.patched_patient and self.cf.clustering == "wbc":
wbc_input = [self.regress_flag, results_dict['boxes'], 'dummy_pid', self.cf.class_dict, self.cf.clustering_iou, self.n_ens]
results_dict['boxes'] = apply_wbc_to_patient(wbc_input)[0]
elif self.patched_patient:
nms_inputs = [results_dict['boxes'], 'dummy_pid', self.cf.class_dict, self.cf.clustering_iou]
results_dict['boxes'] = apply_nms_to_patient(nms_inputs)[0]
if self.cf.merge_2D_to_3D_preds:
results_dict['2D_boxes'] = results_dict['boxes']
merge_dims_inputs = [results_dict['boxes'], 'dummy_pid', self.cf.class_dict, self.cf.merge_3D_iou]
results_dict['boxes'] = apply_2d_3d_merging_to_patient(merge_dims_inputs)[0]
return results_dict
def predict_test_set(self, batch_gen, return_results=True):
"""
wrapper around test method, which loads multiple (or one) epoch parameters (temporal ensembling), loops through
the test set and collects predictions per patient. Also flattens the results per patient and epoch
and adds optional ground truth boxes for evaluation. Saves out the raw result list for later analysis and
optionally consolidates and returns predictions immediately.
:return: (optionally) list_of_results_per_patient: list over patient results. each entry is a dict with keys:
- 'boxes': list over batch elements. each element is a list over boxes, where each box is
one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions
(if not merged to 3D), and a dummy batch dimension of 1 for 3D predictions.
- 'seg_preds': not implemented yet. todo evaluation of instance/semantic segmentation.
"""
# -------------- raw predicting -----------------
dict_of_patients_results = OrderedDict()
set_of_result_types = set()
self.model_index = self.model_index.sort_values(by="rank")
# get paths of all parameter sets to be loaded for temporal ensembling. (or just one for no temp. ensembling).
weight_paths = [os.path.join(self.cf.fold_dir, file_name) for file_name in self.model_index["file_name"]]
for rank_ix, weight_path in enumerate(weight_paths):
self.logger.info(('tmp ensembling over rank_ix:{} epoch:{}'.format(rank_ix, weight_path)))
self.net.load_state_dict(torch.load(weight_path))
self.net.eval()
self.rank_ix = str(rank_ix)
plot_batches = np.random.choice(np.arange(batch_gen['n_test']),
size=min(batch_gen['n_test'], self.cf.n_test_plots), replace=False)
with torch.no_grad():
for i in range(batch_gen['n_test']):
batch = next(batch_gen['test'])
pid = np.unique(batch['pid'])
assert len(pid)==1
pid = pid[0]
if not pid in dict_of_patients_results.keys(): # store batch info in patient entry of results dict.
dict_of_patients_results[pid] = {}
dict_of_patients_results[pid]['results_dicts'] = []
dict_of_patients_results[pid]['patient_bb_target'] = batch['patient_bb_target']
for name in self.cf.roi_items:
dict_of_patients_results[pid]["patient_"+name] = batch["patient_"+name]
stime = time.time()
results_dict = self.predict_patient(batch) #only holds "boxes", "seg_preds"
                    # predict_patient returns one-hot seg probabilities; convert to a label map via argmax (keep a channel dim of 1):
results_dict['seg_preds'] = np.argmax(results_dict['seg_preds'], axis=1)[:,np.newaxis]
print("\rpredicting patient {} with weight rank {} (progress: {}/{}) took {:.2f}s".format(
str(pid), rank_ix, (rank_ix)*batch_gen['n_test']+(i+1), len(weight_paths)*batch_gen['n_test'],
time.time()-stime), end="", flush=True)
if i in plot_batches and (not self.patched_patient or 'patient_data' in batch.keys()):
try:
# view qualitative results of random test case
out_file = os.path.join(self.example_plot_dir,
'batch_example_test_{}_rank_{}.png'.format(self.cf.fold, rank_ix))
utils.split_off_process(plg.view_batch, self.cf, batch, results_dict,
has_colorchannels=self.cf.has_colorchannels,
show_gt_labels=True, show_seg_ids='dice' in self.cf.metrics,
get_time="test-example plot", out_file=out_file)
except Exception as e:
self.logger.info("WARNING: error in view_batch: {}".format(e))
if 'dice' in self.cf.metrics:
if self.patched_patient:
assert 'patient_seg' in batch.keys(), "Results_dict preds are in original patient shape."
results_dict['batch_dices'] = mutils.dice_per_batch_and_class( results_dict['seg_preds'],
batch["patient_seg"] if self.patched_patient else batch['seg'],
self.cf.num_seg_classes, convert_to_ohe=True)
dict_of_patients_results[pid]['results_dicts'].append({k:v for k,v in results_dict.items()
if k in ["boxes", "batch_dices"]})
# collect result types to know which ones to look for when saving
set_of_result_types.update(dict_of_patients_results[pid]['results_dicts'][-1].keys())
# -------------- re-order, save raw results -----------------
self.logger.info('finished predicting test set. starting aggregation of predictions.')
results_per_patient = []
for pid, p_dict in dict_of_patients_results.items():
            # p_dict['results_dicts'] holds one entry per loaded weight rank (i.e., per temporal-ensembling epoch).
results_dict = {}
# collect all boxes/seg_preds of same batch_instance over temporal instances.
b_size = len(p_dict['results_dicts'][0]["boxes"])
for res_type in [rtype for rtype in set_of_result_types if rtype in ["boxes", "batch_dices"]]:#, "seg_preds"]]:
if not 'batch' in res_type: #assume it's results on batch-element basis
results_dict[res_type] = [[item for rank_dict in p_dict['results_dicts'] for item in rank_dict[res_type][batch_instance]]
for batch_instance in range(b_size)]
else:
results_dict[res_type] = []
                    for res_dict in p_dict['results_dicts']:
                        if 'dice' in res_type:
                            item = res_dict[res_type] # res_dict['batch_dices'] has shape (num_seg_classes,)
assert len(item) == self.cf.num_seg_classes, \
"{}, {}".format(len(item), self.cf.num_seg_classes)
else:
raise NotImplementedError
results_dict[res_type].append(item)
# rdict[dice] shape (n_rank_epochs (n_saved_ranks), nsegclasses)
# calc mean over test epochs so inline with shape from sampling
                    results_dict[res_type] = np.mean(results_dict[res_type], axis=0) # note: this mean is only defined for numeric result types such as the dice scores
if not hasattr(self.cf, "eval_test_separately") or not self.cf.eval_test_separately:
# add unpatched 2D or 3D (if dim==3 or merge_2D_to_3D) ground truth boxes for evaluation.
for b in range(p_dict['patient_bb_target'].shape[0]):
for targ in range(len(p_dict['patient_bb_target'][b])):
gt_box = {'box_type': 'gt', 'box_coords':p_dict['patient_bb_target'][b][targ],
'class_targets': p_dict['patient_class_targets'][b][targ]}
for name in self.cf.roi_items:
gt_box.update({name: p_dict["patient_"+name][b][targ]})
results_dict['boxes'][b].append(gt_box)
results_per_patient.append([results_dict, pid])
out_string = 'pred_results_held_out' if self.cf.hold_out_test_set else 'pred_results'
with open(os.path.join(self.cf.fold_dir, '{}.pkl'.format(out_string)), 'wb') as handle:
pickle.dump(results_per_patient, handle)
if return_results:
# -------------- results processing, clustering, etc. -----------------
final_patient_box_results = [ (res_dict["boxes"], pid) for res_dict,pid in results_per_patient ]
if self.cf.clustering == "wbc":
self.logger.info('applying WBC to test-set predictions with iou = {} and n_ens = {}.'.format(
self.cf.clustering_iou, self.n_ens))
mp_inputs = [[self.regress_flag, ii[0], ii[1], self.cf.class_dict, self.cf.clustering_iou, self.n_ens] for ii in final_patient_box_results]
del final_patient_box_results
pool = Pool(processes=self.cf.n_workers)
final_patient_box_results = pool.map(apply_wbc_to_patient, mp_inputs, chunksize=1)
pool.close()
pool.join()
del mp_inputs
elif self.cf.clustering == "nms":
self.logger.info('applying standard NMS to test-set predictions with iou = {}.'.format(self.cf.clustering_iou))
pool = Pool(processes=self.cf.n_workers)
mp_inputs = [[ii[0], ii[1], self.cf.class_dict, self.cf.clustering_iou] for ii in final_patient_box_results]
del final_patient_box_results
final_patient_box_results = pool.map(apply_nms_to_patient, mp_inputs, chunksize=1)
pool.close()
pool.join()
del mp_inputs
if self.cf.merge_2D_to_3D_preds:
self.logger.info('applying 2D-to-3D merging to test-set predictions with iou = {}.'.format(self.cf.merge_3D_iou))
mp_inputs = [[ii[0], ii[1], self.cf.class_dict, self.cf.merge_3D_iou] for ii in final_patient_box_results]
del final_patient_box_results
pool = Pool(processes=self.cf.n_workers)
final_patient_box_results = pool.map(apply_2d_3d_merging_to_patient, mp_inputs, chunksize=1)
pool.close()
pool.join()
del mp_inputs
# final_patient_box_results holds [avg_boxes, pid] if wbc
for ix in range(len(results_per_patient)):
assert results_per_patient[ix][1] == final_patient_box_results[ix][1], "should be same pid"
results_per_patient[ix][0]["boxes"] = final_patient_box_results[ix][0]
# results_per_patient = [(res_dict["boxes"] = boxes, pid) for (boxes,pid) in final_patient_box_results]
return results_per_patient # holds list of (results_dict, pid)
| 59.229394 | 192 | 0.598803 | 37,716 | 0.632352 | 0 | 0 | 0 | 0 | 0 | 0 | 23,311 | 0.390836 |
d2aa2e4deaca6a1a85b89b1e9c89d89fa5c4d8f5 | 424 | py | Python | archive/jonesboro/__init__.py | jayktee/scrapers-us-municipal | ff52a331e91cb590a3eda7db6c688d75b77acacb | ["MIT"] | 67 | 2015-04-28T19:28:18.000Z | 2022-01-31T03:27:17.000Z | archive/jonesboro/__init__.py | jayktee/scrapers-us-municipal | ff52a331e91cb590a3eda7db6c688d75b77acacb | ["MIT"] | 202 | 2015-01-15T18:43:12.000Z | 2021-11-23T15:09:10.000Z | archive/jonesboro/__init__.py | jayktee/scrapers-us-municipal | ff52a331e91cb590a3eda7db6c688d75b77acacb | ["MIT"] | 54 | 2015-01-27T03:15:45.000Z | 2021-09-10T19:35:32.000Z |
from pupa.scrape import Jurisdiction
from legistar.ext.pupa import LegistarPeopleScraper
class Jonesboro(Jurisdiction):
division_id = 'ocd-division/country:us/state:ar/place:jonesboro'
jurisdiction_id = 'ocd-jurisdiction/country:us/state:ar/place:jonesboro/government'
name = 'Jonesboro City Council'
url = 'http://jonesboro.legistar.com/'
scrapers = {
"people": LegistarPeopleScraper,
}
| 28.266667 | 87 | 0.735849 | 332 | 0.783019 | 0 | 0 | 0 | 0 | 0 | 0 | 179 | 0.42217 |
d2aa498a5dc13b5e44bb5a53742aa0908d8d79da | 2,766 | py | Python | src/config.py | La-tale/MessyTable | 42ae08294f1a576d2477a4b4c12b2aec047c2ba9 | ["MIT"] | 32 | 2020-07-13T04:30:00.000Z | 2022-03-17T12:04:32.000Z | src/config.py | La-tale/MessyTable | 42ae08294f1a576d2477a4b4c12b2aec047c2ba9 | ["MIT"] | 12 | 2020-08-31T02:58:37.000Z | 2022-03-26T04:05:27.000Z | src/config.py | La-tale/MessyTable | 42ae08294f1a576d2477a4b4c12b2aec047c2ba9 | ["MIT"] | 8 | 2020-07-27T05:20:33.000Z | 2022-02-04T06:58:37.000Z |
import yaml
import os
def parse_config(args):
"""
prepare configs
"""
file_dir = os.path.dirname(os.path.realpath('__file__'))
messytable_dir = os.path.realpath(os.path.join(file_dir, '..'))
config_pathname = os.path.join(messytable_dir,'models',args.config_dir,'train.yaml')
    with open(config_pathname, 'r') as config_file:
        config = yaml.safe_load(config_file)
config['messytable_dir'] = messytable_dir
config['config_dir'] = os.path.join(messytable_dir,'models',args.config_dir)
config['data_dir'] = os.path.join(messytable_dir, 'data') if 'data_dir' not in config else config['data_dir'] # NOTE: either indicate data_dir or put the data in messytable/data
config['img_dir'] = os.path.join(config['data_dir'],'images')
config['train_label_pathname'] = os.path.join(config['data_dir'],'labels',config['train_json'])
config['num_workers'] = config['num_workers'] if 'num_workers' in config else 16
config['milestones'] = config['milestones'] if 'milestones' in config else [60, 80]
config['split_samples_in_func'] = config['split_samples_in_func'] if 'split_samples_in_func' in config else True
config['loss_func'] = config['loss_func'] if 'loss_func' in config else 'ERROR_LOSS_FUNC'
config['triplet_margin'] = config['triplet_margin'] if 'triplet_margin' in config else 0.3
config['data_augmentation'] = config['data_augmentation'] if 'data_augmentation' in config else False
config['cropped_img_size'] = (config['cropped_height'],config['cropped_width'])
config['original_img_size'] = (config['img_height'],config['img_width'])
config['scene_ratio'] = config['scene_ratio'] if 'scene_ratio' in config else 1.0
config['cam_selected_num'] = config['cam_selected_num'] if 'cam_selected_num' in config else 8
config['triplet_sampling_ratio'] = config['triplet_sampling_ratio'] if 'triplet_sampling_ratio' in config else [0.5,0.3,0.2]
config['image_pairs_per_batch'] = config['image_pairs_per_batch'] if 'image_pairs_per_batch' in config else 24
config['triplet_batch_size'] = config['triplet_batch_size'] if 'triplet_batch_size' in config else config['batch_size']
config['learning_rate'] = float(config['learning_rate'])
config['zoomout_crop_num'] = 'single_crop' if len(config['zoomout_ratio']) == 1 else 'multi_crops'
# make cam_pairs
test_cam_pairs = []
for i in range(1,9):
for j in range(i+1,10):
test_cam_pairs.append((str(i),str(j)))
reversed_cam_pairs = []
for cam_pair in test_cam_pairs:
reversed_cam_pairs.append((cam_pair[1],cam_pair[0]))
config['test_cam_pairs'] = test_cam_pairs
config['train_cam_pairs'] = test_cam_pairs + reversed_cam_pairs
config['cam_list'] = [str(i) for i in range(1,10)]
return config
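# Minimal usage sketch (hypothetical experiment name; assumes an argparse namespace with a `config_dir` attribute):
#   args = argparse.Namespace(config_dir='my_experiment')
#   config = parse_config(args)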
| 56.44898 | 181 | 0.713304 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,130 | 0.408532 |
d2ab49c4b3562bad12874570d0c5751dda4cf3e6 | 1,194 | py | Python | tests/settings.py | josemarimanio/django-adminlte2-templates | d39ab5eaec674c4725015fe43fc93e74dce78a6e | ["MIT"] | 10 | 2020-03-21T10:50:11.000Z | 2022-03-04T08:36:43.000Z | tests/settings.py | josemarimanio/django-adminlte2-templates | d39ab5eaec674c4725015fe43fc93e74dce78a6e | ["MIT"] | 6 | 2020-06-06T08:48:29.000Z | 2021-06-10T18:49:35.000Z | tests/settings.py | josemarimanio/django-adminlte2-templates | d39ab5eaec674c4725015fe43fc93e74dce78a6e | ["MIT"] | 1 | 2021-09-14T02:00:43.000Z | 2021-09-14T02:00:43.000Z |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '!t_(11ght0&nmb&$tf4to=gdg&u$!hsm3@)c6dzp=zdc*c9zci' # nosec
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'adminlte2_templates',
'tests',
]
MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
]
ROOT_URLCONF = 'tests.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [os.path.join(BASE_DIR, 'tests/templates')],
'OPTIONS': {
'context_processors': [
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'adminlte2_templates.context_processors.template',
],
},
},
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.MD5PasswordHasher',
]
| 23.88 | 74 | 0.629816 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 675 | 0.565327 |
d2ae04ea58cc84694d33370988510f0b8bdcadb9 | 2,658 | py | Python | two-variables-function-fitting/fxy_gen.py | ettoremessina/fitting-with-mlp-using-tensorflow | 50303c7161521f690c37b80a72a281129052365b | ["MIT"] | 9 | 2020-03-21T08:45:28.000Z | 2021-11-30T02:49:41.000Z | two-variables-function-fitting/fxy_gen.py | ettoremessina/fitting-with-mlp-using-tensorflow | 50303c7161521f690c37b80a72a281129052365b | ["MIT"] | null | null | null | two-variables-function-fitting/fxy_gen.py | ettoremessina/fitting-with-mlp-using-tensorflow | 50303c7161521f690c37b80a72a281129052365b | ["MIT"] | 3 | 2020-04-08T15:35:03.000Z | 2022-03-22T02:19:02.000Z |
import argparse
import numpy as np
import csv
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='fxy_gen.py generates a synthetic dataset file calling a two-variables real function on a rectangle')
parser.add_argument('--dsout',
type=str,
dest='ds_output_filename',
required=True,
help='dataset output file (csv format)')
parser.add_argument('--fxy',
type=str,
dest='func_xy_body',
required=True,
                        help='f(x, y) body (lambda format)')
parser.add_argument('--rxbegin',
type=float,
dest='range_xbegin',
required=False,
default=-5.0,
help='begin x range (default:-5.0)')
parser.add_argument('--rxend',
type=float,
dest='range_xend',
required=False,
default=+5.0,
help='end x range (default:+5.0)')
parser.add_argument('--rybegin',
type=float,
dest='range_ybegin',
required=False,
default=-5.0,
help='begin y range (default:-5.0)')
parser.add_argument('--ryend',
type=float,
dest='range_yend',
required=False,
default=+5.0,
help='end y range (default:+5.0)')
parser.add_argument('--rstep',
type=float,
dest='range_step',
required=False,
default=0.01,
help='step range (default: 0.01)')
args = parser.parse_args()
print("#### Started {} {} ####".format(__file__, args));
x_values = np.arange(args.range_xbegin, args.range_xend, args.range_step, dtype=float)
y_values = np.arange(args.range_ybegin, args.range_yend, args.range_step, dtype=float)
func_xy = eval('lambda x, y: ' + args.func_xy_body)
csv_ds_output_file = open(args.ds_output_filename, 'w')
with csv_ds_output_file:
writer = csv.writer(csv_ds_output_file, delimiter=',')
for i in range(0, x_values.size):
for j in range(0, y_values.size):
writer.writerow([x_values[i], y_values[j], func_xy(x_values[i], y_values[j])])
print("#### Terminated {} ####".format(__file__));
| 37.971429 | 150 | 0.482318 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 551 | 0.207299 |
d2aee573a11ac0e4ec731ba7feda47d776f90ea2 | 995 | py | Python | custom/icds_reports/dashboard_utils.py | tstalka/commcare-hq | 902412b0f97ba0daac173fe284f3adc4c01bcd76 | ["BSD-3-Clause"] | null | null | null | custom/icds_reports/dashboard_utils.py | tstalka/commcare-hq | 902412b0f97ba0daac173fe284f3adc4c01bcd76 | ["BSD-3-Clause"] | null | null | null | custom/icds_reports/dashboard_utils.py | tstalka/commcare-hq | 902412b0f97ba0daac173fe284f3adc4c01bcd76 | ["BSD-3-Clause"] | null | null | null |
from corehq.apps.locations.util import location_hierarchy_config
from custom.icds_reports.utils import icds_pre_release_features
def get_dashboard_template_context(domain, couch_user):
context = {}
context['location_hierarchy'] = location_hierarchy_config(domain)
context['user_location_id'] = couch_user.get_location_id(domain)
context['all_user_location_id'] = list(couch_user.get_sql_locations(
domain
).location_ids())
context['state_level_access'] = 'state' in set(
[loc.location_type.code for loc in couch_user.get_sql_locations(
domain
)]
)
context['have_access_to_features'] = icds_pre_release_features(couch_user)
context['have_access_to_all_locations'] = couch_user.has_permission(
domain, 'access_all_locations'
)
if context['have_access_to_all_locations']:
context['user_location_id'] = None
if couch_user.is_web_user():
context['is_web_user'] = True
return context
| 34.310345 | 78 | 0.729648 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 225 | 0.226131 |
d2af35f5ecd1284185b97cd7fd48a1dabdbf319d | 1,714 | py | Python | data_input.py | zpcore/OnePass | fc102fae172c617535d4661bfa99a0302cbe09db | ["MIT"] | null | null | null | data_input.py | zpcore/OnePass | fc102fae172c617535d4661bfa99a0302cbe09db | ["MIT"] | null | null | null | data_input.py | zpcore/OnePass | fc102fae172c617535d4661bfa99a0302cbe09db | ["MIT"] | null | null | null |
import json
import string, sys
from random import *
class Token:
def __init__(self):
self.company, self.website, self.email, self.username, self.password = None, None, None, None, None
def get_input(self):
while(self.company in (None,'')):
self.company = input('Account Association:')
if(self.company in (None,'')):
print('Account Association cannot be null, try again.')
self.website = input('Website linked to the account:')
self.email = input('Email linked to the account:')
# while(self.email in (None,'')):
# self.email = input('Registered Email:')
# if(self.email in (None,'')):
# print('Email cannot be null, try again.')
while(self.username in (None,'')):
self.username = input('Username:')
if(self.username in (None,'')):
print('Username cannot be null, try again.')
while(self.password in (None,'')):
select = input('Random generate a password for you? Type Y or N. ').strip().lower()
if(select in ('y','yes')):
characters = string.ascii_letters + string.punctuation + string.digits
low_bound, up_bound = 10, 20
password = "".join(choice(characters) for x in range(randint(low_bound, up_bound)))
self.password = password
print('auto generated password:'+self.password)
elif(select in ('n','no')):
self.password = input('Password:')
if(self.password in (None,'')):
print('Password cannot be null, try again.')
else:
print('Incorrect choice. Try again.')
class MyEncoder(json.JSONEncoder):
def default(self, obj):
if not isinstance(obj, Token):
return super().default(obj)
return obj.__dict__
# tok = Token()
# tok.get_input()
# print(json.dumps(tok, cls=MyEncoder)) | 32.339623 | 101 | 0.656943 | 1,584 | 0.924154 | 0 | 0 | 0 | 0 | 0 | 0 | 586 | 0.34189 |
d2af5783fc08617f08a4edb9dc33a39579f11d65 | 1,401 | py | Python | examples/python/test_dict.py | SmartEconomyWorkshop/workshop | 5961dcc8832f60b3a0407cb9a8361ba5485ac280 | ["MIT"] | 79 | 2017-10-22T03:35:06.000Z | 2021-12-02T10:28:06.000Z | examples/python/test_dict.py | SmartEconomyWorkshop/workshop | 5961dcc8832f60b3a0407cb9a8361ba5485ac280 | ["MIT"] | 122 | 2017-10-19T12:34:08.000Z | 2020-08-20T12:38:17.000Z | examples/python/test_dict.py | SmartEconomyWorkshop/workshop | 5961dcc8832f60b3a0407cb9a8361ba5485ac280 | ["MIT"] | 76 | 2017-10-19T05:09:55.000Z | 2020-12-08T12:03:59.000Z |
from boa_test.tests.boa_test import BoaTest
from boa.compiler import Compiler
from neo.Settings import settings
from neo.Prompt.Commands.BuildNRun import TestBuild
class TestContract(BoaTest):
def test_dict1(self):
output = Compiler.instance().load('%s/boa_test/example/DictTest1.py' % TestContract.dirname).default
out = output.write()
tx, results, total_ops, engine = TestBuild(out, [], self.GetWallet1(), '', '02')
self.assertEqual(len(results), 1)
self.assertIsInstance(results[0].GetMap(), dict)
self.assertEqual(results[0].GetBoolean(), True)
def test_dict2(self):
output = Compiler.instance().load('%s/boa_test/example/DictTest2.py' % TestContract.dirname).default
out = output.write()
tx, results, total_ops, engine = TestBuild(out, [], self.GetWallet1(), '', '02')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 7)
def test_dict3(self):
output = Compiler.instance().load('%s/boa_test/example/DictTest3.py' % TestContract.dirname).default
out = output.write()
        string_output = output.to_s()
        self.assertGreater(len(string_output), 0)
tx, results, total_ops, engine = TestBuild(out, [], self.GetWallet1(), '', '02')
self.assertEqual(len(results), 1)
self.assertIsInstance(results[0].GetMap(), dict)
| 36.868421 | 108 | 0.666667 | 1,234 | 0.880799 | 0 | 0 | 0 | 0 | 0 | 0 | 120 | 0.085653 |
d2b08bd5689396a0415385c35a4d92cedae61e22 | 520 | py | Python | deployment_classifier/setup.py | m-santh/VayuAnukulani | d1b881ac6268c24761dc0ef6db296d7e5ee1a22e | ["MIT"] | 1 | 2021-04-19T17:04:03.000Z | 2021-04-19T17:04:03.000Z | deployment_classifier/setup.py | m-santh/VayuAnukulani | d1b881ac6268c24761dc0ef6db296d7e5ee1a22e | ["MIT"] | 18 | 2020-01-28T22:36:26.000Z | 2020-07-28T17:01:35.000Z | deployment_classifier/setup.py | m-santh/VayuAnukulani | d1b881ac6268c24761dc0ef6db296d7e5ee1a22e | ["MIT"] | 3 | 2019-04-01T10:33:20.000Z | 2020-10-23T23:29:09.000Z |
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = ['tensorflow==1.8.0','pandas==0.23.1','setuptools==38.7.0','numpy==1.14.1','Keras==2.1.4','scikit_learn==0.19.1','h5py']
setup(
name='classifier',
version='0.1',
install_requires=REQUIRED_PACKAGES,
packages=find_packages(),
include_package_data=True,
description='My training application package.',
author='Divyam Madaan',
author_email='[email protected]',
license='MIT',
zip_safe=False
)
| 28.888889 | 140 | 0.701923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 205 | 0.394231 |
d2b08ef7b1d20d9d85caa8e8727b92065aef39a2 | 1,023 | py | Python | day5.py | zsmoore/Advent-Of-Code-2017 | 895a7fbaa8b8b82a338dac967bccbf97b2092b20 | [
"MIT"
]
| null | null | null | day5.py | zsmoore/Advent-Of-Code-2017 | 895a7fbaa8b8b82a338dac967bccbf97b2092b20 | [
"MIT"
]
| null | null | null | day5.py | zsmoore/Advent-Of-Code-2017 | 895a7fbaa8b8b82a338dac967bccbf97b2092b20 | [
"MIT"
]
| null | null | null | import sys
import copy
def main():
    with open(sys.argv[1], 'r') as in_file:
        jumps = [int(line.strip()) for line in in_file]
#print(compute_exit(jumps))
print(compute_exit2(jumps))
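# Part 1: every offset is incremented by 1 after it is used; returns the number of jumps needed to leave the list.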
def compute_exit(jump_list):
current_ind = 0
step_num = 0
while True:
if current_ind < 0 or current_ind >= len(jump_list):
return step_num
step = jump_list[current_ind]
jump_list[current_ind] += 1
current_ind += step
step_num += 1
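# Part 2: offsets of three or more are decremented after the jump; all smaller offsets are incremented.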
def compute_exit2(jump_list):
current_ind = 0
step_num = 0
while True:
if current_ind < 0 or current_ind >= len(jump_list):
return step_num
step = jump_list[current_ind]
if step >= 3:
jump_list[current_ind] -= 1
elif step <= -3:
jump_list[current_ind] += 1
else:
jump_list[current_ind] += 1
current_ind += step
step_num += 1
if __name__ == "__main__":
main()
| 22.23913 | 60 | 0.567937 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 0.039101 |
d2b26b4fc46e989fc34f786c463f49d76b84289c | 4,949 | py | Python | pycudasirecon/_recon_params.py | tlambert03/pycudasirecon | 17ca242b1cfed14216d97df480ca2c7f3471d770 | [
"MIT"
]
| 2 | 2021-06-09T15:35:50.000Z | 2021-06-10T05:33:11.000Z | pycudasirecon/_recon_params.py | tlambert03/pycudasirecon | 17ca242b1cfed14216d97df480ca2c7f3471d770 | [
"MIT"
]
| null | null | null | pycudasirecon/_recon_params.py | tlambert03/pycudasirecon | 17ca242b1cfed14216d97df480ca2c7f3471d770 | [
"MIT"
]
| null | null | null | import os
from contextlib import contextmanager
from tempfile import NamedTemporaryFile
from typing import Optional, Sequence
from pydantic import BaseModel, Field, FilePath
@contextmanager
def temp_config(**kwargs):
"""A context manager that creates a temporary config file for SIMReconstructor.
`**kwargs` should be valid keyword arguments for :class:`ReconParams`.
"""
params = ReconParams(**kwargs)
tf = NamedTemporaryFile(delete=False)
tf.file.write(params.to_config().encode()) # type: ignore
tf.close()
try:
yield tf
finally:
os.unlink(tf.name)
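# Illustrative usage (argument values are arbitrary): the yielded object is the closed
# NamedTemporaryFile, so pass its .name to whatever consumes the config file.
#
#     with temp_config(nphases=5, ndirs=3, wiener=0.001) as cfg:
#         reconstruct(cfg.name)  # `reconstruct` is a hypothetical consumer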
class ReconParams(BaseModel):
otf_file: Optional[FilePath] = Field(None, description="OTF file")
usecorr: bool = Field(
False, description="use the flat-field correction file provided"
)
ndirs: int = Field(default=3, description="number of directions")
nphases: int = Field(default=5, description="number of phases per direction")
nordersout: int = Field(
0, description="number of output orders; must be <= norders"
)
angle0: float = Field(1.648, description="angle of the first direction in radians")
ls: float = Field(0.172, description="line spacing of SIM pattern in microns")
na: float = Field(1.42, description="Detection numerical aperture")
nimm: float = Field(1.515, description="refractive index of immersion medium")
zoomfact: float = Field(2, description="lateral oversampling factor")
explodefact: float = Field(
1,
description="artificially exploding the reciprocal-space "
"distance between orders by this factor",
)
zzoom: int = Field(1, description="axial zoom factor")
nofilteroverlaps: bool = Field(
False,
description="do not filter the overlaping region between bands "
"usually used in trouble shooting",
)
background: float = Field(0, description="camera readout background")
wiener: float = Field(0.01, description="Wiener constant")
forcemodamp: Optional[Sequence[float]] = Field(
None, description="modamps forced to these values"
)
k0angles: Optional[Sequence[float]] = Field(
None, description="user given pattern vector k0 angles for all directions"
)
otfRA: bool = Field(True, description="using rotationally averaged OTF")
otfPerAngle: bool = Field(True, description="using one OTF per SIM angle")
fastSI: bool = Field(
True,
description="SIM data is organized in Z->Angle->Phase order; "
"default being Angle->Z->Phase",
)
k0searchAll: bool = Field(False, description="search for k0 at all time points")
norescale: bool = Field(False, description="bleach correcting for z") # TODO
equalizez: bool = Field(True, description="bleach correcting for z")
equalizet: bool = Field(True, description="bleach correcting for time")
dampenOrder0: bool = Field(True, description="dampen order-0 in final assembly")
nosuppress: bool = Field(
False,
description="do not suppress DC singularity in final assembly "
"(good idea for 2D/TIRF data)",
)
nokz0: bool = Field(
True, description="do not use kz=0 plane of the 0th order in the final assembly"
)
gammaApo: float = Field(
1, description="output apodization gamma; 1.0 means triangular apo"
)
bessel: bool = Field(False, description="bessel-SIM data")
besselExWave: float = Field(
0.488, description="Bessel SIM excitation wavelength in microns"
)
    besselNA: float = Field(0.144, description="Bessel SIM excitation NA")
deskew: float = Field(
0,
description="Deskew angle; if not 0.0 then perform deskewing before processing",
)
deskewshift: int = Field(
0,
description="If deskewed, the output image's extra shift in X (positive->left)",
)
noRecon: bool = Field(
False,
description="No reconstruction will be performed; "
"useful when combined with --deskew",
)
cropXY: int = Field(
0, description="Crop the XY dimension to this number; 0 means no cropping"
)
xyres: float = Field(0.1, description="XY pixel size")
zres: float = Field(0.2, description="Z step size")
zresPSF: float = Field(0.15, description="Z step size of the PSF")
wavelength: int = Field(530, description="emission wavelength in nanometers")
writeTitle: bool = Field(
False,
description="Write command line to image header "
"(may cause issues with bioformats)",
)
def to_config(self, exclude_unset=True):
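        # Render the set parameters as "key=value" lines for the reconstructor config file:
        # underscores become dashes, booleans become 0/1, and k0angles is comma-joined.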
lines = []
for k, v in self.dict(exclude_unset=exclude_unset).items():
if k == "k0angles":
v = ",".join(str(x) for x in v)
if isinstance(v, bool):
v = int(v)
lines.append(f'{k.replace("_", "-")}={v}')
return "\n".join(lines)
| 40.235772 | 88 | 0.658921 | 4,336 | 0.876137 | 416 | 0.084057 | 432 | 0.08729 | 0 | 0 | 1,884 | 0.380683 |
d2b2f379a4dedf2bd69de6e708c00763f4c5952f | 4,098 | py | Python | tesseract_converters/tesseract_to_sa_converter.py | superannotateai/annotateonline-input-converters | 753211f48676d06718bb2d32501ba1df3ace9121 | [
"Apache-2.0"
]
| 10 | 2020-04-30T08:36:08.000Z | 2021-02-27T21:46:45.000Z | tesseract_converters/tesseract_to_sa_converter.py | superannotateai/input_converters | 753211f48676d06718bb2d32501ba1df3ace9121 | [
"Apache-2.0"
]
| 5 | 2020-03-27T07:16:36.000Z | 2020-07-06T04:45:47.000Z | tesseract_converters/tesseract_to_sa_converter.py | superannotateai/annotateonline-input-converters | 753211f48676d06718bb2d32501ba1df3ace9121 | [
"Apache-2.0"
]
| 2 | 2020-06-26T20:02:10.000Z | 2020-06-30T20:56:04.000Z | import os
import json
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--input',
help='Path to input files or folder\
with tesseract dict format.\
File name structure \
[IMAGE_NAME]___tess.json',
required=True
)
parser.add_argument(
'--output',
help='Path to output folder.\
File name structure \
[IMAGE_NAME]___objects.json'
)
parser.add_argument(
'--verbose',
default='0',
choices=['0', '1', '2'],
help="0 -- Doesn't print anything,\
1 -- Prints number of converted files,\
2 -- Prints number of converted files and unconverted files path."
)
args = parser.parse_args()
input_files_list = get_input_list(args.input)
file_name = [os.path.basename(file) for file in input_files_list]
output_files_list = []
    if args.output is None:
output_files_list = get_output_list(file_name)
else:
output_files_list = get_output_list(file_name, args.output)
converter(input_files_list, output_files_list, args.verbose)
def get_input_list(pathname):
input_files_list = []
try:
if os.path.isfile(pathname):
input_files_list.append(os.path.abspath(pathname))
else:
list_files = os.listdir(pathname)
abs_path = os.path.abspath(pathname)
for file in list_files:
input_files_list.append(os.path.join(abs_path, file))
except IOError:
print("ERROR: '%s' file or folder doesn't exist!" % (pathname))
return input_files_list
def get_output_list(input_list, pathname='./output'):
if os.path.exists(pathname):
abs_path = os.path.abspath(pathname)
else:
os.makedirs(pathname)
abs_path = os.path.abspath(pathname)
output_files_list = []
for file in input_list:
output_files_list.append(
os.path.join(abs_path,
file.split("___")[0] + "___objects.json")
)
return output_files_list
def converter(input_files_list, output_files_list, verbose=0):
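    # For each input/output pair: parse the tesseract dict-format JSON and write a
    # SuperAnnotate objects file in which every non-empty OCR token becomes a "bbox"
    # annotation whose point label holds the recognized text.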
converted = 0
for file_in, file_out in zip(input_files_list, output_files_list):
try:
file_json = json.load(open(file_in))
output = []
for i in range(len(file_json['level'])):
if file_json["text"][i] != "" and file_json["text"][i] != " ":
dd = {
"type": "bbox",
"points":
{
"x1":
file_json["left"][i],
"y1":
file_json["top"][i],
"x2":
file_json["left"][i] +
file_json["width"][i],
"y2":
file_json["top"][i] + file_json["height"][i]
},
"className": "Text",
"classId": 2031,
"pointLabels": {
"0": file_json["text"][i]
},
"attributes": [],
"probability": 100,
"locked": False,
"visible": True,
"groupId": 0,
"imageId": 0
}
output.append(dd)
json.dump(output, open(file_out, "w"), indent=2)
converted += 1
except ValueError:
if verbose == '2':
print("WARNING: '%s' file is not json format!" % (file_in))
if int(verbose) > 0:
print(
"Converted to sa format: %d of %d" %
(converted, len(input_files_list))
)
if __name__ == '__main__':
main() | 32.784 | 80 | 0.476086 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 868 | 0.211811 |
d2b3079900df546aeac436f737e69c681f72b12c | 24,525 | py | Python | fhirclient/r4models/contract_tests.py | cspears-mitre/CapStatement | 2390566ed75d420e0615e3a0aacb77e8c030fdcc | [
"Apache-2.0"
]
| 1 | 2021-12-24T11:14:38.000Z | 2021-12-24T11:14:38.000Z | fhirclient/r4models/contract_tests.py | cspears-mitre/CapStatement | 2390566ed75d420e0615e3a0aacb77e8c030fdcc | [
"Apache-2.0"
]
| null | null | null | fhirclient/r4models/contract_tests.py | cspears-mitre/CapStatement | 2390566ed75d420e0615e3a0aacb77e8c030fdcc | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.6.0-bd605d07 on 2018-12-20.
# 2018, SMART Health IT.
import os
import io
import unittest
import json
from . import contract
from .fhirdate import FHIRDate
class ContractTests(unittest.TestCase):
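    # Round-trip tests: each example Contract resource is loaded from JSON, re-serialized, and its fields checked.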
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("Contract", js["resourceType"])
return contract.Contract(js)
def testContract1(self):
inst = self.instantiate_from("pcd-example-notOrg.json")
self.assertIsNotNone(inst, "Must have instantiated a Contract instance")
self.implContract1(inst)
js = inst.as_json()
self.assertEqual("Contract", js["resourceType"])
inst2 = contract.Contract(js)
self.implContract1(inst2)
def implContract1(self, inst):
self.assertEqual(inst.friendly[0].contentAttachment.title, "The terms of the consent in friendly consumer speak.")
self.assertEqual(inst.id, "pcd-example-notOrg")
self.assertEqual(inst.issued.date, FHIRDate("2015-11-18").date)
self.assertEqual(inst.issued.as_json(), "2015-11-18")
self.assertEqual(inst.legal[0].contentAttachment.title, "The terms of the consent in lawyer speak.")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://hl7.org/fhir/v3/ActReason")
self.assertEqual(inst.subType[0].coding[0].code, "Opt-In")
self.assertEqual(inst.subType[0].coding[0].display, "Default Authorization with exceptions.")
self.assertEqual(inst.subType[0].coding[0].system, "http://www.infoway-inforoute.ca.org/Consent-subtype-codes")
self.assertEqual(inst.term[0].offer.text, "Withhold this order and any results or related objects from any provider.")
self.assertEqual(inst.term[0].type.coding[0].code, "withhold-from")
self.assertEqual(inst.term[0].type.coding[0].display, "Withhold all data from specified actor entity.")
self.assertEqual(inst.term[0].type.coding[0].system, "http://example.org/fhir/consent-term-type-codes")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.coding[0].code, "57016-8")
self.assertEqual(inst.type.coding[0].system, "http://loinc.org")
def testContract2(self):
inst = self.instantiate_from("contract-example-ins-policy.json")
self.assertIsNotNone(inst, "Must have instantiated a Contract instance")
self.implContract2(inst)
js = inst.as_json()
self.assertEqual("Contract", js["resourceType"])
inst2 = contract.Contract(js)
self.implContract2(inst2)
def implContract2(self, inst):
self.assertEqual(inst.applies.start.date, FHIRDate("2017-01-01").date)
self.assertEqual(inst.applies.start.as_json(), "2017-01-01")
self.assertEqual(inst.id, "INS-101")
self.assertEqual(inst.identifier[0].system, "http://xyz-insurance.com/forms")
self.assertEqual(inst.identifier[0].value, "YCSCWLN(01-2017)")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://hl7.org/fhir/v3/ActReason")
self.assertEqual(inst.term[0].asset[0].period[0].start.date, FHIRDate("2017-06-01").date)
self.assertEqual(inst.term[0].asset[0].period[0].start.as_json(), "2017-06-01")
self.assertEqual(inst.term[0].asset[0].subtype[0].text, "sample")
self.assertEqual(inst.term[0].asset[0].type[0].coding[0].code, "RicardianContract")
self.assertEqual(inst.term[0].asset[0].type[0].coding[0].system, "urn:ietf:rfc:3986")
self.assertEqual(inst.term[0].asset[0].valuedItem[0].effectiveTime.date, FHIRDate("1995").date)
self.assertEqual(inst.term[0].asset[0].valuedItem[0].effectiveTime.as_json(), "1995")
self.assertEqual(inst.term[0].asset[0].valuedItem[0].entityCodeableConcept.text, "Ford Bobcat")
self.assertEqual(inst.term[0].asset[0].valuedItem[0].factor, 1.0)
self.assertEqual(inst.term[0].asset[0].valuedItem[0].identifier.system, "http://somewhere.motor-vehicle.com/vin")
self.assertEqual(inst.term[0].asset[0].valuedItem[0].identifier.value, "XXSVT34-7665t952236")
self.assertEqual(inst.term[0].asset[0].valuedItem[0].net.currency, "CAD")
self.assertEqual(inst.term[0].asset[0].valuedItem[0].net.value, 200.0)
self.assertEqual(inst.term[0].asset[0].valuedItem[0].points, 1.0)
self.assertEqual(inst.term[0].asset[0].valuedItem[0].quantity.value, 1)
self.assertEqual(inst.term[0].asset[0].valuedItem[0].unitPrice.currency, "CAD")
self.assertEqual(inst.term[0].asset[0].valuedItem[0].unitPrice.value, 200.0)
self.assertEqual(inst.term[0].group[0].offer.text, "Eligible Providers")
self.assertEqual(inst.term[0].group[1].offer.text, "Responsibility for Payment")
self.assertEqual(inst.term[0].group[2].group[0].group[0].offer.text, "Emergency Room Copay")
self.assertEqual(inst.term[0].group[2].group[0].group[1].offer.text, "Professional Visit Copay")
self.assertEqual(inst.term[0].group[2].group[0].offer.text, "Copays")
self.assertEqual(inst.term[0].group[2].group[1].offer.text, "Calendar Year Deductible")
self.assertEqual(inst.term[0].group[2].group[2].offer.text, "Out-Of-Pocket Maximum")
self.assertEqual(inst.term[0].group[2].group[3].group[0].offer.text, "Ambulance Services")
self.assertEqual(inst.term[0].group[2].group[3].group[1].offer.text, "Dental Services")
self.assertEqual(inst.term[0].group[2].group[3].group[2].offer.text, "Diagnostic Services")
self.assertEqual(inst.term[0].group[2].group[3].group[3].offer.text, "Emergency Room Services")
self.assertEqual(inst.term[0].group[2].group[3].group[4].offer.text, "Hospital Inpatient Care")
self.assertEqual(inst.term[0].group[2].group[3].offer.text, "Medical Services")
self.assertEqual(inst.term[0].group[2].offer.text, "List of Benefits")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.coding[0].code, "healthinsurance")
self.assertEqual(inst.type.coding[0].display, "Health Insurance")
self.assertEqual(inst.type.coding[0].system, "http://terminology.hl7.org/CodeSystem/contract-type")
def testContract3(self):
inst = self.instantiate_from("contract-example-42cfr-part2.json")
self.assertIsNotNone(inst, "Must have instantiated a Contract instance")
self.implContract3(inst)
js = inst.as_json()
self.assertEqual("Contract", js["resourceType"])
inst2 = contract.Contract(js)
self.implContract3(inst2)
def implContract3(self, inst):
self.assertEqual(inst.applies.start.date, FHIRDate("2013-11-01T21:18:27-04:00").date)
self.assertEqual(inst.applies.start.as_json(), "2013-11-01T21:18:27-04:00")
self.assertEqual(inst.contentDerivative.coding[0].code, "registration")
self.assertEqual(inst.contentDerivative.coding[0].system, "http://terminology.hl7.org/CodeSystem/contract-content-derivative")
self.assertEqual(inst.id, "C-2121")
self.assertEqual(inst.issued.date, FHIRDate("2013-11-01T21:18:27-04:00").date)
self.assertEqual(inst.issued.as_json(), "2013-11-01T21:18:27-04:00")
self.assertEqual(inst.legal[0].contentAttachment.contentType, "application/pdf")
self.assertEqual(inst.legal[0].contentAttachment.language, "en-US")
self.assertEqual(inst.legal[0].contentAttachment.title, "MDHHS-5515 Consent To Share Your Health Information")
self.assertEqual(inst.legal[0].contentAttachment.url, "http://org.mihin.ecms/ConsentDirective-2121")
self.assertEqual(inst.meta.lastUpdated.date, FHIRDate("2016-07-19T18:18:42.108-04:00").date)
self.assertEqual(inst.meta.lastUpdated.as_json(), "2016-07-19T18:18:42.108-04:00")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://hl7.org/fhir/v3/ActReason")
self.assertEqual(inst.meta.versionId, "1")
self.assertEqual(inst.signer[0].signature[0].type[0].code, "1.2.840.10065.1.12.1.1")
self.assertEqual(inst.signer[0].signature[0].type[0].system, "urn:iso-astm:E1762-95:2013")
self.assertEqual(inst.signer[0].signature[0].when.date, FHIRDate("2017-02-08T10:57:34+01:00").date)
self.assertEqual(inst.signer[0].signature[0].when.as_json(), "2017-02-08T10:57:34+01:00")
self.assertEqual(inst.signer[0].type.code, "SELF")
self.assertEqual(inst.signer[0].type.system, "http://org.mdhhs.fhir.consent-signer-type")
self.assertEqual(inst.status, "executed")
self.assertEqual(inst.subType[0].coding[0].code, "hcd")
self.assertEqual(inst.subType[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/consentcategorycodes")
self.assertEqual(inst.term[0].action[0].intent.coding[0].code, "HPRGRP")
self.assertEqual(inst.term[0].action[0].intent.coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.term[0].action[0].status.text, "Sample")
self.assertEqual(inst.term[0].action[0].subject[0].role.coding[0].code, "IR")
self.assertEqual(inst.term[0].action[0].subject[0].role.coding[0].display, "Recipient")
self.assertEqual(inst.term[0].action[0].subject[0].role.coding[0].system, "http://org.mdhhs.fhir.consent-actor-type")
self.assertEqual(inst.term[0].action[0].subject[0].role.text, "Recipient of restricted health information")
self.assertEqual(inst.term[0].action[0].subject[1].role.coding[0].code, "IS")
self.assertEqual(inst.term[0].action[0].subject[1].role.coding[0].display, "Sender")
self.assertEqual(inst.term[0].action[0].subject[1].role.coding[0].system, "http://org.mdhhs.fhir.consent-actor-type")
self.assertEqual(inst.term[0].action[0].subject[1].role.text, "Sender of restricted health information")
self.assertEqual(inst.term[0].action[0].type.coding[0].code, "action-a")
self.assertEqual(inst.term[0].action[0].type.coding[0].system, "http://terminology.hl7.org/CodeSystem/contractaction")
self.assertEqual(inst.term[0].asset[0].period[0].end.date, FHIRDate("2019-11-01T21:18:27-04:00").date)
self.assertEqual(inst.term[0].asset[0].period[0].end.as_json(), "2019-11-01T21:18:27-04:00")
self.assertEqual(inst.term[0].asset[0].period[0].start.date, FHIRDate("2013-11-01T21:18:27-04:00").date)
self.assertEqual(inst.term[0].asset[0].period[0].start.as_json(), "2013-11-01T21:18:27-04:00")
self.assertEqual(inst.term[0].offer.decision.coding[0].code, "OPTIN")
self.assertEqual(inst.term[0].offer.decision.coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActCode")
self.assertEqual(inst.term[0].offer.text, "Can't refuse")
self.assertEqual(inst.term[0].offer.type.coding[0].code, "statutory")
self.assertEqual(inst.term[0].offer.type.coding[0].system, "http://terminology.hl7.org/CodeSystem/contracttermtypecodes")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.coding[0].code, "OPTIN")
self.assertEqual(inst.type.coding[0].system, "http://org.mdhhs.fhir.consentdirective-type")
self.assertEqual(inst.type.text, "Opt-in consent directive")
def testContract4(self):
inst = self.instantiate_from("pcd-example-notLabs.json")
self.assertIsNotNone(inst, "Must have instantiated a Contract instance")
self.implContract4(inst)
js = inst.as_json()
self.assertEqual("Contract", js["resourceType"])
inst2 = contract.Contract(js)
self.implContract4(inst2)
def implContract4(self, inst):
self.assertEqual(inst.friendly[0].contentAttachment.title, "The terms of the consent in friendly consumer speak.")
self.assertEqual(inst.id, "pcd-example-notLabs")
self.assertEqual(inst.issued.date, FHIRDate("2014-08-17").date)
self.assertEqual(inst.issued.as_json(), "2014-08-17")
self.assertEqual(inst.legal[0].contentAttachment.title, "The terms of the consent in lawyer speak.")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://hl7.org/fhir/v3/ActReason")
self.assertEqual(inst.subType[0].coding[0].code, "Opt-In")
self.assertEqual(inst.subType[0].coding[0].display, "Default Authorization with exceptions.")
self.assertEqual(inst.subType[0].coding[0].system, "http://www.infoway-inforoute.ca.org/Consent-subtype-codes")
self.assertEqual(inst.term[0].group[0].offer.text, "Withhold orders from any provider.")
self.assertEqual(inst.term[0].group[0].subType.coding[0].code, "ServiceRequest")
self.assertEqual(inst.term[0].group[0].subType.coding[0].system, "http://hl7.org/fhir/resource-types")
self.assertEqual(inst.term[0].group[0].type.coding[0].code, "withhold-object-type")
self.assertEqual(inst.term[0].group[0].type.coding[0].system, "http://example.org/fhir/consent-term-type-codes")
self.assertEqual(inst.term[0].group[1].offer.text, "Withhold order results from any provider.")
self.assertEqual(inst.term[0].group[1].subType.coding[0].code, "DiagnosticReport")
self.assertEqual(inst.term[0].group[1].subType.coding[0].system, "http://hl7.org/fhir/resource-types")
self.assertEqual(inst.term[0].group[1].type.coding[0].code, "withhold-object-type")
self.assertEqual(inst.term[0].group[1].type.coding[0].system, "http://example.org/fhir/consent-term-type-codes")
self.assertEqual(inst.term[0].offer.text, "sample")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.coding[0].code, "57016-8")
self.assertEqual(inst.type.coding[0].system, "http://loinc.org")
def testContract5(self):
inst = self.instantiate_from("pcd-example-notThem.json")
self.assertIsNotNone(inst, "Must have instantiated a Contract instance")
self.implContract5(inst)
js = inst.as_json()
self.assertEqual("Contract", js["resourceType"])
inst2 = contract.Contract(js)
self.implContract5(inst2)
def implContract5(self, inst):
self.assertEqual(inst.friendly[0].contentAttachment.title, "The terms of the consent in friendly consumer speak.")
self.assertEqual(inst.id, "pcd-example-notThem")
self.assertEqual(inst.issued.date, FHIRDate("2015-11-18").date)
self.assertEqual(inst.issued.as_json(), "2015-11-18")
self.assertEqual(inst.legal[0].contentAttachment.title, "The terms of the consent in lawyer speak.")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://hl7.org/fhir/v3/ActReason")
self.assertEqual(inst.signer[0].signature[0].type[0].code, "1.2.840.10065.1.12.1.1")
self.assertEqual(inst.signer[0].signature[0].type[0].system, "urn:iso-astm:E1762-95:2013")
self.assertEqual(inst.signer[0].signature[0].when.date, FHIRDate("2013-06-08T10:57:34-07:00").date)
self.assertEqual(inst.signer[0].signature[0].when.as_json(), "2013-06-08T10:57:34-07:00")
self.assertEqual(inst.signer[0].type.code, "COVPTY")
self.assertEqual(inst.signer[0].type.system, "http://terminology.hl7.org/CodeSystem/contractsignertypecodes")
self.assertEqual(inst.subType[0].coding[0].code, "Opt-In")
self.assertEqual(inst.subType[0].coding[0].display, "Default Authorization with exceptions.")
self.assertEqual(inst.subType[0].coding[0].system, "http://www.infoway-inforoute.ca.org/Consent-subtype-codes")
self.assertEqual(inst.term[0].offer.text, "Withhold this order and any results or related objects from specified nurse provider.")
self.assertEqual(inst.term[0].type.coding[0].code, "withhold-from")
self.assertEqual(inst.term[0].type.coding[0].display, "Withhold all data from specified actor entity.")
self.assertEqual(inst.term[0].type.coding[0].system, "http://example.org/fhir/consent-term-type-codes")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.coding[0].code, "57016-8")
self.assertEqual(inst.type.coding[0].system, "http://loinc.org")
def testContract6(self):
inst = self.instantiate_from("pcd-example-notAuthor.json")
self.assertIsNotNone(inst, "Must have instantiated a Contract instance")
self.implContract6(inst)
js = inst.as_json()
self.assertEqual("Contract", js["resourceType"])
inst2 = contract.Contract(js)
self.implContract6(inst2)
def implContract6(self, inst):
self.assertEqual(inst.friendly[0].contentAttachment.title, "The terms of the consent in friendly consumer speak.")
self.assertEqual(inst.id, "pcd-example-notAuthor")
self.assertEqual(inst.issued.date, FHIRDate("2015-11-18").date)
self.assertEqual(inst.issued.as_json(), "2015-11-18")
self.assertEqual(inst.legal[0].contentAttachment.title, "The terms of the consent in lawyer speak.")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://hl7.org/fhir/v3/ActReason")
self.assertEqual(inst.subType[0].coding[0].code, "Opt-In")
self.assertEqual(inst.subType[0].coding[0].display, "Default Authorization with exceptions.")
self.assertEqual(inst.subType[0].coding[0].system, "http://www.infoway-inforoute.ca.org/Consent-subtype-codes")
self.assertEqual(inst.term[0].offer.text, "Withhold all data authored by Good Health provider.")
self.assertEqual(inst.term[0].type.coding[0].code, "withhold-authored-by")
self.assertEqual(inst.term[0].type.coding[0].display, "Withhold all data authored by specified actor entity.")
self.assertEqual(inst.term[0].type.coding[0].system, "http://example.org/fhir/consent-term-type-codes")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.coding[0].code, "57016-8")
self.assertEqual(inst.type.coding[0].system, "http://loinc.org")
def testContract7(self):
inst = self.instantiate_from("contract-example.json")
self.assertIsNotNone(inst, "Must have instantiated a Contract instance")
self.implContract7(inst)
js = inst.as_json()
self.assertEqual("Contract", js["resourceType"])
inst2 = contract.Contract(js)
self.implContract7(inst2)
def implContract7(self, inst):
self.assertEqual(inst.id, "C-123")
self.assertEqual(inst.identifier[0].system, "http://happyvalley.com/contract")
self.assertEqual(inst.identifier[0].value, "12347")
self.assertEqual(inst.legallyBindingAttachment.contentType, "application/pdf")
self.assertEqual(inst.legallyBindingAttachment.url, "http://www.aws3.com/storage/doc.pdf")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://hl7.org/fhir/v3/ActReason")
self.assertEqual(inst.rule[0].contentAttachment.contentType, "application/txt")
self.assertEqual(inst.rule[0].contentAttachment.url, "http://www.rfc-editor.org/bcp/bcp13.txt")
self.assertEqual(inst.term[0].asset[0].period[0].start.date, FHIRDate("2017-06-01").date)
self.assertEqual(inst.term[0].asset[0].period[0].start.as_json(), "2017-06-01")
self.assertEqual(inst.term[0].asset[0].subtype[0].text, "sample")
self.assertEqual(inst.term[0].asset[0].type[0].coding[0].code, "RicardianContract")
self.assertEqual(inst.term[0].asset[0].type[0].coding[0].system, "urn:ietf:rfc:3986")
self.assertEqual(inst.term[0].asset[0].valuedItem[0].effectiveTime.date, FHIRDate("1995").date)
self.assertEqual(inst.term[0].asset[0].valuedItem[0].effectiveTime.as_json(), "1995")
self.assertEqual(inst.term[0].asset[0].valuedItem[0].entityCodeableConcept.text, "Ford Bobcat")
self.assertEqual(inst.term[0].asset[0].valuedItem[0].factor, 1.0)
self.assertEqual(inst.term[0].asset[0].valuedItem[0].identifier.system, "http://somewhere.motor-vehicle.com/vin")
self.assertEqual(inst.term[0].asset[0].valuedItem[0].identifier.value, "XXSVT34-7665t952236")
self.assertEqual(inst.term[0].asset[0].valuedItem[0].net.currency, "CAD")
self.assertEqual(inst.term[0].asset[0].valuedItem[0].net.value, 200.0)
self.assertEqual(inst.term[0].asset[0].valuedItem[0].points, 1.0)
self.assertEqual(inst.term[0].asset[0].valuedItem[0].quantity.value, 1)
self.assertEqual(inst.term[0].asset[0].valuedItem[0].unitPrice.currency, "CAD")
self.assertEqual(inst.term[0].asset[0].valuedItem[0].unitPrice.value, 200.0)
self.assertEqual(inst.term[0].offer.text, "Can't refuse")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the contract</div>")
self.assertEqual(inst.text.status, "generated")
def testContract8(self):
inst = self.instantiate_from("pcd-example-notThis.json")
self.assertIsNotNone(inst, "Must have instantiated a Contract instance")
self.implContract8(inst)
js = inst.as_json()
self.assertEqual("Contract", js["resourceType"])
inst2 = contract.Contract(js)
self.implContract8(inst2)
def implContract8(self, inst):
self.assertEqual(inst.friendly[0].contentAttachment.title, "The terms of the consent in friendly consumer speak.")
self.assertEqual(inst.id, "pcd-example-notThis")
self.assertEqual(inst.issued.date, FHIRDate("2015-11-18").date)
self.assertEqual(inst.issued.as_json(), "2015-11-18")
self.assertEqual(inst.legal[0].contentAttachment.title, "The terms of the consent in lawyer speak.")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://hl7.org/fhir/v3/ActReason")
self.assertEqual(inst.subType[0].coding[0].code, "Opt-In")
self.assertEqual(inst.subType[0].coding[0].display, "Default Authorization with exceptions.")
self.assertEqual(inst.subType[0].coding[0].system, "http://www.infoway-inforoute.ca.org/Consent-subtype-codes")
self.assertEqual(inst.term[0].applies.start.date, FHIRDate("2015-11-18").date)
self.assertEqual(inst.term[0].applies.start.as_json(), "2015-11-18")
self.assertEqual(inst.term[0].identifier.system, "http://example.org/fhir/term-items")
self.assertEqual(inst.term[0].identifier.value, "3347689")
self.assertEqual(inst.term[0].issued.date, FHIRDate("2015-11-01").date)
self.assertEqual(inst.term[0].issued.as_json(), "2015-11-01")
self.assertEqual(inst.term[0].offer.text, "Withhold this order and any results or related objects from any provider.")
self.assertEqual(inst.term[0].type.coding[0].code, "withhold-identified-object-and-related")
self.assertEqual(inst.term[0].type.coding[0].display, "Withhold the identified object and any other resources that are related to this object.")
self.assertEqual(inst.term[0].type.coding[0].system, "http://example.org/fhir/consent-term-type-codes")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.coding[0].code, "57016-8")
self.assertEqual(inst.type.coding[0].system, "http://loinc.org")
| 69.279661 | 152 | 0.685219 | 24,290 | 0.990418 | 0 | 0 | 0 | 0 | 0 | 0 | 6,650 | 0.271152 |
d2b34796cb7b21344e2370533fa5aa6227ece2be | 9,978 | py | Python | evaluation/evaluation.py | Ennosigaeon/xautoml | 6e49ee8b2ffb6d19dcfd9cbe8b3397416c9b5ded | [
"BSD-3-Clause"
]
| 4 | 2022-02-27T08:54:08.000Z | 2022-03-30T21:19:29.000Z | evaluation/evaluation.py | Ennosigaeon/xautoml | 6e49ee8b2ffb6d19dcfd9cbe8b3397416c9b5ded | [
"BSD-3-Clause"
]
| 1 | 2022-02-28T09:41:00.000Z | 2022-03-02T07:44:17.000Z | evaluation/evaluation.py | Ennosigaeon/xautoml | 6e49ee8b2ffb6d19dcfd9cbe8b3397416c9b5ded | [
"BSD-3-Clause"
]
| 2 | 2022-03-01T00:38:09.000Z | 2022-03-21T09:38:49.000Z | import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.stats import ttest_ind
from sklearn.preprocessing import LabelEncoder
def load_data():
questionnaire = pd.read_excel('XAutoML.xlsx')
encoder = LabelEncoder()
encoder.classes_ = np.array(['strongly disagree', 'disagree', 'neutral', 'agree', 'strongly agree'])
for c in questionnaire.columns:
try:
questionnaire.loc[:, c] = questionnaire.loc[:, c].str.strip().str.lower()
questionnaire.loc[:, c] = encoder.transform(questionnaire.loc[:, c])
except (AttributeError, ValueError):
pass
questionnaire.columns = questionnaire.columns.str.strip()
requirements = pd.read_excel('task_results.ods', sheet_name='Requirements', skiprows=1)
requirements = requirements.drop(index=[24], columns=['Unnamed: 1']).T
requirements.columns = requirements.iloc[0]
requirements = requirements[1:]
tasks = pd.read_excel('task_results.ods', sheet_name=0)
tasks = tasks.dropna(axis=1, how='all').dropna(axis=0, how='all')
tasks.index = tasks.iloc[:, 0]
tasks.drop(columns=tasks.columns[:2], inplace=True)
return questionnaire, requirements, tasks
def calculate_sus(df: pd.DataFrame):
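    # SUS scoring: items flagged in `invert` are reversed (4 - x), every item is scaled
    # by 2.5, and the per-respondent sum yields a 0-100 usability score.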
invert = [False, False, True, False, True, False, True, False, True, True]
for c, inv in zip(df.columns, invert):
if inv:
df.loc[:, c] = 4 - df.loc[:, c]
df.loc[:, c] = df.loc[:, c] * 2.5
score = df.sum(axis=1)
print('###### System Usability Score ######')
print(df.mean(axis=0))
print(score.mean(), score.std())
print('\n\n')
def print_visual_design(df: pd.DataFrame):
de = df[df['Role'] == 'domain expert']
ar = df[df['Role'] == 'automl researcher']
ds = df[df['Role'] == 'data scientist']
data = pd.DataFrame([de.mean() + 1, ds.mean() + 1, ar.mean() + 1, df.mean() + 1]).T
print('###### Visual Design ######')
for _, row in data.iterrows():
print(f'\\({row[0]:.2f}\\)\t& \\({row[1]:.2f}\\)\t& \\({row[2]:.2f}\\)\t& \\({row[3]:.2f}\\) \\\\')
print('\n\n')
def print_previous_knowledge(df: pd.DataFrame):
de = df[df['Role'] == 'domain expert']
ar = df[df['Role'] == 'automl researcher']
ds = df[df['Role'] == 'data scientist']
data = pd.DataFrame([de.mean() + 1, ds.mean() + 1, ar.mean() + 1, df.mean() + 1]).T
print('###### Previous Knowledge ######')
for _, row in data.iterrows():
print(f'\\({row[0]:.2f}\\)\t& \\({row[1]:.2f}\\)\t& \\({row[2]:.2f}\\)\t& \\({row[3]:.2f}\\) \\\\')
print('\n\n')
def plot_priority_distribution(df: pd.DataFrame, group=False):
def calc_user_group(value: str):
return value.strip().split('.')[0]
x = []
y = []
m = []
for col in df:
y.append(df[col].to_list())
x.append([col] * df.shape[0])
m.append(df[col].index.map(calc_user_group))
x = np.array(x).flatten()
y = 24 - np.array(y).flatten()
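    # Invert the card ranks (rank 1 = most important of the 24 cards) so more important cards sit higher on the y-axis.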
m = np.array(m).flatten()
data = pd.DataFrame({'x': x, 'y': y, 'role': m})
mean = data.groupby(by=['x', 'role']).mean().reset_index()
mean = pd.DataFrame({
'Domain Expert': 24 - mean.loc[mean['role'] == 'Domain Expert', 'y'].reset_index(drop=True),
'Data Scientist': 24 - mean.loc[mean['role'] == 'Data Scientist', 'y'].reset_index(drop=True),
'AutoML Researcher': 24 - mean.loc[mean['role'] == 'AutoML Researcher', 'y'].reset_index(drop=True),
'All': 24 - data.groupby('x').mean()['y'].reset_index(drop=True)
})
print('Average card rank')
for _, row in mean.iterrows():
print(f'\\({row[0]:.1f}\\)\t& \\({row[1]:.1f}\\)\t& \\({row[2]:.1f}\\)\t& \\({row[3]:.1f}\\) \\\\')
print('\n\n')
if group:
replacements = {
'#01': ['#02', '#03', '#04'],
'#05': ['#06', '#07', '#08'],
'#09': ['#10', '#11', '#12'],
'#15': ['#16'],
'#19': ['#20'],
# '#22': ['#23', '#24']
}
for key, values in replacements.items():
for value in values:
data.loc[data['x'] == value, 'x'] = key
rename = {
'#01': 'Input Data',
'#05': 'Pre-Proc. Data',
'#09': 'Feat.-Eng. Data',
'#13': 'Complete Pipeline',
'#14': 'Search Space',
'#15': 'Search Strategy',
'#17': 'Perf. Metrics',
'#18': 'Perf. Visual.',
'#19': 'Explanations',
'#21': 'View Hyperparam.',
'#22': 'Comp. Perf.',
'#23': 'Comp. Pipelines',
'#24': 'Comp. Hyperparam.'
}
else:
rename = {
'#01': 'R01 View Input',
'#02': 'R02 Desc Input',
'#03': 'R03 Input Stat',
'#04': 'R04 Plot Input',
'#05': 'R05 View Pre-Proc',
'#06': 'R06 Desc Pre-Proc',
'#07': 'R07 Pre-Proc Stat',
'#08': 'R08 Plot Pre-Proc',
'#09': 'R09 View Feat-Eng',
'#10': 'R10 Feat-Eng Stat',
'#11': 'R11 Plot Feat-Eng',
'#12': 'R12 Desc Feat-Eng',
'#13': 'R13 Complete Pipe',
'#14': 'R14 Search Space',
'#15': 'R15 Pipe Search Strat',
'#16': 'R16 HP Search Strat',
'#17': 'R17 View Perf Metrics',
'#18': 'R18 Plot Perf Visual',
'#19': 'R19 Global Expl',
'#20': 'R20 Local Expl',
'#21': 'R21 View HP',
'#22': 'R22 Comp Perf',
'#23': 'R23 Comp Pipe',
'#24': 'R24 Comp HP'
}
for old, new in rename.items():
data.loc[data['x'] == old, 'x'] = new
data.loc[data['role'] == 'AutoML Researcher', 'role'] = 'Data Scientist'
print('Difference between user groups per card')
for card in data['x'].unique():
ds = data[(data['x'] == card) & (data['role'] == 'Data Scientist')]
de = data[(data['x'] == card) & (data['role'] == 'Domain Expert')]
t = ttest_ind(ds['y'].values, de['y'].values)
if t.pvalue < 0.05:
print(f'{card} {t.pvalue:.5f}')
print('\n\n')
sns.set_theme(style="whitegrid")
fig, ax = plt.subplots(1, 1, figsize=(15, 5))
fig.tight_layout()
sns.violinplot(data=data, x='x', y='y', hue='role', split=True, palette='pastel', ax=ax)
sns.despine(left=True)
ax.set_ylim(0, 24)
ax.set_yticklabels([])
ax.set_ylabel(None)
ax.set_xlabel(None)
box = ax.get_position()
if group:
plt.xticks(rotation=15)
fig.text(0.0125, 0.2, 'least important', rotation=90, va='bottom')
fig.text(0.0125, 0.95, 'most important', rotation=90, va='top')
ax.set_position([box.x0, box.y0 + box.height * 0.125, box.width, box.height * 0.875])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), ncol=2)
else:
plt.xticks(rotation=25, ha='right', rotation_mode='anchor')
fig.text(0.025, 0.225, 'least important', rotation=90, va='bottom')
fig.text(0.025, 0.91, 'most important', rotation=90, va='top')
ax.set_position([box.x0 + 0.015, box.y0 + box.height * 0.15, box.width, box.height * 0.8])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.13), ncol=2)
fig.show()
fig.savefig('requirement_cards.pdf')
def calculate_trust_result(text_df: pd.DataFrame, vis_df: pd.DataFrame):
def cohen_d(x: pd.Series, y: pd.Series):
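        # Cohen's d effect size using the pooled standard deviation of both samples.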
nx = len(x)
ny = len(y)
dof = nx + ny - 2
return (x.mean() - y.mean()) / math.sqrt(((nx - 1) * x.std() ** 2 + (ny - 1) * y.std() ** 2) / dof)
vis_df.columns = text_df.columns
print('###### Trust ######')
for col in text_df:
if col == 'Role':
continue
text = text_df.loc[:, col]
vis = vis_df.loc[:, col]
t = ttest_ind(text.values, vis.values, alternative='less')
print(
f'{col}, \({text.mean() + 1:.2f} \pm {text.std():.2f}\), \({vis.mean() + 1:.2f} \pm {vis.std():.2f}\), \(p = {t.pvalue:.2e}\), \(d = {cohen_d(text, vis):.2f}\)')
text_de, vis_de = text_df[text_df['Role'] == 'domain expert'], vis_df[vis_df['Role'] == 'domain expert']
text_ar, vis_ar = text_df[text_df['Role'] == 'automl researcher'], vis_df[vis_df['Role'] == 'automl researcher']
text_ds, vis_ds = text_df[text_df['Role'] == 'data scientist'], vis_df[vis_df['Role'] == 'data scientist']
for col in text_df:
if col == 'Role':
continue
print(
f'\\({text_de[col].mean() + 1:.2f}\\)\t& \\({text_ds[col].mean() + 1:.2f}\\)\t& \\({text_ar[col].mean() + 1:.2f}\\)\t& \\({text_df[col].mean() + 1:.2f}\\) \\\\')
print(
f'\\({vis_de[col].mean() + 1:.2f}\\)\t& \\({vis_ds[col].mean() + 1:.2f}\\)\t& \\({vis_ar[col].mean() + 1:.2f}\\)\t& \\({vis_df[col].mean() + 1:.2f}\\) \\\\')
print('\n\n')
def calculate_task_success(df: pd.DataFrame):
encoder = LabelEncoder()
encoder.classes_ = np.array(['n', 'y'])
for c in df.columns:
df.loc[:, c] = encoder.transform(df.loc[:, c])
with pd.option_context('display.precision', 0):
print('Task success percentage')
print(df.mean(axis=1) * 100)
print(df.mean().mean() * 100)
print('\n\n')
def index(df: pd.DataFrame, slice_) -> pd.DataFrame:
df2 = df.iloc[:, slice_]
df2['Role'] = df['Role']
return df2
questionnaire, requirements, tasks = load_data()
print_visual_design(index(questionnaire, slice(27, 32)))
print_previous_knowledge(index(questionnaire, slice(6, 11)))
calculate_sus(index(questionnaire, slice(32, 42)))
plot_priority_distribution(requirements)
calculate_task_success(tasks)
calculate_trust_result(index(questionnaire, slice(14, 20)), index(questionnaire, slice(20, 26)))
print('Correlation ML expertise and understanding of ML model')
print(questionnaire.iloc[:, [6, 15]].corr())
| 35.763441 | 173 | 0.538785 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,948 | 0.29545 |
d2b462f25f6094199e7adc2a1e6de5c3e66fd2f5 | 4,941 | py | Python | matplotlib/tutorials_python/colors/colors.py | gottaegbert/penter | 8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d | [
"MIT"
]
| 13 | 2020-01-04T07:37:38.000Z | 2021-08-31T05:19:58.000Z | matplotlib/tutorials_python/colors/colors.py | gottaegbert/penter | 8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d | [
"MIT"
]
| 3 | 2020-06-05T22:42:53.000Z | 2020-08-24T07:18:54.000Z | matplotlib/tutorials_python/colors/colors.py | gottaegbert/penter | 8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d | [
"MIT"
]
| 9 | 2020-10-19T04:53:06.000Z | 2021-08-31T05:20:01.000Z | """
*****************
Specifying Colors
*****************
Matplotlib recognizes the following formats to specify a color:
* an RGB or RGBA (red, green, blue, alpha) tuple of float values in closed
interval ``[0, 1]`` (e.g., ``(0.1, 0.2, 0.5)`` or ``(0.1, 0.2, 0.5, 0.3)``);
* a hex RGB or RGBA string (e.g., ``'#0f0f0f'`` or ``'#0f0f0f80'``;
case-insensitive);
* a shorthand hex RGB or RGBA string, equivalent to the hex RGB or RGBA
string obtained by duplicating each character, (e.g., ``'#abc'``, equivalent
to ``'#aabbcc'``, or ``'#abcd'``, equivalent to ``'#aabbccdd'``;
case-insensitive);
* a string representation of a float value in ``[0, 1]`` inclusive for gray
level (e.g., ``'0.5'``);
* one of ``{'b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'}``, they are the single
character short-hand notations for blue, green, red, cyan, magenta, yellow,
black, and white.
* a X11/CSS4 color name (case-insensitive);
* a name from the `xkcd color survey`_, prefixed with ``'xkcd:'`` (e.g.,
``'xkcd:sky blue'``; case insensitive);
* one of the Tableau Colors from the 'T10' categorical palette (the default
color cycle): ``{'tab:blue', 'tab:orange', 'tab:green', 'tab:red',
'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan'}``
(case-insensitive);
* a "CN" color spec, i.e. ``'C'`` followed by a number, which is an index into
the default property cycle (``matplotlib.rcParams['axes.prop_cycle']``); the
indexing is intended to occur at rendering time, and defaults to black if the
cycle does not include color.
.. _xkcd color survey: https://xkcd.com/color/rgb/
"Red", "Green", and "Blue" are the intensities of those colors, the combination
of which span the colorspace.
How "Alpha" behaves depends on the ``zorder`` of the Artist. Higher
``zorder`` Artists are drawn on top of lower Artists, and "Alpha" determines
whether the lower artist is covered by the higher.
If the old RGB of a pixel is ``RGBold`` and the RGB of the
pixel of the Artist being added is ``RGBnew`` with Alpha ``alpha``,
then the RGB of the pixel is updated to:
``RGB = RGBOld * (1 - Alpha) + RGBnew * Alpha``. Alpha
of 1 means the old color is completely covered by the new Artist, Alpha of 0
means that pixel of the Artist is transparent.
For more information on colors in matplotlib see
* the :doc:`/gallery/color/color_demo` example;
* the `matplotlib.colors` API;
* the :doc:`/gallery/color/named_colors` example.
"CN" color selection
--------------------
"CN" colors are converted to RGBA as soon as the artist is created. For
example,
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
th = np.linspace(0, 2*np.pi, 128)
def demo(sty):
mpl.style.use(sty)
fig, ax = plt.subplots(figsize=(3, 3))
ax.set_title('style: {!r}'.format(sty), color='C0')
ax.plot(th, np.cos(th), 'C1', label='C1')
ax.plot(th, np.sin(th), 'C2', label='C2')
ax.legend()
demo('default')
demo('seaborn')
###############################################################################
# will use the first color for the title and then plot using the second
# and third colors of each style's ``mpl.rcParams['axes.prop_cycle']``.
#
#
# .. _xkcd-colors:
#
# xkcd v X11/CSS4
# ---------------
#
# The xkcd colors are derived from a user survey conducted by the
# webcomic xkcd. `Details of the survey are available on the xkcd blog
# <https://blog.xkcd.com/2010/05/03/color-survey-results/>`__.
#
# Out of 148 colors in the CSS color list, there are 95 name collisions
# between the X11/CSS4 names and the xkcd names, all but 3 of which have
# different hex values. For example ``'blue'`` maps to ``'#0000FF'``
# whereas ``'xkcd:blue'`` maps to ``'#0343DF'``. Due to these name
# collisions all of the xkcd colors have ``'xkcd:'`` prefixed. As noted in
# the blog post, while it might be interesting to re-define the X11/CSS4 names
# based on such a survey, we do not do so unilaterally.
#
# The name collisions are shown in the table below; the color names
# where the hex values agree are shown in bold.
import matplotlib._color_data as mcd
import matplotlib.patches as mpatch
overlap = {name for name in mcd.CSS4_COLORS
if "xkcd:" + name in mcd.XKCD_COLORS}
fig = plt.figure(figsize=[4.8, 16])
ax = fig.add_axes([0, 0, 1, 1])
for j, n in enumerate(sorted(overlap, reverse=True)):
weight = None
cn = mcd.CSS4_COLORS[n]
xkcd = mcd.XKCD_COLORS["xkcd:" + n].upper()
if cn == xkcd:
weight = 'bold'
r1 = mpatch.Rectangle((0, j), 1, 1, color=cn)
r2 = mpatch.Rectangle((1, j), 1, 1, color=xkcd)
txt = ax.text(2, j+.5, ' ' + n, va='center', fontsize=10,
weight=weight)
ax.add_patch(r1)
ax.add_patch(r2)
ax.axhline(j, color='k')
ax.text(.5, j + 1.5, 'X11', ha='center', va='center')
ax.text(1.5, j + 1.5, 'xkcd', ha='center', va='center')
ax.set_xlim(0, 3)
ax.set_ylim(0, j + 2)
ax.axis('off')
| 36.330882 | 79 | 0.646225 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,789 | 0.766849 |
d2b6b250831a7174cf7989d9fc42a91268a025cd | 1,313 | py | Python | 12-listComprehensions.py | pgiardiniere/notes-WhirlwindTourOfPython | 10f483ea4452f0a45f2103886992fd77c2f3ac7c | [
"CC0-1.0"
]
| null | null | null | 12-listComprehensions.py | pgiardiniere/notes-WhirlwindTourOfPython | 10f483ea4452f0a45f2103886992fd77c2f3ac7c | [
"CC0-1.0"
]
| null | null | null | 12-listComprehensions.py | pgiardiniere/notes-WhirlwindTourOfPython | 10f483ea4452f0a45f2103886992fd77c2f3ac7c | [
"CC0-1.0"
]
| null | null | null | # List Comprehensions
#########################
### Basic List Comprehensions
#########################
# allow us to circumvent constructing lists with for loops
l = [] # The Old Way
for n in range(12):
l.append(n**2)
[n ** 2 for n in range(12)] # Comprehension way
# General Syntax:
# [ `expr` for `var` in `iterable` ]
### Multiple iteration --- use tuples!
[(i, j) for i in range(2) for j in range(3)]
### Conditionals on the Iterator
[i for i in range(20) if i % 3 > 0] #S={i|0<=i<20, 3!|i, i∈I}
l = [] # equivalent old-school construction:
for val in range(20):
if val % 3:
l.append(val)
### Conditionals on the Value
# C code :: single-line conditional operator ?
# int absval = (val < 0) ? -val : val
# Python code :: single-line conditional operator if-else
val = -10
val if val >= 0 else -val
# if 3 !| val -> val in list.
# if 2 | val -> -val.
[val if val % 2 else -val
for val in range(20) if val % 3]
#########################
### Other comprehensions
#########################
{ n**2 for n in range(12) } # Set comprehension
{ n:n**2 for n in range(12) } # Dict comprehension
{ a % 3 for a in range(1000) } # a = {0, 1, 2}
# GENERATOR EXPRESSION ---- see next chapter for deets
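# (lazy: values are produced on demand, e.g. sum(n**2 for n in range(12)) consumes it without building a list)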
( n**2 for n in range(12) ) | 26.795918 | 61 | 0.545316 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 789 | 0.6 |
d2b6cbdba4cdbf4de3ed032d08f889932f594f92 | 1,515 | py | Python | src/chemical_roles/export/cli.py | bgyori/chemical-roles | 31a917e911075e3be7eea509e143d3ff48e942cc | [
"MIT"
]
| 5 | 2021-02-05T01:27:53.000Z | 2021-07-12T15:47:08.000Z | src/chemical_roles/export/cli.py | bgyori/chemical-roles | 31a917e911075e3be7eea509e143d3ff48e942cc | [
"MIT"
]
| 8 | 2019-10-10T13:02:18.000Z | 2020-05-11T18:41:56.000Z | src/chemical_roles/export/cli.py | bgyori/chemical-roles | 31a917e911075e3be7eea509e143d3ff48e942cc | [
"MIT"
]
| 5 | 2020-06-07T13:11:34.000Z | 2021-07-12T14:24:01.000Z | # -*- coding: utf-8 -*-
"""CLI for Chemical Roles exporters."""
import os
import click
from ..constants import DATA
@click.group()
def export():
"""Export the database."""
@export.command(name='all')
@click.pass_context
def export_all(ctx):
"""Export all."""
ctx.invoke(summary)
ctx.invoke(obo)
ctx.invoke(bel)
ctx.invoke(indra)
directory_option = click.option('--directory', default=DATA)
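# Reusable --directory option shared by the export subcommands below.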
@export.command()
def summary():
"""Rewrite readme and generate new export."""
from .build import rewrite_repo_readme, write_export
import seaborn as sns
sns.set(font_scale=1.3, style='whitegrid')
rewrite_repo_readme()
write_export()
@export.command()
@directory_option
def bel(directory):
"""Write BEL export."""
import pybel
from .bel import get_bel
graph = get_bel()
pybel.dump(graph, os.path.join(directory, 'crog.bel.nodelink.json.gz'))
@export.command()
@directory_option
def indra(directory):
"""Write INDRA export."""
import pybel
from .bel import get_bel
graph = get_bel(use_inferred=False, add_evidence=False)
pybel.to_indra_statements_json_file(graph, os.path.join(directory, 'crog.indra.json'), sort_keys=True)
@export.command()
@directory_option
def obo(directory):
"""Write OBO export."""
from .obo import get_obo
o = get_obo()
o.write_obo(os.path.join(directory, 'crog.obo'))
o.write_obonet_gz(os.path.join(directory, 'crog.obonet.json.gz'))
if __name__ == '__main__':
export()
| 21.041667 | 106 | 0.684488 | 0 | 0 | 0 | 0 | 1,272 | 0.839604 | 0 | 0 | 335 | 0.221122 |
d2b7475246a09fa72d42e65c0defb8588ba3890e | 4,681 | py | Python | gdsfactory/geometry/write_drc.py | jorgepadilla19/gdsfactory | 68e1c18257a75d4418279851baea417c8899a165 | [
"MIT"
]
| 42 | 2020-05-25T09:33:45.000Z | 2022-03-29T03:41:19.000Z | gdsfactory/geometry/write_drc.py | jorgepadilla19/gdsfactory | 68e1c18257a75d4418279851baea417c8899a165 | [
"MIT"
]
| 133 | 2020-05-28T18:29:04.000Z | 2022-03-31T22:21:42.000Z | gdsfactory/geometry/write_drc.py | jorgepadilla19/gdsfactory | 68e1c18257a75d4418279851baea417c8899a165 | [
"MIT"
]
| 17 | 2020-06-30T07:07:50.000Z | 2022-03-17T15:45:27.000Z | """Write DRC rule decks in klayout.
TODO:
- add min area
- define derived layers (composed rules)
"""
import pathlib
from dataclasses import asdict, is_dataclass
from typing import List, Optional
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
from gdsfactory.config import logger
from gdsfactory.install import get_klayout_path
from gdsfactory.types import Dict, Layer, PathType
layer_name_to_min_width: Dict[str, float]
RuleType = Literal[
"width",
"space",
"enclosing",
]
def rule_width(value: float, layer: str, angle_limit: float = 90) -> str:
"""Min feature size"""
category = "width"
error = f"{layer} {category} {value}um"
return (
f"{layer}.{category}({value}, angle_limit({angle_limit}))"
f".output('{error}', '{error}')"
)
def rule_space(value: float, layer: str, angle_limit: float = 90) -> str:
"""Min Space between shapes of layer"""
category = "space"
error = f"{layer} {category} {value}um"
return (
f"{layer}.{category}({value}, angle_limit({angle_limit}))"
f".output('{error}', '{error}')"
)
def rule_separation(value: float, layer1: str, layer2: str):
"""Min space between different layers"""
error = f"min {layer1} {layer2} separation {value}um"
return f"{layer1}.separation({layer2}, {value})" f".output('{error}', '{error}')"
def rule_enclosing(
value: float, layer1: str, layer2: str, angle_limit: float = 90
) -> str:
"""Layer1 must be enclosed by layer2 by value.
checks if layer1 encloses (is bigger than) layer2 by value
"""
error = f"{layer1} enclosing {layer2} by {value}um"
return (
f"{layer1}.enclosing({layer2}, angle_limit({angle_limit}), {value})"
f".output('{error}', '{error}')"
)
def write_layer_definition(layer_map: Dict[str, Layer]) -> List[str]:
    """Returns the layer definition lines for klayout
Args:
layer_map: can be dict or dataclass
"""
layer_map = asdict(layer_map) if is_dataclass(layer_map) else layer_map
return [
f"{key} = input({value[0]}, {value[1]})" for key, value in layer_map.items()
]
def write_drc_deck(rules: List[str], layer_map: Dict[str, Layer]) -> str:
"""Returns drc_rule_deck for klayou
Args:
rules: list of rules
layer_map: layer definitions can be dict or dataclass
"""
script = []
script += write_layer_definition(layer_map=layer_map)
script += ["\n"]
script += rules
return "\n".join(script)
def write_drc_deck_macro(
name="generic",
filepath: Optional[PathType] = None,
shortcut: str = "Ctrl+Shift+D",
**kwargs,
) -> str:
"""Write script for klayout rule deck
Args:
name: drc rule deck name
filepath: Optional macro path (defaults to .klayout/drc/name.lydrc)
Keyword Args:
rules: list of rules
layer_map: layer definitions can be dict or dataclass
"""
script = f"""<?xml version="1.0" encoding="utf-8"?>
<klayout-macro>
<description>{name} DRC</description>
<version/>
<category>drc</category>
<prolog/>
<epilog/>
<doc/>
<autorun>false</autorun>
<autorun-early>false</autorun-early>
<shortcut>{shortcut}</shortcut>
<show-in-menu>true</show-in-menu>
<group-name>drc_scripts</group-name>
<menu-path>tools_menu.drc.end</menu-path>
<interpreter>dsl</interpreter>
<dsl-interpreter-name>drc-dsl-xml</dsl-interpreter-name>
<text># {name} DRC
# Read about DRC scripts in the User Manual under "Design Rule Check (DRC)"
# Based on SOEN pdk https://github.com/usnistgov/SOEN-PDK/tree/master/tech/OLMAC
# http://klayout.de/doc/manual/drc_basic.html
report("generic DRC")
tiles(100)
tile_borders(2)
threads(3)
"""
script += write_drc_deck(**kwargs)
script += """
</text>
</klayout-macro>
"""
filepath = filepath or get_klayout_path() / "drc" / f"{name}.lydrc"
filepath = pathlib.Path(filepath)
filepath.write_text(script)
logger.info(f"Wrote DRC deck to {filepath}")
return script
if __name__ == "__main__":
import gdsfactory as gf
rules = [
rule_width(layer="WG", value=0.2),
rule_space(layer="WG", value=0.2),
rule_width(layer="M1", value=1),
rule_width(layer="M2", value=2),
rule_space(layer="M2", value=2),
rule_separation(layer1="HEATER", layer2="M1", value=1.0),
rule_enclosing(layer1="M1", layer2="VIAC", value=0.2),
]
drc_rule_deck = write_drc_deck_macro(rules=rules, layer_map=gf.LAYER)
print(drc_rule_deck)
| 26.902299 | 85 | 0.654134 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,472 | 0.528092 |
d2b75bb3697ff16713aa871c5e493e77fa916f5c | 1,620 | py | Python | virtus/core/migrations/0004_auto_20180417_1625.py | eltonjncorreia/gerenciar-dados-virtus | b8e1b8caa152b18221046f6841761d805b232268 | [
"MIT"
]
| null | null | null | virtus/core/migrations/0004_auto_20180417_1625.py | eltonjncorreia/gerenciar-dados-virtus | b8e1b8caa152b18221046f6841761d805b232268 | [
"MIT"
]
| null | null | null | virtus/core/migrations/0004_auto_20180417_1625.py | eltonjncorreia/gerenciar-dados-virtus | b8e1b8caa152b18221046f6841761d805b232268 | [
"MIT"
]
| null | null | null | # Generated by Django 2.0.4 on 2018-04-17 19:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0003_auto_20180417_1613'),
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('codigo', models.IntegerField(verbose_name='codigo')),
('descricao', models.CharField(max_length=255, verbose_name='descricao')),
('valor', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='valor')),
('unitario', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='Unitário')),
('quantidade', models.IntegerField(verbose_name='quantidade')),
],
options={
'verbose_name': 'Item',
'verbose_name_plural': 'Itens',
'ordering': ['codigo'],
},
),
migrations.AlterModelOptions(
name='cliente',
options={'ordering': ['nome'], 'verbose_name': 'Cliente', 'verbose_name_plural': 'Clientes'},
),
migrations.AlterModelOptions(
name='endereco',
options={'ordering': ['tipo'], 'verbose_name': 'Endereço', 'verbose_name_plural': 'Endereços'},
),
migrations.AlterModelOptions(
name='pedido',
options={'ordering': ['numero'], 'verbose_name': 'Pedido', 'verbose_name_plural': 'Pedidos'},
),
]
| 38.571429 | 114 | 0.569753 | 1,530 | 0.942699 | 0 | 0 | 0 | 0 | 0 | 0 | 496 | 0.305607 |
d2b7ebb7c7ccc1338b94c19d7637e3ceac872b46 | 2,173 | py | Python | image_demo.py | a888999a/yolov3fusion1 | 3659898aee34a351e95ea545236b8bc682901498 | [
"MIT"
]
| 7 | 2020-09-23T10:37:17.000Z | 2021-12-26T00:23:02.000Z | image_demo.py | a888999a/yolov3fusion1 | 3659898aee34a351e95ea545236b8bc682901498 | [
"MIT"
]
| null | null | null | image_demo.py | a888999a/yolov3fusion1 | 3659898aee34a351e95ea545236b8bc682901498 | [
"MIT"
]
| null | null | null | #! /usr/bin/env python
# coding=utf-8
#================================================================
# Copyright (C) 2019 * Ltd. All rights reserved.
#
# Editor : VIM
# File name : image_demo.py
# Author : YunYang1994
# Created date: 2019-01-20 16:06:06
# Description :
#
#================================================================
import cv2
import numpy as np
import core.utils as utils
import tensorflow as tf
from PIL import Image
return_elements = ["input/input_rgb:0","input/input_lwir:0", "pred_sbbox/concat_2:0", "pred_mbbox/concat_2:0", "pred_lbbox/concat_2:0"]
pb_file = "./yolov3_coco.pb"
image_path_rgb = r"C:\Users\gary\Desktop\b09\test\JPEGImages\rgb\set06_V000_I00019.jpg"
image_path_lwir = r"C:\Users\gary\Desktop\b09\test\JPEGImages\lwir\set06_V000_I00019.jpg"
num_classes = 1
input_size = 416
graph = tf.Graph()
original_rgb = cv2.imread(image_path_rgb)
original_lwir = cv2.imread(image_path_lwir)
original_image_rgb = cv2.cvtColor(original_rgb, cv2.COLOR_BGR2RGB)
original_image_lwir = cv2.cvtColor(original_lwir, cv2.COLOR_BGR2RGB)
original_image_size = original_image_rgb.shape[:2]
image_rgb,image_lwir = utils.image_preporcess(np.copy(original_image_rgb),np.copy(original_image_lwir), [input_size, input_size])
image_rgb = image_rgb[np.newaxis, ...]
image_lwir = image_lwir[np.newaxis, ...]
return_tensors = utils.read_pb_return_tensors(graph, pb_file, return_elements)
with tf.Session(graph=graph) as sess:
pred_sbbox, pred_mbbox, pred_lbbox = sess.run(
[return_tensors[2], return_tensors[3], return_tensors[4]],
feed_dict={ return_tensors[0]: image_rgb,return_tensors[1]: image_lwir})
pred_bbox = np.concatenate([np.reshape(pred_sbbox, (-1, 5 + num_classes)),
np.reshape(pred_mbbox, (-1, 5 + num_classes)),
np.reshape(pred_lbbox, (-1, 5 + num_classes))], axis=0)
bboxes = utils.postprocess_boxes(pred_bbox, original_image_size, input_size, 0.3)
bboxes = utils.nms(bboxes, 0.45, method='nms')
image = utils.draw_bbox(original_image_rgb, bboxes)
image = Image.fromarray(image)
image.show()
| 35.048387 | 135 | 0.673263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 625 | 0.287621 |
d2b930c9508039d505766f1d70318392c9baf277 | 7,090 | py | Python | Sensor/main.py | mahsahadian/EdgeBenchmarkTool | cafddb2eb66732da0bff8f26107788e3c93fbe2f | [
"MIT"
]
| null | null | null | Sensor/main.py | mahsahadian/EdgeBenchmarkTool | cafddb2eb66732da0bff8f26107788e3c93fbe2f | [
"MIT"
]
| null | null | null | Sensor/main.py | mahsahadian/EdgeBenchmarkTool | cafddb2eb66732da0bff8f26107788e3c93fbe2f | [
"MIT"
]
| 2 | 2022-01-31T01:55:56.000Z | 2022-02-01T01:43:20.000Z |
import cv2
from datetime import *
import time
import logging
import base64
import sys
import os
import shutil
import paho.mqtt.client as mqtt
from influxdb import InfluxDBClient
import datetime
import sys
import re
from typing import NamedTuple
import json
from dotenv import load_dotenv
load_dotenv("sensor-variables.env")
log = logging.getLogger()
log.setLevel('DEBUG')
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(name)s: %(message)s"))
log.addHandler(handler)
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
print('Hello 1')
def on_connect(client, userdata, flags, rc):
""" The callback for when the client receives a CONNACK response from the server."""
print('Connected with result code ' + str(rc))
client.subscribe('topic')
# The callback for when a PUBLISH message is received from the server.
def save_influx(json_body, body):
print(" Saving data of : ", sys.getsizeof(str(body)), ' bytes')
influx_client.write_points(json_body)
def on_message(client, userdata, msg):
#current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
timestamp = str(int(time.time()))
#print(msg.topic + ' ' + str(msg.payload))
#sensor_data = _parse_mqtt_message(msg.topic, msg.payload.decode('utf-8'))
#if sensor_data is not None:
# _send_sensor_data_to_influxdb(sensor_data)
print("a")
#splits_ = str(msg.payload).split('XXX')
#splits_ = str(msg.payload).split('XXX')
#for i in range(len(splits_)):
json_body = [
{
"measurement": "t_1_4",
"tags": {
"camera_id": camera_id,
},
#"time": timestamp,
"transmitdelay":transmitdelay,
"JPGQuality":JPGQuality,
"fields": {
"value": str(msg.payload) #str(msg.payload)
}
}
]
save_influx(json_body, str(msg.payload))
#print(msg.topic, str(msg.payload))
            # think time or sleep after sending
client.loop_stop() # Stop loop
client.disconnect() # disconnect
#if splits_[i] == 'refresh':
#client.reinitialise()
#camera = Camera(camera_id, destination_cluster_ip, JPGQuality, transmitdelay, './imagesout')
#camera.processVideoStream()
#time.sleep(1)
#val = splits_[1].replace('"', '')
#print('recieved id: ', val)
#if int(val) == 2222:
# camera = Camera(camera_id, destination_cluster_ip, JPGQuality, transmitdelay, './imagesout')
# camera.processVideoStream()
def _init_influxdb_database():
databases = influx_client.get_list_database()
if len(list(filter(lambda x: x['name'] == INFLUXDB_DATABASE, databases))) == 0:
influx_client.create_database(INFLUXDB_DATABASE)
influx_client.switch_database(INFLUXDB_DATABASE)
def myconverter(o):
if isinstance(o, datetime.datetime):
return o.__str__()
class Camera():
def __init__(self,camera_id,destination_cluster_ip,JPGQuality,transmitdelay, folder):
self.camera_id = camera_id
self.destination_cluster_ip = destination_cluster_ip
self.JPGQuality = JPGQuality
self.transmitdelay = transmitdelay
start = time.time()
self.folder = folder
def cleanup(self):
folder = './imagesout'
for the_file in os.listdir ('./imagesout'):
file_path = os.path.join ('./imagesout', the_file)
try:
if os.path.isfile (file_path):
os.unlink (file_path)
# elif os.path.isdir(file_path): shutil.rmtree(file_path)
except Exception as e:
print (e)
def processVideoStream(self, thread=0):
vidcap = cv2.VideoCapture('black.mp4')
success, image = vidcap.read ()
count = 0
success = True
day_date= date.today()
start = time.time ()
#i = self.JPGQuality
print('JPGQuality:', self.JPGQuality)
list_image_base64 = []
list_image_base64_str = ''
image_base64_last = ''
while success:
#for i in range(9):
#self.JPGQuality = i + 1
cv2.imwrite("./imagesout/frame%d.jpg" % count, image, [int(cv2.IMWRITE_JPEG_QUALITY), self.JPGQuality]) # save frame as JPEG file
imageFileNameandPath = ("./imagesout/frame%d.jpg" % count)
image_base64 = self.convertToBase64(imageFileNameandPath)
success, image = vidcap.read ()
print ('Read a new frame: ', success, ' thread number:', thread)
timestamp = str(int(time.time()))
frame_id = timestamp+str(count)
end = time.time()
runtime_seconds = end - start
data = {'camera_id':str(self.camera_id), 'frame_id':str(frame_id), 'timestamp':timestamp, 'duration':str(int(runtime_seconds)) }
#self.cassandraclient.saveToCassandra(self.camera_id, frame_id, timestamp,day_date ,image_base64)
#self.kafkaclient.saveToKafka(self.camera_id, frame_id, timestamp, day_date, image_base64)
#list_image_base64.append(str(image_base64))
list_image_base64_str += str(image_base64)+'XXX'
image_base64_last = str(image_base64)
cname = "Client" + str(count)
client = mqtt.Client(cname)
client.on_connect = on_connect
client.on_message = on_message
client.connect(os.getenv('MQTT_SERVER_IP'), os.getenv('MQTT_SERVER_PORT'), 60)
client.subscribe("topic", qos=1)
client.publish(topic="topic", payload=str(image_base64), qos=1, retain=False)
#client.loop_forever()
client.loop_start()
time.sleep(1)
#list_image_base64_str = ''
#print(count)
count += 1
print('Experiment Runtime (seconds): ' + str(int(runtime_seconds)))
print('Images written per (second): ' + str(count/runtime_seconds))
self.cleanup()
def convertToBase64(self,fileNameandPath):
with open(fileNameandPath, "rb") as imageFile:
str = base64.b64encode(imageFile.read())
return str
camera_id = os.getenv('CAMERA_ID') # sys.argv[1] # 123
destination_cluster_ip = os.getenv('DESTINATION_CLUSTER_IP') #sys.argv[2] # '132.207.170.59'
JPGQuality = os.getenv('JPGQUALITY')#int(sys.argv[3] ) # 20
transmitdelay = os.getenv('TRANSMITDELAY') # int(sys.argv[4]) # 10
check_looping = 0
INFLUXDB_DATABASE = os.getenv('INFLUXDB_DATABASE_NAME')
influx_client = InfluxDBClient(os.getenv('INFLUXDB_DATABASE_IP'), os.getenv('INFLUXDB_DATABASE_PORT'), database=INFLUXDB_DATABASE)
_init_influxdb_database()
#while True:
camera = Camera(camera_id, destination_cluster_ip, JPGQuality, transmitdelay, './imagesout')
camera.processVideoStream()
| 33.130841 | 146 | 0.629478 | 3,485 | 0.491537 | 0 | 0 | 0 | 0 | 0 | 0 | 2,360 | 0.332863 |
d2b975627d7b7c61820ad7bec967dad5b7b1e8aa | 4,511 | py | Python | oxide/plugins/other/StartupItems.py | john-clark/rust-oxide-umod | 56feca04f96d8a43a1b56e080fc81d526f7471c3 | [
"MIT"
]
| 13 | 2019-05-13T08:03:50.000Z | 2022-02-06T16:44:35.000Z | oxide/plugins/other/StartupItems.py | john-clark/rust-oxide-umod | 56feca04f96d8a43a1b56e080fc81d526f7471c3 | [
"MIT"
]
| null | null | null | oxide/plugins/other/StartupItems.py | john-clark/rust-oxide-umod | 56feca04f96d8a43a1b56e080fc81d526f7471c3 | [
"MIT"
]
| 8 | 2019-12-12T15:48:03.000Z | 2021-12-24T17:04:45.000Z | # Note:
# I add an underscore at the beginning of a variable name, for example "_variable", to prevent
# conflicts with built-in variables from Oxide.
# Used to manage the player's inventory.
import ItemManager
# Used to get the player's information.
import BasePlayer
# The plug-in name should be the same as the class name and file name.
class StartupItems:
# Always start with a constructor.
def __init__(self):
        # All the variables listed below are recommended for plug-in and developer information.
self.Title = 'StartupItems'
self.Description = 'Set default items when player respawn after dead.'
self.Author = 'RedNinja1337'
self.Version = V(1, 0, 5)
self.Url = 'http://oxidemod.org/plugins/startupitems.1323/'
self.ResourceId = 1323
    # Create the configuration file if it does not exist.
def LoadDefaultConfig(self):
        # Add some demo data as an example in the configuration file.
self.Config['GroupItems'] = ({
'admin':({'item_shortname':'attire.hide.boots', 'Amount':1, 'Container':'Wear'},
{'item_shortname':'attire.hide.pants', 'Amount':1, 'Container':'Wear'},
{'item_shortname':'rock', 'Amount':1, 'Container':'Belt'},
{'item_shortname':'bow.hunting', 'Amount':1, 'Container':'Belt'},
{'item_shortname':'arrow.hv', 'Amount':25, 'Container':'Main'},),
'moderator':({},),
'player':({},)
})
# Called from BasePlayer.Respawn.
# Called when the player spawns (specifically when they click the "Respawn" button).
# ONLY called after the player has transitioned from dead to not-dead, so not when they're waking up.
def OnPlayerRespawned(self, BasePlayer):
        # Check if there is any group set in the configuration file.
if self.Config['GroupItems']:
            # If at least one group is found in the configuration file, set the variable "_GroupItems" equal to the group's dictionary.
_GroupItems = self.Config['GroupItems']
            # Set the variable "_Group" equal to the list of groups the player belongs to. By default all players belong to the group "player".
_Group = permission.GetUserGroups(BasePlayer.userID.ToString())
            # Set the variable "_SetGroup" equal to the last group the user was added to via Oxide.Group. By default all players belong to the group "player".
_SetGroup = _GroupItems.get(_Group[-1])
# Check if the group exists in the config file.
if _SetGroup:
try: # Catch the "KeyNotFoundException" error if "Container", "item_shortname" or "Amount" is not found on the config file.
if _SetGroup[0]['Container'] and _SetGroup[0]['item_shortname'] and _SetGroup[0]['Amount']:
# Set the variable "inv" equals the player's inventory.
inv = BasePlayer.inventory
# Empty the player's inventory.
inv.Strip()
# Iterate through the list of items for the specify group from the configuration file.
for item in _SetGroup:
# Add the items set on the configuration file to each container on the player's inventory.
if item['Container'].lower() == 'main':
inv.GiveItem(ItemManager.CreateByName(item['item_shortname'],item['Amount']), inv.containerMain)
elif item['Container'].lower() == 'belt':
inv.GiveItem(ItemManager.CreateByName(item['item_shortname'],item['Amount']), inv.containerBelt)
elif item['Container'].lower() == 'wear':
inv.GiveItem(ItemManager.CreateByName(item['item_shortname'],item['Amount']), inv.containerWear)
else: return
else: print False
# Catch the "KeyNotFoundException" error if "Container", "item_shortname" or "Amount" is not found on the config file.
except KeyError: return
else: return
else: return
| 51.261364 | 153 | 0.570162 | 4,164 | 0.923077 | 0 | 0 | 0 | 0 | 0 | 0 | 2,366 | 0.524496 |
d2bbabe21477b77848cbfcaba239a66c8fe04262 | 1,043 | py | Python | error_handler.py | jrg1381/sm_asr_console | 47c4090075deaaa7f58e9a092423a58bc7b0a30f | [
"MIT"
]
| 2 | 2019-08-07T11:08:06.000Z | 2021-01-20T11:28:37.000Z | error_handler.py | jrg1381/sm_asr_console | 47c4090075deaaa7f58e9a092423a58bc7b0a30f | [
"MIT"
]
| null | null | null | error_handler.py | jrg1381/sm_asr_console | 47c4090075deaaa7f58e9a092423a58bc7b0a30f | [
"MIT"
]
| null | null | null | # encoding: utf-8
""" Parameterized decorator for catching errors and displaying them in an error popup """
from enum import Enum
import npyscreen
class DialogType(Enum):
"""
Enum defining the type of dialog.
CONFIRM - the dialog waits until the user clicks OK
BRIEF - the dialog appears for a few seconds and then vanishes
"""
CONFIRM = npyscreen.notify_confirm
BRIEF = npyscreen.notify_wait
# PythonDecorators/decorator_function_with_arguments.py
def error_handler(title, dialog_type=DialogType.CONFIRM):
"""
Decorator for functions to catch their exceptions and display them in an error popup
:param title The title of the error pop-up
:param dialog_type A DialogType enum
"""
def wrap(original_function):
def wrapped_f(*args):
try:
return original_function(*args)
except Exception as ex: # pylint: disable=broad-except
dialog_type(str(ex), title)
return None
return wrapped_f
return wrap
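# Illustrative usage sketch (not part of the original module): decorate any function
# whose exceptions should surface as a popup instead of propagating. The function
# name `fetch_transcript` and its argument are hypothetical.
@error_handler("Transcript error", dialog_type=DialogType.BRIEF)
def fetch_transcript(job_id):
    raise RuntimeError(f"Job {job_id} not found")
# Calling fetch_transcript("missing-job") would show a brief error popup and return None.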
| 29.8 | 89 | 0.681687 | 274 | 0.262704 | 0 | 0 | 0 | 0 | 0 | 0 | 553 | 0.530201 |
d2bbf8bdae1a8922b42a68b17b2aafcf8fd38f67 | 13,043 | py | Python | parlai/tasks/taskmaster2/agents.py | min942773/parlai_wandb | 1d9ba1a0df2199d0247cee8c4929a2598ac7e41a | [
"MIT"
]
| 2 | 2017-09-20T21:49:51.000Z | 2018-08-12T06:58:10.000Z | parlai/tasks/taskmaster2/agents.py | min942773/parlai_wandb | 1d9ba1a0df2199d0247cee8c4929a2598ac7e41a | [
"MIT"
]
| 7 | 2021-01-12T01:07:03.000Z | 2022-03-12T00:50:45.000Z | parlai/tasks/taskmaster2/agents.py | min942773/parlai_wandb | 1d9ba1a0df2199d0247cee8c4929a2598ac7e41a | [
"MIT"
]
| 1 | 2021-01-07T11:45:03.000Z | 2021-01-07T11:45:03.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Taskmaster-2 implementation for ParlAI.
No official train/valid/test splits are available as of 2020-05-18, so we make our own
splits.
"""
import os
import pandas as pd
import hashlib
from collections import Counter
from parlai.core.opt import Opt
from parlai.core.teachers import DialogTeacher
from parlai.core.metrics import AverageMetric, F1Metric, BleuMetric
from parlai.utils.misc import warn_once
import json
import parlai.utils.logging as logging
from typing import Optional, Tuple
from parlai.core.message import Message
from parlai.utils.io import PathManager
import parlai.tasks.taskmaster2.build as build_
DOMAINS = [
'flights',
'food-ordering',
'hotels',
'movies',
'restaurant-search',
'sports',
'music',
]
ONTO_TOKEN = "Onto:"
CALL_TOKEN = "Call:"
RESP_TOKEN = "Result:"
class _Abstract(DialogTeacher):
"""
Abstract data loader.
"""
@classmethod
def add_cmdline_args(cls, argparser):
argparser.add_argument('--include-ontology', type=bool, default=False)
argparser.add_argument(
'--domains',
nargs='+',
default=DOMAINS,
choices=DOMAINS,
help='Uses last passed in configuration.',
)
return argparser
def __init__(self, opt: Opt, shared=None):
self.fold = opt['datatype'].split(':')[0]
opt['datafile'] = self.fold
self.dpath = os.path.join(opt['datapath'], 'taskmaster-2')
if shared is None:
warn_once(
"Taskmaster2 is a beta dataset, and format may significantly change."
)
build_.build(opt)
super().__init__(opt, shared)
def _h(self, x):
"""
Hash function.
"""
h = int(hashlib.sha1(x.encode('utf-8')).hexdigest(), 16) % 10
if h == 0:
return 'valid'
elif h == 1:
return 'test'
else:
return 'train'
def _normalize_annotation(self, anno):
return anno
def _load_data(self, fold, domains):
# load up the ontology
ontology = {}
for section in domains:
parts = []
fn = os.path.join(self.dpath, section + '.onto.json')
with PathManager.open(fn, 'r') as f:
o = json.load(f)
assert len(o) == 1
o = list(o.values())[0]
for sub in o:
prefix = sub['prefix']
parts += [
self._normalize_annotation(f'{prefix}.{a}')
for a in sub['annotations']
]
ontology[section] = ' ; '.join(parts)
chunks = []
for section in domains:
with PathManager.open(os.path.join(self.dpath, section + '.json')) as f:
subset = pd.read_json(f)
subset['domain'] = section
chunks.append(subset)
chunks = pd.concat(chunks, axis=0)
# shuffle deterministically for randomness in few-shot training
chunks = chunks.sample(frac=1.0, random_state=42)
chunks['fold'] = self._label_fold(chunks)
# only the fold we need here
chunks = chunks[chunks.fold == fold].reset_index()
chunks['ontology'] = chunks['domain'].apply(ontology.get)
return chunks
def _segments2text(self, segments):
output = []
slots = {}
for segment in segments:
val = segment['text']
for anno_ in segment['annotations']:
anno = anno_['name']
anno = self._normalize_annotation(anno)
output.append(f'{anno} = {val}')
slots[anno] = val
return " ; ".join(output), slots
def custom_evaluation(
self,
teacher_action: Message,
labels: Optional[Tuple[str]],
model_response: Message,
):
if 'metrics' in model_response and 'type' in teacher_action:
# keep copies of metrics across both api calls/responses
prefix = teacher_action['type']
keys = list(model_response['metrics'].keys())
for k in keys:
self.metrics.add(f'{prefix}_{k}', model_response['metrics'][k])
if 'text' not in model_response or not labels or 'type' not in teacher_action:
return
domain = teacher_action['domain']
if teacher_action['type'] == 'apicall':
# also count slot accuracy
text = model_response['text']
slot_guesses = set(
text.replace(CALL_TOKEN + " ", "").split(' ; ')
) # prevent cheating via repeated guesses
correct = 0
for slot_guess in slot_guesses:
if ' = ' not in slot_guess:
continue
try:
slot, guess = slot_guess.split(' = ')
except ValueError:
continue
if teacher_action['slots'].get(slot) == guess:
self.metrics.add('slot_p', AverageMetric(1))
self.metrics.add(f'{domain}_slot_p', AverageMetric(1))
correct += 1
else:
self.metrics.add('slot_p', AverageMetric(0))
self.metrics.add(f'{domain}_slot_p', AverageMetric(0))
logging.debug(
f"Bad slot guess '{slot_guess}' != {teacher_action['slots']}"
)
if teacher_action['slots']:
self.metrics.add(
'slot_r', AverageMetric(correct, len(teacher_action['slots']))
)
self.metrics.add(
f'{domain}_slot_r',
AverageMetric(correct, len(teacher_action['slots'])),
)
self.metrics.add(
'jga', AverageMetric(correct == len(teacher_action['slots']))
)
elif teacher_action['type'] == 'apiresp':
# keep track of statistics by domain
f1_metric = F1Metric.compute(model_response['text'], labels)
bleu_metric = BleuMetric.compute(model_response['text'], labels)
self.metrics.add(f'{domain}_lex_f1', f1_metric)
self.metrics.add(f'{domain}_lex_bleu', bleu_metric)
delex_text = model_response['text']
delex_label = labels[0]
# compute delexicalized string metrics
for slot, value in teacher_action['slots'].items():
delex_text = delex_text.replace(value, slot)
delex_label = delex_label.replace(value, slot)
f1_metric = F1Metric.compute(delex_text, (delex_label,))
self.metrics.add('delex_f1', f1_metric)
self.metrics.add(f'{domain}_delex_f1', f1_metric)
bleu_metric = BleuMetric.compute(delex_text, [delex_label])
self.metrics.add('delex_bleu', bleu_metric)
self.metrics.add(f'{domain}_delex_bleu', bleu_metric)
def setup_data(self, fold):
domains = self.opt.get('domains', DOMAINS)
chunks = self._load_data(fold, domains)
domains_cnt = Counter()
for _, row in chunks.iterrows():
domains_cnt[row['domain']] += 1
first = True
utterances = row['utterances'][:]
if (
len(utterances) >= 3
and utterances[0]['speaker'] == 'USER'
and utterances[1]['speaker'] == 'ASSISTANT'
and utterances[2]['speaker'] == 'ASSISTANT'
and "help you?" in utterances[1]['text']
):
# skip this one
utterances.pop(1)
if self.opt['include_ontology']:
yield {'text': f"{ONTO_TOKEN} {row['ontology']}", 'label': ''}, True
first = False
while utterances:
utt = utterances.pop(0)
segtxt, slots = self._segments2text(utt.get('segments', []))
if utt['speaker'] == 'USER':
yield {
'text': utt['text'],
'label': f'{CALL_TOKEN} {segtxt}',
'domain': row['domain'],
'slots': slots,
'type': 'apicall',
}, first
first = False
elif utt['speaker'] == 'ASSISTANT':
yield {
'text': f'{RESP_TOKEN} {segtxt}',
'label': utt['text'],
'domain': row['domain'],
'slots': slots,
'type': 'apiresp',
}, first
first = False
logging.debug(f"Fold {fold} domains: {domains_cnt}")
class DelexTeacher(_Abstract):
def _label_fold(self, chunks):
return chunks.conversation_id.apply(self._h)
def _delexicalize(self, text, slots):
for key, value in slots.items():
text = text.replace(value, key)
return text
def setup_data(self, fold):
domains_cnt = Counter()
        chunks = self._load_data(fold, self.opt.get('domains', DOMAINS))
for _, row in chunks.iterrows():
domains_cnt[row['domain']] += 1
first = True
utterances = row['utterances'][:]
if (
len(utterances) >= 3
and utterances[0]['speaker'] == 'USER'
and utterances[1]['speaker'] == 'ASSISTANT'
and utterances[2]['speaker'] == 'ASSISTANT'
and "help you?" in utterances[1]['text']
):
# skip this one
utterances.pop(1)
user_utterances = []
asst_utterances = []
while utterances:
utt = utterances.pop(0)
_, slots = self._segments2text(utt.get('segments', []))
if utt['speaker'] == 'USER':
if asst_utterances:
yield {
'text': ' __BREAK__ '.join(user_utterances),
'label': ' __BREAK__ '.join(asst_utterances),
'domain': row['domain'],
}, first
first = False
user_utterances = []
asst_utterances = []
user_utterances.append(self._delexicalize(utt['text'], slots))
elif utt['speaker'] == 'ASSISTANT':
asst_utterances.append(self._delexicalize(utt['text'], slots))
if not user_utterances:
user_utterances.append('__SILENCE__')
if asst_utterances:
yield {
'text': ' __BREAK__ '.join(user_utterances),
'label': ' __BREAK__ '.join(asst_utterances),
'domain': row['domain'],
}, first
class TextOnlyTeacher(DelexTeacher):
def _delexicalize(self, text, slots):
return text
class FullShotTeacher(_Abstract):
"""
    The full shot teacher uses a standard 80-10-10 split, without regard to domain.
"""
def _label_fold(self, chunks):
return chunks.conversation_id.apply(self._h)
class FewShotTeacher(_Abstract):
"""
Few shot teacher tests for generalization to new domains.
"""
@classmethod
def add_cmdline_args(cls, argparser):
argparser.add_argument(
'--holdout',
default=DOMAINS[0],
choices=DOMAINS,
help='Domain which is held out from test',
)
argparser.add_argument(
'--n-shot',
default=100,
type=int,
help='Number of few shot examples to provide in training fold.',
)
return super().add_cmdline_args(argparser)
def _label_fold(self, chunks):
folds = []
num_shots = 0
for _, row in chunks.iterrows():
if row['domain'] != self.opt['holdout']:
# if it's not in the holdout, always mark it train
folds.append('train')
else:
# keep the same valid/test sets as in fullshot, and only leak
# a small number of the training examples (i.e. throw away the
# vast majority of our data but keep test sets the same)
f = self._h(row['conversation_id'])
if f != 'train':
folds.append(f)
elif num_shots < self.opt['n_shot']:
folds.append('train')
num_shots += 1
else:
folds.append('throwaway')
return folds
class DefaultTeacher(FullShotTeacher):
pass
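# Illustrative sketch (not part of the original file): the deterministic conversation-id
# hashing used by _Abstract._h to carve out roughly 10% valid, 10% test and 80% train.
# The helper name and sample id below are made up for demonstration.
def _example_fold_of(conversation_id: str) -> str:
    h = int(hashlib.sha1(conversation_id.encode('utf-8')).hexdigest(), 16) % 10
    return 'valid' if h == 0 else 'test' if h == 1 else 'train'
# _example_fold_of('dlg-00001') always maps the same conversation to the same fold.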
| 35.734247 | 86 | 0.521966 | 12,004 | 0.92034 | 3,689 | 0.282834 | 839 | 0.064326 | 0 | 0 | 2,907 | 0.222878 |
d2bc823500d7e835a13076bd5554f0f404893ff4 | 243 | py | Python | jmeter_api/timers/__init__.py | dashawn888/jmeter_api | 1ab5b02f3a7c8ad1b84fc50db4fe1fc2fa7c91bd | [
"Apache-2.0"
]
| 11 | 2020-03-22T13:30:21.000Z | 2021-12-25T06:23:44.000Z | jmeter_api/timers/__init__.py | dashawn888/jmeter_api | 1ab5b02f3a7c8ad1b84fc50db4fe1fc2fa7c91bd | [
"Apache-2.0"
]
| 2 | 2020-03-23T00:06:42.000Z | 2021-02-24T21:41:40.000Z | jmeter_api/timers/__init__.py | dashawn888/jmeter_api | 1ab5b02f3a7c8ad1b84fc50db4fe1fc2fa7c91bd | [
"Apache-2.0"
]
| 3 | 2020-11-09T14:14:25.000Z | 2021-05-27T02:54:38.000Z | from jmeter_api.timers.constant_throughput_timer.elements import ConstantThroughputTimer, BasedOn
from jmeter_api.timers.constant_timer.elements import ConstantTimer
from jmeter_api.timers.uniform_random_timer.elements import UniformRandTimer
| 60.75 | 97 | 0.90535 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d2bd972bab298994d41d91b8c6a75e48470ccec5 | 2,520 | py | Python | tensorfn/distributed/launch.py | rosinality/tensorfn | cd410c5e6f6906d223f740501e711b9cfae260e4 | [
"Apache-2.0"
]
| 13 | 2021-04-08T03:09:42.000Z | 2022-03-18T08:27:17.000Z | tensorfn/distributed/launch.py | rosinality/tensorfn | cd410c5e6f6906d223f740501e711b9cfae260e4 | [
"Apache-2.0"
]
| 2 | 2020-08-16T20:25:34.000Z | 2021-07-13T00:35:52.000Z | tensorfn/distributed/launch.py | rosinality/tensorfn | cd410c5e6f6906d223f740501e711b9cfae260e4 | [
"Apache-2.0"
]
| null | null | null | import os
import torch
from torch import distributed as dist
from torch import multiprocessing as mp
from tensorfn import distributed as dist_fn
def find_free_port():
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("", 0))
port = sock.getsockname()[1]
sock.close()
return port
def launch(fn, n_gpu_per_machine, n_machine=1, machine_rank=0, dist_url=None, args=()):
world_size = n_machine * n_gpu_per_machine
if world_size > 1:
if "OMP_NUM_THREADS" not in os.environ:
os.environ["OMP_NUM_THREADS"] = "1"
if dist_url == "auto":
if n_machine != 1:
raise ValueError('dist_url="auto" not supported in multi-machine jobs')
port = find_free_port()
dist_url = f"tcp://127.0.0.1:{port}"
if n_machine > 1 and dist_url.startswith("file://"):
raise ValueError(
"file:// is not a reliable init method in multi-machine jobs. Prefer tcp://"
)
mp.spawn(
distributed_worker,
nprocs=n_gpu_per_machine,
args=(fn, world_size, n_gpu_per_machine, machine_rank, dist_url, args),
daemon=False,
)
else:
fn(*args)
def distributed_worker(
local_rank, fn, world_size, n_gpu_per_machine, machine_rank, dist_url, args
):
if not torch.cuda.is_available():
raise OSError("CUDA is not available. Please check your environments")
global_rank = machine_rank * n_gpu_per_machine + local_rank
try:
dist.init_process_group(
backend="NCCL",
init_method=dist_url,
world_size=world_size,
rank=global_rank,
)
except Exception:
raise OSError("failed to initialize NCCL groups")
dist_fn.synchronize()
if n_gpu_per_machine > torch.cuda.device_count():
raise ValueError(
f"specified n_gpu_per_machine larger than available device ({torch.cuda.device_count()})"
)
torch.cuda.set_device(local_rank)
if dist_fn.LOCAL_PROCESS_GROUP is not None:
raise ValueError("torch.distributed.LOCAL_PROCESS_GROUP is not None")
n_machine = world_size // n_gpu_per_machine
for i in range(n_machine):
ranks_on_i = list(range(i * n_gpu_per_machine, (i + 1) * n_gpu_per_machine))
pg = dist.new_group(ranks_on_i)
if i == machine_rank:
dist_fn.distributed.LOCAL_PROCESS_GROUP = pg
fn(*args)
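# Illustrative sketch (not part of the original module): a trivial worker used to
# demonstrate `launch`. The worker name and its message argument are made up.
def _demo_worker(message):
    print(message)


if __name__ == "__main__":
    # Spawns one worker per local GPU, or runs the function inline when only a
    # single process is needed.
    launch(
        _demo_worker,
        n_gpu_per_machine=torch.cuda.device_count() or 1,
        dist_url="auto",
        args=("hello from tensorfn.distributed.launch",),
    )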
| 27.096774 | 101 | 0.636508 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 443 | 0.175794 |
d2bffe6b8d76be452fc84a9fa325b868d681f43c | 4,097 | py | Python | VideoStitchingSubsystem/StereoCameraAPIs/MonoLensStream.py | AriaPahlavan/see-through-adas-core | 7cc530243d324aecd9db538883bb77ee2d519661 | [
"Apache-2.0"
]
| null | null | null | VideoStitchingSubsystem/StereoCameraAPIs/MonoLensStream.py | AriaPahlavan/see-through-adas-core | 7cc530243d324aecd9db538883bb77ee2d519661 | [
"Apache-2.0"
]
| null | null | null | VideoStitchingSubsystem/StereoCameraAPIs/MonoLensStream.py | AriaPahlavan/see-through-adas-core | 7cc530243d324aecd9db538883bb77ee2d519661 | [
"Apache-2.0"
]
| null | null | null | from enum import Enum
from threading import Thread
import cv2
import time
class Resolution(Enum):
_32p = (64, 32)
_96p = (128, 96)
_120p = (160, 120)
_144p = (256, 144)
_240p = (360, 240)
_288p = (480, 272)
_360p = (480, 360)
_480p = (720, 480)
_576p = (720, 576)
_Hd = (1280, 720)
class MonoLensStream:
def setParam(self, param, value, name):
if self.stream.set(param, value):
pass
else:
import logging
log = logging.getLogger()
log.warning("[WARN] cannot set "+name)
def __init__(self, src=0, framerate=30, resolution=Resolution._240p.value, fourcc="MJPG", exposure=-10,
debugEnable=False, debugCount=1000):
"""
initialize the video stream
"""
self.stream = cv2.VideoCapture(src)
# set resolution
w, h = resolution
self.setParam(cv2.CAP_PROP_FRAME_WIDTH, w, "width")
self.setParam(cv2.CAP_PROP_FRAME_HEIGHT, h, "height")
self.setParam(cv2.CAP_PROP_FPS, framerate, "fps")
self.setParam(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*fourcc), "fourcc")
self.setParam(cv2.CAP_PROP_EXPOSURE, exposure, "exposure")
self.fpsDelay = 1 / framerate
# read first frame
(self.grabbed, self.frame) = self.stream.read()
# frame reader thread
if not debugEnable:
self.frameReaderThread = Thread(target=self.update, args=())
else:
self.min = self.avg = self.max = 0
self.debugCount = debugCount
self.frameReaderThread = Thread(target=self.debugUpdate, args=())
self.streamStopped = False
self.grabbedTime = time.time()
self.returnedTime = self.grabbedTime
def start(self):
"""
start the thread to read frames from the video stream
:return reference to itself
"""
self.frameReaderThread.daemon = True
self.frameReaderThread.start()
return self
def update(self):
"""
grab the next frame from the stream infinitely until the stream is stopped
"""
while True:
if self.streamStopped: # done with streaming
return
(self.grabbed, self.frame) = self.stream.read()
self.grabbedTime = time.time()
# time.sleep(self.fpsDelay)
def read(self):
"""
:return: the current frame
"""
while self.returnedTime == self.grabbedTime:
continue
self.returnedTime = self.grabbedTime
return self.frame, self.returnedTime
def stop(self):
"""
stop the video stream
"""
self.streamStopped = True
self.frameReaderThread.join()
self.stream.release()
def debugUpdate(self):
"""
**FOR DEBUGGING PURPOSES ONLY**
grab the next frame from the stream infinitely until the stream is stopped
"""
startTime = time.time() * 1000 * 1000
(self.grabbed, self.frame) = self.stream.read()
endTime = time.time() * 1000 * 1000
self.max = self.min = endTime - startTime
counter = self.debugCount
while self.debugCount != 0:
startTime = time.time() * 1000 * 1000
(self.grabbed, self.frame) = self.stream.read()
endTime = time.time() * 1000 * 1000
ellapsedTime = endTime - startTime
print(ellapsedTime)
self.avg += ellapsedTime
if self.min > ellapsedTime:
self.min = ellapsedTime
if self.max < ellapsedTime:
self.max = ellapsedTime
self.debugCount -= 1
time.sleep(self.fpsDelay)
self.avg = (self.avg / counter)
def debugResults(self):
"""
**FOR DEBUGGING PURPOSES ONLY**
:return average, min, and max from debugging results
"""
self.frameReaderThread.join()
self.stream.release()
return self.avg, self.min, self.max
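if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module): grab a few frames
    # from camera index 0 and stop cleanly. Assumes a working camera at that index;
    # the frame count and settings are arbitrary demo choices.
    stream = MonoLensStream(src=0, framerate=30, resolution=Resolution._240p.value).start()
    for _ in range(10):
        frame, grabbed_at = stream.read()
        print("frame captured at", grabbed_at)
    stream.stop()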
| 28.255172 | 107 | 0.573102 | 4,016 | 0.980229 | 0 | 0 | 0 | 0 | 0 | 0 | 778 | 0.189895 |
d2c143baf7ea1e8434d64873e45800bbd43dfe04 | 444 | py | Python | sdk/python/approzium/mysql/connector/pooling.py | UpGado/approzium | 306b40f16a1ba0dfbe3a312e1c40881e98518137 | [
"Apache-2.0"
]
| 59 | 2020-07-14T17:18:09.000Z | 2022-02-24T07:39:22.000Z | sdk/python/approzium/mysql/connector/pooling.py | UpGado/approzium | 306b40f16a1ba0dfbe3a312e1c40881e98518137 | [
"Apache-2.0"
]
| 66 | 2020-07-09T19:11:55.000Z | 2022-03-15T11:42:55.000Z | sdk/python/approzium/mysql/connector/pooling.py | UpGado/approzium | 306b40f16a1ba0dfbe3a312e1c40881e98518137 | [
"Apache-2.0"
]
| 9 | 2020-07-09T19:20:45.000Z | 2022-02-24T07:39:26.000Z | from mysql.connector.pooling import MySQLConnectionPool
from ._connect import _parse_kwargs, _patch_MySQLConnection
class MySQLConnectionPool(MySQLConnectionPool):
def set_config(self, **kwargs):
kwargs = _parse_kwargs(kwargs)
super(MySQLConnectionPool, self).set_config(**kwargs)
def add_connection(self, cnx=None):
with _patch_MySQLConnection(include_pooling=True):
super().add_connection(cnx)
| 31.714286 | 61 | 0.75 | 324 | 0.72973 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d2c1cd83dd904d0ffd396c1f85ce4d771a28e638 | 4,813 | py | Python | app/network_x_tools/network_x_utils.py | ThembiNsele/ClimateMind-Backend | 0e418000b2a0141a1e4a7c11dbe3564082a3f4bb | [
"MIT"
]
| 6 | 2020-08-20T10:49:59.000Z | 2022-01-24T16:49:46.000Z | app/network_x_tools/network_x_utils.py | ThembiNsele/ClimateMind-Backend | 0e418000b2a0141a1e4a7c11dbe3564082a3f4bb | [
"MIT"
]
| 95 | 2020-07-24T22:32:34.000Z | 2022-03-05T15:01:16.000Z | app/network_x_tools/network_x_utils.py | ThembiNsele/ClimateMind-Backend | 0e418000b2a0141a1e4a7c11dbe3564082a3f4bb | [
"MIT"
]
| 5 | 2020-07-30T17:29:09.000Z | 2021-01-10T19:46:15.000Z | class network_x_utils:
"""
This class provides commonly used utils which are shared between all different types
of NetworkX nodes (Feed Items, Solutions, Myths). For each of these, we want to be
able to pull basic information like the IRI, Descriptions, Images, etc.
Include any generalized NetworkX functions here.
"""
def __init__(self):
self.node = None # Current node
def set_current_node(self, node):
"""We usually pull multiple node related items simultaneously. Rather
than pass these in individually for each function, this let's us use the same
node for all of the functions in this class.
"""
self.node = node
def get_node_id(self):
"""Node IDs are the unique identifier in the IRI. This is provided to the
front-end as a reference for the feed, but is never shown to the user.
Example http://webprotege.stanford.edu/R8znJBKduM7l8XDXMalSWSl
"""
offset = 4 # .edu <- to skip these characters and get the unique IRI
full_iri = self.node["iri"]
pos = full_iri.find("edu") + offset
return full_iri[pos:]
def get_description(self):
"""Long Descriptions are used by the front-end to display explanations of the
climate effects shown in user feeds.
"""
try:
return self.node["properties"]["schema_longDescription"][0]
except:
return "No long desc available at present"
def get_short_description(self):
"""Short Descriptions are used by the front-end to display explanations of the
climate effects shown in user feeds.
"""
try:
return self.node["properties"]["schema_shortDescription"][0]
except:
return "No short desc available at present"
def get_image_url(self):
"""Images are displayed to the user in the climate feed to accompany an explanation
of the climate effects. The front-end is provided with the URL and then requests
these images from our server.
"""
try:
return self.node["properties"]["schema_image"][0]
except:
            # Default image url if no image is available
return "https://yaleclimateconnections.org/wp-content/uploads/2018/04/041718_child_factories.jpg"
def get_image_url_or_none(self):
"""Images are displayed to the user in the climate feed to accompany an explanation
of the climate effects. The front-end is provided with the URL and then requests
these images from our server.
"""
try:
return self.node["properties"]["schema_image"][0]
except:
            # Return None if no image is available
return None
def get_causal_sources(self):
"""Sources are displayed to the user in the sources tab of the impacts overlay page.
This function returns a list of urls of the sources to show on the impact overlay page for an impact/effect.
Importantly, these sources aren't directly from the networkx node, but all the networkx edges that cause the node.
Only returns edges that are directly tied to the node (ancestor edge sources are not used)
"""
if "causal sources" in self.node and len(self.node["causal sources"]) > 0:
causal_sources = self.node["causal sources"]
try:
return causal_sources
except:
return (
[]
) # Default source if none #should this be the IPCC? or the US National Climate Assessment?
def get_solution_sources(self):
"""Returns a flattened list of custom solution source values from each node key that matches
custom_source_types string.
"""
try:
return self.node["solution sources"]
except:
return []
def get_is_possibly_local(self, node):
"""Returns whether it's possible that a node effects a particular user based on
their location. Note that here we need to pass in the node directly, rather than
using one set by the class as the node comes from the localised_acyclic_graph.py
rather than a the standard graph.
"""
if "isPossiblyLocal" in node:
if node["isPossiblyLocal"]:
return 1
else:
return 0
else:
return 0
def get_co2_eq_reduced(self):
"""
Returns the solution's CO2 Equivalent Reduced / Sequestered (2020–2050) in Gigatons.
Values taken from Project Drawdown scenario 2.
"""
if "CO2_eq_reduced" in self.node["data_properties"]:
return self.node["data_properties"]["CO2_eq_reduced"]
else:
return 0
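if __name__ == "__main__":
    # Illustrative smoke test (not part of the original file). The node dict below is a
    # minimal stand-in for a node pulled from the NetworkX ontology graph; the IRI is
    # the same example quoted in the get_node_id docstring.
    demo_node = {
        "iri": "http://webprotege.stanford.edu/R8znJBKduM7l8XDXMalSWSl",
        "properties": {"schema_longDescription": ["An example long description."]},
    }
    utils = network_x_utils()
    utils.set_current_node(demo_node)
    print(utils.get_node_id())      # -> R8znJBKduM7l8XDXMalSWSl
    print(utils.get_description())  # -> An example long description.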
| 39.45082 | 122 | 0.635155 | 4,814 | 0.999792 | 0 | 0 | 0 | 0 | 0 | 0 | 3,235 | 0.671859 |
d2c30d506f338f0ad2e0b0a0c5af2f47676aea3a | 267 | py | Python | setup.py | Faust-Wang/vswarm | d18ce643218c18ef1e762f40562104b2a0926ad7 | [
"MIT"
]
| 21 | 2021-03-03T10:51:46.000Z | 2022-03-28T11:00:35.000Z | setup.py | Faust-Wang/vswarm | d18ce643218c18ef1e762f40562104b2a0926ad7 | [
"MIT"
]
| 2 | 2021-07-21T07:57:16.000Z | 2022-03-17T12:41:51.000Z | setup.py | hvourtsis/vswarm | d18ce643218c18ef1e762f40562104b2a0926ad7 | [
"MIT"
]
| 8 | 2021-02-27T14:29:55.000Z | 2022-01-05T19:40:38.000Z | # Do not manually invoke this setup.py, use catkin instead!
from setuptools import setup
from catkin_pkg.python_setup import generate_distutils_setup
setup_args = generate_distutils_setup(
packages=['vswarm'],
package_dir={'': 'src'}
)
setup(**setup_args)
| 22.25 | 60 | 0.764045 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 74 | 0.277154 |
d2c38a755a40c6e19281f0cc94b831f228ba7f94 | 250 | py | Python | 实例学习Numpy与Matplotlib/创建 numpy.array.py | shao1chuan/pythonbook | cd9877d04e1e11422d38cc051e368d3d9ce2ab45 | [
"MulanPSL-1.0"
]
| 95 | 2020-10-11T04:45:46.000Z | 2022-02-25T01:50:40.000Z | 实例学习Numpy与Matplotlib/创建 numpy.array.py | shao1chuan/pythonbook | cd9877d04e1e11422d38cc051e368d3d9ce2ab45 | [
"MulanPSL-1.0"
]
| null | null | null | 实例学习Numpy与Matplotlib/创建 numpy.array.py | shao1chuan/pythonbook | cd9877d04e1e11422d38cc051e368d3d9ce2ab45 | [
"MulanPSL-1.0"
]
| 30 | 2020-11-05T09:01:00.000Z | 2022-03-08T05:58:55.000Z |
import numpy as np
nparr = np.array([i for i in range(10)])
a = np.zeros(10)
f = np.zeros(10,dtype=float)
n = np.full((3,5),44)
r = np.random.randint(0,100,size=(3,5))
r2 = np.random.random((3,5))
x = np.linspace(0,100,50)
print(nparr,a,f,n,r,r2,x) | 22.727273 | 40 | 0.64 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d2c38e45f035250f5b56f9b05cf87de9978e93b9 | 4,790 | py | Python | examples/DecryptLoginExamples/crawlers/weibomonitor/weibomonitor.py | hedou/DecryptLogin | ff86a5d378c8a42d1caebbb7482658a95053f716 | [
"Apache-2.0"
]
| null | null | null | examples/DecryptLoginExamples/crawlers/weibomonitor/weibomonitor.py | hedou/DecryptLogin | ff86a5d378c8a42d1caebbb7482658a95053f716 | [
"Apache-2.0"
]
| null | null | null | examples/DecryptLoginExamples/crawlers/weibomonitor/weibomonitor.py | hedou/DecryptLogin | ff86a5d378c8a42d1caebbb7482658a95053f716 | [
"Apache-2.0"
]
| null | null | null | '''
Function:
    Weibo monitor
Author:
    Charles
WeChat Official Account:
    Charles的皮卡丘
'''
import re
import time
from DecryptLogin import login
'''Weibo monitor'''
class WeiboMonitor():
def __init__(self, username, password, time_interval=30):
_, self.session = self.login(username, password)
self.headers = {
'Accept': 'application/json, text/plain, */*',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Connection': 'keep-alive',
'Host': 'm.weibo.cn',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'
}
self.api_url = 'https://m.weibo.cn/api/container/getIndex?uid={}&luicode=10000011&lfid=231093_-_selffollowed&type=uid&value={}&containerid={}'
self.time_interval = time_interval
'''开始监控'''
def run(self):
followed = self.getFollowed()
self.logging('请选择一位您关注列表中的用户进行监控:')
self.logging('-' * 40)
for idx, each in enumerate(sorted(followed.keys())):
self.logging('[%d]. %s' % (idx+1, each))
self.logging('-' * 40)
while True:
user_choice = input('请选择您想要监控的用户编号(例如1):')
try:
profile_url = followed[sorted(followed.keys())[int(user_choice)-1]]
user_id = re.findall(r'uid=(\d+)&', profile_url)[0]
break
except:
self.logging('您的输入有误, 请重新输入.', 'Warning')
self.monitor(user_id, profile_url)
'''监控用户主页'''
def monitor(self, user_id, profile_url):
user_name, containerid = self.getContainerid(user_id, profile_url)
response = self.session.get(self.api_url.format(user_id, user_id, containerid))
weibo_ids = []
cards = response.json()['data']['cards']
for card in cards:
if card['card_type'] == 9:
weibo_ids.append(str(card['mblog']['id']))
while True:
weibo_ids = self.checkUpdate(user_id, profile_url, weibo_ids)
time.sleep(self.time_interval)
'''检查用户是否有新的微博'''
def checkUpdate(self, user_id, profile_url, weibo_ids):
user_name, containerid = self.getContainerid(user_id, profile_url)
response = self.session.get(self.api_url.format(user_id, user_id, containerid))
cards = response.json()['data']['cards']
flag = False
for card in cards:
if card['card_type'] == 9:
if str(card['mblog']['id']) not in weibo_ids:
flag = True
weibo_ids.append(str(card['mblog']['id']))
self.logging(f'用户{user_name}发布了新微博')
pics = []
if card['mblog'].get('pics'):
for i in card['mblog']['pics']: pics.append(i['url'])
pics = '||'.join(pics)
self.logging(card)
if not flag: self.logging(f'用户{user_name}未发布新微博')
return weibo_ids
'''获取containerid'''
def getContainerid(self, user_id, profile_url):
self.session.get(profile_url)
containerid = re.findall(r'fid%3D(\d+)%26', str(self.session.cookies))[0]
response = self.session.get(self.api_url.format(user_id, user_id, containerid))
user_name = self.decode(re.findall(r'"screen_name":"(.*?)"', response.text)[0])
for i in response.json()['data']['tabsInfo']['tabs']:
if i['tab_type'] == 'weibo':
containerid = i['containerid']
return user_name, containerid
'''获取关注列表'''
def getFollowed(self):
data = {}
page = 0
while True:
page += 1
response = self.session.get('https://m.weibo.cn/api/container/getIndex?containerid=231093_-_selffollowed&page={}'.format(page), headers=self.headers)
profile_urls = re.findall(r'"profile_url":"(.*?)"', response.text)
screen_names = re.findall(r'"screen_name":"(.*?)"', response.text)
if len(profile_urls) == 0:
break
for screen_name, profile_url in zip(screen_names, profile_urls):
data[self.decode(screen_name)] = profile_url.replace('\\', '')
return data
'''解码'''
def decode(self, content):
return content.encode('latin-1').decode('unicode_escape')
'''模拟登录'''
def login(self, username, password):
client = login.Client()
weibo = client.weibo(reload_history=True)
infos_return, session = weibo.login(username, password, 'mobile')
return infos_return, session
'''logging'''
def logging(self, msg, tip='INFO'):
print(f'[{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())} {tip}]: {msg}') | 43.153153 | 161 | 0.571816 | 4,844 | 0.966095 | 0 | 0 | 0 | 0 | 0 | 0 | 1,421 | 0.283406 |
d2c3e3e6ef11ddd684a0bcebf23085d7e1d9152c | 1,191 | py | Python | crawlai/items/critter/base_critter.py | apockill/CreepyCrawlAI | 2862c03e686801884ffb579a7be29f3c9d0da610 | [
"MIT"
]
| 13 | 2020-05-04T03:11:26.000Z | 2021-12-05T03:57:45.000Z | crawlai/items/critter/base_critter.py | apockill/CreepyCrawlAI | 2862c03e686801884ffb579a7be29f3c9d0da610 | [
"MIT"
]
| null | null | null | crawlai/items/critter/base_critter.py | apockill/CreepyCrawlAI | 2862c03e686801884ffb579a7be29f3c9d0da610 | [
"MIT"
]
| null | null | null | from godot.bindings import ResourceLoader
from crawlai.grid_item import GridItem
from crawlai.items.food import Food
from crawlai.math_utils import clamp
from crawlai.turn import Turn
from crawlai.position import Position
_critter_resource = ResourceLoader.load("res://Game/Critter/Critter.tscn")
class BaseCritter(GridItem):
"""The base class for all critters"""
HEALTH_TICK_PENALTY = 1
MAX_HEALTH = 500
BITE_SIZE = 20
CHOICES = [
Turn(Position(*c), is_action)
for c in [(0, 1), (1, 0), (-1, 0), (0, -1)]
for is_action in (True, False)
] + [Turn(Position(0, 0), False)]
def __init__(self):
super().__init__()
self.health: int
self.age: int
self._reset_stats()
def _reset_stats(self):
self.health = self.MAX_HEALTH
self.age = 0
def _tick_stats(self):
self.age += 1
self.health -= self.HEALTH_TICK_PENALTY
def _load_instance(self):
return _critter_resource.instance()
def perform_action_onto(self, other: 'GridItem'):
if isinstance(other, Food):
max_bite = clamp(self.BITE_SIZE, 0, self.MAX_HEALTH - self.health)
self.health += other.take_nutrition(max_bite)
@property
def delete_queued(self):
return self.health <= 0
| 24.306122 | 74 | 0.715365 | 889 | 0.746432 | 0 | 0 | 61 | 0.051217 | 0 | 0 | 80 | 0.06717 |
d2c4507ff5f2b0e60108a433da49147fd8f6e6c4 | 3,008 | py | Python | exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/doc_fragments/nios.py | tr3ck3r/linklight | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | [
"MIT"
]
| 17 | 2017-06-07T23:15:01.000Z | 2021-08-30T14:32:36.000Z | ansible/ansible/plugins/doc_fragments/nios.py | SergeyCherepanov/ansible | 875711cd2fd6b783c812241c2ed7a954bf6f670f | [
"MIT"
]
| 9 | 2017-06-25T03:31:52.000Z | 2021-05-17T23:43:12.000Z | ansible/ansible/plugins/doc_fragments/nios.py | SergeyCherepanov/ansible | 875711cd2fd6b783c812241c2ed7a954bf6f670f | [
"MIT"
]
| 3 | 2018-05-26T21:31:22.000Z | 2019-09-28T17:00:45.000Z | # -*- coding: utf-8 -*-
# Copyright: (c) 2015, Peter Sprygada <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = r'''
options:
provider:
description:
- A dict object containing connection details.
type: dict
suboptions:
host:
description:
- Specifies the DNS host name or address for connecting to the remote
instance of NIOS WAPI over REST
- Value can also be specified using C(INFOBLOX_HOST) environment
variable.
type: str
required: true
username:
description:
- Configures the username to use to authenticate the connection to
the remote instance of NIOS.
- Value can also be specified using C(INFOBLOX_USERNAME) environment
variable.
type: str
password:
description:
- Specifies the password to use to authenticate the connection to
the remote instance of NIOS.
- Value can also be specified using C(INFOBLOX_PASSWORD) environment
variable.
type: str
validate_certs:
description:
- Boolean value to enable or disable verifying SSL certificates
- Value can also be specified using C(INFOBLOX_SSL_VERIFY) environment
variable.
type: bool
default: no
aliases: [ ssl_verify ]
http_request_timeout:
description:
- The amount of time before to wait before receiving a response
- Value can also be specified using C(INFOBLOX_HTTP_REQUEST_TIMEOUT) environment
variable.
type: int
default: 10
max_retries:
description:
- Configures the number of attempted retries before the connection
is declared usable
- Value can also be specified using C(INFOBLOX_MAX_RETRIES) environment
variable.
type: int
default: 3
wapi_version:
description:
- Specifies the version of WAPI to use
- Value can also be specified using C(INFOBLOX_WAP_VERSION) environment
variable.
- Until ansible 2.8 the default WAPI was 1.4
type: str
default: '2.1'
max_results:
description:
- Specifies the maximum number of objects to be returned,
if set to a negative number the appliance will return an error when the
number of returned objects would exceed the setting.
- Value can also be specified using C(INFOBLOX_MAX_RESULTS) environment
variable.
type: int
default: 1000
notes:
- "This module must be run locally, which can be achieved by specifying C(connection: local)."
- Please read the :ref:`nios_guide` for more detailed information on how to use Infoblox with Ansible.
'''
| 35.809524 | 104 | 0.635306 | 2,825 | 0.939162 | 0 | 0 | 0 | 0 | 0 | 0 | 2,942 | 0.978059 |
d2c4dfb8a30f8c36fa075d277e4458a4776a5ca8 | 25,299 | py | Python | torchrec/metrics/rec_metric.py | xing-liu/torchrec | 82ffde7a69fdb9c66b79a753d6f03afa5db3f73e | [
"BSD-3-Clause"
]
| 814 | 2022-02-23T17:24:14.000Z | 2022-03-31T16:52:23.000Z | torchrec/metrics/rec_metric.py | xing-liu/torchrec | 82ffde7a69fdb9c66b79a753d6f03afa5db3f73e | [
"BSD-3-Clause"
]
| 89 | 2022-02-23T17:29:56.000Z | 2022-03-31T23:44:13.000Z | torchrec/metrics/rec_metric.py | xing-liu/torchrec | 82ffde7a69fdb9c66b79a753d6f03afa5db3f73e | [
"BSD-3-Clause"
]
| 68 | 2022-02-23T17:42:17.000Z | 2022-03-28T06:39:55.000Z | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3
import abc
import math
from collections import defaultdict, deque
from dataclasses import dataclass
from enum import Enum
from typing import (
Any,
Callable,
cast,
Deque,
Dict,
Iterator,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
)
import torch
import torch.distributed as dist
import torch.nn as nn
from torchmetrics import Metric
from torchrec.metrics.metrics_config import RecComputeMode, RecTaskInfo
from torchrec.metrics.metrics_namespace import (
compose_metric_key,
MetricNameBase,
MetricNamespaceBase,
MetricPrefix,
)
RecModelOutput = Union[torch.Tensor, Dict[str, torch.Tensor]]
@dataclass(frozen=True)
class MetricComputationReport:
name: MetricNameBase
metric_prefix: MetricPrefix
value: torch.Tensor
DefaultValueT = TypeVar("DefaultValueT")
ComputeIterType = Iterator[
Tuple[RecTaskInfo, MetricNameBase, torch.Tensor, MetricPrefix]
]
MAX_BUFFER_COUNT = 1000
class RecMetricException(Exception):
pass
class WindowBuffer:
def __init__(self, max_size: int, max_buffer_count: int) -> None:
self._max_size: int = max_size
self._max_buffer_count: int = max_buffer_count
self._buffers: Deque[torch.Tensor] = deque(maxlen=max_buffer_count)
self._used_sizes: Deque[int] = deque(maxlen=max_buffer_count)
self._window_used_size = 0
def aggregate_state(
self, window_state: torch.Tensor, curr_state: torch.Tensor, size: int
) -> None:
def remove(window_state: torch.Tensor) -> None:
window_state -= self._buffers.popleft()
self._window_used_size -= self._used_sizes.popleft()
if len(self._buffers) == self._buffers.maxlen:
remove(window_state)
self._buffers.append(curr_state)
self._used_sizes.append(size)
window_state += curr_state
self._window_used_size += size
while self._window_used_size > self._max_size:
remove(window_state)
@property
def buffers(self) -> Deque[torch.Tensor]:
return self._buffers
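# Illustrative sketch (not part of the original file): how WindowBuffer keeps a bounded,
# windowed aggregate of a running state tensor. The max_size/size values are arbitrary.
def _window_buffer_example() -> torch.Tensor:
    buffer = WindowBuffer(max_size=4, max_buffer_count=MAX_BUFFER_COUNT)
    window_state = torch.zeros(1)
    for curr in (torch.tensor([1.0]), torch.tensor([2.0]), torch.tensor([3.0])):
        # Each call contributes size=2 samples; once more than max_size=4 samples are
        # in the window, the oldest contribution is subtracted back out in-place.
        buffer.aggregate_state(window_state, curr_state=curr, size=2)
    return window_state  # tensor([5.]) -- the first batch has fallen out of the window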
class RecMetricComputation(Metric, abc.ABC):
r"""The internal computation class template.
    A metric implementation should overwrite update() and compute(). These two
    APIs focus on the actual mathematical meaning of the metric, without detailed
    knowledge of the model output and task information.
Args:
my_rank (int): the rank of this trainer.
batch_size (int): batch size used by this trainer.
        n_tasks (int): the number of tasks this computation obj
            will have to compute.
window_size (int): the window size for the window metric.
        compute_on_all_ranks (bool): whether to compute metrics on all ranks. This
            is necessary if a non-leader rank wants to consume the metrics result.
process_group (Optional[ProcessGroup]): the process group used for the
communication. Will use the default process group if not specified.
"""
_batch_window_buffers: Optional[Dict[str, WindowBuffer]]
def __init__(
self,
my_rank: int,
batch_size: int,
n_tasks: int,
window_size: int,
compute_on_all_ranks: bool = False,
# pyre-fixme[11]: Annotation `ProcessGroup` is not defined as a type.
process_group: Optional[dist.ProcessGroup] = None,
*args: Any,
**kwargs: Any,
) -> None:
super().__init__(process_group=process_group, *args, **kwargs)
self._my_rank = my_rank
self._n_tasks = n_tasks
self._batch_size = batch_size
self._window_size = window_size
self._compute_on_all_ranks = compute_on_all_ranks
if self._window_size > 0:
self._batch_window_buffers = {}
else:
self._batch_window_buffers = None
self._add_state(
"has_valid_update",
torch.zeros(self._n_tasks, dtype=torch.uint8),
add_window_state=False,
dist_reduce_fx=lambda x: torch.any(x, dim=0).byte(),
persistent=True,
)
@staticmethod
def get_window_state_name(state_name: str) -> str:
return f"window_{state_name}"
def get_window_state(self, state_name: str) -> torch.Tensor:
return getattr(self, self.get_window_state_name(state_name))
def _add_state(
self, name: str, default: DefaultValueT, add_window_state: bool, **kwargs: Any
) -> None:
# pyre-fixme[6]: Expected `Union[List[typing.Any], torch.Tensor]` for 2nd
# param but got `DefaultValueT`.
super().add_state(name, default, **kwargs)
if add_window_state:
if self._batch_window_buffers is None:
raise RuntimeError(
"Users is adding a window state while window metric is disabled."
)
kwargs["persistent"] = False
window_state_name = self.get_window_state_name(name)
# Avoid pyre error
assert isinstance(default, torch.Tensor)
super().add_state(window_state_name, default.detach().clone(), **kwargs)
self._batch_window_buffers[window_state_name] = WindowBuffer(
max_size=self._window_size,
max_buffer_count=MAX_BUFFER_COUNT,
)
def _aggregate_window_state(
self, state_name: str, state: torch.Tensor, num_samples: int
) -> None:
if self._batch_window_buffers is None:
raise RuntimeError(
"Users is adding a window state while window metric is disabled."
)
window_state_name = self.get_window_state_name(state_name)
assert self._batch_window_buffers is not None
self._batch_window_buffers[window_state_name].aggregate_state(
getattr(self, window_state_name), curr_state=state, size=num_samples
)
@abc.abstractmethod
# pyre-fixme[14]: `update` overrides method defined in `Metric` inconsistently.
def update(
self,
*,
predictions: Optional[torch.Tensor],
labels: torch.Tensor,
weights: Optional[torch.Tensor],
) -> None: # pragma: no cover
pass
@abc.abstractmethod
def _compute(self) -> List[MetricComputationReport]: # pragma: no cover
pass
def pre_compute(self) -> None:
r"""If a metric need to do some work before `compute()`, the metric
has to override this `pre_compute()`. One possible usage is to do
some pre-processing of the local state before `compute()` as TorchMetric
wraps `RecMetricComputation.compute()` and will do the global aggregation
before `RecMetricComputation.compute()` is called.
"""
return
def compute(self) -> List[MetricComputationReport]:
if self._my_rank == 0 or self._compute_on_all_ranks:
return self._compute()
else:
return []
def local_compute(self) -> List[MetricComputationReport]:
return self._compute()
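# Illustrative sketch (not part of the original file): a minimal RecMetricComputation
# subclass that tracks a weighted label sum per task, with both lifetime and window
# states. The enum member, class and state names below are made up for the example;
# the built-in metrics in this package follow the same pattern.
class _ExampleMetricName(MetricNameBase):
    WEIGHTED_LABEL_SUM = "weighted_label_sum"


class _WeightedLabelSumComputation(RecMetricComputation):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self._add_state(
            "label_sum",
            torch.zeros(self._n_tasks, dtype=torch.double),
            add_window_state=True,
            dist_reduce_fx="sum",
            persistent=True,
        )

    def update(
        self,
        *,
        predictions: Optional[torch.Tensor],
        labels: torch.Tensor,
        weights: Optional[torch.Tensor],
    ) -> None:
        if weights is None:
            weights = torch.ones_like(labels)
        # labels/weights are expected to be shaped (n_tasks, batch_size).
        state = (labels * weights).sum(dim=-1).double()
        self.label_sum += state
        self._aggregate_window_state("label_sum", state, labels.shape[-1])

    def _compute(self) -> List[MetricComputationReport]:
        return [
            MetricComputationReport(
                name=_ExampleMetricName.WEIGHTED_LABEL_SUM,
                metric_prefix=MetricPrefix.LIFETIME,
                value=self.label_sum,
            ),
            MetricComputationReport(
                name=_ExampleMetricName.WEIGHTED_LABEL_SUM,
                metric_prefix=MetricPrefix.WINDOW,
                value=self.get_window_state("label_sum"),
            ),
        ]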
class RecMetric(nn.Module, abc.ABC):
r"""The main class template to implement a recommendation metric.
    This class contains the recommendation tasks information (RecTaskInfo) and
    the actual computation object (RecMetricComputation). RecMetric processes
    all the information related to RecTaskInfo and models and passes the required
    signals to the computation object, allowing the implementation of
    RecMetricComputation to focus on the mathematical meaning.
    A new metric that inherits RecMetric must override the following attributes
in its own __init__(): `_namespace` and `_metrics_computations`. No other
methods should be overridden.
Args:
world_size (int): the number of trainers.
my_rank (int): the rank of this trainer.
batch_size (int): batch size used by this trainer.
tasks (List[RecTaskInfo]): the information of the model tasks.
compute_mode (RecComputeMode): the computation mode. See RecComputeMode.
window_size (int): the window size for the window metric.
fused_update_limit (int): the maximum number of updates to be fused.
compute_on_all_ranks (bool): whether to compute metrics on all ranks. This
            is necessary if non-leader ranks want to consume the global metrics result.
process_group (Optional[ProcessGroup]): the process group used for the
communication. Will use the default process group if not specified.
Call Args:
Not supported.
Returns:
Not supported.
Example::
ne = NEMetric(
world_size=4,
my_rank=0,
batch_size=128,
tasks=DefaultTaskInfo,
)
"""
_computation_class: Type[RecMetricComputation]
_namespace: MetricNamespaceBase
_metrics_computations: nn.ModuleList
_tasks: List[RecTaskInfo]
_window_size: int
_tasks_iter: Callable[[str], ComputeIterType]
_update_buffers: Dict[str, List[RecModelOutput]]
_default_weights: Dict[Tuple[int, ...], torch.Tensor]
PREDICTIONS: str = "predictions"
LABELS: str = "labels"
WEIGHTS: str = "weights"
def __init__(
self,
world_size: int,
my_rank: int,
batch_size: int,
tasks: List[RecTaskInfo],
compute_mode: RecComputeMode = RecComputeMode.UNFUSED_TASKS_COMPUTATION,
window_size: int = 100,
fused_update_limit: int = 0,
compute_on_all_ranks: bool = False,
process_group: Optional[dist.ProcessGroup] = None,
**kwargs: Any,
) -> None:
        # TODO(stellaya): consider inheriting from TorchMetrics.Metric or
# TorchMetrics.MetricCollection.
if (
compute_mode == RecComputeMode.FUSED_TASKS_COMPUTATION
and fused_update_limit > 0
):
raise ValueError(
"The fused tasks computation and the fused update cannot be set at the same time"
)
super().__init__()
self._world_size = world_size
self._my_rank = my_rank
self._window_size = math.ceil(window_size / world_size)
self._batch_size = batch_size
self._tasks = tasks
self._compute_mode = compute_mode
self._fused_update_limit = fused_update_limit
self._default_weights = {}
self._update_buffers = {
self.PREDICTIONS: [],
self.LABELS: [],
self.WEIGHTS: [],
}
if compute_mode == RecComputeMode.FUSED_TASKS_COMPUTATION:
n_metrics = 1
task_per_metric = len(self._tasks)
self._tasks_iter = self._fused_tasks_iter
else:
n_metrics = len(self._tasks)
task_per_metric = 1
self._tasks_iter = self._unfused_tasks_iter
self._metrics_computations: nn.ModuleList = nn.ModuleList(
[
# This Pyre error seems to be Pyre's bug as it can be inferred by mypy
# according to https://github.com/python/mypy/issues/3048.
                # pyre-fixme[45]: Cannot instantiate abstract class `RecMetricComputation`.
self._computation_class(
my_rank,
batch_size,
task_per_metric,
self._window_size,
compute_on_all_ranks,
process_group,
**kwargs,
)
for _ in range(n_metrics)
]
)
# TODO(stellaya): Refactor the _[fused, unfused]_tasks_iter methods and replace the
# compute_scope str input with an enum
def _fused_tasks_iter(self, compute_scope: str) -> ComputeIterType:
assert len(self._metrics_computations) == 1
self._metrics_computations[0].pre_compute()
for metric_report in getattr(
self._metrics_computations[0], compute_scope + "compute"
)():
for task, metric_value, has_valid_update in zip(
self._tasks,
metric_report.value,
self._metrics_computations[0].has_valid_update,
):
                # The attribute has_valid_update is a tensor whose length equals the
                # number of tasks. Each value corresponds to whether a task has valid
                # updates or not.
                # If a task has no valid updates, the calculated metric_value would be
                # meaningless, so we mask it with the default value, i.e. 0.
valid_metric_value = (
metric_value
if has_valid_update > 0
else torch.zeros_like(metric_value)
)
yield task, metric_report.name, valid_metric_value, compute_scope + metric_report.metric_prefix.value
def _unfused_tasks_iter(self, compute_scope: str) -> ComputeIterType:
for task, metric_computation in zip(self._tasks, self._metrics_computations):
metric_computation.pre_compute()
for metric_report in getattr(
metric_computation, compute_scope + "compute"
)():
# The attribute has_valid_update is a tensor with only 1 value
# corresponding to whether the task has valid updates or not.
# If there's no valid update, the calculated metric_report.value
# will be meaningless, so we mask it with the default value, i.e. 0.
valid_metric_value = (
metric_report.value
if metric_computation.has_valid_update[0] > 0
else torch.zeros_like(metric_report.value)
)
yield task, metric_report.name, valid_metric_value, compute_scope + metric_report.metric_prefix.value
def _fuse_update_buffers(self) -> Dict[str, RecModelOutput]:
def fuse(outputs: List[RecModelOutput]) -> RecModelOutput:
assert len(outputs) > 0
if isinstance(outputs[0], torch.Tensor):
return torch.cat(cast(List[torch.Tensor], outputs))
else:
task_outputs: Dict[str, List[torch.Tensor]] = defaultdict(list)
for output in outputs:
assert isinstance(output, dict)
for task_name, tensor in output.items():
task_outputs[task_name].append(tensor)
return {
name: torch.cat(tensors) for name, tensors in task_outputs.items()
}
ret: Dict[str, RecModelOutput] = {}
for key, output_list in self._update_buffers.items():
if len(output_list) > 0:
ret[key] = fuse(output_list)
else:
assert key == self.WEIGHTS
output_list.clear()
return ret
def _check_fused_update(self, force: bool) -> None:
if self._fused_update_limit <= 0:
return
if len(self._update_buffers[self.PREDICTIONS]) == 0:
return
if (
not force
and len(self._update_buffers[self.PREDICTIONS]) < self._fused_update_limit
):
return
fused_arguments = self._fuse_update_buffers()
self._update(
predictions=fused_arguments[self.PREDICTIONS],
labels=fused_arguments[self.LABELS],
weights=fused_arguments.get(self.WEIGHTS, None),
)
def _create_default_weights(self, predictions: torch.Tensor) -> torch.Tensor:
weights = self._default_weights.get(predictions.size(), None)
if weights is None:
weights = torch.ones_like(predictions)
self._default_weights[predictions.size()] = weights
return weights
def _check_nonempty_weights(self, weights: torch.Tensor) -> torch.Tensor:
return torch.gt(torch.count_nonzero(weights, dim=-1), 0)
def _update(
self,
*,
predictions: RecModelOutput,
labels: RecModelOutput,
weights: Optional[RecModelOutput],
) -> None:
with torch.no_grad():
if self._compute_mode == RecComputeMode.FUSED_TASKS_COMPUTATION:
assert isinstance(predictions, torch.Tensor)
# Reshape the predictions to size([len(self._tasks), self._batch_size])
predictions = predictions.view(-1, self._batch_size)
assert isinstance(labels, torch.Tensor)
labels = labels.view(-1, self._batch_size)
if weights is None:
weights = self._create_default_weights(predictions)
else:
assert isinstance(weights, torch.Tensor)
weights = weights.view(-1, self._batch_size)
                # has_valid_weights is a boolean tensor whose length equals the number
                # of tasks. Each value corresponds to whether the weights are valid,
                # i.e. are set to non-zero values for that task in this update.
                # If has_valid_weights is False for all the tasks, we just ignore this
                # update.
has_valid_weights = self._check_nonempty_weights(weights)
if torch.any(has_valid_weights):
self._metrics_computations[0].update(
predictions=predictions, labels=labels, weights=weights
)
self._metrics_computations[0].has_valid_update.logical_or_(
has_valid_weights
).byte()
else:
for task, metric_ in zip(self._tasks, self._metrics_computations):
if task.name not in predictions:
continue
if torch.numel(predictions[task.name]) == 0:
assert torch.numel(labels[task.name]) == 0
assert weights is None or torch.numel(weights[task.name]) == 0
continue
# Reshape the predictions to size([1, self._batch_size])
task_predictions = predictions[task.name].view(1, -1)
task_labels = labels[task.name].view(1, -1)
if weights is None:
task_weights = self._create_default_weights(task_predictions)
else:
task_weights = weights[task.name].view(1, -1)
# has_valid_weights is a tensor with only 1 value corresponding to
# whether the weights are valid, i.e. are set to non-zero values for
# the task in this update.
# If has_valid_update[0] is False, we just ignore this update.
has_valid_weights = self._check_nonempty_weights(task_weights)
if has_valid_weights[0]:
metric_.update(
predictions=task_predictions,
labels=task_labels,
weights=task_weights,
)
metric_.has_valid_update.logical_or_(has_valid_weights).byte()
def update(
self,
*,
predictions: RecModelOutput,
labels: RecModelOutput,
weights: Optional[RecModelOutput],
) -> None:
if self._fused_update_limit > 0:
self._update_buffers[self.PREDICTIONS].append(predictions)
self._update_buffers[self.LABELS].append(labels)
if weights is not None:
self._update_buffers[self.WEIGHTS].append(weights)
self._check_fused_update(force=False)
else:
self._update(predictions=predictions, labels=labels, weights=weights)
# The implementation of compute is very similar to local_compute, but compute overwrites
# the abstract method compute in torchmetrics.Metric, which is wrapped by _wrap_compute
def compute(self) -> Dict[str, torch.Tensor]:
self._check_fused_update(force=True)
ret = {}
for task, metric_name, metric_value, prefix in self._tasks_iter(""):
metric_key = compose_metric_key(
self._namespace, task.name, metric_name, prefix
)
ret[metric_key] = metric_value
return ret
def local_compute(self) -> Dict[str, torch.Tensor]:
self._check_fused_update(force=True)
ret = {}
for task, metric_name, metric_value, prefix in self._tasks_iter("local_"):
metric_key = compose_metric_key(
self._namespace, task.name, metric_name, prefix
)
ret[metric_key] = metric_value
return ret
def sync(self) -> None:
for computation in self._metrics_computations:
computation.sync()
def unsync(self) -> None:
for computation in self._metrics_computations:
if computation._is_synced:
computation.unsync()
def reset(self) -> None:
for computation in self._metrics_computations:
computation.reset()
def get_memory_usage(self) -> Dict[torch.Tensor, int]:
r"""Estimates the memory of the rec metric instance's
underlying tensors; returns the map of tensor to size
"""
tensor_map = {}
attributes_q = deque(self.__dict__.values())
while attributes_q:
attribute = attributes_q.popleft()
if isinstance(attribute, torch.Tensor):
tensor_map[attribute] = (
attribute.size().numel() * attribute.element_size()
)
elif isinstance(attribute, WindowBuffer):
attributes_q.extend(attribute.buffers)
elif isinstance(attribute, Mapping):
attributes_q.extend(attribute.values())
elif isinstance(attribute, Sequence) and not isinstance(attribute, str):
attributes_q.extend(attribute)
elif hasattr(attribute, "__dict__") and not isinstance(attribute, Enum):
attributes_q.extend(attribute.__dict__.values())
return tensor_map
# pyre-fixme[14]: `state_dict` overrides method defined in `Module` inconsistently.
def state_dict(
self,
destination: Optional[Dict[str, torch.Tensor]] = None,
prefix: str = "",
keep_vars: bool = False,
) -> Dict[str, torch.Tensor]:
# We need to flush the cached output to ensure checkpointing correctness.
self._check_fused_update(force=True)
destination = super().state_dict(
destination=destination, prefix=prefix, keep_vars=keep_vars
)
return self._metrics_computations.state_dict(
destination=destination,
prefix=f"{prefix}_metrics_computations.",
keep_vars=keep_vars,
)
class RecMetricList(nn.Module):
"""
    A list module to encapsulate multiple RecMetric instances and provide the
same interfaces as RecMetric.
Args:
        rec_metrics (List[RecMetric]): the list of the input RecMetrics.
Call Args:
Not supported.
Returns:
Not supported.
Example::
ne = NEMetric(
world_size=4,
my_rank=0,
batch_size=128,
tasks=DefaultTaskInfo
)
metrics = RecMetricList([ne])
"""
rec_metrics: nn.ModuleList
def __init__(self, rec_metrics: List[RecMetric]) -> None:
        # TODO(stellaya): consider inheriting from TorchMetrics.MetricCollection.
        # The prerequisite for using MetricCollection is that RecMetric inherits from
# TorchMetrics.Metric or TorchMetrics.MetricCollection
super().__init__()
self.rec_metrics = nn.ModuleList(rec_metrics)
def __len__(self) -> int:
return len(self.rec_metrics)
def __getitem__(self, idx: int) -> nn.Module:
return self.rec_metrics[idx]
def update(
self,
*,
predictions: RecModelOutput,
labels: RecModelOutput,
weights: RecModelOutput,
) -> None:
for metric in self.rec_metrics:
metric.update(predictions=predictions, labels=labels, weights=weights)
def compute(self) -> Dict[str, torch.Tensor]:
ret = {}
for metric in self.rec_metrics:
ret.update(metric.compute())
return ret
def local_compute(self) -> Dict[str, torch.Tensor]:
ret = {}
for metric in self.rec_metrics:
ret.update(metric.local_compute())
return ret
def sync(self) -> None:
for metric in self.rec_metrics:
metric.sync()
def unsync(self) -> None:
for metric in self.rec_metrics:
metric.unsync()
def reset(self) -> None:
for metric in self.rec_metrics:
metric.reset()
| 38.331818 | 117 | 0.616072 | 24,152 | 0.954662 | 2,183 | 0.086288 | 742 | 0.029329 | 0 | 0 | 6,644 | 0.262619 |
d2c55dd79284c9bf304a1f86538b6964cbb89f09 | 7,594 | py | Python | alison.py | johanhoiness/SlothBot | 556f9e0f67aa90543bd98889b06a4b939e30450d | [
"MIT"
]
| 1 | 2017-06-28T09:24:49.000Z | 2017-06-28T09:24:49.000Z | alison.py | johanhoiness/SlothBot | 556f9e0f67aa90543bd98889b06a4b939e30450d | [
"MIT"
]
| null | null | null | alison.py | johanhoiness/SlothBot | 556f9e0f67aa90543bd98889b06a4b939e30450d | [
"MIT"
]
| null | null | null | __author__ = 'JohnHiness'
import sys
import os
import random
import time
import string
import connection
from time import strftime
import ceq
import json, urllib2
import thread
args = sys.argv
req_files = ['filegen.py', 'connection.py', 'commands.py', 'general.py', 'automatics.py']
for filename in req_files:
if os.path.exists(filename) == False:
print "Required file \"{}\" not found. Make sure you have acquired all files.".format(filename)
sys.exit(1)
import filegen
if os.path.exists('config.py') == False:
print 'No configuration-file found. Generating config.py'
filegen.gen_config()
python = sys.executable
print str(python)+'||'+str(python)+'||'+ str(* sys.argv)
os.execl(python, python, * sys.argv)
if os.path.exists('revar.py') == False:
print 'No reconfigurable file found. Generating revar.py'
filegen.gen_revar()
python = sys.executable
print str(python)+'||'+str(python)+'||'+ str(* sys.argv)
os.execl(python, python, * sys.argv)
import config
import revar
import filegen
import commands
import general
import automatics
if not revar.channels:
revar.channels = config.channel.replace(', ', ',').replace(' ', ',').split(',')
if len(args) > 1:
if args[1].lower() == 'reconfig' or args[1].lower() == 'config':
answr = raw_input("This will have you regenerate the configuration file and all old configurations will be lost.\nAre you sure you want to do this?(y/n) ")
		while answr.lower() != 'y' and answr.lower() != 'n':
answr = raw_input("You must use the letters Y or N to answer: ")
if answr.lower() == 'y':
filegen.gen_config()
sys.exit(0)
if answr.lower() == 'n':
sys.exit(0)
elif args[1].lower() == 'help':
print "Usage: python alison.py <help | reconfig | >"
sys.exit(0)
else:
print "Flag not recognized."
sys.exit(1)
def connect(server, port):
print "Connecting to {} with port {}.".format(server, port)
s = connection.s
readbuffer = ''
try:
s.connect((server, port))
except BaseException as exc:
print 'Failed to connect: ' + str(exc)
sys.exit(1)
s.send("PASS %s\n" % config.password)
s.send("USER %s %s %s :%s\n" % (config.bot_username, config.bot_hostname, config.bot_servername, config.bot_realname))
s.send("NICK %s\n" % revar.bot_nick)
mode_found = False
while not mode_found:
readbuffer = readbuffer + s.recv(2048)
temp = string.split(readbuffer, "\n")
readbuffer = temp.pop()
for rline in temp:
rline = string.rstrip(rline)
rline = string.split(rline)
g = general
if rline[0] == "PING":
g.ssend("PONG %s\r" % rline[1])
if rline[1] == '433':
if revar.bot_nick.lower() != config.bot_nick2.lower():
revar.bot_nick = config.bot_nick2
else:
revar.bot_nick += '_'
g.ssend('NICK %s' % revar.bot_nick)
if len(rline) > 2 and rline[1] == '391':
revar.bot_nick = rline[2]
if len(rline) > 2 and rline[1].lower() == 'join':
if not rline[2].lower() in revar.channels:
revar.channels.append(rline[2].lower())
if len(rline) > 2 and rline[1].lower() == 'part':
if rline[2].lower() in revar.channels:
try:
						revar.channels.remove(rline[2].lower())
except:
pass
if rline[1] == 'MODE':
mode_found = True
g.ssend('JOIN %s' % ','.join(revar.channels))
general.update_user_info()
def server_responses(rline):
g = general
if rline[0] == "PING":
g.ssend("PONG %s\r" % rline[1])
return True
if len(rline) > 4 and rline[3] == '152':
general.append_user_info(rline)
return True
if rline[1] == '433':
if revar.bot_nick.lower() != config.bot_nick2.lower():
revar.bot_nick = config.bot_nick2
else:
revar.bot_nick += '_'
g.ssend('NICK %s' % revar.bot_nick)
return True
if len(rline) > 2 and rline[1] == '391':
revar.bot_nick = rline[2]
return True
if len(rline) > 1 and rline[1].lower() == 'pong':
general.last_pong = time.time()
return True
if len(rline) > 2 and rline[1].lower() == 'join':
if not rline[2].lower() in revar.channels:
revar.channels.append(rline[2].lower())
return True
if len(rline) > 2 and rline[1].lower() == 'nick':
general.update_user_info()
return True
if len(rline) > 2 and rline[1].lower() == 'part':
if rline[2].lower() in revar.channels:
try:
				revar.channels.remove(rline[2].lower())
except:
pass
return True
if len(rline) > 3 and rline[1] == '319' and rline[2].lower() == revar.bot_nick.lower():
revar.channels = ' '.join(rline[4:])[1:].replace('+', '').replace('@', '').lower().split()
return True
if len(rline) > 2 and rline[1] == '391':
revar.bot_nick = rline[2]
return True
if not rline[0].find('!') != -1:
return True
if len(rline) > 3 and rline[1] == '315':
return True
return False
def find_imdb_link(chanq, msg):
if msg.lower().find('imdb.com/title/') != -1:
imdb_id = msg.lower()[msg.lower().find('imdb.com/title/')+15:][:9]
g.csend(chanq, commands.imdb_info('id', imdb_id))
def botendtriggerd(chant, usert, msgt):
if not general.check_operator(usert):
outp = 'You do not have permission to use any of these commands.'
else:
msgt = general.check_bottriggers(msgt).split()
outp = commands.operator_commands(chant, msgt)
if outp is not None:
for line in outp.split('\n'):
g.csend(chant, line)
time.sleep(1)
def work_command(chanw, userw, msgw):
msgw = general.check_midsentencecomment(msgw)
msgw, rec, notice, pm = general.checkrec(chanw, userw, msgw)
outp = commands.check_called(chanw, userw, msgw)
if outp is not None:
for line in outp.split('\n'):
g.csend(chanw, line, notice, pm, rec)
time.sleep(1)
def work_line(chanl, userl, msgl):
if chanl in general.countdown and msgl.lower().find('stop') != -1:
general.countdown.remove(chanl)
if chanl.find('#') != -1 and (msgl.lower().find('johan') != -1 or msgl.lower().find('slut') != -1):
for item in general.user_info:
if item['nickserv'].lower() == 'sloth':
general.csend(item['nick'], '{} <{}> {}'.format(chanl, userl, msgl))
general.update_seen(chanl, userl, msgl)
if (" "+msgl).lower().find('deer god') != -1 and time.time() - general.deer_god > 30 and revar.deer_god:
general.deer_god = time.time()
general.csend(chanl, "Deer God http://th07.deviantart.net/fs71/PRE/f/2011/223/3/c/deer_god_by_aubrace-d469jox.jpg")
if __name__ == '__main__':
thread.start_new_thread(automatics.get_ftime, ())
connect(config.server, config.port)
thread.start_new_thread(automatics.autoping, ())
thread.start_new_thread(automatics.autoweather, ())
thread.start_new_thread(automatics.checkpongs, ())
thread.start_new_thread(automatics.who_channel, ())
s = connection.s
readbuffer = ''
while True:
readbuffer = readbuffer + s.recv(2048)
temp = string.split(readbuffer, "\n")
readbuffer = temp.pop()
for rline in temp:
rline = string.rstrip(rline)
rline = string.split(rline)
g = general
if not server_responses(rline) and len(rline) > 3:
msg = ' '.join(rline[3:])[1:]
user = rline[0][1:][:rline[0].find('!')][:-1]
chan = rline[2]
if chan.lower() == revar.bot_nick.lower():
chan = user
if config.verbose:
print g.ftime + ' << ' + ' '.join(rline)
else:
print g.ftime + ' << ' + chan + ' <{}> '.format(user) + msg
if general.check_bottriggers(msg):
thread.start_new_thread(botendtriggerd, (chan, user, msg),)
break
thread.start_new_thread(find_imdb_link, (chan, msg), )
thread.start_new_thread(work_line, (chan, user, msg), )
msg = general.check_midsentencetrigger(msg)
msg = general.check_triggers(msg)
if msg:
thread.start_new_thread(work_command, (chan, user, msg), )
| 29.095785 | 157 | 0.658019 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,173 | 0.154464 |
d2c5679b86d58ca48ad37cdef98dbe5e554266cb | 2,364 | py | Python | pyroomacoustics/experimental/tests/test_deconvolution.py | HemaZ/pyroomacoustics | c401f829c71ff03a947f68f9b6b2f48346ae84b2 | [
"MIT"
]
| 1 | 2020-02-13T14:39:37.000Z | 2020-02-13T14:39:37.000Z | pyroomacoustics/experimental/tests/test_deconvolution.py | HemaZ/pyroomacoustics | c401f829c71ff03a947f68f9b6b2f48346ae84b2 | [
"MIT"
]
| null | null | null | pyroomacoustics/experimental/tests/test_deconvolution.py | HemaZ/pyroomacoustics | c401f829c71ff03a947f68f9b6b2f48346ae84b2 | [
"MIT"
]
| 1 | 2021-01-14T08:42:47.000Z | 2021-01-14T08:42:47.000Z |
from unittest import TestCase
import numpy as np
from scipy.signal import fftconvolve
import pyroomacoustics as pra
# fix seed for repeatability
np.random.seed(0)
h_len = 30
x_len = 1000
SNR = 1000. # decibels
h_lp = np.fft.irfft(np.ones(5), n=h_len)
h_rand = np.random.randn(h_len)
h_hann = pra.hann(h_len, flag='symmetric')
x = np.random.randn(x_len)
noise = np.random.randn(x_len + h_len - 1)
def generate_signals(SNR, x, h, noise):
''' run convolution '''
# noise standard deviation
sigma_noise = 10**(-SNR / 20.)
y = fftconvolve(x, h)
y += sigma_noise * noise
return y, sigma_noise
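# Added note: SNR here is in decibels, so sigma_noise = 10**(-SNR/20) is an
# amplitude ratio; with SNR = 1000 dB the injected noise is ~1e-50, which is
# why the tests below are labelled "noiseless".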
class TestDeconvolution(TestCase):
def test_deconvolve_hann_noiseless(self):
h = h_hann
h_len = h_hann.shape[0]
SNR = 1000.
tol = 1e-7
y, sigma_noise = generate_signals(SNR, x, h, noise)
h_hat = pra.experimental.deconvolve(y, x, length=h_len)
rmse = np.sqrt(np.linalg.norm(h_hat - h)**2 / h_len)
print('rmse=', rmse, '(tol=', tol, ')')
self.assertTrue(rmse < tol)
def test_wiener_deconvolve_hann_noiseless(self):
h = h_hann
h_len = h_hann.shape[0]
SNR = 1000.
tol = 1e-7
y, sigma_noise = generate_signals(SNR, x, h, noise)
h_hat = pra.experimental.wiener_deconvolve(y, x, length=h_len, noise_variance=sigma_noise**2)
rmse = np.sqrt(np.linalg.norm(h_hat - h)**2 / h_len)
print('rmse=', rmse, '(tol=', tol, ')')
self.assertTrue(rmse < tol)
if __name__ == '__main__':
import matplotlib.pyplot as plt
h = h_hann
y, sigma_noise = generate_signals(SNR, x, h, noise)
h_hat1 = pra.experimental.deconvolve(y, x, length=h_len)
res1 = np.linalg.norm(y - fftconvolve(x, h_hat1))**2 / y.shape[0]
mse1 = np.linalg.norm(h_hat1 - h)**2 / h_len
h_hat2 = pra.experimental.wiener_deconvolve(y, x, length=h_len, noise_variance=sigma_noise**2, let_n_points=15)
res2 = np.linalg.norm(y - fftconvolve(x, h_hat2))**2 / y.shape[0]
mse2 = np.linalg.norm(h_hat2 - h)**2 / h_len
print('MSE naive: rmse=', np.sqrt(mse1), ' res=', pra.dB(res1, power=True))
print('MSE Wiener: rmse=', np.sqrt(mse2), ' res=', pra.dB(res1, power=True))
plt.plot(h)
plt.plot(h_hat1)
plt.plot(h_hat2)
plt.legend(['Original', 'Naive', 'Wiener'])
plt.show()
| 26.266667 | 115 | 0.630711 | 901 | 0.381134 | 0 | 0 | 0 | 0 | 0 | 0 | 218 | 0.092217 |
d2c5ccb03692b30b21e99cbcada633194e147414 | 7,423 | py | Python | pthelper/img_to_txt.py | hkcountryman/veg-scanner | 6b3aa4d0799c901cecdbc0f4b5ca61b0d754ab30 | [
"MIT"
]
| null | null | null | pthelper/img_to_txt.py | hkcountryman/veg-scanner | 6b3aa4d0799c901cecdbc0f4b5ca61b0d754ab30 | [
"MIT"
]
| null | null | null | pthelper/img_to_txt.py | hkcountryman/veg-scanner | 6b3aa4d0799c901cecdbc0f4b5ca61b0d754ab30 | [
"MIT"
]
| null | null | null | import cv2 as cv
from deskew import determine_skew
import numpy as np
from PIL import Image, ImageFilter, ImageOps, UnidentifiedImageError
from pytesseract import image_to_string
from skimage import io
from skimage.color import rgb2gray
from skimage.transform import rotate
from spellchecker import SpellChecker
import traceback
# On Windows, you need to tell it where Tesseract is installed, for example:
# pytesseract.pytesseract.tesseract_cmd = r'C:\\Program Files\\Tesseract-OCR\\tesseract.exe'
# OCR Stuff
####################################################################################################
def to_text(pic):
"""
Read and return text from an image.
Args:
pic: filename string, pathlib.Path object, or file object to read.
Returns:
Text from the image.
"""
try:
img = Image.open(pic)
except FileNotFoundError as e:
print("File " + pic + " does not exist.")
quit()
    except UnidentifiedImageError as e:
print("That file is not an image.")
quit()
except:
print("Unanticipated error:")
traceback.print_exc()
quit()
remove_alpha(img)
text = image_to_string(img)
return text
def valid_text(ocr, accuracy_pct, language="en", distance=2, case_sensitive=True): # this spellchecker sucks
"""
Checks that the output of to_text() makes sense. To build your own dictionary, see
https://pyspellchecker.readthedocs.io/en/latest/quickstart.html#how-to-build-a-new-dictionary
Args:
ocr: string to analyze.
accuracy_pct: percentage of words in ocr that should be in the dictionary.
language: language of dictionary (default English); see
https://pyspellchecker.readthedocs.io/en/latest/quickstart.html#changing-language
distance: Levenshtein distance (default 2 for shorter words); see
https://pyspellchecker.readthedocs.io/en/latest/quickstart.html#basic-usage
https://en.wikipedia.org/wiki/Levenshtein_distance
Returns:
Boolean indicating success of to_text():
True: to_text() makes sense.
False: to_text() returned nonsense.
"""
if ocr == "":
return False # if it returned nothing
word_list = ocr.split() # get list of all words in input string
spell = SpellChecker(language=language, distance=distance, case_sensitive=case_sensitive)
misspelled = spell.unknown(word_list) # list of unknown words from word_list
#print(misspelled)
#print(word_list)
if (len(word_list) - len(misspelled)) / len(word_list) < accuracy_pct / 100:
return False # if it returned gibberish
return True # otherwise, all good
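# Added worked example: with accuracy_pct=80, an OCR result of 10 words where
# the spellchecker flags 3 as unknown gives (10 - 3) / 10 = 0.7 < 0.8, so
# valid_text() returns False and parse() falls through to "OCR failed.".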
def parse(pic, accuracy_pct, language="en", distance=2, case_sensitive=True):
"""
Attempts OCR with image and decides if processing is needed.
Args:
pic: filename string, pathlib.Path object, or file object to read.
accuracy_pct: percentage of words in string that should be in the dictionary.
language: language of dictionary (default English); see
https://pyspellchecker.readthedocs.io/en/latest/quickstart.html#changing-language
distance: Levenshtein distance (default 2 for shorter words); see
https://pyspellchecker.readthedocs.io/en/latest/quickstart.html#basic-usage
https://en.wikipedia.org/wiki/Levenshtein_distance
Returns:
Text from the image if OCR was successful; otherwise a failure message.
"""
text = to_text(pic)
if valid_text(text, accuracy_pct, language=language, distance=distance,
case_sensitive=case_sensitive):
return text
else:
return "OCR failed." # time for processing
# Image Processing Stuff
####################################################################################################
def remove_alpha(pic):
"""
Removes the alpha channel from an image, if it exists. Necessary for OCR.
Args:
pic: PIL.Image object to convert.
Returns:
The PIL.Image object in RGB format.
"""
return pic.convert("RGB")
def invert(pic):
"""
Inverts the colors in an image. Useful if OCR doesn't work.
Args:
pic: PIL.Image object to invert.
Returns:
The inverted PIL.Image object.
"""
return ImageOps.invert(remove_alpha(pic)) # negative colors
'''def resize(pic): # needs work: possible key error "dpi"
"""
Resizes an image that is less than 300 dpi. Useful if OCR doesn't work.
Args:
pic: PIL.Image object to resize.
Returns:
The resized PIL.Image object.
"""
pic = remove_alpha(pic)
res = pic.info["dpi"] # fetch tuple of dpi
lower = min(res) # get the lower of the two entries in the tuple
factor = 300 / lower # how much should we scale?
resized = pic.resize((round(pic.size[0]*factor), round(pic.size[1]*factor))) # scale it!
return resized'''
def threshold(pic, gaussian=True): # needs work
"""
Applies thresholding to the image. Doesn't work.
(Tesseract already tries the Otsu algorithm.)
Args:
pic: filename string, pathlib.Path object, or file object to read.
gaussian: boolean:
True: apply adaptive Gaussian thresholding.
False: apply adaptive mean thresholding.
Returns:
The image with thresholding.
"""
img = cv.imread("test2.jpg", 0)
if gaussian: # adaptive Gaussian thresholding
img = cv.adaptiveThreshold(img, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 11, 2)
else: # adaptive mean thresholding
img = cv.adaptiveThreshold(img, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY, 11, 2)
return Image.fromarray(img)
def denoise(pic): # needs work
"""
Allegedly removes noise? Useful if OCR doesn't work.
Args:
pic: filename string, pathlib.Path object, or file object to read.
Returns:
The denoised image.
"""
img = cv.imread(pic)
img = cv.fastNlMeansDenoising(img)
return Image.fromarray(img)
def dilate(pic, size):
"""
Dilates the text (grows edges of characters) if it's against a common background.
Useful if OCR doesn't work.
Args:
pic: PIL.Image object to dilate.
size: kernel size, in pixels. Recommend starting at 1.
Returns:
The dilated PIL.Image object.
"""
pic = remove_alpha(pic)
return pic.filter(ImageFilter.MaxFilter(size))
def erode(pic, size):
"""
Erodes the text (shrinks edges of characters) if it's against a common background.
Useful if OCR doesn't work.
Args:
pic: PIL.Image object to erode.
size: kernel size, in pixels. Recommend starting at 1.
Returns:
The eroded PIL.Image object.
"""
pic = remove_alpha(pic)
return pic.filter(ImageFilter.MinFilter(size))
def deskew(pic, output): # needs work
"""
Deskews an image. Useful if OCR doesn't work.
Args:
pic: filename string, pathlib.Path object, or file object to read.
output: string to save output as
"""
# Thanks to Stephane Brunner (https://github.com/sbrunner) for deskew and the code!
img = io.imread(pic)
grayscale = rgb2gray(img)
angle = determine_skew(grayscale)
rotated = rotate(img, angle, resize=True) * 255
io.imsave(output, rotated.astype(np.uint8))
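# --- Illustrative fallback pipeline (added sketch, not part of the original module) ---
# The file name, accuracy threshold and filter sizes below are made-up examples.
#
# if __name__ == "__main__":
#     result = parse("scan.jpg", accuracy_pct=80)
#     if result == "OCR failed.":
#         img = Image.open("scan.jpg")
#         for candidate in (invert(img), dilate(img, 3), erode(img, 3)):
#             text = image_to_string(candidate)
#             if valid_text(text, 80):
#                 result = text
#                 break
#     print(result)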
| 33.588235 | 108 | 0.649064 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,027 | 0.677219 |
d2c5ed1f81d8bfe0be0278969594e7da6dcf2781 | 3,544 | py | Python | scripts/training.py | tobinsouth/privacy-preserving-synthetic-mobility-data | fd4d1851b47e3e7304761a894b460e8345fae5db | [
"MIT"
]
| null | null | null | scripts/training.py | tobinsouth/privacy-preserving-synthetic-mobility-data | fd4d1851b47e3e7304761a894b460e8345fae5db | [
"MIT"
]
| null | null | null | scripts/training.py | tobinsouth/privacy-preserving-synthetic-mobility-data | fd4d1851b47e3e7304761a894b460e8345fae5db | [
"MIT"
]
| null | null | null | # Params
learning_rate = 0.001
k = 0.0025
x0 =2500
epochs = 4
batch_size=16
import torch, numpy as np
from tqdm import tqdm
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Get the dataloader
from dataloader import get_train_test
trainStays, testStays = get_train_test(train_size=0.95, batch_size=batch_size, shuffle=True, dataset='cuebiq')
# Load and define the model
from VAE import SentenceVAE
# Model params
params = dict(
vocab_size = trainStays.dataset.dataset._vocab_size,
max_sequence_length = trainStays.dataset.dataset._max_seq_len,
embedding_size = 256,
rnn_type = 'gru',
hidden_size = 256,
num_layers = 1,
bidirectional = False,
latent_size = 16,
word_dropout = 0,
embedding_dropout = 0.5,
sos_idx=0,
eos_idx=0,
pad_idx=0,
unk_idx=1,
device=device,
)
model = SentenceVAE(**params)
model = model.to(device) # Device is defined in VAE
# Custom loss function from paper
NLL = torch.nn.NLLLoss(ignore_index=0, reduction='sum')
def loss_fn(logp, target, mean, logv, step, k, x0):
"""The loss function used in the paper, taken from https://github.com/timbmg/Sentence-VAE"""
target = target.view(-1)
logp = logp.view(-1, logp.size(2))
# Negative Log Likelihood
NLL_loss = NLL(logp, target)
# KL Divergence
KL_loss = -0.5 * torch.sum(1 + logv - mean.pow(2) - logv.exp())
KL_weight = float(1/(1+np.exp(-k*(step-x0))))
return NLL_loss, KL_loss, KL_weight
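# Added note on the KL annealing above: KL_weight follows the logistic schedule
# 1 / (1 + exp(-k * (step - x0))). With k = 0.0025 and x0 = 2500 it is ~0.002 at
# step 0, 0.5 at step 2500 and ~0.998 by step 5000, so training starts close to a
# plain autoencoder (NLL only) and gradually blends in the KL term:
#   loss = (NLL_loss + KL_weight * KL_loss) / batch_size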
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Logging with tensorboard
from torch.utils.tensorboard import SummaryWriter
LOG_DIR = "runs/cuebiq"
comment = f' batch_size = {batch_size} lr = {learning_rate} dp = False'
train_writer = SummaryWriter(LOG_DIR + "/train", comment=comment)
val_writer = SummaryWriter(LOG_DIR + "/val", comment=comment)
# Run training loop
step = 0
for epoch in range(epochs):
running_loss = 0.0
for i, batch in enumerate(tqdm(trainStays, miniters=500)):
batch = batch.to(device)
# Forward pass
logp, mean, logv, z = model(batch)
# loss calculation
NLL_loss, KL_loss, KL_weight = loss_fn(logp, batch, mean, logv, step, k, x0)
loss = (NLL_loss + KL_weight * KL_loss) / batch_size
loss.to(device)
# backward + optimization
optimizer.zero_grad()
loss.backward()
optimizer.step()
step += 1
running_loss += loss.item()
if i % 1000 == 999:
train_writer.add_scalar('loss', running_loss / 1000, epoch * len(trainStays) + i)
running_loss = 0.0
# Periodic Validation and checkpointing
if i % 20000 == 19999:
model.eval()
val_loss = 0.0
for batch in testStays:
batch = batch.to(device)
logp, mean, logv, z = model(batch)
NLL_loss, KL_loss, KL_weight = loss_fn(logp, batch, mean, logv, step, k, x0)
loss = (NLL_loss + KL_weight * KL_loss) / batch_size
val_loss += loss.item()
val_writer.add_scalar('loss', val_loss / 20000, epoch * len(trainStays) + i)
model.train()
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'loss': val_loss / 10000,
'params': params,
}, '../models/cuebiq_vae.pt')
train_writer.close()
val_writer.close() | 30.290598 | 110 | 0.628668 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 616 | 0.173815 |
d2c662f276d75d5cf194b16fa8615d6ac1fdca1d | 1,674 | py | Python | tests/compute/planar/test_rotateZ.py | ianna/vector | c00b258049c0ea1de46f90311849923b96068a02 | [
"BSD-3-Clause"
]
| null | null | null | tests/compute/planar/test_rotateZ.py | ianna/vector | c00b258049c0ea1de46f90311849923b96068a02 | [
"BSD-3-Clause"
]
| null | null | null | tests/compute/planar/test_rotateZ.py | ianna/vector | c00b258049c0ea1de46f90311849923b96068a02 | [
"BSD-3-Clause"
]
| null | null | null | # Copyright (c) 2019-2021, Jonas Eschle, Jim Pivarski, Eduardo Rodrigues, and Henry Schreiner.
#
# Distributed under the 3-clause BSD license, see accompanying file LICENSE
# or https://github.com/scikit-hep/vector for details.
import numpy
import pytest
import vector.backends.numpy_
import vector.backends.object_
def test_xy():
vec = vector.backends.object_.VectorObject2D(
vector.backends.object_.AzimuthalObjectXY(1, 0)
)
assert vec.rotateZ(0.1).x == pytest.approx(0.9950041652780258)
assert vec.rotateZ(0.1).y == pytest.approx(0.09983341664682815)
array = vector.backends.numpy_.VectorNumpy2D(
[(0, 0), (1, 0), (0, 1)], dtype=[("x", numpy.float64), ("y", numpy.float64)]
)
assert isinstance(array.rotateZ(0.1), vector.backends.numpy_.VectorNumpy2D)
out = array.rotateZ(0.1)
assert out.dtype.names == ("x", "y")
assert numpy.allclose(out.x, [0, 0.9950041652780258, -0.09983341664682815])
assert numpy.allclose(out.y, [0, 0.09983341664682815, 0.9950041652780258])
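# Added note: the expected numbers above are just the 2-D rotation matrix applied
# to each input, x' = x*cos(0.1) - y*sin(0.1) and y' = x*sin(0.1) + y*cos(0.1),
# with cos(0.1) ~= 0.99500417 and sin(0.1) ~= 0.09983342.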
def test_rhophi():
vec = vector.backends.object_.VectorObject2D(
vector.backends.object_.AzimuthalObjectRhoPhi(1, 0)
)
assert vec.rotateZ(0.1).rho == pytest.approx(1)
assert vec.rotateZ(0.1).phi == pytest.approx(0.1)
array = vector.backends.numpy_.VectorNumpy2D(
[(0, 0), (1, 0), (0, 1)], dtype=[("rho", numpy.float64), ("phi", numpy.float64)]
)
assert isinstance(array.rotateZ(0.1), vector.backends.numpy_.VectorNumpy2D)
out = array.rotateZ(0.1)
assert out.dtype.names == ("rho", "phi")
assert numpy.allclose(out.rho, [0, 1, 0])
assert numpy.allclose(out.phi, [0.1, 0.1, 1.1])
| 37.2 | 94 | 0.680406 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 256 | 0.152927 |
d2c66e24087a653bf88316c9ed3e62b1ba5b4aa5 | 3,791 | py | Python | src/RIOT/tests/pkg_tensorflow-lite/mnist/mnist_mlp.py | ARte-team/ARte | 19f17f57522e1b18ba390718fc94be246451837b | [
"MIT"
]
| 2 | 2020-04-30T08:17:45.000Z | 2020-05-23T08:46:54.000Z | src/RIOT/tests/pkg_tensorflow-lite/mnist/mnist_mlp.py | ARte-team/ARte | 19f17f57522e1b18ba390718fc94be246451837b | [
"MIT"
]
| null | null | null | src/RIOT/tests/pkg_tensorflow-lite/mnist/mnist_mlp.py | ARte-team/ARte | 19f17f57522e1b18ba390718fc94be246451837b | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
import os
# imports for array-handling
import numpy as np
import tensorflow as tf
# keras imports for the dataset and building our neural network
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
# let's keep our keras backend tensorflow quiet
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# load mnist dataset
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# building the input vector from the 28x28 pixels
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
# Split the train set in a train + validation set
X_valid = X_train[50000:]
y_valid = y_train[50000:]
X_train = X_train[:50000]
y_train = y_train[:50000]
# Normalize the data
X_train = X_train / 255.0
X_test = X_test / 255.0
X_valid = X_valid / 255.0
# building a very simple linear stack of layers using a sequential model
model = Sequential([
Dense(64, activation='relu', input_shape=(784,)),
Dropout(0.2),
Dense(10, activation='softmax')
])
# compiling the sequential model
model.compile(loss='sparse_categorical_crossentropy', metrics=['accuracy'],
optimizer='adam')
batch_size = 32
epochs = 30
# training the model and saving metrics in history
history = model.fit(X_train, y_train,
batch_size=batch_size, epochs=epochs,
verbose=2,
validation_data=(X_valid, y_valid))
# saving the model
# Convert the model to the TensorFlow Lite format without quantization
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
# Save the basic model to disk
open("model_basic.tflite", "wb").write(tflite_model)
# Convert the model to the TensorFlow Lite format with quantization
converter = tf.lite.TFLiteConverter.from_keras_model(model)
(mnist_train, _), (_, _) = mnist.load_data()
mnist_train = mnist_train.reshape(60000, 784)
mnist_train = mnist_train.astype('float32')
mnist_train = mnist_train / 255.0
mnist_ds = tf.data.Dataset.from_tensor_slices((mnist_train)).batch(1)
def representative_data_gen():
for input_value in mnist_ds.take(100):
yield [input_value]
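# Added note: the representative dataset lets the TFLite converter observe typical
# activation ranges over ~100 samples so it can choose quantization parameters;
# combined with Optimize.OPTIMIZE_FOR_SIZE this produces the smaller quantized
# model whose size is compared against the float model below.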
converter.representative_dataset = representative_data_gen
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
tflite_model = converter.convert()
# Save the quantized model to disk
open("model.tflite", "wb").write(tflite_model)
basic_model_size = os.path.getsize("model_basic.tflite")
print("Basic model is %d bytes" % basic_model_size)
quantized_model_size = os.path.getsize("model.tflite")
print("Quantized model is %d bytes" % quantized_model_size)
difference = basic_model_size - quantized_model_size
print("Difference is %d bytes" % difference)
# Now let's verify the model on a few input digits
# Instantiate an interpreter for the model
model_quantized_reloaded = tf.lite.Interpreter('model.tflite')
# Allocate memory for each model
model_quantized_reloaded.allocate_tensors()
# Get the input and output tensors so we can feed in values and get the results
model_quantized_input = model_quantized_reloaded.get_input_details()[0]["index"]
model_quantized_output = model_quantized_reloaded.get_output_details()[0]["index"]
# Create arrays to store the results
model_quantized_predictions = np.empty(X_test.size)
for i in range(10):
# Invoke the interpreter
model_quantized_reloaded.set_tensor(model_quantized_input, X_test[i:i+1, :])
model_quantized_reloaded.invoke()
model_quantized_prediction = model_quantized_reloaded.get_tensor(model_quantized_output)
print("Digit: {} - Prediction:\n{}".format(y_test[i], model_quantized_prediction))
print("")
| 32.127119 | 92 | 0.759166 | 0 | 0 | 101 | 0.026642 | 0 | 0 | 0 | 0 | 1,265 | 0.333685 |
d2c77644e40785600cc8b3b66d9450e3d85ddf12 | 67 | py | Python | lang/Python/random-numbers-1.py | ethansaxenian/RosettaDecode | 8ea1a42a5f792280b50193ad47545d14ee371fb7 | [
"MIT"
]
| null | null | null | lang/Python/random-numbers-1.py | ethansaxenian/RosettaDecode | 8ea1a42a5f792280b50193ad47545d14ee371fb7 | [
"MIT"
]
| null | null | null | lang/Python/random-numbers-1.py | ethansaxenian/RosettaDecode | 8ea1a42a5f792280b50193ad47545d14ee371fb7 | [
"MIT"
]
| null | null | null | import random
values = [random.gauss(1, .5) for i in range(1000)]
| 16.75 | 51 | 0.686567 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d2c7feb7c74a18d3044bb9f836e91d4015495e7f | 155 | py | Python | src/display.py | thebruce87/Photobooth | 43ba9e9537bd51040c2cb2ffb809d7a8ca0633ef | [
"MIT"
]
| null | null | null | src/display.py | thebruce87/Photobooth | 43ba9e9537bd51040c2cb2ffb809d7a8ca0633ef | [
"MIT"
]
| null | null | null | src/display.py | thebruce87/Photobooth | 43ba9e9537bd51040c2cb2ffb809d7a8ca0633ef | [
"MIT"
]
| null | null | null | class Display():
def __init__(self, width, height):
self.width = width
self.height = height
def getSize(self):
return (self.width, self.height)
| 19.375 | 35 | 0.690323 | 154 | 0.993548 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d2c876fb5f375461dc3ae3b4e7ececc7c2f8aa23 | 2,091 | py | Python | stock_api_handler.py | Sergix/analyst-server | a2ec7cc92610f78ac2a4ce4a46c52410219cd360 | [
"MIT"
]
| 2 | 2020-03-16T01:09:10.000Z | 2020-03-16T03:02:57.000Z | stock_api_handler.py | Sergix/analyst-server | a2ec7cc92610f78ac2a4ce4a46c52410219cd360 | [
"MIT"
]
| 1 | 2020-04-21T16:49:53.000Z | 2020-04-29T02:15:45.000Z | stock_api_handler.py | Sergix/analyst-server | a2ec7cc92610f78ac2a4ce4a46c52410219cd360 | [
"MIT"
]
| 3 | 2020-03-16T14:46:41.000Z | 2020-03-21T13:55:24.000Z | # This python script handles stock api request from yfinance
# Last Updated: 4/7/2020
# Credits:nóto
#Import yfinance api lib
import yfinance as yf
#Import pandas lib
import pandas as pd
#Import json to manipulate api data
import json
#Import math
import math
class StockApi():
def __init__(self):
self.panda = pd
def request_data(self, t, p='1d', i="5m"):
#set the stock we would like to search for
stock = yf.Ticker(t)
#Retrieve data and store as Panda Data Frame
self.unclean_data = stock.history(period=p,interval=i)
#unclean_data selectors stored in an array
self.data_selectors = list(self.unclean_data.columns)
#create list of the index values which the values are equal to the time stamps of our data
self.time_stamps = list(self.unclean_data.index)
#get the length
self.time_stamp_total_length = len(self.time_stamps)
#now let us clean the data
self.clean_data()
#lets convert the data and return it back to what ever called us
return self.convert_data()
#END
#function to organize 'clean' the stock data
def clean_data(self):
#function to clean panda data returned by Api
#
self.new_data = {
}
for count in range(self.time_stamp_total_length):
#get the next timestamp and store it as a string
self.new_time_stamp = str(self.time_stamps[count])
#insert new data here
if(not math.isnan((self.unclean_data.iloc[count].to_list())[0])):
self.new_data.update({self.new_time_stamp:self.unclean_data.iloc[count].to_list()})
for i in range(4):
self.new_data[self.new_time_stamp][i] = (round(self.new_data[self.new_time_stamp][i], 2))
#return the new data
return self.new_data
#END
#function to convert the data so the front end can read it
def convert_data(self):
self.new_data = json.dumps(self.new_data, indent=2)
return self.new_data
#END
| 35.440678 | 109 | 0.648015 | 1,828 | 0.873805 | 0 | 0 | 0 | 0 | 0 | 0 | 770 | 0.368069 |
d2c9cfe9e4e2384aabafbe6f290a4052329e6bc7 | 1,493 | py | Python | hth/shows/tests/factories.py | roperi/myband | ec1955626fe6997484fd92ed02127b6899cd7062 | [
"MIT"
]
| 1 | 2016-04-12T17:38:26.000Z | 2016-04-12T17:38:26.000Z | hth/shows/tests/factories.py | bhrutledge/jahhills.com | 74fe94a214f1ed5681bd45159315f0b68daf5a33 | [
"MIT"
]
| 92 | 2015-04-03T10:04:55.000Z | 2021-07-17T11:13:52.000Z | hth/shows/tests/factories.py | roperi/myband | ec1955626fe6997484fd92ed02127b6899cd7062 | [
"MIT"
]
| 1 | 2021-01-26T18:02:49.000Z | 2021-01-26T18:02:49.000Z | from datetime import date
from random import randrange
import factory
import factory.fuzzy
from hth.core.tests.utils import from_today
class VenueFactory(factory.django.DjangoModelFactory):
class Meta:
model = 'shows.Venue'
name = factory.Sequence(lambda n: 'Venue %d' % n)
city = factory.Sequence(lambda n: 'City %d' % n)
website = factory.Sequence(lambda n: 'http://venue-%d.dev' % n)
class GigFactory(factory.django.DjangoModelFactory):
class Meta:
model = 'shows.Gig'
date = factory.fuzzy.FuzzyDate(date(2000, 1, 1))
venue = factory.SubFactory(VenueFactory)
description = factory.fuzzy.FuzzyText(length=100)
details = factory.fuzzy.FuzzyText(length=100)
class PublishedGigFactory(GigFactory):
publish = True
class UpcomingGigFactory(PublishedGigFactory):
# Pick a random date from today through next year
date = factory.LazyAttribute(lambda obj: from_today(days=randrange(365)))
@classmethod
def create_batch(cls, size, **kwargs):
batch = super().create_batch(size, **kwargs)
return sorted(batch, key=lambda x: x.date)
class PastGigFactory(PublishedGigFactory):
# Pick a random date from 10 years ago through yesterday
date = factory.LazyAttribute(lambda obj: from_today(randrange(-3650, 0)))
@classmethod
def create_batch(cls, size, **kwargs):
batch = super().create_batch(size, **kwargs)
return sorted(batch, key=lambda x: x.date, reverse=True)
| 26.192982 | 77 | 0.704622 | 1,340 | 0.897522 | 0 | 0 | 332 | 0.222371 | 0 | 0 | 169 | 0.113195 |
d2ca30ab580a71ee2a0484e370c2d881b8376a24 | 2,143 | py | Python | homeassistant/components/eight_sleep/binary_sensor.py | andersop91/core | 0e0ef0aa17073609eae7c974cf4c73306b7c414b | [
"Apache-2.0"
]
| 22,481 | 2020-03-02T13:09:59.000Z | 2022-03-31T23:34:28.000Z | homeassistant/components/eight_sleep/binary_sensor.py | andersop91/core | 0e0ef0aa17073609eae7c974cf4c73306b7c414b | [
"Apache-2.0"
]
| 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | homeassistant/components/eight_sleep/binary_sensor.py | andersop91/core | 0e0ef0aa17073609eae7c974cf4c73306b7c414b | [
"Apache-2.0"
]
| 11,411 | 2020-03-02T14:19:20.000Z | 2022-03-31T22:46:07.000Z | """Support for Eight Sleep binary sensors."""
from __future__ import annotations
import logging
from pyeight.eight import EightSleep
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from . import (
CONF_BINARY_SENSORS,
DATA_API,
DATA_EIGHT,
DATA_HEAT,
EightSleepBaseEntity,
EightSleepHeatDataCoordinator,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the eight sleep binary sensor."""
if discovery_info is None:
return
name = "Eight"
sensors = discovery_info[CONF_BINARY_SENSORS]
eight: EightSleep = hass.data[DATA_EIGHT][DATA_API]
heat_coordinator: EightSleepHeatDataCoordinator = hass.data[DATA_EIGHT][DATA_HEAT]
all_sensors = [
EightHeatSensor(name, heat_coordinator, eight, side, sensor)
for side, sensor in sensors
]
async_add_entities(all_sensors)
class EightHeatSensor(EightSleepBaseEntity, BinarySensorEntity):
"""Representation of a Eight Sleep heat-based sensor."""
def __init__(
self,
name: str,
coordinator: EightSleepHeatDataCoordinator,
eight: EightSleep,
side: str | None,
sensor: str,
) -> None:
"""Initialize the sensor."""
super().__init__(name, coordinator, eight, side, sensor)
self._attr_device_class = BinarySensorDeviceClass.OCCUPANCY
assert self._usrobj
_LOGGER.debug(
"Presence Sensor: %s, Side: %s, User: %s",
self._sensor,
self._side,
self._usrobj.userid,
)
@property
def is_on(self) -> bool:
"""Return true if the binary sensor is on."""
assert self._usrobj
return bool(self._usrobj.bed_presence)
| 27.474359 | 86 | 0.691087 | 860 | 0.401307 | 0 | 0 | 167 | 0.077928 | 658 | 0.307046 | 265 | 0.123658 |
d2cb2cb149ab4d390a0fe9859ee6b67392f9a4c2 | 3,384 | py | Python | tensorbay/opendataset/FLIC/loader.py | rexzheng324-c/tensorbay-python-sdk | 764c28f34069229daa41474e2f104786dbfa973f | [
"MIT"
]
| null | null | null | tensorbay/opendataset/FLIC/loader.py | rexzheng324-c/tensorbay-python-sdk | 764c28f34069229daa41474e2f104786dbfa973f | [
"MIT"
]
| null | null | null | tensorbay/opendataset/FLIC/loader.py | rexzheng324-c/tensorbay-python-sdk | 764c28f34069229daa41474e2f104786dbfa973f | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
#
# Copyright 2021 Graviti. Licensed under MIT License.
#
# pylint: disable=invalid-name
# pylint: disable=missing-module-docstring
import os
from typing import Any, Dict, Iterator, Tuple
from tensorbay.dataset import Data, Dataset
from tensorbay.exception import ModuleImportError
from tensorbay.label import Classification, LabeledBox2D, LabeledKeypoints2D
DATASET_NAME = "FLIC"
_VALID_KEYPOINT_INDICES = [0, 1, 2, 3, 4, 5, 6, 9, 12, 13, 16]
def FLIC(path: str) -> Dataset:
"""`FLIC <https://bensapp.github.io/flic-dataset.html>`_ dataset.
The folder structure should be like::
<path>
            examples.mat
images/
2-fast-2-furious-00003571.jpg
...
Arguments:
path: The root directory of the dataset.
Raises:
ModuleImportError: When the module "scipy" can not be found.
Returns:
Loaded :class:`~tensorbay.dataset.dataset.Dataset` instance.
"""
try:
from scipy.io import loadmat # pylint: disable=import-outside-toplevel
except ModuleNotFoundError as error:
raise ModuleImportError(module_name=error.name) from error
root_path = os.path.abspath(os.path.expanduser(path))
dataset = Dataset(DATASET_NAME)
annotations = loadmat(os.path.join(root_path, "examples.mat"))["examples"][0]
dataset.create_segment("train")
dataset.create_segment("test")
dataset.load_catalog(os.path.join(os.path.dirname(__file__), "catalog.json"))
# try whether the dataset has bad segment
try:
_ = annotations["isbad"]
flag = True
dataset.create_segment("bad")
dataset.catalog.classification.add_attribute(name="isunchecked", type_="boolean")
except ValueError:
flag = False
for data, segment_name in _get_data(root_path, annotations, flag):
dataset[segment_name].append(data)
return dataset
def _get_data(path: str, annotations: Any, flag: bool) -> Iterator[Tuple[Data, str]]:
filepath_to_data: Dict[str, Data] = {}
for annotation in annotations:
filepath = annotation["filepath"][0]
keypoints = LabeledKeypoints2D(
annotation["coords"].T[_VALID_KEYPOINT_INDICES],
attributes={"poselet_hit_idx": annotation["poselet_hit_idx"].T.tolist()},
)
box2d = LabeledBox2D(*annotation["torsobox"][0].tolist())
if filepath not in filepath_to_data:
data = Data(os.path.join(path, "images", filepath))
data.label.keypoints2d = [keypoints]
data.label.box2d = [box2d]
attribute = {"currframe": int(annotation["currframe"][0][0])}
if flag:
attribute["isunchecked"] = bool(annotation["isunchecked"])
data.label.classification = Classification(
category=annotation["moviename"][0], attributes=attribute
)
filepath_to_data[filepath] = data
if annotation["istrain"]:
segment_name = "train"
elif annotation["istest"]:
segment_name = "test"
else:
segment_name = "bad"
yield data, segment_name
else:
image_data = filepath_to_data[filepath]
image_data.label.keypoints2d.append(keypoints)
image_data.label.box2d.append(box2d)
| 31.924528 | 89 | 0.638889 | 0 | 0 | 1,454 | 0.429669 | 0 | 0 | 0 | 0 | 959 | 0.283392 |
d2cb4dbefc7f4606adaa9b77d466de95f1e38071 | 3,925 | py | Python | my_answers/homework/OOP/athlete.py | eyalle/python_course | acc75fd3c81f69f314099051026c81d80d141a84 | [
"MIT"
]
| null | null | null | my_answers/homework/OOP/athlete.py | eyalle/python_course | acc75fd3c81f69f314099051026c81d80d141a84 | [
"MIT"
]
| null | null | null | my_answers/homework/OOP/athlete.py | eyalle/python_course | acc75fd3c81f69f314099051026c81d80d141a84 | [
"MIT"
]
| null | null | null |
def get_time(time_in_seconds):
import datetime
    time_str = str(datetime.timedelta(seconds=time_in_seconds))
time_fractions = time_str.split(":")
time_fractions[0] = time_fractions[0].replace(",","")
time_fractions[-1] += 's'
time_fractions[-2] += 'm'
time_fractions[-3] += 'h'
# print(time_fractions)
time_str = ":".join(time_fractions)
# time_str = f'{time_fractions[0]}:{time_fractions[1]}:{time_fractions[2]}s'
return time_str
class Athlete:
def __init__(self, name, weight, power, speed, endurance):
self.name = name
self.power = float(power)
self.speed = int(speed)
self.weight = float(weight)
self.endurance = int(endurance)
if (self.endurance < self.speed):
self.endurance += 3
class Runner(Athlete):
def __init__(self, name, weight=60.0, power=0, speed=0, endurance=0):
Athlete.__init__(self, name, weight, float(power), int(speed), int(endurance))
self.power += (self.weight * 0.1)
self.speed += 25
self.endurance += 8
def get_duration(self, distance):
acceleration = self.power / self.weight
top_speed = self.speed
time_to_reach_top_speed = top_speed / acceleration
distance_to_top_speed = top_speed * time_to_reach_top_speed / 2
if distance == distance_to_top_speed:
duration = time_to_reach_top_speed
elif distance < distance_to_top_speed:
duration = (2 * distance / acceleration) ** (1 / 2)
else:
deceleration = acceleration
endurance_speed = self.endurance
            time_to_reach_endurance_speed = (top_speed - endurance_speed) / deceleration
distance_to_endurance_speed = top_speed * time_to_reach_endurance_speed / 2
if distance == distance_to_top_speed + distance_to_endurance_speed:
duration = time_to_reach_endurance_speed
elif distance < distance_to_top_speed + distance_to_endurance_speed:
duration = time_to_reach_top_speed + (2 * (distance - distance_to_top_speed) / deceleration) ** (1 / 2)
else:
time_to_reach_distance = (distance - (distance_to_top_speed + distance_to_endurance_speed)) / endurance_speed
duration = time_to_reach_top_speed + time_to_reach_endurance_speed + time_to_reach_distance
return duration
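    # Added commentary on the kinematic model used in get_duration() above:
    #   acceleration a = power / weight      (power is treated as a force-like quantity)
    #   time to top speed     t1 = v_top / a
    #   distance covered      d1 = v_top * t1 / 2   (average speed v_top / 2)
    #   short runs (d <= d1)  t  = sqrt(2 * d / a)  (from d = a * t**2 / 2)
    # Beyond d1 the runner decelerates to the endurance speed and then holds it
    # for the remaining distance.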
def run(self, distance):
import time
t = self.get_duration(distance)
time.sleep(t/2)
return self.name
class Sprinter(Runner):
def __init__(self, name, weight=70.0, power=0, speed=0, endurance=0):
Runner.__init__(self, name, float(weight), int(power), int(speed), int(endurance))
self.power += (0.75 * self.weight)
self.speed += 15
self.endurance += 1
class MarathonRunner(Runner):
def __init__(self, name, weight=55.0, power=0, speed=0, endurance=0):
Runner.__init__(self, name, float(weight), int(power), int(speed), int(endurance))
self.power /= 1.1
self.speed -= 3
self.endurance += 7
self.speed = 8 if (self.speed < 8) else self.speed
        self.speed = self.endurance + 1 if (self.speed < (self.endurance + 1)) else self.speed
def get_durations(distances, athletes):
for distance in distances:
for athlete in athletes:
print(f'{athlete.run(distance)} ran {distance} meters in {get_time(athlete.get_duration(distance))}')
if __name__ == "__main__":
runr = Runner("run", 90, 15, 30)
sprt1 = Sprinter("sprnt1", 90, 15, 30)
sprt2 = Sprinter("sprnt2", 80, 10, 25)
mrtn = MarathonRunner("mrtn", 50, 6, 7)
# print('getting running time..')
# print(f'{runr.run(100)} ran for {runr.get_duration(100)}')
distances = (100, 200, 800, 1600, 5000, 20000)
athletes = (runr, sprt1, sprt2, mrtn)
get_durations(distances, athletes)
| 39.25 | 125 | 0.642803 | 2,799 | 0.713121 | 0 | 0 | 0 | 0 | 0 | 0 | 343 | 0.087389 |
d2cbe0ce287e68ba03cda24086915b54c95f413e | 3,391 | py | Python | osisoft/pidevclub/piwebapi/models/pi_data_server_license.py | jugillar/PI-Web-API-Client-Python | 9652e18384d8c66194c6d561d5ef01f60d820253 | [
"Apache-2.0"
]
| 30 | 2019-01-03T03:09:25.000Z | 2022-03-30T17:42:54.000Z | osisoft/pidevclub/piwebapi/models/pi_data_server_license.py | jugillar/PI-Web-API-Client-Python | 9652e18384d8c66194c6d561d5ef01f60d820253 | [
"Apache-2.0"
]
| null | null | null | osisoft/pidevclub/piwebapi/models/pi_data_server_license.py | jugillar/PI-Web-API-Client-Python | 9652e18384d8c66194c6d561d5ef01f60d820253 | [
"Apache-2.0"
]
| 46 | 2018-11-07T14:46:35.000Z | 2022-03-31T12:23:39.000Z | # coding: utf-8
"""
Copyright 2018 OSIsoft, LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
<http://www.apache.org/licenses/LICENSE-2.0>
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
class PIDataServerLicense(object):
swagger_types = {
'amount_left': 'str',
'amount_used': 'str',
'name': 'str',
'total_amount': 'str',
'links': 'PIDataServerLicenseLinks',
'web_exception': 'PIWebException',
}
attribute_map = {
'amount_left': 'AmountLeft',
'amount_used': 'AmountUsed',
'name': 'Name',
'total_amount': 'TotalAmount',
'links': 'Links',
'web_exception': 'WebException',
}
def __init__(self, amount_left=None, amount_used=None, name=None, total_amount=None, links=None, web_exception=None):
self._amount_left = None
self._amount_used = None
self._name = None
self._total_amount = None
self._links = None
self._web_exception = None
if amount_left is not None:
self.amount_left = amount_left
if amount_used is not None:
self.amount_used = amount_used
if name is not None:
self.name = name
if total_amount is not None:
self.total_amount = total_amount
if links is not None:
self.links = links
if web_exception is not None:
self.web_exception = web_exception
@property
def amount_left(self):
return self._amount_left
@amount_left.setter
def amount_left(self, amount_left):
self._amount_left = amount_left
@property
def amount_used(self):
return self._amount_used
@amount_used.setter
def amount_used(self, amount_used):
self._amount_used = amount_used
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def total_amount(self):
return self._total_amount
@total_amount.setter
def total_amount(self, total_amount):
self._total_amount = total_amount
@property
def links(self):
return self._links
@links.setter
def links(self, links):
self._links = links
@property
def web_exception(self):
return self._web_exception
@web_exception.setter
def web_exception(self, web_exception):
self._web_exception = web_exception
def to_dict(self):
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __ne__(self, other):
return not self == other
def __eq__(self, other):
if not isinstance(other, PIDataServerLicense):
return False
return self.__dict__ == other.__dict__
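# Minimal usage sketch (illustrative; the attribute values below are made up and
# not part of the generated client):
if __name__ == "__main__":
    lic = PIDataServerLicense(name="ExampleLicense", amount_used="10", total_amount="50")
    print(lic.to_dict())  # dict keyed by the python attribute names defined above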
| 23.548611 | 118 | 0.714833 | 2,745 | 0.809496 | 0 | 0 | 830 | 0.244766 | 0 | 0 | 875 | 0.258036 |
d2ccb686d34873a1a30c9b50f3a2bad12ac217e0 | 4,054 | py | Python | bot.py | JavierOramas/scholar_standing_bot | 9afde1fc0d56a3c57cf281092ff5c3d123ddac2f | [
"MIT"
]
| null | null | null | bot.py | JavierOramas/scholar_standing_bot | 9afde1fc0d56a3c57cf281092ff5c3d123ddac2f | [
"MIT"
]
| null | null | null | bot.py | JavierOramas/scholar_standing_bot | 9afde1fc0d56a3c57cf281092ff5c3d123ddac2f | [
"MIT"
]
| 2 | 2021-09-19T21:08:55.000Z | 2021-09-19T21:09:39.000Z | #! /root/anaconda3/bin/python
import os
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from pyrogram import Client, filters
from read_config import read_config
import json
import requests
import schedule
import time
def get_value_usd(amount):
    # current SLP/USD price taken from CoinGecko's token-price endpoint
    price = requests.get('https://api.coingecko.com/api/v3/simple/token_price/ethereum?contract_addresses=0xcc8fa225d80b9c7d42f96e9570156c65d6caaa25&vs_currencies=usd').json()['0xcc8fa225d80b9c7d42f96e9570156c65d6caaa25']['usd']
    return price * amount
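# Example (illustrative): get_value_usd(150) multiplies the live SLP/USD price by
# 150, so the call needs network access and inherits any rate limits or schema
# changes of the CoinGecko endpoint above.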
def read_data(id):
id = str(id)
try:
with open('./db/'+id+'.json', 'r') as f:
return json.loads(f.readline())
except:
return {}
def write_data(id,db):
id = str(id)
with open('./db/'+id+'.json', 'w') as f:
f.write(json.dumps(db))
config_data = read_config('./config/config_bot.json')
app = Client(config_data['bot_user_name'], config_data['api_id'], config_data['api_hash'])
@app.on_message(filters.command('add'))
def add_scholar(client, message):
users = message.text.split()
    if len(users) != 3:
        message.reply_text("Incorrect format, it should look like: \n /add pedro 0x000000000")
        return
    name = str(users[-2])
    wallet = str(users[-1])
    os.makedirs("./db", exist_ok=True)
    db = read_data(message.chat.id)
    # db = read_data('1')
    if name not in db:
        db[name] = {
            "wallet": wallet,
            # keep the SLP history as a real list so /week can sum it
            "slp": [0]
        }
        write_data(message.chat.id, db)
        message.reply_text("Added successfully")
    else:
        message.reply_text("You already have a scholar with that name")
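# Layout of ./db/<chat_id>.json (illustrative):
#   {"pedro": {"wallet": "0x...", "slp": [0]}}
# The /week handler sums the "slp" list; nothing in this file appends to it, so
# presumably a scheduled job elsewhere records the daily SLP totals.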
@app.on_message(filters.command('del'))
def del_scholar(client, message):
users = message.text.split()
    # /del <name> has two tokens, so the scholar name is the last one
    name = str(users[-1])
    os.makedirs("./db", exist_ok=True)
    db = read_data(message.chat.id)
    # db = read_data('1')
    if name in db:
        db.pop(name)
        write_data(message.chat.id, db)
        message.reply_text("Deleted successfully")
    else:
        message.reply_text("You don't have a scholar with that name")
@app.on_message(filters.command('standing'))
def see_fee(client, message):
# owner_id = app.get_users(message.chat.id)
os.makedirs("./db", exist_ok=True)
db = read_data(message.chat.id)
list = []
if len(db.keys()) > 0:
for i in db.keys():
wallet = db[i]['wallet']
slp = requests.get(f'https://game-api.skymavis.com/game-api/clients/{wallet}/items/1').json()['total']
list.append((i,slp))
list.sort(key=lambda x:x[1], reverse=True)
stand = ''
for i in list:
stand += f'{i[0]} : {i[1]} - ${get_value_usd(i[1])}\n'
message.reply_text(stand)
else:
        message.reply_text("you don't have any scholars :(")
pass
@app.on_message(filters.command('week'))
def see_week(client, message):
# owner_id = app.get_users(message.chat.id)
os.makedirs("./db", exist_ok=True)
db = read_data(message.chat.id)
list = []
if len(db.keys()) > 0:
for i in db.keys():
slp = sum(db[i]['slp'])
list.append((i,slp))
list.sort(key=lambda x:x[1], reverse=True)
stand = ''
for i in list:
stand += f'{i[0]} : {i[1]} - ${get_value_usd(i[1])}\n'
message.reply_text(stand)
else:
        message.reply_text("you don't have any scholars :(")
pass
# @app.on_message(filters.command('help'))
@app.on_message(filters.command('help'))
@app.on_message(filters.command('start'))
def help(client, message):
message.reply_text("""
/add name wallet - adds the user to your list of scholars, remember to replace ronin: with 0x\n
/del name - removes the user from your list\n
/standing - shows all the scholars ranked\n
You can contribute to development here: https://github.com/JavierOramas/scholar_standing_bot\nor you can donate to support development: 0x64eF391bb5Feae6023440AD12a9870062dd2B342
""")
pass
app.run() | 30.712121 | 228 | 0.619142 | 0 | 0 | 0 | 0 | 3,050 | 0.751787 | 0 | 0 | 1,387 | 0.341878 |
d2d16238955afe2195185ab27a0954cf27e01b00 | 7,622 | py | Python | skdecide/discrete_optimization/rcpsp_multiskill/parser/rcpsp_multiskill_parser.py | emilienDespres/scikit-decide | 2a3dd2d93e5e6d07984e1bc02b6e969261aeefbc | [
"MIT"
]
| 27 | 2020-11-23T11:45:31.000Z | 2022-03-22T08:08:00.000Z | skdecide/discrete_optimization/rcpsp_multiskill/parser/rcpsp_multiskill_parser.py | emilienDespres/scikit-decide | 2a3dd2d93e5e6d07984e1bc02b6e969261aeefbc | [
"MIT"
]
| 94 | 2021-02-24T09:50:23.000Z | 2022-02-27T10:07:15.000Z | skdecide/discrete_optimization/rcpsp_multiskill/parser/rcpsp_multiskill_parser.py | emilienDespres/scikit-decide | 2a3dd2d93e5e6d07984e1bc02b6e969261aeefbc | [
"MIT"
]
| 12 | 2020-12-08T10:38:26.000Z | 2021-10-01T09:17:04.000Z | # Copyright (c) AIRBUS and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Dict, Tuple
from skdecide.discrete_optimization.rcpsp_multiskill.rcpsp_multiskill import (
Employee,
MS_RCPSPModel,
SkillDetail,
)
def parse_imopse(input_data, max_horizon=None):
# parse the input
# print('input_data\n',input_data)
lines = input_data.split("\n")
# "General characteristics:
# Tasks: 161
# Resources: 10
# Precedence relations: 321
# Number of skill types: 9
# ====================================================================================================================
# ResourceID Salary Skills
# 1 14.2 Q2: 0 Q3: 2 Q1: 0 Q4: 2 Q7: 1 Q8: 2
# 2 31.2 Q0: 0 Q4: 2 Q7: 1 Q3: 1 Q8: 2 Q2: 0
# 3 34.4 Q4: 0 Q2: 1 Q6: 2 Q3: 1 Q0: 1 Q5: 0
# 4 26.0 Q5: 2 Q1: 1 Q4: 1 Q8: 2 Q0: 2 Q2: 2
# 5 30.8 Q8: 0 Q7: 1 Q3: 1 Q1: 2 Q4: 1 Q5: 1
# 6 17.3 Q6: 1 Q3: 2 Q4: 2 Q2: 0 Q7: 2 Q1: 0
# 7 19.8 Q1: 2 Q4: 2 Q5: 0 Q7: 1 Q3: 1 Q6: 2
# 8 35.8 Q2: 1 Q0: 1 Q3: 2 Q6: 0 Q7: 0 Q8: 1
# 9 37.6 Q7: 0 Q5: 2 Q2: 0 Q1: 0 Q0: 1 Q3: 1
# 10 23.5 Q8: 1 Q5: 1 Q1: 2 Q6: 0 Q4: 0 Q3: 2 "
nb_task = None
nb_worker = None
nb_precedence_relation = None
nb_skills = None
resource_zone = False
task_zone = False
resource_dict = {}
task_dict = {}
real_skills_found = set()
for line in lines:
words = line.split()
if len(words) == 2 and words[0] == "Tasks:":
nb_task = int(words[1])
continue
if len(words) == 2 and words[0] == "Resources:":
nb_worker = int(words[1])
continue
if len(words) == 3 and words[0] == "Precedence" and words[1] == "relations:":
nb_precedence_relation = int(words[2])
continue
if len(words) == 5 and words[0] == "Number" and words[1] == "of":
nb_skills = int(words[4])
continue
if len(words) == 0:
continue
if words[0] == "ResourceID":
resource_zone = True
continue
if words[0] == "TaskID":
task_zone = True
continue
if resource_zone:
if words[0][0] == "=":
resource_zone = False
continue
else:
id_worker = words[0]
resource_dict[id_worker] = {"salary": float(words[1])}
for word in words[2:]:
if word[0] == "Q":
current_skill = word[:-1]
continue
resource_dict[id_worker][current_skill] = int(word) + 1
real_skills_found.add(current_skill)
if task_zone:
if words[0][0] == "=":
task_zone = False
continue
else:
task_id = int(words[0])
if task_id not in task_dict:
task_dict[task_id] = {"id": task_id, "successors": [], "skills": {}}
task_dict[task_id]["duration"] = int(words[1])
i = 2
while i < len(words):
if words[i][0] == "Q":
current_skill = words[i][:-1]
task_dict[task_id]["skills"][current_skill] = int(words[i + 1]) + 1
real_skills_found.add(current_skill)
i = i + 2
continue
else:
if "precedence" not in task_dict[task_id]:
task_dict[task_id]["precedence"] = []
task_dict[task_id]["precedence"] += [int(words[i])]
if int(words[i]) not in task_dict:
task_dict[int(words[i])] = {
"id": int(words[i]),
"successors": [],
"skills": {},
}
if "successors" not in task_dict[int(words[i])]:
task_dict[int(words[i])]["successors"] = []
task_dict[int(words[i])]["successors"] += [task_id]
i += 1
# print(resource_dict)
# print(task_dict)
sorted_task_names = sorted(task_dict.keys())
task_id_to_new_name = {
sorted_task_names[i]: i + 2 for i in range(len(sorted_task_names))
}
    new_name_to_original_task_id = {
task_id_to_new_name[ind]: ind for ind in task_id_to_new_name
}
mode_details = {
task_id_to_new_name[task_id]: {1: {"duration": task_dict[task_id]["duration"]}}
for task_id in task_dict
}
resource_dict = {int(i): resource_dict[i] for i in resource_dict}
# skills = set(["Q"+str(i) for i in range(nb_skills)])
skills = real_skills_found
for task_id in task_dict:
for skill in skills:
            req_skill = task_dict[task_id]["skills"].get(skill, 0.0)
            mode_details[task_id_to_new_name[task_id]][1][skill] = req_skill
mode_details[1] = {1: {"duration": 0}}
for skill in skills:
mode_details[1][1][skill] = int(0)
max_t = max(mode_details)
mode_details[max_t + 1] = {1: {"duration": 0}}
for skill in skills:
mode_details[max_t + 1][1][skill] = int(0)
successors = {
task_id_to_new_name[task_id]: [
task_id_to_new_name[t] for t in task_dict[task_id]["successors"]
]
+ [max_t + 1]
for task_id in task_dict
}
successors[max_t + 1] = []
successors[1] = [k for k in successors]
# max_horizon = 2*sum([task_dict[task_id]["duration"] for task_id in task_dict])
max_horizon = 300 if max_horizon is None else max_horizon
return (
MS_RCPSPModel(
skills_set=set(real_skills_found),
resources_set=set(),
non_renewable_resources=set(),
resources_availability={},
employees={
res: Employee(
dict_skill={
skill: SkillDetail(
skill_value=resource_dict[res][skill],
efficiency_ratio=1.0,
experience=1.0,
)
for skill in resource_dict[res]
if skill != "salary"
},
salary=resource_dict[res]["salary"],
calendar_employee=[True] * max_horizon,
)
for res in resource_dict
},
employees_availability=[len(resource_dict)] * max_horizon,
mode_details=mode_details,
successors=successors,
horizon=max_horizon,
source_task=1,
sink_task=max_t + 1,
one_unit_per_task_max=True,
),
        new_name_to_original_task_id,
)
def parse_file(file_path, max_horizon=None) -> Tuple[MS_RCPSPModel, Dict]:
with open(file_path, "r") as input_data_file:
input_data = input_data_file.read()
        rcpsp_model, new_name_to_original_task_id = parse_imopse(
input_data, max_horizon
)
        return rcpsp_model, new_name_to_original_task_id
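# Usage sketch (illustrative; the instance filename is hypothetical and the
# attribute names assume MS_RCPSPModel stores its constructor arguments as-is):
#   model, task_map = parse_file("imopse_instance.def", max_horizon=300)
#   print(model.horizon, len(model.employees))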
| 39.697917 | 126 | 0.494358 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,699 | 0.222907 |
d2d1d69838e8dd6599bd00b4fca0bacfaf367308 | 530 | py | Python | pipe_anchorages/logging_monkeypatch.py | GlobalFishingWatch/anchorages_pipeline | 88764545b693bfb65fc7a7f62a344fb2afbc3d97 | [
"Apache-2.0"
]
| 3 | 2017-12-22T10:19:15.000Z | 2020-04-20T10:28:43.000Z | pipe_tools/beam/logging_monkeypatch.py | GlobalFishingWatch/pipe-tools | 34dff591997bb2c25e018df86d13a9d42972032b | [
"Apache-2.0"
]
| 37 | 2017-10-22T12:00:59.000Z | 2022-02-08T19:17:58.000Z | pipe_tools/beam/logging_monkeypatch.py | GlobalFishingWatch/pipe-tools | 34dff591997bb2c25e018df86d13a9d42972032b | [
"Apache-2.0"
]
| 3 | 2018-01-21T14:07:58.000Z | 2021-07-28T16:02:20.000Z | import logging
# monkey patch to suppress the annoying warning you get when you import apache_beam
#
# No handlers could be found for logger "oauth2client.contrib.multistore_file"
#
# This warning is harmless, but annoying when you are using beam from a command line app
# see: https://issues.apache.org/jira/browse/BEAM-1183
# This just creates a null handler for that logger so there is no output
logger = logging.getLogger('oauth2client.contrib.multistore_file')
handler = logging.NullHandler()
logger.addHandler(handler)
| 33.125 | 89 | 0.792453 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 416 | 0.784906 |
d2d2f4b2d01e6090619cd23b148cfe0e1bc36f87 | 330 | py | Python | core/managers.py | Bilal815/ecommerce_storee | 45e61f1d865a65b4c52d74502b4fcab7ee6c1adf | [
"MIT"
]
| 95 | 2020-04-13T09:02:30.000Z | 2022-03-25T14:11:34.000Z | core/managers.py | Bilal815/ecommerce_api | a3d8ce7a9e1fa2528d240d5ab508afe92607c9f8 | [
"MIT"
]
| 87 | 2020-02-21T17:58:56.000Z | 2022-03-21T21:37:05.000Z | core/managers.py | Bilal815/ecommerce_api | a3d8ce7a9e1fa2528d240d5ab508afe92607c9f8 | [
"MIT"
]
| 33 | 2021-01-18T09:30:29.000Z | 2022-03-30T01:31:57.000Z | from django.db import models
class SoftDeleteManager(models.Manager):
def save_soft_delete(self):
self.is_deleted = True
self.save()
return True
def get_soft_delete(self):
return self.filter(is_deleted=True)
def get_unsoft_delete(self):
return self.filter(is_deleted=False)
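# Usage sketch (illustrative; the model and field names below are assumptions,
# not part of this app). The manager expects an `is_deleted` boolean field, and
# save_soft_delete() reads like a model/mixin method since it sets
# self.is_deleted and calls self.save() on the instance it is bound to.
#
#   class Product(models.Model):
#       is_deleted = models.BooleanField(default=False)
#       objects = SoftDeleteManager()
#
#   Product.objects.get_unsoft_delete()  # rows that are not soft-deleted
#   Product.objects.get_soft_delete()    # rows that were soft-deleted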
| 22 | 44 | 0.681818 | 298 | 0.90303 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d2d2fa8cda2955386068decf56b4b942626e5d83 | 22,286 | py | Python | mizani/breaks.py | stillmatic/mizani | 9a9dcb2b2ae8fca9a1c5b5e475be4d1f801bda1c | [
"BSD-3-Clause"
]
| null | null | null | mizani/breaks.py | stillmatic/mizani | 9a9dcb2b2ae8fca9a1c5b5e475be4d1f801bda1c | [
"BSD-3-Clause"
]
| null | null | null | mizani/breaks.py | stillmatic/mizani | 9a9dcb2b2ae8fca9a1c5b5e475be4d1f801bda1c | [
"BSD-3-Clause"
]
| null | null | null | """
All scales have a means by which the values that are mapped
onto the scale are interpreted. Numeric digital scales put
out numbers for direct interpretation, but most scales
cannot do this. What they offer is named markers/ticks that
aid in assessing the values e.g. the common odometer will
have ticks and values to help gauge the speed of the vehicle.
The named markers are what we call breaks. Properly calculated
breaks make interpretation straightforward. These functions
provide ways to calculate good (hopefully) breaks.
"""
from __future__ import division
import numpy as np
import pandas as pd
from matplotlib.dates import MinuteLocator, HourLocator, DayLocator
from matplotlib.dates import WeekdayLocator, MonthLocator, YearLocator
from matplotlib.dates import AutoDateLocator
from matplotlib.dates import num2date, YEARLY
from matplotlib.ticker import MaxNLocator
from .utils import min_max, SECONDS, NANOSECONDS
from .utils import same_log10_order_of_magnitude
__all__ = ['mpl_breaks', 'log_breaks', 'minor_breaks',
'trans_minor_breaks', 'date_breaks',
'timedelta_breaks', 'extended_breaks']
# The break calculations rely on MPL locators to do
# the heavy lifting. It may be more convenient to lift
# the calculations out of MPL.
class DateLocator(AutoDateLocator):
def __init__(self):
AutoDateLocator.__init__(self, minticks=5,
interval_multiples=True)
# Remove 4 and 400
self.intervald[YEARLY] = [
1, 2, 5, 10, 20, 50, 100, 200, 500,
1000, 2000, 5000, 10000]
self.create_dummy_axis()
def tick_values(self, vmin, vmax):
# get locator
# if yearlocator
# change the vmin to turn of decade or half-decade
ticks = AutoDateLocator.tick_values(self, vmin, vmax)
return ticks
class mpl_breaks(object):
"""
Compute breaks using MPL's default locator
See :class:`~matplotlib.ticker.MaxNLocator` for the
parameter descriptions
Examples
--------
>>> x = range(10)
>>> limits = (0, 9)
>>> mpl_breaks()(limits)
array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])
>>> mpl_breaks(nbins=2)(limits)
array([ 0., 5., 10.])
"""
def __init__(self, *args, **kwargs):
self.locator = MaxNLocator(*args, **kwargs)
def __call__(self, limits):
"""
Compute breaks
Parameters
----------
limits : tuple
Minimum and maximum values
Returns
-------
out : array_like
            Sequence of break points
"""
if any(np.isinf(limits)):
return []
if limits[0] == limits[1]:
return np.array([limits[0]])
return self.locator.tick_values(limits[0], limits[1])
class log_breaks(object):
"""
Integer breaks on log transformed scales
Parameters
----------
n : int
Desired number of breaks
base : int
Base of logarithm
Examples
--------
>>> x = np.logspace(3, 7)
>>> limits = min(x), max(x)
>>> log_breaks()(limits)
array([ 100, 10000, 1000000])
>>> log_breaks(2)(limits)
array([ 100, 100000])
"""
def __init__(self, n=5, base=10):
self.n = n
self.base = base
def __call__(self, limits):
"""
Compute breaks
Parameters
----------
limits : tuple
Minimum and maximum values
Returns
-------
out : array_like
            Sequence of break points
"""
n = self.n
base = self.base
if any(np.isinf(limits)):
return []
rng = np.log(limits)/np.log(base)
if base == 10 and same_log10_order_of_magnitude(rng):
return extended_breaks(n=4)(limits)
_min = int(np.floor(rng[0]))
_max = int(np.ceil(rng[1]))
if _max == _min:
return base ** _min
step = (_max-_min)//n + 1
dtype = float if (_min < 0) else int
return base ** np.arange(_min, _max+1, step, dtype=dtype)
class minor_breaks(object):
"""
Compute minor breaks
Parameters
----------
n : int
Number of minor breaks between the major
breaks.
Examples
--------
>>> major = [1, 2, 3, 4]
>>> limits = [0, 5]
>>> minor_breaks()(major, limits)
array([0.5, 1.5, 2.5, 3.5, 4.5])
"""
def __init__(self, n=1):
self.n = n
def __call__(self, major, limits=None):
"""
Minor breaks
Parameters
----------
major : array_like
Major breaks
limits : array_like | None
Limits of the scale. If *array_like*, must be
of size 2. If **None**, then the minimum and
maximum of the major breaks are used.
Returns
-------
out : array_like
            Minor breaks
"""
n = self.n
if len(major) < 2:
return np.array([])
if limits is None:
limits = min_max(major)
# Try to infer additional major breaks so that
# minor breaks can be generated beyond the first
# and last major breaks
diff = np.diff(major)
step = diff[0]
if len(diff) > 1 and all(diff == step):
major = np.hstack([major[0]-step,
major,
major[-1]+step])
mbreaks = []
factors = np.arange(1, n+1)
for lhs, rhs in zip(major[:-1], major[1:]):
sep = (rhs - lhs)/(n+1)
mbreaks.append(lhs + factors * sep)
minor = np.hstack(mbreaks)
minor = minor.compress((limits[0] <= minor) &
(minor <= limits[1]))
return minor
class trans_minor_breaks(object):
"""
Compute minor breaks for transformed scales
The minor breaks are computed in data space.
This together with major breaks computed in
transform space reveals the non linearity of
of a scale. See the log transforms created
with :func:`log_trans` like :class:`log10_trans`.
Parameters
----------
trans : trans or type
Trans object or trans class.
n : int
Number of minor breaks between the major
breaks.
Examples
--------
>>> from mizani.transforms import sqrt_trans
>>> major = [1, 2, 3, 4]
>>> limits = [0, 5]
>>> sqrt_trans().minor_breaks(major, limits)
array([0.5, 1.5, 2.5, 3.5, 4.5])
>>> class sqrt_trans2(sqrt_trans):
... def __init__(self):
... self.minor_breaks = trans_minor_breaks(sqrt_trans2)
>>> sqrt_trans2().minor_breaks(major, limits)
array([1.58113883, 2.54950976, 3.53553391])
"""
def __init__(self, trans, n=1):
self.trans = trans
self.n = n
def __call__(self, major, limits=None):
"""
Minor breaks for transformed scales
Parameters
----------
major : array_like
Major breaks
limits : array_like | None
Limits of the scale. If *array_like*, must be
of size 2. If **None**, then the minimum and
maximum of the major breaks are used.
Returns
-------
out : array_like
Minor breaks
"""
if not self.trans.dataspace_is_numerical:
raise TypeError(
"trans_minor_breaks can only be used for data "
"whose format is numerical.")
if limits is None:
limits = min_max(major)
major = self._extend_breaks(major)
major = self.trans.inverse(major)
limits = self.trans.inverse(limits)
minor = minor_breaks(self.n)(major, limits)
return self.trans.transform(minor)
def _extend_breaks(self, major):
"""
Append 2 extra breaks at either end of major
        If the major breaks are non-equidistant (as they are in data
        space for a non-linear transform), :func:`minor_breaks` will not
        generate minor breaks beyond the first and last major breaks.
        The solution is to extend those breaks (in transformed space)
        before the minor break call is made. How the breaks are extended
        depends on the type of transform.
"""
trans = self.trans
trans = trans if isinstance(trans, type) else trans.__class__
# so far we are only certain about this extending stuff
# making sense for log transform
is_log = trans.__name__.startswith('log')
diff = np.diff(major)
step = diff[0]
if is_log and all(diff == step):
major = np.hstack([major[0]-step, major, major[-1]+step])
return major
# Matplotlib's YearLocator uses different named
# arguments than the others
LOCATORS = {
'minute': MinuteLocator,
'hour': HourLocator,
'day': DayLocator,
'week': WeekdayLocator,
'month': MonthLocator,
'year': lambda interval: YearLocator(base=interval)
}
class date_breaks(object):
"""
Regularly spaced dates
Parameters
----------
width : str | None
An interval specification. Must be one of
[minute, hour, day, week, month, year]
If ``None``, the interval automatic.
Examples
--------
>>> from datetime import datetime
>>> x = [datetime(year, 1, 1) for year in [2010, 2026, 2015]]
Default breaks will be regularly spaced but the spacing
is automatically determined
>>> limits = min(x), max(x)
>>> breaks = date_breaks()
>>> [d.year for d in breaks(limits)]
[2010, 2012, 2014, 2016, 2018, 2020, 2022, 2024, 2026]
Breaks at 4 year intervals
>>> breaks = date_breaks('4 year')
>>> [d.year for d in breaks(limits)]
[2008, 2012, 2016, 2020, 2024, 2028]
"""
def __init__(self, width=None):
if not width:
locator = DateLocator()
else:
# Parse the width specification
# e.g. '10 weeks' => (10, week)
_n, units = width.strip().lower().split()
interval, units = int(_n), units.rstrip('s')
locator = LOCATORS[units](interval=interval)
self.locator = locator
def __call__(self, limits):
"""
Compute breaks
Parameters
----------
limits : tuple
Minimum and maximum :class:`datetime.datetime` values.
Returns
-------
out : array_like
Sequence of break points.
"""
if any(pd.isnull(x) for x in limits):
return []
ret = self.locator.tick_values(*limits)
# MPL returns the tick_values in ordinal format,
# but we return them in the same space as the
# inputs.
return [num2date(val) for val in ret]
class timedelta_breaks(object):
"""
Timedelta breaks
Returns
-------
out : callable ``f(limits)``
A function that takes a sequence of two
:class:`datetime.timedelta` values and returns
a sequence of break points.
Examples
--------
>>> from datetime import timedelta
>>> breaks = timedelta_breaks()
>>> x = [timedelta(days=i*365) for i in range(25)]
>>> limits = min(x), max(x)
>>> major = breaks(limits)
>>> [val.total_seconds()/(365*24*60*60)for val in major]
[0.0, 5.0, 10.0, 15.0, 20.0, 25.0]
"""
def __init__(self, n=5, Q=(1, 2, 5, 10)):
self._breaks_func = extended_breaks(n=n, Q=Q)
def __call__(self, limits):
"""
Compute breaks
Parameters
----------
limits : tuple
Minimum and maximum :class:`datetime.timedelta` values.
Returns
-------
out : array_like
Sequence of break points.
"""
if any(pd.isnull(x) for x in limits):
return []
helper = timedelta_helper(limits)
scaled_limits = helper.scaled_limits()
scaled_breaks = self._breaks_func(scaled_limits)
breaks = helper.numeric_to_timedelta(scaled_breaks)
return breaks
# This could be cleaned up, state overload?
class timedelta_helper(object):
"""
Helper for computing timedelta breaks
and labels.
How to use - breaks?
1. Initialise with a timedelta sequence/limits.
2. Get the scaled limits and use those to calculate
breaks using a general purpose breaks calculating
routine. The scaled limits are in numerical format.
3. Convert the computed breaks from numeric into timedelta.
See, :func:`timedelta_breaks`
    How to use - formatting?
1. Call :meth:`format_info` with the timedelta values to be
formatted and get back a tuple of numeric values and
the units for those values.
2. Format the values with a general purpose formatting
       routine.
See, :func:`timedelta_format`
"""
def __init__(self, x, units=None):
self.x = x
self.type = type(x[0])
self.package = self.determine_package(x[0])
_limits = min(x), max(x)
self.limits = self.value(_limits[0]), self.value(_limits[1])
self.units = units or self.best_units(_limits)
self.factor = self.get_scaling_factor(self.units)
@classmethod
def determine_package(cls, td):
if hasattr(td, 'components'):
package = 'pandas'
elif hasattr(td, 'total_seconds'):
package = 'cpython'
else:
msg = '{} format not yet supported.'
raise ValueError(msg.format(td.__class__))
return package
@classmethod
def format_info(cls, x, units=None):
helper = cls(x, units)
return helper.timedelta_to_numeric(x), helper.units
def best_units(self, sequence):
"""
Determine good units for representing a sequence of timedeltas
"""
# Read
# [(0.9, 's'),
# (9, 'm)]
# as, break ranges between 0.9 seconds (inclusive)
# and 9 minutes are represented in seconds. And so on.
ts_range = self.value(max(sequence)) - self.value(min(sequence))
package = self.determine_package(sequence[0])
if package == 'pandas':
cuts = [
(0.9, 'us'),
(0.9, 'ms'),
(0.9, 's'),
(9, 'm'),
(6, 'h'),
(4, 'd'),
(4, 'w'),
(4, 'M'),
(3, 'y')]
denomination = NANOSECONDS
base_units = 'ns'
else:
cuts = [
(0.9, 's'),
(9, 'm'),
(6, 'h'),
(4, 'd'),
(4, 'w'),
(4, 'M'),
(3, 'y')]
denomination = SECONDS
base_units = 'ms'
for size, units in reversed(cuts):
if ts_range >= size*denomination[units]:
return units
return base_units
def value(self, td):
"""
Return the numeric value representation on a timedelta
"""
if self.package == 'pandas':
return td.value
else:
return td.total_seconds()
def scaled_limits(self):
"""
Minimum and Maximum to use for computing breaks
"""
_min = self.limits[0]/self.factor
_max = self.limits[1]/self.factor
return _min, _max
def timedelta_to_numeric(self, timedeltas):
"""
Convert sequence of timedelta to numerics
"""
return [self.to_numeric(td) for td in timedeltas]
def numeric_to_timedelta(self, numerics):
"""
Convert sequence of numerics to timedelta
"""
if self.package == 'pandas':
return [self.type(int(x*self.factor), units='ns')
for x in numerics]
else:
return [self.type(seconds=x*self.factor)
for x in numerics]
def get_scaling_factor(self, units):
if self.package == 'pandas':
return NANOSECONDS[units]
else:
return SECONDS[units]
def to_numeric(self, td):
"""
Convert timedelta to a number corresponding to the
appropriate units. The appropriate units are those
determined with the object is initialised.
"""
if self.package == 'pandas':
return td.value/NANOSECONDS[self.units]
else:
return td.total_seconds()/SECONDS[self.units]
class extended_breaks(object):
"""
An extension of Wilkinson's tick position algorithm
Parameters
----------
n : int
Desired number of ticks
Q : list
List of nice numbers
only_inside : bool
If ``True``, then all the ticks will be within the given
range.
w : list
Weights applied to the four optimization components
(simplicity, coverage, density, and legibility). They
should add up to 1.
Examples
--------
>>> limits = (0, 9)
>>> extended_breaks()(limits)
array([ 0. , 2.5, 5. , 7.5, 10. ])
>>> extended_breaks(n=6)(limits)
array([ 0., 2., 4., 6., 8., 10.])
References
----------
- Talbot, J., Lin, S., Hanrahan, P. (2010) An Extension of
Wilkinson's Algorithm for Positioning Tick Labels on Axes,
InfoVis 2010.
Additional Credit to Justin Talbot on whose code this
implementation is almost entirely based.
"""
def __init__(self, n=5, Q=[1, 5, 2, 2.5, 4, 3],
only_inside=False, w=[0.25, 0.2, 0.5, 0.05]):
self.Q = Q
self.only_inside = only_inside
self.w = w
self.n = n
# Used for lookups during the computations
self.Q_index = {q: i for i, q in enumerate(Q)}
def coverage(self, dmin, dmax, lmin, lmax):
p1 = (dmax-lmax)**2
p2 = (dmin-lmin)**2
p3 = (0.1*(dmax-dmin))**2
return 1 - 0.5*(p1+p2)/p3
def coverage_max(self, dmin, dmax, span):
range = dmax-dmin
if span > range:
half = (span-range)/2.0
return 1 - (half**2) / (0.1*range)**2
else:
return 1
def density(self, k, dmin, dmax, lmin, lmax):
r = (k-1.0) / (lmax-lmin)
rt = (self.n-1) / (max(lmax, dmax) - min(lmin, dmin))
return 2 - max(r/rt, rt/r)
def density_max(self, k):
if k >= self.n:
return 2 - (k-1.0)/(self.n-1.0)
else:
return 1
def simplicity(self, q, j, lmin, lmax, lstep):
eps = 1e-10
n = len(self.Q)
i = self.Q_index[q]+1
if ((lmin % lstep < eps or (lstep - lmin % lstep) < eps) and
lmin <= 0 and lmax >= 0):
v = 1
else:
v = 0
return (n-i)/(n-1.0) + v - j
def simplicity_max(self, q, j):
n = len(self.Q)
i = self.Q_index[q]+1
v = 1
return (n-i)/(n-1.0) + v - j
def legibility(self, lmin, lmax, lstep):
# Legibility depends on fontsize, rotation, overlap ... i.e.
# it requires drawing or simulating drawn breaks then calculating
# a score. Return 1 ignores all that.
return 1
def __call__(self, limits):
"""
Calculate the breaks
Parameters
----------
limits : array
Minimum and maximum values.
Returns
-------
out : array_like
Sequence of break points.
"""
Q = self.Q
w = self.w
only_inside = self.only_inside
simplicity_max = self.simplicity_max
density_max = self.density_max
coverage_max = self.coverage_max
simplicity = self.simplicity
coverage = self.coverage
density = self.density
legibility = self.legibility
log10 = np.log10
ceil = np.ceil
floor = np.floor
dmin, dmax = limits
if dmin > dmax:
dmin, dmax = dmax, dmin
elif dmin == dmax:
return np.array([dmin])
best_score = -2
j = 1
while j < float('inf'):
for q in Q:
sm = simplicity_max(q, j)
if w[0]*sm + w[1] + w[2] + w[3] < best_score:
j = float('inf')
break
k = 2
while k < float('inf'):
dm = density_max(k)
if w[0]*sm + w[1] + w[2]*dm + w[3] < best_score:
break
delta = (dmax-dmin)/(k+1)/j/q
z = ceil(log10(delta))
while z < float('inf'):
step = j*q*(10**z)
cm = coverage_max(dmin, dmax, step*(k-1))
if w[0]*sm + w[1]*cm + w[2]*dm + w[3] < best_score:
break
min_start = int(floor(dmax/step)*j - (k-1)*j)
max_start = int(ceil(dmin/step)*j)
if min_start > max_start:
z = z+1
break
for start in range(min_start, max_start+1):
lmin = start * (step/j)
lmax = lmin + step*(k-1)
lstep = step
s = simplicity(q, j, lmin, lmax, lstep)
c = coverage(dmin, dmax, lmin, lmax)
d = density(k, dmin, dmax, lmin, lmax)
l = legibility(lmin, lmax, lstep)
score = w[0]*s + w[1]*c + w[2]*d + w[3]*l
if (score > best_score and
(not only_inside or
(lmin >= dmin and lmax <= dmax))):
best_score = score
best = (lmin, lmax, lstep, q, k)
z = z+1
k = k+1
j = j+1
try:
locs = best[0] + np.arange(best[4])*best[2]
except UnboundLocalError:
locs = []
return locs
| 28.793282 | 75 | 0.531634 | 20,661 | 0.927084 | 0 | 0 | 477 | 0.021404 | 0 | 0 | 10,007 | 0.449026 |
d2d32938d031d59331d2f4a11e7ede6bb4a40fe0 | 2,412 | py | Python | examples/04_sweep_wind_directions.py | ElieKadoche/floris | d18f4d263ecabf502242592f9d60815a07c7b89c | [
"Apache-2.0"
]
| null | null | null | examples/04_sweep_wind_directions.py | ElieKadoche/floris | d18f4d263ecabf502242592f9d60815a07c7b89c | [
"Apache-2.0"
]
| 1 | 2019-03-02T00:29:12.000Z | 2019-03-02T04:59:54.000Z | examples/04_sweep_wind_directions.py | ElieKadoche/floris | d18f4d263ecabf502242592f9d60815a07c7b89c | [
"Apache-2.0"
]
| null | null | null | # Copyright 2022 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# See https://floris.readthedocs.io for documentation
import matplotlib.pyplot as plt
import numpy as np
from floris.tools import FlorisInterface
from floris.tools.visualization import visualize_cut_plane
"""
04_sweep_wind_directions
This example demonstrates vectorization of wind direction.
A vector of wind directions is passed to the reinitialize function
and the powers of the two simulated turbines are computed for all
wind directions in one call.
The power of both turbines for each wind direction is then plotted
"""
# Instantiate FLORIS using either the GCH or CC model
fi = FlorisInterface("inputs/gch.yaml") # GCH model matched to the default "legacy_gauss" of V2
# fi = FlorisInterface("inputs/cc.yaml") # New CumulativeCurl model
# Define a two turbine farm
D = 126.
layout_x = np.array([0, D*6])
layout_y = [0, 0]
fi.reinitialize(layout = [layout_x, layout_y])
# Sweep wind speeds but keep wind direction fixed
wd_array = np.arange(250,291,1.)
fi.reinitialize(wind_directions=wd_array)
# Define a matrix of yaw angles to be all 0
# Note that yaw angles are now specified as a matrix whose dimensions are
# wd/ws/turbine
num_wd = len(wd_array) # Number of wind directions
num_ws = 1 # Number of wind speeds
num_turbine = len(layout_x) # Number of turbines
yaw_angles = np.zeros((num_wd, num_ws, num_turbine))
# Calculate
fi.calculate_wake(yaw_angles=yaw_angles)
# Collect the turbine powers
turbine_powers = fi.get_turbine_powers() / 1E3 # In kW
# Pull out the power values per turbine
pow_t0 = turbine_powers[:,:,0].flatten()
pow_t1 = turbine_powers[:,:,1].flatten()
# Plot
fig, ax = plt.subplots()
ax.plot(wd_array,pow_t0,color='k',label='Upstream Turbine')
ax.plot(wd_array,pow_t1,color='r',label='Downstream Turbine')
ax.grid(True)
ax.legend()
ax.set_xlabel('Wind Direction (deg)')
ax.set_ylabel('Power (kW)')
plt.show()
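# turbine_powers is indexed as (wind direction, wind speed, turbine), so the wind
# direction with the deepest wake loss on the downstream turbine can be pulled out
# directly (illustrative):
print(f"Deepest wake loss at {wd_array[np.argmin(pow_t1)]:.0f} deg")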
| 31.736842 | 95 | 0.76534 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,558 | 0.645937 |
d2d3e9419d90d8f17a71b13f9d3381c03813b4d4 | 623 | py | Python | 1.main.py | learning-nn/nn_from_scratch | 8f8f46efd5814a3cca645b644f70ddc07210256f | [
"MIT"
]
| null | null | null | 1.main.py | learning-nn/nn_from_scratch | 8f8f46efd5814a3cca645b644f70ddc07210256f | [
"MIT"
]
| null | null | null | 1.main.py | learning-nn/nn_from_scratch | 8f8f46efd5814a3cca645b644f70ddc07210256f | [
"MIT"
]
| null | null | null | import numpy
import numpy as np
# converting to a layer with 4 inputs and 3 neurons
inputs = [[1.2, 2.1, 3.4, 1.2],
[1.2, 2.1, 3.4, 1.2],
[1.2, 2.1, 3.4, 1.2]]
print(numpy.shape(inputs))
weights = [[4.1, -4.5, 3.1, 2.3],
[-4.1, 4.5, 2.1, 2.3],
[4.1, 4.5, 3.1, -2.3]]
print(numpy.shape(weights))
biases = [1, 2, 3]
weights2 = [[4.1, -4.5, 3.1],
[-4.1, 4.5, 2.1],
[4.1, 4.5, 3.1]]
biases2 = [1, 2, 3]
layer1_outputs = np.dot(inputs, np.array(weights).T) + biases
layer2_outputs = np.dot(layer1_outputs, np.array(weights2).T) + biases2
print(layer2_outputs)
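# Shape check (illustrative): inputs is (3, 4) and weights.T is (4, 3), so
# layer1_outputs is (3, 3); weights2.T is (3, 3), so layer2_outputs is also
# (3, 3) -- one row of three neuron outputs per input sample.
print(np.shape(layer2_outputs))  # expected: (3, 3)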
| 23.961538 | 71 | 0.536116 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 0.078652 |
d2d3eacc8c8caee95603f50b68c177c406992381 | 83 | py | Python | backend/grant/task/__init__.py | DSBUGAY2/zcash-grant-system | 729b9edda13bd1eeb3f445d889264230c6470d7e | [
"MIT"
]
| 8 | 2019-06-03T16:29:49.000Z | 2021-05-11T20:38:36.000Z | backend/grant/task/__init__.py | DSBUGAY2/zcash-grant-system | 729b9edda13bd1eeb3f445d889264230c6470d7e | [
"MIT"
]
| 342 | 2019-01-15T19:13:58.000Z | 2020-03-24T16:38:13.000Z | backend/grant/task/__init__.py | DSBUGAY2/zcash-grant-system | 729b9edda13bd1eeb3f445d889264230c6470d7e | [
"MIT"
]
| 5 | 2019-02-15T09:06:47.000Z | 2022-01-24T21:38:41.000Z | from . import models
from . import views
from . import commands
from . import jobs | 16.6 | 22 | 0.759036 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |