code (string, 20 to 1.05M chars) | apis (sequence) | extract_api (string, 75 to 5.24M chars)
---|---|---|
from django.contrib import admin
# Local Imports
import transports.models as trans
# Register your models here.
@admin.register(trans.ForwardMessage)
class ForwardMessageAdmin(admin.ModelAdmin):
list_display = ('created','identity','text','fwrd_status','transport','url')
list_filter = ('fwrd_status','transport')
readonly_fields = ('created','modified')
| [
"django.contrib.admin.register"
] | [((115, 151), 'django.contrib.admin.register', 'admin.register', (['trans.ForwardMessage'], {}), '(trans.ForwardMessage)\n', (129, 151), False, 'from django.contrib import admin\n')] |
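Each row in this dump pairs a `code` sample with the fully-qualified `apis` it calls and an `extract_api` cell describing every call site. Judging from the row above, each `extract_api` entry is a Python literal tuple of eight fields; the field names below are assumptions inferred from the data, not a documented schema. A minimal parsing sketch:

```python
import ast

def parse_extract_api(cell: str, code: str) -> list[dict]:
    """Turn one extract_api cell into records; the field layout is inferred, not documented."""
    records = []
    for entry in ast.literal_eval(cell):
        # Assumed 8-field layout, inferred from the rows in this dump:
        # (call span, qualified API name, name as written, (args, kwargs),
        #  argument source text, a second span, a boolean flag, import statement)
        span, api, written_as, (args, kwargs), arg_src, _tail_span, _flag, import_stmt = entry
        records.append({
            "api": api,                      # e.g. 'django.contrib.admin.register'
            "written_as": written_as,        # e.g. 'admin.register'
            "args": args,
            "kwargs": kwargs,
            "import": import_stmt.strip(),
            # The integer pairs look like character offsets into `code`
            # (exact slicing convention not verified here).
            "snippet": code[span[0]:span[1]],
        })
    return records
```

Run on the Django-admin row above, this would yield a single record whose `api` is `django.contrib.admin.register` and whose `import` is `from django.contrib import admin`.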
from tensorflow.keras.models import load_model
import numpy as np
import tensorflow as tf
import cv2
def model_prediction(imgpath):
class_names = ["Normal", "Pneumonia"]
model = load_model("./xray_model_cv2")
img_width = 64
img_height = 64
img = cv2.imread(imgpath)
img = cv2.resize(img, (img_width, img_height), interpolation=cv2.INTER_AREA)
img = np.array(img)
img= np.resize(img,(img_height,img_width,3))
img = img.astype('float32')
img /= 255
probability_model = tf.keras.Sequential([model,
tf.keras.layers.Softmax()])
img = (np.expand_dims(img,0))
prediction = probability_model.predict(img)
return (class_names[np.argmax(prediction[0])], prediction[0][np.argmax(prediction[0])])
| [
"numpy.argmax",
"numpy.array",
"numpy.resize",
"tensorflow.keras.models.load_model",
"numpy.expand_dims",
"tensorflow.keras.layers.Softmax",
"cv2.resize",
"cv2.imread"
] | [((193, 223), 'tensorflow.keras.models.load_model', 'load_model', (['"""./xray_model_cv2"""'], {}), "('./xray_model_cv2')\n", (203, 223), False, 'from tensorflow.keras.models import load_model\n'), ((275, 294), 'cv2.imread', 'cv2.imread', (['imgpath'], {}), '(imgpath)\n', (285, 294), False, 'import cv2\n'), ((305, 375), 'cv2.resize', 'cv2.resize', (['img', '(img_width, img_height)'], {'interpolation': 'cv2.INTER_AREA'}), '(img, (img_width, img_height), interpolation=cv2.INTER_AREA)\n', (315, 375), False, 'import cv2\n'), ((387, 400), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (395, 400), True, 'import numpy as np\n'), ((410, 452), 'numpy.resize', 'np.resize', (['img', '(img_height, img_width, 3)'], {}), '(img, (img_height, img_width, 3))\n', (419, 452), True, 'import numpy as np\n'), ((636, 658), 'numpy.expand_dims', 'np.expand_dims', (['img', '(0)'], {}), '(img, 0)\n', (650, 658), True, 'import numpy as np\n'), ((596, 621), 'tensorflow.keras.layers.Softmax', 'tf.keras.layers.Softmax', ([], {}), '()\n', (619, 621), True, 'import tensorflow as tf\n'), ((732, 756), 'numpy.argmax', 'np.argmax', (['prediction[0]'], {}), '(prediction[0])\n', (741, 756), True, 'import numpy as np\n'), ((773, 797), 'numpy.argmax', 'np.argmax', (['prediction[0]'], {}), '(prediction[0])\n', (782, 797), True, 'import numpy as np\n')] |
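A pattern visible across these rows: the `apis` column appears to be just the deduplicated qualified names occurring in `extract_api`. A quick sketch of that relationship, under that assumption:

```python
import ast

def apis_from_extract(cell: str) -> set[str]:
    # The qualified API name appears to be the second field of each entry.
    return {entry[1] for entry in ast.literal_eval(cell)}

# For the pneumonia-classifier row above, this reproduces its apis list (order aside):
# {'cv2.imread', 'cv2.resize', 'numpy.argmax', 'numpy.array', 'numpy.expand_dims',
#  'numpy.resize', 'tensorflow.keras.layers.Softmax', 'tensorflow.keras.models.load_model'}
```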
import asyncio
import functools
import time
import ray
import ray.util
from ray import serve
import os
from tqdm import tqdm
from transformers import AutoTokenizer, EvalPrediction
from transformers.data.data_collator import (
default_data_collator,
)
from torch.utils.data import DataLoader
from datasets import load_metric, load_dataset
from scipy.special import softmax
from utils_qa import postprocess_qa_predictions
from hfutils.measure import get_energy_by_group
from hfutils.qa import prepare_train_features, prepare_validation_features
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
task_name = "squad_v2"
batch_size = 1
home_dir = "/mnt/raid0nvme1"
tokenizer = AutoTokenizer.from_pretrained(
f"{home_dir}/HuggingFace/bert-base-uncased", use_fast=True,
)
pad_on_right = True
max_seq_length = 384
doc_stride = 128
version_2_with_negative = True
n_best_size = 20
null_score_diff_threshold = 0.0
max_answer_length = 30
output_dir = "."
metric = load_metric("squad_v2")
def compute_metrics(p: EvalPrediction):
return metric.compute(predictions=p.predictions, references=p.label_ids)
val_dataset = load_dataset(task_name)["validation"].select([x for x in range(5000)])
data_collator = default_data_collator
column_names = val_dataset.column_names
eval_dataset = val_dataset.map(
functools.partial(
prepare_validation_features, tokenizer=tokenizer, column_names=column_names
),
batched=True,
remove_columns=column_names,
desc="Running tokenizer on training dataset",
)
m = functools.partial(softmax, axis=1)
eval_dataloader = DataLoader(
eval_dataset.remove_columns(["example_id", "offset_mapping"]),
shuffle=False,
collate_fn=data_collator,
batch_size=batch_size,
drop_last=True
)
inputs_list = []
for step, batch in enumerate(tqdm(eval_dataloader, desc="Prepare")):
input_ids = batch["input_ids"].numpy()
attention_mask = batch["attention_mask"].numpy()
token_type_ids = batch["token_type_ids"].numpy()
inputs_list.append((input_ids, token_type_ids, attention_mask))
ray.init(address="ray://172.16.58.3:10001", namespace="bert")
# asyncio.run(asyncio.gather(*async_requests))
# async def main():
# handle = serve.get_deployment("hybrid-scheduler").get_handle(sync=False)
# async_requests = []
# for step, input in enumerate(tqdm(inputs_list)):
# response = handle.ensemble_inference.remote(input)
# async_requests.append(response)
# for obj in tqdm(async_requests):
# ray.get(await obj)
# # responses = await asyncio.gather(*async_requests)
# # for obj in responses:
# # print(ray.get(obj))
# asyncio.run(main())
handle = serve.get_deployment("hybrid-scheduler").get_handle()
start_time = time.perf_counter()
start_energy = sum(list(get_energy_by_group().values()))
async_requests = []
for step, input in enumerate(tqdm(inputs_list)):
response = handle.ensemble_inference.remote(input)
async_requests.append(response)
async_requests = ray.get(async_requests)
end_energy = sum(list(get_energy_by_group().values()))
end_time = time.perf_counter()
print(end_energy - start_energy)
print(end_time - start_time)
# for idx, async_request in tqdm(enumerate(async_requests), desc=f"bsz{batch_size}-async"):
# response = async_request.get_result()
| [
"datasets.load_metric",
"ray.serve.get_deployment",
"ray.get",
"tqdm.tqdm",
"time.perf_counter",
"functools.partial",
"datasets.load_dataset",
"transformers.AutoTokenizer.from_pretrained",
"hfutils.measure.get_energy_by_group",
"ray.init"
] | [((678, 771), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['f"""{home_dir}/HuggingFace/bert-base-uncased"""'], {'use_fast': '(True)'}), "(f'{home_dir}/HuggingFace/bert-base-uncased',\n use_fast=True)\n", (707, 771), False, 'from transformers import AutoTokenizer, EvalPrediction\n'), ((964, 987), 'datasets.load_metric', 'load_metric', (['"""squad_v2"""'], {}), "('squad_v2')\n", (975, 987), False, 'from datasets import load_metric, load_dataset\n'), ((1527, 1561), 'functools.partial', 'functools.partial', (['softmax'], {'axis': '(1)'}), '(softmax, axis=1)\n', (1544, 1561), False, 'import functools\n'), ((2064, 2125), 'ray.init', 'ray.init', ([], {'address': '"""ray://172.16.58.3:10001"""', 'namespace': '"""bert"""'}), "(address='ray://172.16.58.3:10001', namespace='bert')\n", (2072, 2125), False, 'import ray\n'), ((2750, 2769), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2767, 2769), False, 'import time\n'), ((3005, 3028), 'ray.get', 'ray.get', (['async_requests'], {}), '(async_requests)\n', (3012, 3028), False, 'import ray\n'), ((3095, 3114), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3112, 3114), False, 'import time\n'), ((1309, 1407), 'functools.partial', 'functools.partial', (['prepare_validation_features'], {'tokenizer': 'tokenizer', 'column_names': 'column_names'}), '(prepare_validation_features, tokenizer=tokenizer,\n column_names=column_names)\n', (1326, 1407), False, 'import functools\n'), ((1804, 1841), 'tqdm.tqdm', 'tqdm', (['eval_dataloader'], {'desc': '"""Prepare"""'}), "(eval_dataloader, desc='Prepare')\n", (1808, 1841), False, 'from tqdm import tqdm\n'), ((2876, 2893), 'tqdm.tqdm', 'tqdm', (['inputs_list'], {}), '(inputs_list)\n', (2880, 2893), False, 'from tqdm import tqdm\n'), ((2682, 2722), 'ray.serve.get_deployment', 'serve.get_deployment', (['"""hybrid-scheduler"""'], {}), "('hybrid-scheduler')\n", (2702, 2722), False, 'from ray import serve\n'), ((1123, 1146), 'datasets.load_dataset', 'load_dataset', (['task_name'], {}), '(task_name)\n', (1135, 1146), False, 'from datasets import load_metric, load_dataset\n'), ((2794, 2815), 'hfutils.measure.get_energy_by_group', 'get_energy_by_group', ([], {}), '()\n', (2813, 2815), False, 'from hfutils.measure import get_energy_by_group\n'), ((3051, 3072), 'hfutils.measure.get_energy_by_group', 'get_energy_by_group', ([], {}), '()\n', (3070, 3072), False, 'from hfutils.measure import get_energy_by_group\n')] |
import pandas as pd
import html
from bedrock.doc.relation import Relation
from bedrock.doc.annotation import Annotation
from bedrock.doc.token import Token
from bedrock.doc.layer import Layer
from bedrock.common import uima
import logging
from typing import Any
import warnings
class CAS2DataFrameConverter:
def __init__(self, mapping_fns: dict = None, appending_fns: dict = None):
if mapping_fns is None:
self.__mapping_fns = {}
else:
self.__mapping_fns = mapping_fns
if appending_fns is None:
self.__appending_fns = {}
else:
self.__appending_fns = appending_fns
def register_mapping_fn(self, layer_name: str, fn: Any):
self.__mapping_fns[layer_name] = fn
def register_appending_fn(self, layer_name: str, fn: Any):
self.__appending_fns[layer_name] = fn
def unregister_mapping_fn(self, layer_name: str):
self.__mapping_fns[layer_name] = None
def unregister_appending_fn(self, layer_name: str):
self.__appending_fns[layer_name] = None
# Generates panda df from the UIMA CAS: tokens, annotations, relations, uima (combined)
def get_dataframes(self, cas):
annotations = pd.DataFrame(columns=Annotation.COLS)
relations = pd.DataFrame(columns=Relation.COLS)
for element in cas.getAnnotationIndex():
layer = element.FStype.name
if element.FStype.name == 'uima.cas.Sofa':
cas_text = '"' + html.unescape(element.sofaString) + '"'
continue
if len(element.getFeatures()) >= 1:
row = {}
for feature_dict in element.getFeatureValsAsDictList():
for feature_name, feature_value in feature_dict.items():
if feature_name == uima.BEGIN:
row[Annotation.BEGIN] = int(feature_value)
elif feature_name == uima.END:
row[Annotation.END] = int(feature_value)
if type(feature_value) is list:
if len(feature_value) > 1:
# TODO handle multiple values per UIMA feature
logging.warning(feature_value)
continue
feature_value = feature_value[0]
if layer in self.__mapping_fns:
row = self.__mapping_fns[layer](row, feature_name, feature_value)
row[Annotation.ID] = int(element.FSid)
row[Annotation.LAYER] = layer
if layer in Layer.TOKEN:
row[Annotation.FEATURE_VAL] = cas_text[row[Annotation.BEGIN]+1:row[Annotation.END]+1]
# add the layer to the data frame
if layer in self.__appending_fns:
annotations, relations = self.__appending_fns[layer](row, annotations, relations)
else:
warnings.warn("appending function not implemented for layer: " + layer)
tokens = annotations[
(annotations[Annotation.LAYER] == Layer.TOKEN) & (annotations[Annotation.FEATURE] == Token.TEXT)
][[Annotation.ID, Annotation.BEGIN, Annotation.END, Annotation.FEATURE_VAL]]
tokens.reset_index(inplace=True, drop=True)
tokens.rename(columns={Annotation.FEATURE_VAL: Token.TEXT}, inplace=True)
pos_annotations = annotations[
(annotations[Annotation.LAYER] == Layer.POS) & (annotations[Annotation.FEATURE] == Token.POS_VALUE)
][[Annotation.BEGIN, Annotation.END, Annotation.FEATURE_VAL]] # TODO ID could be added if needed
pos_annotations.rename(columns={Annotation.FEATURE_VAL: Token.POS_VALUE}, inplace=True)
tokens = pd.merge(tokens, pos_annotations, on=[Annotation.BEGIN, Annotation.END], how='left')
sentence_annotations = annotations[
annotations[Annotation.LAYER] == Layer.SENTENCE
][[Annotation.BEGIN]] # TODO ID could be added if needed
sentence_annotations.loc[:, Token.SENT_START] = True
tokens = pd.merge(tokens, sentence_annotations, on=[Annotation.BEGIN], how='left')
dependency_annotations = relations[
(relations[Relation.LAYER] == Layer.DEPENDENCY) & (relations[Relation.FEATURE] == Token.DEP_TYPE)
][[Relation.BEGIN, Relation.END, Relation.FEATURE_VAL, Token.GOV_ID]]
dependency_annotations.rename(columns={Annotation.FEATURE_VAL: Token.DEP_TYPE}, inplace=True)
tokens = pd.merge(tokens, dependency_annotations, on=[Relation.BEGIN, Relation.END], how='left')
tokens = tokens.replace({pd.np.nan: None})
# sets ID column as index
annotations.set_index(Annotation.ID, inplace=True)
relations.set_index(Relation.ID, inplace=True)
return tokens, annotations, relations
| [
"pandas.merge",
"logging.warning",
"html.unescape",
"pandas.DataFrame",
"warnings.warn"
] | [((1225, 1262), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'Annotation.COLS'}), '(columns=Annotation.COLS)\n', (1237, 1262), True, 'import pandas as pd\n'), ((1283, 1318), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'Relation.COLS'}), '(columns=Relation.COLS)\n', (1295, 1318), True, 'import pandas as pd\n'), ((3848, 3936), 'pandas.merge', 'pd.merge', (['tokens', 'pos_annotations'], {'on': '[Annotation.BEGIN, Annotation.END]', 'how': '"""left"""'}), "(tokens, pos_annotations, on=[Annotation.BEGIN, Annotation.END],\n how='left')\n", (3856, 3936), True, 'import pandas as pd\n'), ((4186, 4259), 'pandas.merge', 'pd.merge', (['tokens', 'sentence_annotations'], {'on': '[Annotation.BEGIN]', 'how': '"""left"""'}), "(tokens, sentence_annotations, on=[Annotation.BEGIN], how='left')\n", (4194, 4259), True, 'import pandas as pd\n'), ((4612, 4703), 'pandas.merge', 'pd.merge', (['tokens', 'dependency_annotations'], {'on': '[Relation.BEGIN, Relation.END]', 'how': '"""left"""'}), "(tokens, dependency_annotations, on=[Relation.BEGIN, Relation.END],\n how='left')\n", (4620, 4703), True, 'import pandas as pd\n'), ((3045, 3116), 'warnings.warn', 'warnings.warn', (["('appending function not implemented for layer: ' + layer)"], {}), "('appending function not implemented for layer: ' + layer)\n", (3058, 3116), False, 'import warnings\n'), ((1498, 1531), 'html.unescape', 'html.unescape', (['element.sofaString'], {}), '(element.sofaString)\n', (1511, 1531), False, 'import html\n'), ((2259, 2289), 'logging.warning', 'logging.warning', (['feature_value'], {}), '(feature_value)\n', (2274, 2289), False, 'import logging\n')] |
# Copyright 2019 The SQLNet Company GmbH
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import json
import os
import numpy as np
import pandas as pd
from getml.placeholder import Placeholder
import getml.columns as columns
import getml.communication as comm
# -----------------------------------------------------------------------------
class DataFrame(object):
"""
Data storage container for the getml engine.
Args:
name (str): Name of the DataFrame
join_keys (List[str], optional) : Names of the columns that are the join keys.
time_stamps (List[str], optional): Names of the columns that are the
time stamps. Time stamps can be of type pd.Timestamp or float. If
they are a float, the floating point number will be interpreted as
the number of days since 1970-01-01 00:00:00. Fractions will be
interpreted as fractions of a day. For instance, 2.5 will be
interpreted as 1970-01-03 12:00:00.
categorical (List[str], optional): Names of the columns that are categorical variables.
discrete (List[str]), optional) : Names of the columns that are discrete variables.
numerical (List[str]): Names of the columns that are numerical variables.
targets (List[str], optional): Target variables.
Will be ignored during prediction or if this is peripheral table.
units (dict): Mapping of column names to units.
All columns containing that column name will be assigned the unit.
Columns containing the same unit can be directly compared.
"""
def __init__(
self,
name,
join_keys=None,
time_stamps=None,
categorical=None,
discrete=None,
numerical=None,
targets=None,
units=None
):
# ---------------------------------------------------------------------
self.name = name
self.units = units or dict()
# ---------------------------------------------------------------------
join_key_names = join_keys or []
time_stamp_names = time_stamps or []
categorical_names = categorical or []
discrete_names = discrete or []
numerical_names = numerical or []
target_names = targets or []
# ---------------------------------------------------------------------
self.__categorical_columns = []
for i, name in enumerate(categorical_names):
self.__categorical_columns.append(
columns.CategoricalColumn(
name=name,
role="categorical",
num=i,
df_name=self.name
)
)
# ---------------------------------------------------------------------
discrete_units = self.__extract_units(discrete_names)
self.__discrete_columns = []
for i, name in enumerate(discrete_names):
self.__discrete_columns.append(
columns.Column(
name=name,
unit=discrete_units[i],
role="discrete",
num=i,
df_name=self.name
)
)
# ---------------------------------------------------------------------
self.__join_key_columns = []
for i, name in enumerate(join_key_names):
self.__join_key_columns.append(
columns.CategoricalColumn(
name=name,
role="join_key",
num=i,
df_name=self.name
)
)
# ---------------------------------------------------------------------
numerical_units = self.__extract_units(numerical_names)
self.__numerical_columns = []
for i, name in enumerate(numerical_names):
self.__numerical_columns.append(
columns.Column(
name=name,
unit=numerical_units[i],
role="numerical",
num=i,
df_name=self.name
)
)
# ---------------------------------------------------------------------
self.__target_columns = []
for i, name in enumerate(target_names):
self.__target_columns.append(
columns.Column(
name=name,
role="target",
num=i,
df_name=self.name
)
)
# ---------------------------------------------------------------------
self.__time_stamp_columns = []
for i, name in enumerate(time_stamp_names):
self.__time_stamp_columns.append(
columns.Column(
name=name,
role="time_stamp",
num=i,
df_name=self.name
)
)
# -------------------------------------------------------------------------
def __add_categorical_column(self, col, name, role, unit):
# ------------------------------------------------------
# Send command
cmd = dict()
cmd["type_"] = "DataFrame.add_categorical_column"
cmd["name_"] = name
cmd["col_"] = col.thisptr
cmd["df_name_"] = self.name
cmd["role_"] = role
cmd["unit_"] = unit
comm.send(cmd)
# ------------------------------------------------------
self.refresh()
# -------------------------------------------------------------------------
def __add_column(self, col, name, role, unit):
# ------------------------------------------------------
# Send command
cmd = dict()
cmd["type_"] = "DataFrame.add_column"
cmd["name_"] = name
cmd["col_"] = col.thisptr
cmd["df_name_"] = self.name
cmd["role_"] = role
cmd["unit_"] = unit
comm.send(cmd)
# ------------------------------------------------------
self.refresh()
# -------------------------------------------------------------------------
def __check_plausibility(self, data_frame):
# ------------------------------------------------------
if len(self.join_key_names) == 0:
raise Exception("You need to provide at least one join key!")
if len(self.time_stamp_names) == 0:
raise Exception("You need to provide at least one time stamp!")
if len(self.categorical_names) != len(np.unique(self.categorical_names)):
raise Exception("Categorical columns not unique!")
if len(self.discrete_names) != len(np.unique(self.discrete_names)):
raise Exception("Discrete columns not unique!")
if len(self.join_key_names) != len(np.unique(self.join_key_names)):
raise Exception("Join keys not unique!")
if len(self.numerical_names) != len(np.unique(self.numerical_names)):
raise Exception("Numerical columns not unique!")
if len(self.target_names) != len(np.unique(self.target_names)):
raise Exception("Target columns not unique!")
if len(self.time_stamp_names) != len(np.unique(self.time_stamp_names)):
raise Exception("Time stamps not unique!")
# ------------------------------------------------------
for col in self.categorical_names:
if col not in data_frame.columns:
raise ValueError(
"Column named '" + col + "' does not exist!")
for col in self.discrete_names:
if col not in data_frame.columns:
raise ValueError(
"Column named '" + col + "' does not exist!")
for col in self.join_key_names:
if col not in data_frame.columns:
raise ValueError(
"Column named '" + col + "' does not exist!")
for col in self.numerical_names:
if col not in data_frame.columns:
raise ValueError(
"Column named '" + col + "' does not exist!")
for col in self.target_names:
if col not in data_frame.columns:
raise ValueError(
"Column named '" + col + "' does not exist!")
for col in self.time_stamp_names:
if col not in data_frame.columns:
raise ValueError(
"Column named '" + col + "' does not exist!")
# -------------------------------------------------------------------------
def __close(self, s):
cmd = dict()
cmd["type_"] = "DataFrame.close"
cmd["name_"] = self.name
comm.send_string(s, json.dumps(cmd))
msg = comm.recv_string(s)
if msg != "Success!":
raise Exception(msg)
# -------------------------------------------------------------------------
def __extract_shape(self, cmd, name):
shape = cmd[name + "_shape_"]
shape = np.asarray(shape).astype(np.int32)
return shape.tolist()
# -------------------------------------------------------------------------
def __extract_units(self, colnames):
return [
self.units[col] if col in self.units else "" for col in colnames
]
# -------------------------------------------------------------------------
def __get_column(self, name, columns):
for col in columns:
if col.name == name:
return col
raise Exception("Column named '" + name + "' not found.")
# -------------------------------------------------------------------------
def __send_data(self, data_frame, s):
for col in self.__categorical_columns:
col.send(
data_frame[[col.name]].values.astype(np.str),
s
)
for col in self.__discrete_columns:
if "time stamp" in col.thisptr["unit_"]:
col.send(
self.__transform_timestamps(
data_frame[[col.name]]),
s
)
else:
col.send(
data_frame[[col.name]].apply(
pd.to_numeric, errors="coerce"
).values,
s
)
for col in self.__join_key_columns:
col.send(
data_frame[[col.name]].values.astype(np.str),
s
)
for col in self.__numerical_columns:
if "time stamp" in col.thisptr["unit_"]:
col.send(
self.__transform_timestamps(
data_frame[[col.name]]
),
s
)
else:
col.send(
data_frame[[col.name]].apply(
pd.to_numeric, errors="coerce"
).values,
s
)
for col in self.__target_columns:
col.send(
data_frame[[col.name]].apply(
pd.to_numeric, errors="raise"
).values,
s
)
for col in self.__time_stamp_columns:
col.send(
self.__transform_timestamps(
data_frame[[col.name]]
),
s
)
# -------------------------------------------------------------------------
def __rm_col(self, name, role):
# ------------------------------------------------------
# Send command
cmd = dict()
cmd["type_"] = "DataFrame.remove_column"
cmd["name_"] = name
cmd["df_name_"] = self.name
cmd["role_"] = role
comm.send(cmd)
# ------------------------------------------------------
self.refresh()
# -------------------------------------------------------------------------
def __transform_timestamps(self, time_stamps):
# Transforming a time stamp using to_numeric
# will result in the number of nanoseconds since
# the beginning of UNIX time. There are 8.64e+13 nanoseconds
# in a day.
transformed = pd.DataFrame()
for colname in time_stamps.columns:
if pd.api.types.is_numeric_dtype(time_stamps[colname]):
transformed[colname] = time_stamps[colname]
else:
transformed[colname] = time_stamps[[colname]].apply(
pd.to_datetime,
errors="coerce"
).apply(
pd.to_numeric,
errors="coerce"
).apply(
lambda val: val / 8.64e+13
)[colname]
return transformed.values
# -------------------------------------------------------------------------
def add_categorical(self, col, name, unit=""):
"""
Adds a categorical column to the DataFrame.
Args:
col: The column to be added.
name (str): Name of the new column in the DataFrame.
unit (otional): Unit of the column.
"""
self.__add_categorical_column(col, name, "categorical", unit)
# -------------------------------------------------------------------------
def add_discrete(self, col, name, unit=""):
"""
Adds a discrete column to the DataFrame.
Args:
col: The column to be added.
name (str): Name of the new column in the DataFrame.
unit (otional): Unit of the column.
"""
self.__add_column(col, name, "discrete", unit)
# -------------------------------------------------------------------------
def add_join_key(self, col, name):
"""
Adds a join key column to the DataFrame.
Args:
col: The column to be added.
name (str): Name of the new column in the DataFrame.
"""
self.__add_categorical_column(col, name, "join_key", "")
# -------------------------------------------------------------------------
def add_numerical(self, col, name, unit=""):
"""
Adds a numerical column to the DataFrame.
Args:
col: The column to be added.
name (str): Name of the new column in the DataFrame.
unit (otional): Unit of the column.
"""
self.__add_column(col, name, "numerical", unit)
# -------------------------------------------------------------------------
def add_target(self, col, name):
"""
Adds a target column to the DataFrame.
Args:
col: The column to be added.
name (str): Name of the new column in the DataFrame.
"""
self.__add_column(col, name, "target", "")
# -------------------------------------------------------------------------
def add_time_stamp(self, col, name):
"""
Adds a time stamp column to the DataFrame.
Args:
col: The column to be added.
name (str): Name of the new column in the DataFrame.
"""
self.__add_column(col, name, "time_stamp", "")
# -------------------------------------------------------------------------
def append(self, data_frame, sock=None):
"""Appends data to tables that already exist on the getml engine.
If sock is None, it will call a function to create a new
socket, use it for the data transfer and close it
afterwards. If, instead, a socket is provided, it just sends
all the data but does not close it.
Args:
data_frame (pandas.DataFrame): Table that you want to be
appended to the existing data.
sock (optional): Socket connecting the Python API with the
getML engine.
"""
# ------------------------------------------------------
self.__check_plausibility(data_frame)
# ------------------------------------------------------
# Create connection.
cmd = dict()
cmd["type_"] = "DataFrame.append"
cmd["name_"] = self.name
if sock is None:
s = comm.send_and_receive_socket(cmd)
else:
s = sock
comm.send_string(s, json.dumps(cmd))
# ------------------------------------------------------
# Send individual matrices to getml engine
self.__send_data(data_frame, s)
# ------------------------------------------------------
self.__close(s)
if sock is None:
s.close()
return self
# -------------------------------------------------------------------------
def categorical(self, name):
"""
Handle to a categorical column.
Args:
name (str): Name of the column.
"""
return self.__get_column(name, self.__categorical_columns)
# -------------------------------------------------------------------------
@property
def categorical_names(self):
"""
List of the names of all categorical columns.
"""
return [col.name for col in self.__categorical_columns]
# -------------------------------------------------------------------------
def delete(self, mem_only=False):
"""
Deletes the data frame from the engine.
Args:
mem_only (bool): If True, the data frame will be deleted from
memory only, but not from disk.
"""
# -------------------------------------------
# Send JSON command to getml engine
cmd = dict()
cmd["type_"] = "DataFrame.delete"
cmd["name_"] = self.name
cmd["mem_only_"] = mem_only
comm.send(cmd)
# -------------------------------------------------------------------------
def discrete(self, name):
"""
Returns a handle to a discrete column.
Args:
name (str): Name of the column.
"""
return self.__get_column(name, self.__discrete_columns)
# -------------------------------------------------------------------------
@property
def discrete_names(self):
"""
List of the names of all discrete columns.
"""
return [col.name for col in self.__discrete_columns]
# -------------------------------------------------------------------------
def from_db(self, table_name, append=False):
"""
Fill from Database
The DataFrame will be filled from a table in the database.
Args:
table_name(str): Table from which we want to retrieve the data.
append(bool): If a DataFrame already exists, should table be appended?
"""
# -------------------------------------------
# Send JSON command to getml engine
cmd = dict()
cmd["type_"] = "DataFrame.from_db"
cmd["name_"] = self.name
cmd["table_name_"] = table_name
cmd["categoricals_"] = self.categorical_names
cmd["discretes_"] = self.discrete_names
cmd["join_keys_"] = self.join_key_names
cmd["numericals_"] = self.numerical_names
cmd["targets_"] = self.target_names
cmd["time_stamps_"] = self.time_stamp_names
cmd["append_"] = append
comm.send(cmd)
# -------------------------------------------
return self
# -------------------------------------------------------------------------
def from_json(self, json_str, append=False, time_formats=["%Y-%m-%dT%H:%M:%s%z", "%Y-%m-%d %H:%M:%S", "%Y-%m-%d"]):
"""
Fill from JSON
Fills the data frame with data from a JSON string.
Args:
json_str (str): The JSON string containing the data.
append (bool): If a DataFrame already exists, should json_str be appended?
time_formats (str): The formats tried when parsing time stamps.
Refer to https://pocoproject.org/docs/Poco.DateTimeFormatter.html#9946 for the options.
"""
# -------------------------------------------
# Send JSON command to getml engine
cmd = dict()
cmd["type_"] = "DataFrame.from_json"
cmd["name_"] = self.name
cmd["categoricals_"] = self.categorical_names
cmd["discretes_"] = self.discrete_names
cmd["join_keys_"] = self.join_key_names
cmd["numericals_"] = self.numerical_names
cmd["targets_"] = self.target_names
cmd["time_stamps_"] = self.time_stamp_names
cmd["append_"] = append
cmd["time_formats_"] = time_formats
s = comm.send_and_receive_socket(cmd)
# -------------------------------------------
# Send the JSON string
comm.send_string(s, json_str)
# -------------------------------------------
# Make sure everything went well and close
# connection
msg = comm.recv_string(s)
s.close()
if msg != "Success!":
raise Exception(msg)
# -------------------------------------------
return self
# -------------------------------------------------------------------------
def from_query(self, query, append=False):
"""
Fill from query
Fills the data frame with data from a table in the database.
Args:
query: The query used to retrieve the data.
append (bool): If a DataFrame already exists, should table be appended?
"""
# -------------------------------------------
# Send JSON command to getml engine
cmd = dict()
cmd["type_"] = "DataFrame.from_query"
cmd["name_"] = self.name
cmd["query_"] = query
cmd["categoricals_"] = self.categorical_names
cmd["discretes_"] = self.discrete_names
cmd["join_keys_"] = self.join_key_names
cmd["numericals_"] = self.numerical_names
cmd["targets_"] = self.target_names
cmd["time_stamps_"] = self.time_stamp_names
cmd["append_"] = append
comm.send(cmd)
# -------------------------------------------
return self
# -------------------------------------------------------------------------
def get(self):
"""
Get Pandas DataFrame
Returns:
:class:`pandas.DataFrame`
"""
# -------------------------------------------
# Send JSON command to getml engine
cmd = dict()
cmd["type_"] = "DataFrame.get"
cmd["name_"] = self.name
# -------------------------------------------
# Establish communication with getml engine
s = comm.send_and_receive_socket(cmd)
# -------------------------------------------
# Receive all columns
df = pd.DataFrame()
for col in self.__categorical_columns:
df[col.name] = col.get(s)
for col in self.__discrete_columns:
df[col.name] = col.get(s)
for col in self.__join_key_columns:
df[col.name] = col.get(s)
for col in self.__numerical_columns:
df[col.name] = col.get(s)
for col in self.__target_columns:
df[col.name] = col.get(s)
for col in self.__time_stamp_columns:
df[col.name] = col.get(s)
# -------------------------------------------
# Close connection
self.__close(s)
s.close()
# -------------------------------------------
return df
# -------------------------------------------------------------------------
def group_by(self, join_key, name, aggregations):
"""
Creates new DataFrame by grouping over a join key.
Args:
join_key (str): Name of the join key to group by.
name (str): Name of the new DataFrame.
aggregations: List containing aggregations.
Returns:
:class:`~getml.engine.DataFrame`
"""
# ----------------------------------------------------------------------
# Build command
cmd = dict()
cmd["name_"] = name
cmd["type_"] = "DataFrame.group_by"
cmd["join_key_name_"] = join_key
cmd["df_name_"] = self.name
cmd["aggregations_"] = [agg.thisptr for agg in aggregations]
comm.send(cmd)
# ----------------------------------------------------------------------
# Create handle for new data frame.
new_df = DataFrame(name)
return new_df.refresh()
# -------------------------------------------------------------------------
def join(
self,
name,
other,
join_key,
other_join_key=None,
cols=None,
other_cols=None,
how="inner",
where=None):
"""
Create a new DataFrame by joining this DataFrame with another DataFrame.
Args:
name (str): The name of the new DataFrame.
other (DataFrame): The other DataFrame.
join_key (str): Name of the join key in this DataFrame.
other_join_key (str, optional): Name of the join key in the other table
(if not identical to join_key).
cols (optional): List of columns from this DataFrame to be included.
If left blank, all columns from this DataFrame will be included.
other_cols (optional): List of columns from the other DataFrame to be included.
If left blank, all columns from the other DataFrame will be included.
how (str): Type of the join. Supports "left", "right" and "inner".
where (bool): Boolean column that imposes WHERE conditions on the join.
"""
# -------------------------------------------
# Send JSON command to getml engine
cmd = dict()
cmd["type_"] = "DataFrame.join"
cmd["name_"] = name
cmd["df1_name_"] = self.name
cmd["df2_name_"] = other.name
cmd["join_key_used_"] = join_key
cmd["other_join_key_used_"] = other_join_key or join_key
cmd["cols1_"] = cols or []
cmd["cols2_"] = other_cols or []
cmd["cols1_"] = [c.thisptr for c in cmd["cols1_"]]
cmd["cols2_"] = [c.thisptr for c in cmd["cols2_"]]
cmd["how_"] = how
if where is not None:
cmd["where_"] = where.thisptr
comm.send(cmd)
# -------------------------------------------
return DataFrame(name=name).refresh()
# -------------------------------------------------------------------------
def join_key(self, name):
"""
Returns a handle to a join key.
Args:
name (str): Name of the join key.
"""
return self.__get_column(name, self.__join_key_columns)
# -------------------------------------------------------------------------
@property
def join_key_names(self):
"""
List of the names of all join keys.
"""
return [col.name for col in self.__join_key_columns]
# -------------------------------------------------------------------------
def load(self):
"""
Loads the DataFrame object from the engine.
"""
# ----------------------------------------------------------------------
cmd = dict()
cmd["type_"] = "DataFrame.load"
cmd["name_"] = self.name
comm.send(cmd)
# ----------------------------------------------------------------------
return self.refresh()
# -------------------------------------------------------------------------
def n_bytes(self):
"""
Returns the size of the data stored in the DataFrame in bytes.
"""
# ------------------------------------------------------
# Build and send JSON command
cmd = dict()
cmd["type_"] = "DataFrame.nbytes"
cmd["name_"] = self.name
s = comm.send_and_receive_socket(cmd)
# ------------------------------------------------------
# Make sure model exists on getml engine
msg = comm.recv_string(s)
if msg != "Found!":
s.close()
raise Exception(msg)
# ------------------------------------------------------
# Receive number of bytes from getml engine
nbytes = comm.recv_string(s)
# ------------------------------------------------------
s.close()
return np.uint64(nbytes)
# -------------------------------------------------------------------------
@property
def n_categorical(self):
"""
Number of categorical columns.
"""
return len(self.__categorical_columns)
# -------------------------------------------------------------------------
@property
def n_discrete(self):
"""
Number of discrete columns.
"""
return len(self.__discrete_columns)
# -------------------------------------------------------------------------
@property
def n_join_keys(self):
"""
Number of join keys.
"""
return len(self.__join_key_columns)
# -------------------------------------------------------------------------
@property
def n_numerical(self):
"""
Number of numerical columns.
"""
return len(self.__numerical_columns)
# -------------------------------------------------------------------------
def n_rows(self):
"""
Returns the number of rows in the data frame.
"""
# ------------------------------------------------------
# Build and send JSON command
cmd = dict()
cmd["type_"] = "DataFrame.nrows"
cmd["name_"] = self.name
s = comm.send_and_receive_socket(cmd)
# ------------------------------------------------------
# Make sure model exists on getml engine
msg = comm.recv_string(s)
if msg != "Found!":
s.close()
raise Exception(msg)
# ------------------------------------------------------
# Receive number of rows from getml engine
nrows = comm.recv_string(s)
# ------------------------------------------------------
s.close()
return np.int32(nrows)
# -------------------------------------------------------------------------
@property
def n_targets(self):
"""
Number of target columns.
"""
return len(self.__target_columns)
# -------------------------------------------------------------------------
@property
def n_time_stamps(self):
"""
Number of time stamps columns.
"""
return len(self.__time_stamp_columns)
# -------------------------------------------------------------------------
def numerical(self, name):
"""
Returns a handle to a numerical column.
Args:
name (str): Name of the column.
"""
return self.__get_column(name, self.__numerical_columns)
# -------------------------------------------------------------------------
@property
def numerical_names(self):
"""
List of the names of all numerical columns.
"""
return [col.name for col in self.__numerical_columns]
# -------------------------------------------------------------------------
def random(self, seed=5849):
"""
Create random column
The numbers will uniformly distributed from 0.0 to 1.0.
Args:
seed (int)*: Seed used for the random number generator.
Returns:
col (Column): Column containing random numbers
"""
col = columns._VirtualColumn(
df_name=self.name,
operator="random",
operand1=None,
operand2=None
)
col.thisptr["seed_"] = seed
return col
# -------------------------------------------------------------------------
def read_csv(
self,
fnames,
append=True,
quotechar='"',
sep=',',
time_formats=["%Y-%m-%dT%H:%M:%s%z", "%Y-%m-%d %H:%M:%S", "%Y-%m-%d"]):
"""
Read CSV file
It is assumed that the first line of each CSV file contains the column
names.
Args:
fnames (List[str]): CSV file names to be read.
append (bool): If a DataFrame already exists, should the file be appended?
quotechar (str): The character used to wrap strings.
sep (str): The separator used for separating fields.
time_formats (str): The formats tried when parsing time stamps.
Refer to https://pocoproject.org/docs/Poco.DateTimeFormatter.html#9946 for the options.
"""
# -------------------------------------------
# Transform paths
fnames_ = [os.path.abspath(_) for _ in fnames]
# -------------------------------------------
# Send JSON command to getml engine
cmd = dict()
cmd["type_"] = "DataFrame.read_csv"
cmd["name_"] = self.name
cmd["fnames_"] = fnames_
cmd["append_"] = append
cmd["quotechar_"] = quotechar
cmd["sep_"] = sep
cmd["time_formats_"] = time_formats
cmd["categoricals_"] = self.categorical_names
cmd["discretes_"] = self.discrete_names
cmd["join_keys_"] = self.join_key_names
cmd["numericals_"] = self.numerical_names
cmd["targets_"] = self.target_names
cmd["time_stamps_"] = self.time_stamp_names
comm.send(cmd)
# -------------------------------------------
return self
# -------------------------------------------------------------------------
def refresh(self):
"""
Aligns meta-information of the DataFrame with the engine.
This method can be used to avoid encoding conflicts. Note that the
.load() method automatically calls refresh.
"""
# ----------------------------------------------------------------------
cmd = dict()
cmd["type_"] = "DataFrame.refresh"
cmd["name_"] = self.name
s = comm.send_and_receive_socket(cmd)
msg = comm.recv_string(s)
s.close()
if msg[0] != "{":
raise Exception(msg)
# ----------------------------------------------------------------------
encodings = json.loads(msg)
# ----------------------------------------------------------------------
# Extract colnames
if encodings["categorical_"] == '':
categorical = []
else:
categorical = encodings["categorical_"]
if encodings["discrete_"] == '':
discrete = []
else:
discrete = encodings["discrete_"]
if encodings["join_keys_"] == '':
join_keys = []
else:
join_keys = encodings["join_keys_"]
if encodings["numerical_"] == '':
numerical = []
else:
numerical = encodings["numerical_"]
if encodings["targets_"] == '':
targets = []
else:
targets = encodings["targets_"]
if encodings["time_stamps_"] == '':
time_stamps = []
else:
time_stamps = encodings["time_stamps_"]
# ----------------------------------------------------------------------
# Re-initialize data frame
self.__init__(
name=self.name,
join_keys=join_keys,
time_stamps=time_stamps,
categorical=categorical,
discrete=discrete,
numerical=numerical,
targets=targets,
units=self.units
)
# ----------------------------------------------------------------------
return self
# -------------------------------------------------------------------------
def rowid(self):
"""
Returns a (numerical) column containing the row id, starting with 0.
"""
return columns._VirtualColumn(
df_name=self.name,
operator="rowid",
operand1=None,
operand2=None
)
# -------------------------------------------------------------------------
def save(self):
"""Saves the DataFrame on the engine.
To be saved on the engine, it already has to be present
there. You can use the :meth:`~getml.engine.DataFrame.send`
function to upload it to the engine.
Returns:
:class:`~getml.engine.DataFrame`:
The current instance of the DataFrame class.
"""
cmd = dict()
cmd["type_"] = "DataFrame.save"
cmd["name_"] = self.name
comm.send(cmd)
return self
# -------------------------------------------------------------------------
def rm_categorical(self, name):
"""
Removes a categorical column from the DataFrame.
Args:
name (str): Name of the column to be removed.
"""
self.__rm_col(name, "categorical")
# -------------------------------------------------------------------------
def rm_discrete(self, name):
"""
Removes a discrete column from the DataFrame.
Args:
name (str): Name of the column to be removed.
"""
self.__rm_col(name, "discrete")
# -------------------------------------------------------------------------
def rm_join_key(self, name):
"""
Removes a join key from the DataFrame.
Args:
name (str): Name of the column to be removed.
"""
self.__rm_col(name, "join_key")
# -------------------------------------------------------------------------
def rm_numerical(self, name):
"""
Removes a numerical column from the DataFrame.
Args:
name (str): Name of the column to be removed.
"""
self.__rm_col(name, "numerical")
# -------------------------------------------------------------------------
def rm_target(self, name):
"""
Removes a target column from the DataFrame.
Args:
name (str): Name of the column to be removed.
"""
self.__rm_col(name, "target")
# -------------------------------------------------------------------------
def rm_time_stamp(self, name):
"""
Removes a time stamp column from the DataFrame.
Args:
name (str): Name of the column to be removed.
"""
self.__rm_col(name, "time_stamp")
# -------------------------------------------------------------------------
def send(self, data_frame, sock = None):
"""Send data to the getml engine.
If sock is None, it will call a function to create
a new socket, use it for the data transfer and close it
afterwards. If, instead, a socket is provided, it just sends
all the data but does not close it.
Args:
data_frame (pandas.DataFrame): Data Frame that you want to be
appended to the existing data.
sock (optional): Socket connecting the Python API with the
getML engine.
"""
# ------------------------------------------------------
if data_frame is not None:
self.__check_plausibility(data_frame)
# ------------------------------------------------------
# Send data frame itself
cmd = dict()
cmd["type_"] = "DataFrame"
cmd["name_"] = self.name
if sock is None:
s = comm.send_and_receive_socket(cmd)
else:
s = sock
comm.send_string(s, json.dumps(cmd))
msg = comm.recv_string(s)
if msg != "Success!":
raise Exception(msg)
# ------------------------------------------------------
# Send individual columns to getml engine
self.__send_data(data_frame, s)
# ------------------------------------------------------
self.__close(s)
if sock is None:
s.close()
return self
# -------------------------------------------------------------------------
def target(self, name):
"""
Returns a handle to a target column.
Args:
name (str): Name of the column.
"""
return self.__get_column(name, self.__target_columns)
# -------------------------------------------------------------------------
@property
def target_names(self):
"""
List of the names of all target columns.
"""
return [col.name for col in self.__target_columns]
# -------------------------------------------------------------------------
def time_stamp(self, name):
"""
Returns a handle to a time stamp column.
Args:
name (str): Name of the column.
"""
return self.__get_column(name, self.__time_stamp_columns)
# -------------------------------------------------------------------------
@property
def time_stamp_names(self):
"""
List of the names of all time stamps.
"""
return [col.name for col in self.__time_stamp_columns]
# -------------------------------------------------------------------------
def to_csv(self, fname, quotechar='"', sep=','):
"""
Writes the data frame into a newly created CSV file.
Args:
fname (str): The name of the CSV file.
quotechar (str): The character used to wrap strings.
sep (str): The separator used for separating fields.
"""
# ----------------------------------------------------------------------
# Transform path
fname_ = os.path.abspath(fname)
# ----------------------------------------------------------------------
# Build command
cmd = dict()
cmd["type_"] = "DataFrame.to_csv"
cmd["name_"] = self.name
cmd["fname_"] = fname_
cmd["quotechar_"] = quotechar
cmd["sep_"] = sep
comm.send(cmd)
# -------------------------------------------------------------------------
def to_db(self, table_name):
"""
Writes the data frame into a newly created table in the database.
Args:
table_name (str): Name of the table to be created.
If a table of that name already exists, it will be replaced.
"""
# ----------------------------------------------------------------------
# Build command
cmd = dict()
cmd["type_"] = "DataFrame.to_db"
cmd["name_"] = self.name
cmd["table_name_"] = table_name
comm.send(cmd)
# -------------------------------------------------------------------------
def to_placeholder(self):
"""
Generates a placeholder from the data frame.
"""
self.refresh()
return Placeholder(
name=self.name,
categorical=self.categorical_names,
discrete=self.discrete_names,
numerical=self.numerical_names,
join_keys=self.join_key_names,
time_stamps=self.time_stamp_names,
targets=self.target_names
)
# -------------------------------------------------------------------------
def where(self, name, condition):
"""
Creates a new DataFrame as a subselection of this one.
Args:
name (str): Name of the new DataFrame.
condition (bool): Boolean column indicating the rows you want to select.
"""
# ----------------------------------------------------------------------
# Build command
cmd = dict()
cmd["type_"] = "DataFrame.where"
cmd["name_"] = self.name
cmd["new_df_"] = name
cmd["condition_"] = condition.thisptr
comm.send(cmd)
# ----------------------------------------------------------------------
# Create handle for new data frame.
new_df = DataFrame(name)
return new_df.refresh()
# -------------------------------------------------------------------------
# -----------------------------------------------------------------------------
| [
"json.loads",
"numpy.unique",
"getml.columns.CategoricalColumn",
"pandas.api.types.is_numeric_dtype",
"getml.communication.recv_string",
"numpy.int32",
"getml.communication.send_and_receive_socket",
"getml.communication.send",
"json.dumps",
"getml.columns._VirtualColumn",
"numpy.asarray",
"numpy.uint64",
"getml.communication.send_string",
"getml.columns.Column",
"pandas.DataFrame",
"getml.placeholder.Placeholder",
"os.path.abspath"
] | [((6542, 6556), 'getml.communication.send', 'comm.send', (['cmd'], {}), '(cmd)\n', (6551, 6556), True, 'import getml.communication as comm\n'), ((7101, 7115), 'getml.communication.send', 'comm.send', (['cmd'], {}), '(cmd)\n', (7110, 7115), True, 'import getml.communication as comm\n'), ((9925, 9944), 'getml.communication.recv_string', 'comm.recv_string', (['s'], {}), '(s)\n', (9941, 9944), True, 'import getml.communication as comm\n'), ((13079, 13093), 'getml.communication.send', 'comm.send', (['cmd'], {}), '(cmd)\n', (13088, 13093), True, 'import getml.communication as comm\n'), ((13538, 13552), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (13550, 13552), True, 'import pandas as pd\n'), ((19172, 19186), 'getml.communication.send', 'comm.send', (['cmd'], {}), '(cmd)\n', (19181, 19186), True, 'import getml.communication as comm\n'), ((20779, 20793), 'getml.communication.send', 'comm.send', (['cmd'], {}), '(cmd)\n', (20788, 20793), True, 'import getml.communication as comm\n'), ((22114, 22147), 'getml.communication.send_and_receive_socket', 'comm.send_and_receive_socket', (['cmd'], {}), '(cmd)\n', (22142, 22147), True, 'import getml.communication as comm\n'), ((22243, 22272), 'getml.communication.send_string', 'comm.send_string', (['s', 'json_str'], {}), '(s, json_str)\n', (22259, 22272), True, 'import getml.communication as comm\n'), ((22415, 22434), 'getml.communication.recv_string', 'comm.recv_string', (['s'], {}), '(s)\n', (22431, 22434), True, 'import getml.communication as comm\n'), ((23574, 23588), 'getml.communication.send', 'comm.send', (['cmd'], {}), '(cmd)\n', (23583, 23588), True, 'import getml.communication as comm\n'), ((24188, 24221), 'getml.communication.send_and_receive_socket', 'comm.send_and_receive_socket', (['cmd'], {}), '(cmd)\n', (24216, 24221), True, 'import getml.communication as comm\n'), ((24321, 24335), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (24333, 24335), True, 'import pandas as pd\n'), ((25864, 25878), 'getml.communication.send', 'comm.send', (['cmd'], {}), '(cmd)\n', (25873, 25878), True, 'import getml.communication as comm\n'), ((27951, 27965), 'getml.communication.send', 'comm.send', (['cmd'], {}), '(cmd)\n', (27960, 27965), True, 'import getml.communication as comm\n'), ((28996, 29010), 'getml.communication.send', 'comm.send', (['cmd'], {}), '(cmd)\n', (29005, 29010), True, 'import getml.communication as comm\n'), ((29538, 29571), 'getml.communication.send_and_receive_socket', 'comm.send_and_receive_socket', (['cmd'], {}), '(cmd)\n', (29566, 29571), True, 'import getml.communication as comm\n'), ((29702, 29721), 'getml.communication.recv_string', 'comm.recv_string', (['s'], {}), '(s)\n', (29718, 29721), True, 'import getml.communication as comm\n'), ((29942, 29961), 'getml.communication.recv_string', 'comm.recv_string', (['s'], {}), '(s)\n', (29958, 29961), True, 'import getml.communication as comm\n'), ((30063, 30080), 'numpy.uint64', 'np.uint64', (['nbytes'], {}), '(nbytes)\n', (30072, 30080), True, 'import numpy as np\n'), ((31418, 31451), 'getml.communication.send_and_receive_socket', 'comm.send_and_receive_socket', (['cmd'], {}), '(cmd)\n', (31446, 31451), True, 'import getml.communication as comm\n'), ((31582, 31601), 'getml.communication.recv_string', 'comm.recv_string', (['s'], {}), '(s)\n', (31598, 31601), True, 'import getml.communication as comm\n'), ((31820, 31839), 'getml.communication.recv_string', 'comm.recv_string', (['s'], {}), '(s)\n', (31836, 31839), True, 'import getml.communication as comm\n'), ((31941, 
31956), 'numpy.int32', 'np.int32', (['nrows'], {}), '(nrows)\n', (31949, 31956), True, 'import numpy as np\n'), ((33417, 33511), 'getml.columns._VirtualColumn', 'columns._VirtualColumn', ([], {'df_name': 'self.name', 'operator': '"""random"""', 'operand1': 'None', 'operand2': 'None'}), "(df_name=self.name, operator='random', operand1=None,\n operand2=None)\n", (33439, 33511), True, 'import getml.columns as columns\n'), ((35351, 35365), 'getml.communication.send', 'comm.send', (['cmd'], {}), '(cmd)\n', (35360, 35365), True, 'import getml.communication as comm\n'), ((35958, 35991), 'getml.communication.send_and_receive_socket', 'comm.send_and_receive_socket', (['cmd'], {}), '(cmd)\n', (35986, 35991), True, 'import getml.communication as comm\n'), ((36007, 36026), 'getml.communication.recv_string', 'comm.recv_string', (['s'], {}), '(s)\n', (36023, 36026), True, 'import getml.communication as comm\n'), ((36209, 36224), 'json.loads', 'json.loads', (['msg'], {}), '(msg)\n', (36219, 36224), False, 'import json\n'), ((37860, 37953), 'getml.columns._VirtualColumn', 'columns._VirtualColumn', ([], {'df_name': 'self.name', 'operator': '"""rowid"""', 'operand1': 'None', 'operand2': 'None'}), "(df_name=self.name, operator='rowid', operand1=None,\n operand2=None)\n", (37882, 37953), True, 'import getml.columns as columns\n'), ((38599, 38613), 'getml.communication.send', 'comm.send', (['cmd'], {}), '(cmd)\n', (38608, 38613), True, 'import getml.communication as comm\n'), ((41664, 41683), 'getml.communication.recv_string', 'comm.recv_string', (['s'], {}), '(s)\n', (41680, 41683), True, 'import getml.communication as comm\n'), ((43778, 43800), 'os.path.abspath', 'os.path.abspath', (['fname'], {}), '(fname)\n', (43793, 43800), False, 'import os\n'), ((44120, 44134), 'getml.communication.send', 'comm.send', (['cmd'], {}), '(cmd)\n', (44129, 44134), True, 'import getml.communication as comm\n'), ((44769, 44783), 'getml.communication.send', 'comm.send', (['cmd'], {}), '(cmd)\n', (44778, 44783), True, 'import getml.communication as comm\n'), ((45016, 45249), 'getml.placeholder.Placeholder', 'Placeholder', ([], {'name': 'self.name', 'categorical': 'self.categorical_names', 'discrete': 'self.discrete_names', 'numerical': 'self.numerical_names', 'join_keys': 'self.join_key_names', 'time_stamps': 'self.time_stamp_names', 'targets': 'self.target_names'}), '(name=self.name, categorical=self.categorical_names, discrete=\n self.discrete_names, numerical=self.numerical_names, join_keys=self.\n join_key_names, time_stamps=self.time_stamp_names, targets=self.\n target_names)\n', (45027, 45249), False, 'from getml.placeholder import Placeholder\n'), ((45981, 45995), 'getml.communication.send', 'comm.send', (['cmd'], {}), '(cmd)\n', (45990, 45995), True, 'import getml.communication as comm\n'), ((9885, 9900), 'json.dumps', 'json.dumps', (['cmd'], {}), '(cmd)\n', (9895, 9900), False, 'import json\n'), ((13613, 13664), 'pandas.api.types.is_numeric_dtype', 'pd.api.types.is_numeric_dtype', (['time_stamps[colname]'], {}), '(time_stamps[colname])\n', (13642, 13664), True, 'import pandas as pd\n'), ((17566, 17599), 'getml.communication.send_and_receive_socket', 'comm.send_and_receive_socket', (['cmd'], {}), '(cmd)\n', (17594, 17599), True, 'import getml.communication as comm\n'), ((34636, 34654), 'os.path.abspath', 'os.path.abspath', (['_'], {}), '(_)\n', (34651, 34654), False, 'import os\n'), ((41531, 41564), 'getml.communication.send_and_receive_socket', 'comm.send_and_receive_socket', (['cmd'], {}), '(cmd)\n', (41559, 41564), 
True, 'import getml.communication as comm\n'), ((3582, 3669), 'getml.columns.CategoricalColumn', 'columns.CategoricalColumn', ([], {'name': 'name', 'role': '"""categorical"""', 'num': 'i', 'df_name': 'self.name'}), "(name=name, role='categorical', num=i, df_name=\n self.name)\n", (3607, 3669), True, 'import getml.columns as columns\n'), ((4070, 4166), 'getml.columns.Column', 'columns.Column', ([], {'name': 'name', 'unit': 'discrete_units[i]', 'role': '"""discrete"""', 'num': 'i', 'df_name': 'self.name'}), "(name=name, unit=discrete_units[i], role='discrete', num=i,\n df_name=self.name)\n", (4084, 4166), True, 'import getml.columns as columns\n'), ((4525, 4604), 'getml.columns.CategoricalColumn', 'columns.CategoricalColumn', ([], {'name': 'name', 'role': '"""join_key"""', 'num': 'i', 'df_name': 'self.name'}), "(name=name, role='join_key', num=i, df_name=self.name)\n", (4550, 4604), True, 'import getml.columns as columns\n'), ((5015, 5113), 'getml.columns.Column', 'columns.Column', ([], {'name': 'name', 'unit': 'numerical_units[i]', 'role': '"""numerical"""', 'num': 'i', 'df_name': 'self.name'}), "(name=name, unit=numerical_units[i], role='numerical', num=i,\n df_name=self.name)\n", (5029, 5113), True, 'import getml.columns as columns\n'), ((5466, 5532), 'getml.columns.Column', 'columns.Column', ([], {'name': 'name', 'role': '"""target"""', 'num': 'i', 'df_name': 'self.name'}), "(name=name, role='target', num=i, df_name=self.name)\n", (5480, 5532), True, 'import getml.columns as columns\n'), ((5881, 5951), 'getml.columns.Column', 'columns.Column', ([], {'name': 'name', 'role': '"""time_stamp"""', 'num': 'i', 'df_name': 'self.name'}), "(name=name, role='time_stamp', num=i, df_name=self.name)\n", (5895, 5951), True, 'import getml.columns as columns\n'), ((7687, 7720), 'numpy.unique', 'np.unique', (['self.categorical_names'], {}), '(self.categorical_names)\n', (7696, 7720), True, 'import numpy as np\n'), ((7830, 7860), 'numpy.unique', 'np.unique', (['self.discrete_names'], {}), '(self.discrete_names)\n', (7839, 7860), True, 'import numpy as np\n'), ((7967, 7997), 'numpy.unique', 'np.unique', (['self.join_key_names'], {}), '(self.join_key_names)\n', (7976, 7997), True, 'import numpy as np\n'), ((8098, 8129), 'numpy.unique', 'np.unique', (['self.numerical_names'], {}), '(self.numerical_names)\n', (8107, 8129), True, 'import numpy as np\n'), ((8235, 8263), 'numpy.unique', 'np.unique', (['self.target_names'], {}), '(self.target_names)\n', (8244, 8263), True, 'import numpy as np\n'), ((8370, 8402), 'numpy.unique', 'np.unique', (['self.time_stamp_names'], {}), '(self.time_stamp_names)\n', (8379, 8402), True, 'import numpy as np\n'), ((10203, 10220), 'numpy.asarray', 'np.asarray', (['shape'], {}), '(shape)\n', (10213, 10220), True, 'import numpy as np\n'), ((17667, 17682), 'json.dumps', 'json.dumps', (['cmd'], {}), '(cmd)\n', (17677, 17682), False, 'import json\n'), ((41632, 41647), 'json.dumps', 'json.dumps', (['cmd'], {}), '(cmd)\n', (41642, 41647), False, 'import json\n')] |
"""
Command to back-populate domain of the site the user account was created on.
"""
import six
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.contrib.sites.models import Site
from django.core.management.base import BaseCommand, CommandError
from common.djangoapps.student.models import Registration, UserAttribute
CREATED_ON_SITE = 'created_on_site'
class Command(BaseCommand):
"""
This command back-populates domain of the site the user account was created on.
"""
help = """This command back-populates domain of the site the user account was created on.
Example: ./manage.py lms populate_created_on_site_user_attribute --users <user_id1>,<user_id2>...
'--activation-keys <key1>,<key2>... --site-domain <site_domain> --settings=devstack_docker"""
def add_arguments(self, parser):
"""
Add arguments to the command parser.
"""
parser.add_argument(
'--users',
help='Enter comma-separated user ids.',
default='',
type=str
)
parser.add_argument(
'--activation-keys',
help='Enter comma-separated activation keys.',
default='',
type=str
)
parser.add_argument(
'--site-domain',
help='Enter an existing site domain.',
required=True
)
def handle(self, *args, **options):
site_domain = options['site_domain']
user_ids = options['users'].split(',') if options['users'] else []
activation_keys = options['activation_keys'].split(',') if options['activation_keys'] else []
if not user_ids and not activation_keys:
raise CommandError('You must provide user ids or activation keys.')
try:
Site.objects.get(domain__exact=site_domain)
except Site.DoesNotExist:
question = "The site you specified is not configured as a Site in the system. " \
"Are you sure you want to continue? (y/n):"
if str(six.moves.input(question)).lower().strip()[0] != 'y':
return
for user_id in user_ids:
try:
user = User.objects.get(id=user_id)
if UserAttribute.get_user_attribute(user, CREATED_ON_SITE):
self.stdout.write(f"created_on_site attribute already exists for user id: {user_id}")
else:
UserAttribute.set_user_attribute(user, CREATED_ON_SITE, site_domain)
except User.DoesNotExist:
self.stdout.write(f"This user id [{user_id}] does not exist in the system.")
for key in activation_keys:
try:
user = Registration.objects.get(activation_key=key).user
if UserAttribute.get_user_attribute(user, CREATED_ON_SITE):
self.stdout.write(f"created_on_site attribute already exists for user id: {user.id}")
else:
UserAttribute.set_user_attribute(user, CREATED_ON_SITE, site_domain)
except Registration.DoesNotExist:
self.stdout.write(f"This activation key [{key}] does not exist in the system.")
| [
"common.djangoapps.student.models.UserAttribute.get_user_attribute",
"six.moves.input",
"common.djangoapps.student.models.Registration.objects.get",
"django.contrib.sites.models.Site.objects.get",
"django.core.management.base.CommandError",
"django.contrib.auth.models.User.objects.get",
"common.djangoapps.student.models.UserAttribute.set_user_attribute"
] | [((1773, 1834), 'django.core.management.base.CommandError', 'CommandError', (['"""You must provide user ids or activation keys."""'], {}), "('You must provide user ids or activation keys.')\n", (1785, 1834), False, 'from django.core.management.base import BaseCommand, CommandError\n'), ((1861, 1904), 'django.contrib.sites.models.Site.objects.get', 'Site.objects.get', ([], {'domain__exact': 'site_domain'}), '(domain__exact=site_domain)\n', (1877, 1904), False, 'from django.contrib.sites.models import Site\n'), ((2271, 2299), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'id': 'user_id'}), '(id=user_id)\n', (2287, 2299), False, 'from django.contrib.auth.models import User\n'), ((2319, 2374), 'common.djangoapps.student.models.UserAttribute.get_user_attribute', 'UserAttribute.get_user_attribute', (['user', 'CREATED_ON_SITE'], {}), '(user, CREATED_ON_SITE)\n', (2351, 2374), False, 'from common.djangoapps.student.models import Registration, UserAttribute\n'), ((2870, 2925), 'common.djangoapps.student.models.UserAttribute.get_user_attribute', 'UserAttribute.get_user_attribute', (['user', 'CREATED_ON_SITE'], {}), '(user, CREATED_ON_SITE)\n', (2902, 2925), False, 'from common.djangoapps.student.models import Registration, UserAttribute\n'), ((2524, 2592), 'common.djangoapps.student.models.UserAttribute.set_user_attribute', 'UserAttribute.set_user_attribute', (['user', 'CREATED_ON_SITE', 'site_domain'], {}), '(user, CREATED_ON_SITE, site_domain)\n', (2556, 2592), False, 'from common.djangoapps.student.models import Registration, UserAttribute\n'), ((2801, 2845), 'common.djangoapps.student.models.Registration.objects.get', 'Registration.objects.get', ([], {'activation_key': 'key'}), '(activation_key=key)\n', (2825, 2845), False, 'from common.djangoapps.student.models import Registration, UserAttribute\n'), ((3075, 3143), 'common.djangoapps.student.models.UserAttribute.set_user_attribute', 'UserAttribute.set_user_attribute', (['user', 'CREATED_ON_SITE', 'site_domain'], {}), '(user, CREATED_ON_SITE, site_domain)\n', (3107, 3143), False, 'from common.djangoapps.student.models import Registration, UserAttribute\n'), ((2120, 2145), 'six.moves.input', 'six.moves.input', (['question'], {}), '(question)\n', (2135, 2145), False, 'import six\n')] |
#!/usr/bin/env python
from snapassist.webapp import app
app.run(debug=True)
| [
"snapassist.webapp.app.run"
] | [((58, 77), 'snapassist.webapp.app.run', 'app.run', ([], {'debug': '(True)'}), '(debug=True)\n', (65, 77), False, 'from snapassist.webapp import app\n')] |
from vagrant_ansible_provisioner.cli import Cli
def test_cli_missing_command():
assert Cli.from_args([]) == 1
| [
"vagrant_ansible_provisioner.cli.Cli.from_args"
] | [((93, 110), 'vagrant_ansible_provisioner.cli.Cli.from_args', 'Cli.from_args', (['[]'], {}), '([])\n', (106, 110), False, 'from vagrant_ansible_provisioner.cli import Cli\n')] |
#!/usr/bin/env python3
import argparse
import base64
import greenstalk
import json
import os
def do_save(args):
client = greenstalk.Client(host=args.host, port=args.port, use="results",
watch=["results"])
client.use("results")
while True:
job = client.reserve()
current = json.loads(job.body)
name = current['name']
#data = current['data']
path = os.path.join(args.o, name)
print(path)
#with open(path, 'wb') as fd:
            #fd.write(base64.b64decode(data))
client.delete(job)
if __name__ == '__main__':
argp = argparse.ArgumentParser()
argp.add_argument(
"--host",
help="Result pipe host")
argp.add_argument(
"--port",
help="Result pipe port")
argp.add_argument(
"--pipe",
help="Result pipe name")
argp.add_argument(
"-o",
help="Output directory")
args = argp.parse_args()
do_save(args)
| [
"greenstalk.Client",
"os.path.join",
"json.loads",
"argparse.ArgumentParser"
] | [((128, 216), 'greenstalk.Client', 'greenstalk.Client', ([], {'host': 'args.host', 'port': 'args.port', 'use': '"""results"""', 'watch': "['results']"}), "(host=args.host, port=args.port, use='results', watch=[\n 'results'])\n", (145, 216), False, 'import greenstalk\n'), ((614, 639), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (637, 639), False, 'import argparse\n'), ((316, 336), 'json.loads', 'json.loads', (['job.body'], {}), '(job.body)\n', (326, 336), False, 'import json\n'), ((416, 442), 'os.path.join', 'os.path.join', (['args.o', 'name'], {}), '(args.o, name)\n', (428, 442), False, 'import os\n')] |
from nesta.core.luigihacks.mysqldb import MySqlTarget
from nesta.core.luigihacks.autobatch import AutoBatchTask
from nesta.core.orms.orm_utils import setup_es, get_es_ids, get_config
from nesta.packages.misc_utils.batches import split_batches, put_s3_batch
from abc import abstractmethod
import luigi
import logging
import functools
class ElasticsearchTask(AutoBatchTask):
'''Note: :obj:`done_ids` must be overridden with a function
return all document ids which do not require processing. If
you want to avoid writing that function see
:obj:`LazyElasticsearchTask`.
Args:
routine_id (str): Label for this routine.
db_config_path (str): Database config path.
endpoint (str): AWS domain name of the ES endpoint.
dataset (str): Name of the ES dataset.
entity_type (str): Entity type, for :obj:`ElasticsearchPlus`.
kwargs (dict): Any extra parameters to pass to the batchables.
index (str): Override the elasticsearch config with this index.
process_batch_size (int): Number of documents per batch.
intermediate_bucket (str): S3 bucket where batch chunks are stored.
sql_config_filename (str): SQL config path/filename in the batch task.
'''
routine_id = luigi.Parameter()
db_config_path = luigi.Parameter('mysqldb.config')
endpoint = luigi.Parameter()
dataset = luigi.Parameter()
entity_type = luigi.Parameter()
kwargs = luigi.DictParameter(default={})
index = luigi.Parameter(default=None)
process_batch_size = luigi.IntParameter(default=5000)
intermediate_bucket = luigi.Parameter('nesta-production'
'-intermediate')
sql_config_filename = luigi.Parameter('mysqldb.config')
@property
@functools.lru_cache()
def _done_ids(self):
return self.done_ids()
@abstractmethod
def done_ids(self):
'''All document ids which do not require processing. If
you want to avoid writing that function see
:obj:`LazyElasticsearchTask`.
Returns:
done_ids (set): A set of document ids, not to be processed.
'''
pass
def output(self):
'''Points to the output database engine'''
_id = self.routine_id
db_config = get_config(self.db_config_path, "mysqldb")
db_config["database"] = ('dev' if self.test
else 'production')
db_config["table"] = f"{_id} <dummy>" # Fake table
update_id = f"{_id}_ElasticsearchTask"
return MySqlTarget(update_id=update_id, **db_config)
def prepare(self):
'''Chunk up elasticsearch data, and submit batch
jobs over those chunks.'''
if self.test:
self.process_batch_size = 1000
logging.warning("Batch size restricted to "
f"{self.process_batch_size}"
" while in test mode")
# Setup elasticsearch and extract all ids
es, es_config = setup_es(endpoint=self.endpoint,
dataset=self.dataset,
production=not self.test,
drop_and_recreate=False,
increment_version=False)
ids = get_es_ids(es, es_config, size=10000) # All ids in this index
ids = ids - self._done_ids # Don't repeat done ids
# Override the default index if specified
es_config['index'] = (self.index if self.index is not None
else es_config['index'])
# Generate the job params
job_params = []
batches = split_batches(ids, self.process_batch_size)
for count, batch in enumerate(batches, 1):
done = False # Already taken care of with _done_ids
# write batch of ids to s3
batch_file = ''
if not done:
batch_file = put_s3_batch(batch,
self.intermediate_bucket,
self.routine_id)
params = {
"batch_file": batch_file,
"config": self.sql_config_filename,
"bucket": self.intermediate_bucket,
"done": done,
"count": len(ids),
'outinfo': es_config['host'],
'out_port': es_config['port'],
'index': es_config['index'],
'out_type': es_config['type'],
'aws_auth_region': es_config['region'],
'test': self.test,
'routine_id': self.routine_id,
'entity_type': self.entity_type,
**self.kwargs
}
job_params.append(params)
# Test mode
if self.test and count > 1:
logging.warning("Breaking after 2 batches "
"while in test mode.")
logging.warning(job_params)
break
# Done
logging.info("Batch preparation completed, "
f"with {len(job_params)} batches")
return job_params
def combine(self, job_params):
'''Touch the checkpoint'''
self.output().touch()
class LazyElasticsearchTask(ElasticsearchTask):
'''Same as ElasticsearchTask, except no done_ids'''
def done_ids(self):
return set()
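# Illustrative sketch (an addition for clarity, not part of the original module):
# a concrete pipeline task could subclass LazyElasticsearchTask and supply the
# luigi parameters documented above, e.g. with hypothetical values such as
#
#   class ReindexTask(LazyElasticsearchTask):
#       pass
#
#   task = ReindexTask(routine_id='reindex_2020', endpoint='my-es-domain',
#                      dataset='arxiv', entity_type='article')
#
# (any extra parameters required by AutoBatchTask are omitted here).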
| [
"nesta.core.orms.orm_utils.get_es_ids",
"luigi.IntParameter",
"luigi.DictParameter",
"nesta.core.luigihacks.mysqldb.MySqlTarget",
"nesta.core.orms.orm_utils.setup_es",
"logging.warning",
"nesta.packages.misc_utils.batches.put_s3_batch",
"nesta.packages.misc_utils.batches.split_batches",
"nesta.core.orms.orm_utils.get_config",
"functools.lru_cache",
"luigi.Parameter"
] | [((1263, 1280), 'luigi.Parameter', 'luigi.Parameter', ([], {}), '()\n', (1278, 1280), False, 'import luigi\n'), ((1302, 1335), 'luigi.Parameter', 'luigi.Parameter', (['"""mysqldb.config"""'], {}), "('mysqldb.config')\n", (1317, 1335), False, 'import luigi\n'), ((1351, 1368), 'luigi.Parameter', 'luigi.Parameter', ([], {}), '()\n', (1366, 1368), False, 'import luigi\n'), ((1383, 1400), 'luigi.Parameter', 'luigi.Parameter', ([], {}), '()\n', (1398, 1400), False, 'import luigi\n'), ((1419, 1436), 'luigi.Parameter', 'luigi.Parameter', ([], {}), '()\n', (1434, 1436), False, 'import luigi\n'), ((1450, 1481), 'luigi.DictParameter', 'luigi.DictParameter', ([], {'default': '{}'}), '(default={})\n', (1469, 1481), False, 'import luigi\n'), ((1494, 1523), 'luigi.Parameter', 'luigi.Parameter', ([], {'default': 'None'}), '(default=None)\n', (1509, 1523), False, 'import luigi\n'), ((1549, 1581), 'luigi.IntParameter', 'luigi.IntParameter', ([], {'default': '(5000)'}), '(default=5000)\n', (1567, 1581), False, 'import luigi\n'), ((1608, 1656), 'luigi.Parameter', 'luigi.Parameter', (['"""nesta-production-intermediate"""'], {}), "('nesta-production-intermediate')\n", (1623, 1656), False, 'import luigi\n'), ((1728, 1761), 'luigi.Parameter', 'luigi.Parameter', (['"""mysqldb.config"""'], {}), "('mysqldb.config')\n", (1743, 1761), False, 'import luigi\n'), ((1782, 1803), 'functools.lru_cache', 'functools.lru_cache', ([], {}), '()\n', (1801, 1803), False, 'import functools\n'), ((2298, 2340), 'nesta.core.orms.orm_utils.get_config', 'get_config', (['self.db_config_path', '"""mysqldb"""'], {}), "(self.db_config_path, 'mysqldb')\n", (2308, 2340), False, 'from nesta.core.orms.orm_utils import setup_es, get_es_ids, get_config\n'), ((2567, 2612), 'nesta.core.luigihacks.mysqldb.MySqlTarget', 'MySqlTarget', ([], {'update_id': 'update_id'}), '(update_id=update_id, **db_config)\n', (2578, 2612), False, 'from nesta.core.luigihacks.mysqldb import MySqlTarget\n'), ((3033, 3168), 'nesta.core.orms.orm_utils.setup_es', 'setup_es', ([], {'endpoint': 'self.endpoint', 'dataset': 'self.dataset', 'production': '(not self.test)', 'drop_and_recreate': '(False)', 'increment_version': '(False)'}), '(endpoint=self.endpoint, dataset=self.dataset, production=not self.\n test, drop_and_recreate=False, increment_version=False)\n', (3041, 3168), False, 'from nesta.core.orms.orm_utils import setup_es, get_es_ids, get_config\n'), ((3310, 3347), 'nesta.core.orms.orm_utils.get_es_ids', 'get_es_ids', (['es', 'es_config'], {'size': '(10000)'}), '(es, es_config, size=10000)\n', (3320, 3347), False, 'from nesta.core.orms.orm_utils import setup_es, get_es_ids, get_config\n'), ((3683, 3726), 'nesta.packages.misc_utils.batches.split_batches', 'split_batches', (['ids', 'self.process_batch_size'], {}), '(ids, self.process_batch_size)\n', (3696, 3726), False, 'from nesta.packages.misc_utils.batches import split_batches, put_s3_batch\n'), ((2806, 2900), 'logging.warning', 'logging.warning', (['f"""Batch size restricted to {self.process_batch_size} while in test mode"""'], {}), "(\n f'Batch size restricted to {self.process_batch_size} while in test mode')\n", (2821, 2900), False, 'import logging\n'), ((3964, 4026), 'nesta.packages.misc_utils.batches.put_s3_batch', 'put_s3_batch', (['batch', 'self.intermediate_bucket', 'self.routine_id'], {}), '(batch, self.intermediate_bucket, self.routine_id)\n', (3976, 4026), False, 'from nesta.packages.misc_utils.batches import split_batches, put_s3_batch\n'), ((4879, 4942), 'logging.warning', 'logging.warning', 
(['"""Breaking after 2 batches while in test mode."""'], {}), "('Breaking after 2 batches while in test mode.')\n", (4894, 4942), False, 'import logging\n'), ((4994, 5021), 'logging.warning', 'logging.warning', (['job_params'], {}), '(job_params)\n', (5009, 5021), False, 'import logging\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-30 04:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cq', '0003_event_aggregate_type'),
]
operations = [
migrations.AddField(
model_name='event',
name='revision',
field=models.PositiveIntegerField(default=1),
),
]
| [
"django.db.models.PositiveIntegerField"
] | [((395, 433), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(1)'}), '(default=1)\n', (422, 433), False, 'from django.db import migrations, models\n')] |
# IMPORTATION STANDARD
import json
import logging
# IMPORTATION THIRD PARTY
import grpc
from google.protobuf.empty_pb2 import Empty
# from google.protobuf.wrappers_pb2 import *
# IMPORTATION INTERNAL
from degiro_connector.quotecast.models.quotecast_pb2 import Chart, Quotecast
from degiro_connector.quotecast.models.quotecast_relay_pb2 import Config
from degiro_connector.quotecast.models.quotecast_relay_pb2_grpc import (
QuotecastRelayStub,
)
# SETUP LOGS
logging.basicConfig(level=logging.DEBUG)
# SETUP CONFIG DICT
with open("config/config.json") as config_file:
config_dict = json.load(config_file)
# SETUP CREDENTIALS
user_token = config_dict.get("user_token")
# SETUP RELAY
relay_channel = grpc.insecure_channel("localhost:50051")
relay_stub = QuotecastRelayStub(channel=relay_channel)
# RESPONSES DICT
responses = dict()
# CALL : set_config
config = Config()
config.user_token = user_token
config.auto_connect = True
responses["set_config"] = relay_stub.set_config(request=config)
# CALL : connect
responses["connect"] = relay_stub.connect(request=Empty())
# CALL : subscribe
request = Quotecast.Request()
request.subscriptions["AAPL.BATS,E"].extend(
[
"LastDate",
"LastTime",
"LastPrice",
"LastVolume",
"AskPrice",
"BidPrice",
]
)
responses["subscribe"] = relay_stub.subscribe(request=request)
# CALL : fetch_data
responses["fetch_data"] = relay_stub.fetch_data(request=Empty())
# CALL : get_chart
request = Chart.Request()
request.requestid = "1"
request.resolution = Chart.Interval.PT1M
request.culture = "fr-FR"
request.series.append("issueid:360148977")
request.series.append("price:issueid:360148977")
request.series.append("ohlc:issueid:360148977")
request.series.append("volume:issueid:360148977")
request.period = Chart.Interval.P1D
request.tz = "Europe/Paris"
responses["get_chart"] = relay_stub.get_chart(
request=request,
)
# DISPLAY RESPONSES DICT
print(responses)
| [
"logging.basicConfig",
"degiro_connector.quotecast.models.quotecast_relay_pb2_grpc.QuotecastRelayStub",
"grpc.insecure_channel",
"degiro_connector.quotecast.models.quotecast_pb2.Chart.Request",
"google.protobuf.empty_pb2.Empty",
"degiro_connector.quotecast.models.quotecast_relay_pb2.Config",
"json.load",
"degiro_connector.quotecast.models.quotecast_pb2.Quotecast.Request"
] | [((466, 506), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (485, 506), False, 'import logging\n'), ((712, 752), 'grpc.insecure_channel', 'grpc.insecure_channel', (['"""localhost:50051"""'], {}), "('localhost:50051')\n", (733, 752), False, 'import grpc\n'), ((767, 808), 'degiro_connector.quotecast.models.quotecast_relay_pb2_grpc.QuotecastRelayStub', 'QuotecastRelayStub', ([], {'channel': 'relay_channel'}), '(channel=relay_channel)\n', (785, 808), False, 'from degiro_connector.quotecast.models.quotecast_relay_pb2_grpc import QuotecastRelayStub\n'), ((876, 884), 'degiro_connector.quotecast.models.quotecast_relay_pb2.Config', 'Config', ([], {}), '()\n', (882, 884), False, 'from degiro_connector.quotecast.models.quotecast_relay_pb2 import Config\n'), ((1115, 1134), 'degiro_connector.quotecast.models.quotecast_pb2.Quotecast.Request', 'Quotecast.Request', ([], {}), '()\n', (1132, 1134), False, 'from degiro_connector.quotecast.models.quotecast_pb2 import Chart, Quotecast\n'), ((1498, 1513), 'degiro_connector.quotecast.models.quotecast_pb2.Chart.Request', 'Chart.Request', ([], {}), '()\n', (1511, 1513), False, 'from degiro_connector.quotecast.models.quotecast_pb2 import Chart, Quotecast\n'), ((594, 616), 'json.load', 'json.load', (['config_file'], {}), '(config_file)\n', (603, 616), False, 'import json\n'), ((1076, 1083), 'google.protobuf.empty_pb2.Empty', 'Empty', ([], {}), '()\n', (1081, 1083), False, 'from google.protobuf.empty_pb2 import Empty\n'), ((1458, 1465), 'google.protobuf.empty_pb2.Empty', 'Empty', ([], {}), '()\n', (1463, 1465), False, 'from google.protobuf.empty_pb2 import Empty\n')] |
"""Unit tests XACML Context handler. This PIP presents a SAML interface for its
Policy Enforcement Point and has a SAML interface to query a remote attribute
authority for attributes
"""
__author__ = "<NAME>"
__date__ = "13/08/10"
__copyright__ = "(C) 2010 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "<EMAIL>"
__revision__ = '$Id$'
import logging
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
from os import path
import unittest
from configparser import SafeConfigParser
from ndg.security.server.test.base import BaseTestCase
from ndg.security.server.xacml.ctx_handler.saml_ctx_handler import SamlCtxHandler
class SamlCtxHandlerTestCase(BaseTestCase):
"""Test XACML Context handler. This PIP presents a SAML interface for its
Policy Enforcement Point and has a SAML interface to query a remote
attribute authority for attributes
"""
THIS_DIR = path.abspath(path.dirname(__file__))
CONFIG_FILENAME = 'saml_ctx_handler.cfg'
CONFIG_FILEPATH = path.join(THIS_DIR, CONFIG_FILENAME)
def test01Init(self):
handler = SamlCtxHandler()
self.assertTrue(handler)
def test02InitFromConfigFile(self):
# Initialise from settings in a config file
handler = SamlCtxHandler.fromConfig(self.__class__.CONFIG_FILEPATH)
self.assertTrue(handler)
self.assertTrue(handler.policyFilePath)
def test03InitFromKeywords(self):
# Initialise from a dictionary
# Populate by reading from the config file
cfg = SafeConfigParser(defaults={'here': self.__class__.THIS_DIR})
cfg.optionxform = str
cfg.read(self.__class__.CONFIG_FILEPATH)
kw = dict(cfg.items('DEFAULT'))
handler = SamlCtxHandler.fromKeywords(**kw)
self.assertTrue(handler)
self.assertTrue(handler.pip.attributeQuery)
self.assertTrue(handler.policyFilePath)
self.assertTrue(handler.issuerName)
self.assertTrue(handler.issuerFormat)
self.assertTrue(handler.assertionLifetime)
self.assertTrue(handler.xacmlExtFunc)
if __name__ == "__main__":
unittest.main() | [
"logging.basicConfig",
"logging.getLogger",
"ndg.security.server.xacml.ctx_handler.saml_ctx_handler.SamlCtxHandler",
"ndg.security.server.xacml.ctx_handler.saml_ctx_handler.SamlCtxHandler.fromKeywords",
"os.path.join",
"os.path.dirname",
"ndg.security.server.xacml.ctx_handler.saml_ctx_handler.SamlCtxHandler.fromConfig",
"unittest.main",
"configparser.SafeConfigParser"
] | [((425, 465), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (444, 465), False, 'import logging\n'), ((472, 499), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (489, 499), False, 'import logging\n'), ((1081, 1117), 'os.path.join', 'path.join', (['THIS_DIR', 'CONFIG_FILENAME'], {}), '(THIS_DIR, CONFIG_FILENAME)\n', (1090, 1117), False, 'from os import path\n'), ((2249, 2264), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2262, 2264), False, 'import unittest\n'), ((990, 1012), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (1002, 1012), False, 'from os import path\n'), ((1171, 1187), 'ndg.security.server.xacml.ctx_handler.saml_ctx_handler.SamlCtxHandler', 'SamlCtxHandler', ([], {}), '()\n', (1185, 1187), False, 'from ndg.security.server.xacml.ctx_handler.saml_ctx_handler import SamlCtxHandler\n'), ((1340, 1397), 'ndg.security.server.xacml.ctx_handler.saml_ctx_handler.SamlCtxHandler.fromConfig', 'SamlCtxHandler.fromConfig', (['self.__class__.CONFIG_FILEPATH'], {}), '(self.__class__.CONFIG_FILEPATH)\n', (1365, 1397), False, 'from ndg.security.server.xacml.ctx_handler.saml_ctx_handler import SamlCtxHandler\n'), ((1639, 1699), 'configparser.SafeConfigParser', 'SafeConfigParser', ([], {'defaults': "{'here': self.__class__.THIS_DIR}"}), "(defaults={'here': self.__class__.THIS_DIR})\n", (1655, 1699), False, 'from configparser import SafeConfigParser\n'), ((1846, 1879), 'ndg.security.server.xacml.ctx_handler.saml_ctx_handler.SamlCtxHandler.fromKeywords', 'SamlCtxHandler.fromKeywords', ([], {}), '(**kw)\n', (1873, 1879), False, 'from ndg.security.server.xacml.ctx_handler.saml_ctx_handler import SamlCtxHandler\n')] |
import torch
import torch.nn.functional as F
from math import exp
import numpy as np
# https://github.com/jorge-pessoa/pytorch-msssim
# https://github.com/Po-Hsun-Su/pytorch-ssim
def gaussian(window_size, sigma):
gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])
# gauss.requires_grad = True
return gauss/gauss.sum()
def create_window(window_size):
_1D_window = gaussian(window_size, 1.5).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = _2D_window.expand(1, 1, window_size, window_size).contiguous()
return window
def ssim(img1, img2, window_size=11, size_average=True, full=False, val_range=None):
# Value range can be different from 255. Other common ranges are 1 (sigmoid) and 2 (tanh).
if val_range is None:
if torch.max(img1) > 128:
max_val = 255
else:
max_val = 1
if torch.min(img1) < -0.5:
min_val = -1
else:
min_val = 0
L = max_val - min_val
else:
L = val_range
padd = 0
(_, channels, height, width) = img1.size()
real_size = min(window_size, height, width)
window = create_window(real_size)
ret_channels = []
cs_channels = []
for ch in range(channels): # loop over channels, then average
img1_ch = torch.unsqueeze(img1[:, ch, :, :], 1)
img2_ch = torch.unsqueeze(img2[:, ch, :, :], 1)
mu1 = F.conv2d(img1_ch, window, padding=padd)
mu2 = F.conv2d(img2_ch, window, padding=padd)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = F.conv2d(img1_ch * img1_ch, window, padding=padd) - mu1_sq
sigma2_sq = F.conv2d(img2_ch * img2_ch, window, padding=padd) - mu2_sq
sigma12 = F.conv2d(img1_ch * img2_ch, window, padding=padd) - mu1_mu2
C1 = (0.01 * L) ** 2
C2 = (0.03 * L) ** 2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
v1 = 2.0 * sigma12 + C2
v2 = sigma1_sq + sigma2_sq + C2
cs = torch.mean(v1 / v2) # contrast sensitivity
if size_average:
ret = ssim_map.mean()
else:
ret = ssim_map.mean(1).mean(1).mean(1)
cs_channels.append(cs)
ret_channels.append(ret)
cs_mean = torch.mean(torch.stack(cs_channels), dim=-1)
ret_mean = torch.mean(torch.stack(ret_channels), dim=-1)
if full:
return ret_mean, cs_mean
return ret_mean
def msssim(img1, img2, window_size=11, size_average=True, val_range=None, normalize=False):
device = img1.device
weights = torch.FloatTensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333]).to(device)
levels = weights.size()[0]
mssim = []
mcs = []
for _ in range(levels):
sim, cs = ssim(img1, img2, window_size=window_size, size_average=size_average, full=True, val_range=val_range)
mssim.append(sim)
mcs.append(cs)
img1 = F.avg_pool2d(img1, (2, 2))
img2 = F.avg_pool2d(img2, (2, 2))
mssim = torch.stack(mssim)
mcs = torch.stack(mcs)
if normalize:
# Normalize (to avoid NaNs)
mssim = (mssim + 1) / 2
mcs = (mcs + 1) / 2
pow1 = mcs ** weights
pow2 = mssim ** weights
# output = torch.prod(pow1 * pow2)
# From Matlab implementation https://ece.uwaterloo.ca/~z70wang/research/iwssim/
output = torch.prod(pow1[:-1] * pow2[-1])
return output
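# Minimal usage sketch (an addition for illustration, not part of the original
# module). Assumes NCHW float tensors in [0, 1]; identical inputs should score
# close to 1 for both metrics.
if __name__ == '__main__':
    x = torch.rand(1, 3, 64, 64)
    y = x.clone()
    print('ssim  :', ssim(x, y).item())
    print('msssim:', msssim(x, y).item())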
| [
"torch.nn.functional.conv2d",
"torch.mean",
"torch.unsqueeze",
"torch.stack",
"torch.max",
"torch.nn.functional.avg_pool2d",
"torch.min",
"torch.prod",
"torch.FloatTensor"
] | [((3158, 3176), 'torch.stack', 'torch.stack', (['mssim'], {}), '(mssim)\n', (3169, 3176), False, 'import torch\n'), ((3187, 3203), 'torch.stack', 'torch.stack', (['mcs'], {}), '(mcs)\n', (3198, 3203), False, 'import torch\n'), ((3510, 3542), 'torch.prod', 'torch.prod', (['(pow1[:-1] * pow2[-1])'], {}), '(pow1[:-1] * pow2[-1])\n', (3520, 3542), False, 'import torch\n'), ((1390, 1427), 'torch.unsqueeze', 'torch.unsqueeze', (['img1[:, ch, :, :]', '(1)'], {}), '(img1[:, ch, :, :], 1)\n', (1405, 1427), False, 'import torch\n'), ((1446, 1483), 'torch.unsqueeze', 'torch.unsqueeze', (['img2[:, ch, :, :]', '(1)'], {}), '(img2[:, ch, :, :], 1)\n', (1461, 1483), False, 'import torch\n'), ((1498, 1537), 'torch.nn.functional.conv2d', 'F.conv2d', (['img1_ch', 'window'], {'padding': 'padd'}), '(img1_ch, window, padding=padd)\n', (1506, 1537), True, 'import torch.nn.functional as F\n'), ((1552, 1591), 'torch.nn.functional.conv2d', 'F.conv2d', (['img2_ch', 'window'], {'padding': 'padd'}), '(img2_ch, window, padding=padd)\n', (1560, 1591), True, 'import torch.nn.functional as F\n'), ((2179, 2198), 'torch.mean', 'torch.mean', (['(v1 / v2)'], {}), '(v1 / v2)\n', (2189, 2198), False, 'import torch\n'), ((2439, 2463), 'torch.stack', 'torch.stack', (['cs_channels'], {}), '(cs_channels)\n', (2450, 2463), False, 'import torch\n'), ((2499, 2524), 'torch.stack', 'torch.stack', (['ret_channels'], {}), '(ret_channels)\n', (2510, 2524), False, 'import torch\n'), ((3076, 3102), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['img1', '(2, 2)'], {}), '(img1, (2, 2))\n', (3088, 3102), True, 'import torch.nn.functional as F\n'), ((3118, 3144), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['img2', '(2, 2)'], {}), '(img2, (2, 2))\n', (3130, 3144), True, 'import torch.nn.functional as F\n'), ((866, 881), 'torch.max', 'torch.max', (['img1'], {}), '(img1)\n', (875, 881), False, 'import torch\n'), ((965, 980), 'torch.min', 'torch.min', (['img1'], {}), '(img1)\n', (974, 980), False, 'import torch\n'), ((1698, 1747), 'torch.nn.functional.conv2d', 'F.conv2d', (['(img1_ch * img1_ch)', 'window'], {'padding': 'padd'}), '(img1_ch * img1_ch, window, padding=padd)\n', (1706, 1747), True, 'import torch.nn.functional as F\n'), ((1777, 1826), 'torch.nn.functional.conv2d', 'F.conv2d', (['(img2_ch * img2_ch)', 'window'], {'padding': 'padd'}), '(img2_ch * img2_ch, window, padding=padd)\n', (1785, 1826), True, 'import torch.nn.functional as F\n'), ((1854, 1903), 'torch.nn.functional.conv2d', 'F.conv2d', (['(img1_ch * img2_ch)', 'window'], {'padding': 'padd'}), '(img1_ch * img2_ch, window, padding=padd)\n', (1862, 1903), True, 'import torch.nn.functional as F\n'), ((2734, 2793), 'torch.FloatTensor', 'torch.FloatTensor', (['[0.0448, 0.2856, 0.3001, 0.2363, 0.1333]'], {}), '([0.0448, 0.2856, 0.3001, 0.2363, 0.1333])\n', (2751, 2793), False, 'import torch\n')] |
"""
Utilizes pretrained MTCNN model.
References
----------
1. Weights and implementation:
https://github.com/ipazc/mtcnn
2. Original paper:
https://arxiv.org/ftp/arxiv/papers/1604/1604.02878.pdf
"""
from typing import List
import cv2
import numpy as np
from mtcnn import MTCNN
from application.models.detectors.DetectorInterface import DetectorInterface
class MTCNNDetector(DetectorInterface):
def __init__(self):
self.mtcnn = MTCNN()
self.name = "mtcnn"
def preprocess_frame(self, frame: np.ndarray) -> np.ndarray:
"""Overrides DetectorInterface.preprocess_frame()"""
return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
def detect_faces(self, frame: np.ndarray) -> List[List[int]]:
"""Overrides DetectorInterface.detect_faces()"""
res = self.mtcnn.detect_faces(frame)
boxes = [face["box"] for face in res]
return boxes
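# Illustrative usage sketch (assumptions: the `mtcnn` package can load its
# pretrained weights and "sample.jpg" is a hypothetical image path readable by
# OpenCV):
#   detector = MTCNNDetector()
#   frame = cv2.imread("sample.jpg")
#   boxes = detector.detect_faces(detector.preprocess_frame(frame))
# Each returned box is an [x, y, width, height] list as produced by MTCNN.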
| [
"cv2.cvtColor",
"mtcnn.MTCNN"
] | [((469, 476), 'mtcnn.MTCNN', 'MTCNN', ([], {}), '()\n', (474, 476), False, 'from mtcnn import MTCNN\n'), ((660, 698), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (672, 698), False, 'import cv2\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from ansible.module_utils.common.process import get_bin_path
def test_get_bin_path(mocker):
path = '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'
mocker.patch.dict('os.environ', {'PATH': path})
mocker.patch('os.pathsep', ':')
mocker.patch('os.path.isdir', return_value=False)
mocker.patch('ansible.module_utils.common.process.is_executable', return_value=True)
# pytest-mock 2.0.0 will throw when os.path.exists is messed with
# and then another method is patched afterwards. Likely
# something in the pytest-mock chain uses os.path.exists internally, and
# since pytest-mock prohibits context-specific patching, there's not a
# good solution. For now, just patch os.path.exists last.
mocker.patch('os.path.exists', side_effect=[False, True])
assert '/usr/local/bin/notacommand' == get_bin_path('notacommand')
def test_get_path_path_raise_valueerror(mocker):
mocker.patch.dict('os.environ', {'PATH': ''})
mocker.patch('os.path.exists', return_value=False)
mocker.patch('os.path.isdir', return_value=False)
mocker.patch('ansible.module_utils.common.process.is_executable', return_value=True)
with pytest.raises(ValueError, match='Failed to find required executable notacommand'):
get_bin_path('notacommand')
| [
"pytest.raises",
"ansible.module_utils.common.process.get_bin_path"
] | [((1108, 1135), 'ansible.module_utils.common.process.get_bin_path', 'get_bin_path', (['"""notacommand"""'], {}), "('notacommand')\n", (1120, 1135), False, 'from ansible.module_utils.common.process import get_bin_path\n'), ((1446, 1532), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Failed to find required executable notacommand"""'}), "(ValueError, match=\n 'Failed to find required executable notacommand')\n", (1459, 1532), False, 'import pytest\n'), ((1537, 1564), 'ansible.module_utils.common.process.get_bin_path', 'get_bin_path', (['"""notacommand"""'], {}), "('notacommand')\n", (1549, 1564), False, 'from ansible.module_utils.common.process import get_bin_path\n')] |
import os
import hashlib
import sys
from functools import partial
def hashItOut(filePath):
    # Read the file in 1 KB blocks and return its uppercase MD5 hex digest.
    hashObj = hashlib.md5()
    done = False
    with open(filePath, 'rb') as fileObj:
        while not done:
            block = fileObj.read(1024)
            if block:
                hashObj.update(block)
            else:
                hashValue = hashObj.hexdigest().upper()
                done = True
    return hashValue
def main():
# get file to hash and verifies it exists
inputFile = input("Enter the path and file you wish to hash")
# hash type MD5
hashType = None
hashValueFinal = None
done = False
inputFile = inputFile.lstrip().strip()
hashValueFinal = hashItOut(inputFile)
print("File >> ", inputFile, " > hash value: ", hashValueFinal)
print("Creating image file")
outputFile = input("Enter the name of the ouput file: ")
print("writing bytes to new file")
# with open(inputFile, "rb") as f1, open (outputFile, "wb") as f2:
#
# size= f1.__sizeof__()
# print("Bytes in inputfile >> ", size)
#
# bytesLeft = size
# print("Both files opened in binary form")
# for _bytes in iter(partial(f1.read, 1024), ''):
#
# #print("Bytes Left >> ",bytesLeft)
# print("copying bytes")
# f2.write(_bytes)
# print(f2.__sizeof__())
# # bytesLeft -= 1024
# f2.close()
with open(inputFile, "rb") as f1:
with open(outputFile, "wb") as f2:
while True:
b = f1.read(1024)
if b:
f2.write(b)
else:
break
f1.close()
f2.close()
secondHash = hashItOut(outputFile)
print("First file >> ",inputFile, " MD5 >> ",hashValueFinal)
print("Second file >> ",outputFile," MD5 >> ",secondHash)
if hashValueFinal == secondHash:
print("Hash values match")
else:
print("Hash values dont match")
if __name__ == '__main__':
main() | [
"hashlib.md5"
] | [((105, 118), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (116, 118), False, 'import hashlib\n')] |
import time, sys
import numpy as np
import matplotlib.pyplot as plt
sys.path.append('../../')
from py2Periodic.physics import twoDimTurbulence
from numpy import pi
# Instantiate a model for two-dimensional turbulence.
turb = twoDimTurbulence.model(
nx = 128,
Lx = 2.0*pi,
dt = 1e-2,
nThreads = 1,
timeStepper = 'RK4',
visc = 1e-4,
viscOrder = 2.0,
)
turb.describe_model()
# Set an initial random vorticity field.
q0 = np.random.standard_normal((turb.ny, turb.nx))
turb.set_q(q0)
# Step the model forward in time
turb.step_nSteps(nSteps=1e4, dnLog=1e3)
# Update variables like vorticity, velocity, etc
turb.update_state_variables()
print("The root-mean-square vorticity is " + \
"{:0.3f}".format(np.sqrt((turb.q**2.0).mean())))
# Plot the result
fig = plt.figure('vorticity'); plt.clf()
plt.pcolormesh(turb.x, turb.y, turb.q, cmap='YlGnBu_r')
plt.axis('square')
plt.xlabel('$x$')
plt.ylabel('$y$')
print("\nClose the figure to end the program.")
plt.show()
| [
"numpy.random.standard_normal",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.pcolormesh",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axis",
"py2Periodic.physics.twoDimTurbulence.model",
"sys.path.append",
"matplotlib.pyplot.show"
] | [((69, 94), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (84, 94), False, 'import time, sys\n'), ((227, 342), 'py2Periodic.physics.twoDimTurbulence.model', 'twoDimTurbulence.model', ([], {'nx': '(128)', 'Lx': '(2.0 * pi)', 'dt': '(0.01)', 'nThreads': '(1)', 'timeStepper': '"""RK4"""', 'visc': '(0.0001)', 'viscOrder': '(2.0)'}), "(nx=128, Lx=2.0 * pi, dt=0.01, nThreads=1,\n timeStepper='RK4', visc=0.0001, viscOrder=2.0)\n", (249, 342), False, 'from py2Periodic.physics import twoDimTurbulence\n'), ((455, 500), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(turb.ny, turb.nx)'], {}), '((turb.ny, turb.nx))\n', (480, 500), True, 'import numpy as np\n'), ((800, 823), 'matplotlib.pyplot.figure', 'plt.figure', (['"""vorticity"""'], {}), "('vorticity')\n", (810, 823), True, 'import matplotlib.pyplot as plt\n'), ((825, 834), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (832, 834), True, 'import matplotlib.pyplot as plt\n'), ((836, 891), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['turb.x', 'turb.y', 'turb.q'], {'cmap': '"""YlGnBu_r"""'}), "(turb.x, turb.y, turb.q, cmap='YlGnBu_r')\n", (850, 891), True, 'import matplotlib.pyplot as plt\n'), ((892, 910), 'matplotlib.pyplot.axis', 'plt.axis', (['"""square"""'], {}), "('square')\n", (900, 910), True, 'import matplotlib.pyplot as plt\n'), ((913, 930), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$x$"""'], {}), "('$x$')\n", (923, 930), True, 'import matplotlib.pyplot as plt\n'), ((931, 948), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$y$"""'], {}), "('$y$')\n", (941, 948), True, 'import matplotlib.pyplot as plt\n'), ((998, 1008), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1006, 1008), True, 'import matplotlib.pyplot as plt\n')] |
from typing import TYPE_CHECKING, Type, TypeVar, Union
from numpy import dtype
if TYPE_CHECKING: # pragma: no cover
DTypeLike = Union[dtype, None, type, str]
else: # pragma: no cover
DTypeLike = TypeVar(
"DTypeLike",
bound=Union[dtype, None, type, str],
)
if TYPE_CHECKING: # pragma: no cover
DTypeLikeReals = Union[dtype, None, Type[bool], Type[int], Type[float], str]
else: # pragma: no cover
DTypeLikeReals = TypeVar(
"DTypeLikeReals",
bound=Union[dtype, None, Type[bool], Type[int], Type[float], str],
)
| [
"typing.TypeVar"
] | [((207, 264), 'typing.TypeVar', 'TypeVar', (['"""DTypeLike"""'], {'bound': 'Union[dtype, None, type, str]'}), "('DTypeLike', bound=Union[dtype, None, type, str])\n", (214, 264), False, 'from typing import TYPE_CHECKING, Type, TypeVar, Union\n'), ((456, 552), 'typing.TypeVar', 'TypeVar', (['"""DTypeLikeReals"""'], {'bound': 'Union[dtype, None, Type[bool], Type[int], Type[float], str]'}), "('DTypeLikeReals', bound=Union[dtype, None, Type[bool], Type[int],\n Type[float], str])\n", (463, 552), False, 'from typing import TYPE_CHECKING, Type, TypeVar, Union\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# MIT License
#
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from setuptools import setup, find_packages
from pip.req import parse_requirements
import plexchange
import os
# Solution from http://bit.ly/29Yl8VN
def resolve_requires(requirements_file):
requirements = parse_requirements("./%s" % requirements_file,
session=False)
return [str(ir.req) for ir in requirements]
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
# Solution from: http://bit.ly/2mig8RT
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
# We are still running: python setup.py sdist upload --repository=testpypi
# Twine isn't handling long_descriptions as per:
# https://github.com/pypa/twine/issues/262
setup(
name="plexchange",
version=".".join(map(str,plexchange.__version__)),
description="Exchange ActiveSync Multiplexer",
long_description=read('README.md'),
license="MIT",
author="<NAME>",
author_email="<EMAIL>",
maintainer="<NAME>",
maintainer_email="<EMAIL>",
install_requires=resolve_requires("requirements/requirements.txt"),
#extras_require = {
# 'otherthing': resolve_requires("requirements/otherthing.txt"),
#},
url="https://github.com/whardier/plexchange",
packages=find_packages(),
package_dir={'plexchange': "plexchange"},
include_package_data=True,
classifiers=[
"Development Status :: 1 - Planning",
"Environment :: Console",
"Environment :: Web Environment",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Communications :: Email",
"Topic :: Internet :: Proxy Servers",
"Topic :: Internet :: WWW/HTTP",
],
#scripts=["plexchange/bin/plexchange-cli.py"],
#entry_points={'console_scripts': [
# "plexchange = plexchange.management:run_from_command_line",
#]},
)
| [
"os.path.dirname",
"pip.req.parse_requirements",
"setuptools.find_packages"
] | [((1360, 1421), 'pip.req.parse_requirements', 'parse_requirements', (["('./%s' % requirements_file)"], {'session': '(False)'}), "('./%s' % requirements_file, session=False)\n", (1378, 1421), False, 'from pip.req import parse_requirements\n'), ((2538, 2553), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (2551, 2553), False, 'from setuptools import setup, find_packages\n'), ((1792, 1817), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1807, 1817), False, 'import os\n')] |
# coding=utf-8
# Copyright 2020 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""The Bidirectional Encoder Representations from Transformers (BERT) model."""
import json
import tensorflow.compat.v2 as tf
from tensorflow_models.official.nlp import optimization
from tensorflow_models.official.nlp.bert import bert_models
from tensorflow_models.official.nlp.bert import configs
def create_config(config_dir):
"""Load a BERT config object from directory."""
with tf.io.gfile.GFile(config_dir) as config_file:
bert_config = json.load(config_file)
return configs.BertConfig(**bert_config)
def create_feature_and_label(inputs, feature_size):
"""Creates features and labels for a BERT model."""
input_token_ids = inputs['features']
labels = inputs['labels']
num_tokens = inputs['num_tokens']
input_mask = tf.sequence_mask(num_tokens, feature_size, dtype=tf.int32)
type_id = tf.sequence_mask(num_tokens, feature_size, dtype=tf.int32)
features = [input_token_ids, input_mask, type_id]
return features, labels
def create_optimizer(initial_lr,
steps_per_epoch,
epochs,
warmup_proportion,
end_lr=0.0,
optimizer_type='adamw'):
"""Creates a BERT optimizer with learning rate schedule."""
num_train_steps = steps_per_epoch * epochs
num_warmup_steps = int(num_train_steps * warmup_proportion)
return optimization.create_optimizer(
initial_lr,
num_train_steps,
num_warmup_steps,
end_lr=end_lr,
optimizer_type=optimizer_type)
def create_model(num_classes, feature_size, bert_config):
"""Creates a BERT classifier model."""
# TODO(jereliu): Point to a locally implemented BERT for v2.
return bert_models.classifier_model(
bert_config=bert_config,
num_labels=num_classes,
max_seq_length=feature_size,
)
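# Illustrative usage sketch (hypothetical paths/values; it only calls the
# helpers defined in this module):
#   bert_config = create_config('gs://my-bucket/bert_config.json')
#   model = create_model(num_classes=2, feature_size=128, bert_config=bert_config)
#   optimizer = create_optimizer(initial_lr=2e-5, steps_per_epoch=1000, epochs=3,
#                                warmup_proportion=0.1)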
| [
"tensorflow_models.official.nlp.bert.bert_models.classifier_model",
"tensorflow.compat.v2.sequence_mask",
"tensorflow_models.official.nlp.optimization.create_optimizer",
"json.load",
"tensorflow.compat.v2.io.gfile.GFile",
"tensorflow_models.official.nlp.bert.configs.BertConfig"
] | [((1106, 1139), 'tensorflow_models.official.nlp.bert.configs.BertConfig', 'configs.BertConfig', ([], {}), '(**bert_config)\n', (1124, 1139), False, 'from tensorflow_models.official.nlp.bert import configs\n'), ((1367, 1425), 'tensorflow.compat.v2.sequence_mask', 'tf.sequence_mask', (['num_tokens', 'feature_size'], {'dtype': 'tf.int32'}), '(num_tokens, feature_size, dtype=tf.int32)\n', (1383, 1425), True, 'import tensorflow.compat.v2 as tf\n'), ((1438, 1496), 'tensorflow.compat.v2.sequence_mask', 'tf.sequence_mask', (['num_tokens', 'feature_size'], {'dtype': 'tf.int32'}), '(num_tokens, feature_size, dtype=tf.int32)\n', (1454, 1496), True, 'import tensorflow.compat.v2 as tf\n'), ((1975, 2101), 'tensorflow_models.official.nlp.optimization.create_optimizer', 'optimization.create_optimizer', (['initial_lr', 'num_train_steps', 'num_warmup_steps'], {'end_lr': 'end_lr', 'optimizer_type': 'optimizer_type'}), '(initial_lr, num_train_steps, num_warmup_steps,\n end_lr=end_lr, optimizer_type=optimizer_type)\n', (2004, 2101), False, 'from tensorflow_models.official.nlp import optimization\n'), ((2302, 2413), 'tensorflow_models.official.nlp.bert.bert_models.classifier_model', 'bert_models.classifier_model', ([], {'bert_config': 'bert_config', 'num_labels': 'num_classes', 'max_seq_length': 'feature_size'}), '(bert_config=bert_config, num_labels=\n num_classes, max_seq_length=feature_size)\n', (2330, 2413), False, 'from tensorflow_models.official.nlp.bert import bert_models\n'), ((1010, 1039), 'tensorflow.compat.v2.io.gfile.GFile', 'tf.io.gfile.GFile', (['config_dir'], {}), '(config_dir)\n', (1027, 1039), True, 'import tensorflow.compat.v2 as tf\n'), ((1074, 1096), 'json.load', 'json.load', (['config_file'], {}), '(config_file)\n', (1083, 1096), False, 'import json\n')] |
from mongoengine import Document, StringField, EmbeddedDocument, EmbeddedDocumentField, EmbeddedDocumentListField, \
ValidationError, BooleanField
from cilantro_audit.constants import SEVERITY_VALUES, TEXT_MAX_LENGTH, TEXT_MIN_LENGTH, TITLE_MIN_LENGTH, \
TITLE_MAX_LENGTH
class SeverityEnum:
RED = "0:RED"
YELLOW = "1:YELLOW"
GREEN = "2:GREEN"
class Severity(EmbeddedDocument):
severity = StringField(required=True)
@staticmethod
def default():
return Severity.green()
@staticmethod
def red():
return Severity(SeverityEnum.RED)
@staticmethod
def yellow():
return Severity(SeverityEnum.YELLOW)
@staticmethod
def green():
return Severity(SeverityEnum.GREEN)
"""
Cycles through the different options:
GREEN -> YELLOW -> RED -> GREEN -> ...
"""
def next(self):
if SeverityEnum.GREEN == self.severity:
return Severity.yellow()
elif SeverityEnum.YELLOW == self.severity:
return Severity.red()
else:
return Severity.green()
def validate(self, clean=True):
super().validate(clean)
if self.severity not in SEVERITY_VALUES:
raise ValidationError("Severity must be one of { \"0:RED\", \"1:YELLOW\", \"2:GREEN\" }")
class Question(EmbeddedDocument):
text = StringField(required=True, max_length=TEXT_MAX_LENGTH, min_length=TEXT_MIN_LENGTH)
yes = EmbeddedDocumentField(Severity, required=True, default=Severity.default())
no = EmbeddedDocumentField(Severity, required=True, default=Severity.default())
other = EmbeddedDocumentField(Severity, required=True, default=Severity.default())
class AuditTemplateBuilder:
def __init__(self):
self.title = None
self.questions = []
self.locked = False
def with_title(self, title):
self.title = title
return self
def with_question(self, question):
self.questions.append(question)
return self
def with_lock(self):
self.locked = True
return self
def build(self):
template = AuditTemplate(title=self.title, questions=self.questions, locked=self.locked)
template.validate()
return template
class AuditTemplate(Document):
title = StringField(required=True, max_length=TITLE_MAX_LENGTH, min_length=TITLE_MIN_LENGTH)
questions = EmbeddedDocumentListField(Question, required=True)
locked = BooleanField(required=True, default=False)
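# Illustrative usage sketch (hypothetical strings; persisting the result would
# additionally require an active mongoengine connection):
#   template = (AuditTemplateBuilder()
#               .with_title('Morning line check')
#               .with_question(Question(text='Were all stations sanitized?'))
#               .build())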
| [
"mongoengine.EmbeddedDocumentListField",
"mongoengine.BooleanField",
"mongoengine.StringField",
"mongoengine.ValidationError"
] | [((418, 444), 'mongoengine.StringField', 'StringField', ([], {'required': '(True)'}), '(required=True)\n', (429, 444), False, 'from mongoengine import Document, StringField, EmbeddedDocument, EmbeddedDocumentField, EmbeddedDocumentListField, ValidationError, BooleanField\n'), ((1363, 1450), 'mongoengine.StringField', 'StringField', ([], {'required': '(True)', 'max_length': 'TEXT_MAX_LENGTH', 'min_length': 'TEXT_MIN_LENGTH'}), '(required=True, max_length=TEXT_MAX_LENGTH, min_length=\n TEXT_MIN_LENGTH)\n', (1374, 1450), False, 'from mongoengine import Document, StringField, EmbeddedDocument, EmbeddedDocumentField, EmbeddedDocumentListField, ValidationError, BooleanField\n'), ((2308, 2397), 'mongoengine.StringField', 'StringField', ([], {'required': '(True)', 'max_length': 'TITLE_MAX_LENGTH', 'min_length': 'TITLE_MIN_LENGTH'}), '(required=True, max_length=TITLE_MAX_LENGTH, min_length=\n TITLE_MIN_LENGTH)\n', (2319, 2397), False, 'from mongoengine import Document, StringField, EmbeddedDocument, EmbeddedDocumentField, EmbeddedDocumentListField, ValidationError, BooleanField\n'), ((2409, 2459), 'mongoengine.EmbeddedDocumentListField', 'EmbeddedDocumentListField', (['Question'], {'required': '(True)'}), '(Question, required=True)\n', (2434, 2459), False, 'from mongoengine import Document, StringField, EmbeddedDocument, EmbeddedDocumentField, EmbeddedDocumentListField, ValidationError, BooleanField\n'), ((2473, 2515), 'mongoengine.BooleanField', 'BooleanField', ([], {'required': '(True)', 'default': '(False)'}), '(required=True, default=False)\n', (2485, 2515), False, 'from mongoengine import Document, StringField, EmbeddedDocument, EmbeddedDocumentField, EmbeddedDocumentListField, ValidationError, BooleanField\n'), ((1232, 1309), 'mongoengine.ValidationError', 'ValidationError', (['"""Severity must be one of { "0:RED", "1:YELLOW", "2:GREEN" }"""'], {}), '(\'Severity must be one of { "0:RED", "1:YELLOW", "2:GREEN" }\')\n', (1247, 1309), False, 'from mongoengine import Document, StringField, EmbeddedDocument, EmbeddedDocumentField, EmbeddedDocumentListField, ValidationError, BooleanField\n')] |
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.utils.checkpoint as checkpoint
try:
from torchdiffeq import odeint_adjoint as odeintadjoint
from torchdiffeq import odeint
odeint_available = True
except:
odeint_available = False
try:
from .external.anode.adjoint import odesolver_adjoint as odesolver
anode_available = True
except:
anode_available = False
class IntegrationWrapper(nn.Module):
def __init__(self,integration_fcn,func):
super(IntegrationWrapper, self).__init__()
self.func = func
self.integration_fcn = integration_fcn
def forward(self,t,x0, dummy_arg=None):
# this dummy arg needs to be such that it requires gradient, does not need to be used, but otherwise the autograd tape breaks
# See
# here: https://github.com/prigoyal/pytorch_memonger/blob/master/tutorial/Checkpointing_for_PyTorch_models.ipynb
# and here: https://discuss.pytorch.org/t/checkpoint-with-no-grad-requiring-inputs-problem/19117/11
assert dummy_arg is not None
assert dummy_arg.requires_grad is True
return self.integration_fcn(func=self.func,x0=x0,t=t)
class GenericIntegrator(object):
def __init__(self,integrator_library = None, integrator_name = None, use_adjoint_integration=False,
integrator_options=None, step_size=0.5, rtol=1e-3, atol=1e-5, nr_of_checkpoints=None, checkpointing_time_interval=None, **kwargs):
"""
Generic integrator class
:param integrator_library: 'odeint' (for NODE integrators) or 'anode' (for ANODE integrators)
        :param integrator_name: string of integrator name ('odeint': 'dopri5','adams','euler','midpoint','rk4','explicit_adams','fixed_adams'; 'anode': 'rk2', 'rk4')
:param use_adjoint_integration: if true, then ODE is solved backward to compute gradient (only for odeint)
:param integrator_options: dictionary with additional integrator option (passed to the integrators)
:param step_size: integrator step size (only for fixed steps-size solvers, i.e., not for dopri5/adams)
:param rtol: relative integration tolerance (only for adaptive solvers (dopri5/adams))
:param atol: absolute integration tolerance (only for adaptive solvers (dopri5/adams))
        :param nr_of_checkpoints: supports more memory-efficient integration by adding checkpoints uniformly in time.
        :param checkpointing_time_interval: instead of defining the number of checkpoints (which dynamically adapts to the integration time), we can also define the desired time-interval between checkpoints
:param kwargs: optional arguments passed directly to the integrator
"""
super(GenericIntegrator, self).__init__()
self._available_integrator_libraries = ['odeint','anode']
if integrator_library is None:
self.integrator_library = 'odeint'
else:
self.integrator_library = integrator_library
if self.integrator_library not in self._available_integrator_libraries:
raise ValueError('Unknown integrator library {}'.format(self.integrator_library))
self._available_integrators = dict()
self._available_integrators['odeint'] = ['dopri5','adams','euler','midpoint','rk4','explicit_adams','fixed_adams']
self._available_integrators['anode'] = ['rk2','rk4']
if integrator_name is None:
self.integrator_name = self._available_integrators[self.integrator_library][0]
else:
self.integrator_name = integrator_name
if self.integrator_name not in self._available_integrators[self.integrator_library]:
raise ValueError('Integrator {} not available for {}'.format(self.integrator_name, self.integrator_library))
self.use_adjoint_integration = use_adjoint_integration
self.integrator_options = integrator_options
if self.integrator_options is None:
self.integrator_options = dict()
self.kwargs = kwargs
self.rtol = rtol
self.atol = atol
self.step_size = step_size
if step_size is not None:
if self.integrator_library == 'odeint':
if self.integrator_name not in ['dopri5', 'adams']:
if 'step_size' not in self.integrator_options:
self.integrator_options['step_size'] = step_size
self.nr_of_checkpoints = nr_of_checkpoints
self.checkpointing_time_interval = checkpointing_time_interval
# dummy tensor to support checkpointing
self._dummy_tensor = torch.ones(1, requires_grad=True)
# if max_num_steps is not None:
# self.integrator_options['max_num_steps'] = max_num_steps
def _integrate_odeint(self,func,x0,t):
if self.use_adjoint_integration:
# odeint(func, y0, t, rtol=1e-7, atol=1e-9, method=None, options=None)
res = odeintadjoint(func=func,y0=x0,t=t,rtol=self.rtol, atol=self.atol, method=self.integrator_name,options=self.integrator_options,**self.kwargs)
return res
else:
# odeint(func, y0, t, rtol=1e-7, atol=1e-9, method=None, options=None)
res = odeint(func=func,y0=x0,t=t, rtol=self.rtol, atol=self.atol, method=self.integrator_name,options=self.integrator_options,**self.kwargs)
return res
def _integrate_anode(self,func,x0,t=None):
# todo: provide more options for stepsize-control here
if self.integrator_options is None:
options = dict()
else:
options = self.integrator_options
# check that this is called correctly
if t is not None:
if len(t)==1:
if t!=1.0:
                    raise ValueError('Warning: ANODE always integrates to one. Aborting.')
elif len(t)>2 or ((t[-1]-t[0])!=1.0):
raise ValueError('Warning: ANODE always integrates to unit time and does not provide any intermediate values. Expect trouble when calling it this way. Aborting.')
# everything okay, so we can proceed
Nt = 10
options.update({'Nt': int(Nt)})
options.update({'method': (self.integrator_name).upper()})
res = odesolver(func=func,z0=x0,options=options)
# to conform with odeint, the first dimension should be time, here it only produces one time-point
res_reshaped = res.unsqueeze(dim=0)
return res_reshaped
def create_integration_time_intervals(self,t):
if len(t)<2:
raise ValueError('Expected a range of time-points, but only got {}'.format(t))
if (self.nr_of_checkpoints is not None) and (self.checkpointing_time_interval is not None):
raise ValueError('nr_of_checkpoints and checkpointing_time_interval cannot both be set. Set one or the other')
t_from = t[0]
t_to = t[-1]
if self.nr_of_checkpoints is not None:
checkpointing_time_points = torch.linspace(t_from,t_to,self.nr_of_checkpoints+2) # if we want one checkpoint we need three points, hence +2
elif self.checkpointing_time_interval is not None:
checkpointing_time_points = torch.arange(t_from,t_to,self.checkpointing_time_interval)
if checkpointing_time_points[-1]!=t_to:
# append it
checkpointing_time_points = torch.cat((checkpointing_time_points, torch.tensor([t_to])), dim=0)
else:
raise ValueError('Either nr_of_checkpoints or checkpointing_time_interval needs to be set.')
        # force the last time-point to agree numerically
checkpointing_time_points[-1] = t_to
# now we need to create the intervals (i.e., match the integration time-points we want to hit, to the checkpoints)
time_intervals = []
output_time_points = []
idx_t = 0
nr_t = len(t)
idx_checkpoint_t = 0
nr_checkpoint_t = len(checkpointing_time_points)
keep_accumulating = True
# always starts with the first-timepoint
current_time_interval = torch.tensor([t[idx_t]])
current_output_time_point = torch.tensor([True])
if t[idx_t]!=checkpointing_time_points[idx_checkpoint_t]:
raise ValueError('Need to start with the same time.')
idx_t += 1
idx_checkpoint_t += 1
while keep_accumulating:
next_t = t[idx_t]
next_cp_t = checkpointing_time_points[idx_checkpoint_t]
if next_cp_t>next_t:
# case: just keep on adding this time-point to the current time-interval and retain it for the output
current_time_interval = torch.cat((current_time_interval,torch.tensor([next_t])))
current_output_time_point = torch.cat((current_output_time_point,torch.tensor([True])))
idx_t += 1
elif next_cp_t<next_t:
# case: this is the checkpoint we want, so finalize it and move on to the next one
current_time_interval = torch.cat((current_time_interval,torch.tensor([next_cp_t])))
current_output_time_point = torch.cat((current_output_time_point,torch.tensor([False])))
time_intervals.append(current_time_interval)
output_time_points.append(current_output_time_point)
current_time_interval = torch.tensor([next_cp_t])
current_output_time_point = torch.tensor([False])
idx_checkpoint_t += 1
else: # the same
                # case: they coincide, so move on to the next for both, but only keep one
current_time_interval = torch.cat((current_time_interval,torch.tensor([next_cp_t])))
current_output_time_point = torch.cat((current_output_time_point,torch.tensor([True])))
time_intervals.append(current_time_interval)
output_time_points.append(current_output_time_point)
current_time_interval = torch.tensor([next_cp_t])
current_output_time_point = torch.tensor([False])
idx_t += 1
idx_checkpoint_t += 1
# let's see if we are at the end
if (idx_t>=nr_t) or (idx_checkpoint_t>=nr_checkpoint_t):
keep_accumulating = False
return time_intervals, output_time_points
def _integrate_checkpointed(self,func,x0,t):
        # first get the checkpointed time-intervals
integration_times, output_time_points = self.create_integration_time_intervals(t=t)
current_x0 = x0
overall_integration_results = None
# now let's chunk the solutions together
integrator = IntegrationWrapper(integration_fcn=self._integrate_direct,func=func)
for current_integration_times, current_output_time_points in zip(integration_times,output_time_points):
current_res = checkpoint.checkpoint(integrator, current_integration_times, current_x0, self._dummy_tensor)
current_x0 = current_res[-1,...]
if overall_integration_results is None:
overall_integration_results = current_res[current_output_time_points,...]
else:
overall_integration_results = torch.cat((overall_integration_results,current_res[current_output_time_points,...]),dim=0)
return overall_integration_results
def _integrate_direct(self, func, x0, t):
if self.integrator_library == 'odeint':
return self._integrate_odeint(func=func, x0=x0, t=t)
elif self.integrator_library == 'anode':
return self._integrate_anode(func=func, x0=x0, t=t)
def integrate(self,func,x0,t):
if (self.nr_of_checkpoints is None) and (self.checkpointing_time_interval is None):
return self._integrate_direct(func=func,x0=x0,t=t)
else:
# do chunk-based-integration
return self._integrate_checkpointed(func=func,x0=x0,t=t)
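# --- Added usage sketch; not part of the original module ---
# A minimal, hypothetical example of driving GenericIntegrator, assuming
# torchdiffeq is installed: integrate dx/dt = -x from t=0 to t=1 with a
# fixed-step RK4 solver and two uniformly spaced checkpoints.
def _example_generic_integration():
    class Decay(nn.Module):
        def forward(self, t, x):
            # simple linear decay; gradients can flow through the solve
            return -x

    integrator = GenericIntegrator(integrator_library='odeint',
                                      integrator_name='rk4',
                                      step_size=0.1,
                                      nr_of_checkpoints=2)
    x0 = torch.ones(3, requires_grad=True)
    t = torch.linspace(0.0, 1.0, 5)
    return integrator.integrate(func=Decay(), x0=x0, t=t)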
| [
"torchdiffeq.odeint_adjoint",
"torchdiffeq.odeint",
"torch.utils.checkpoint.checkpoint",
"torch.tensor",
"torch.cat",
"torch.arange",
"torch.linspace",
"torch.ones"
] | [((4637, 4670), 'torch.ones', 'torch.ones', (['(1)'], {'requires_grad': '(True)'}), '(1, requires_grad=True)\n', (4647, 4670), False, 'import torch\n'), ((8137, 8161), 'torch.tensor', 'torch.tensor', (['[t[idx_t]]'], {}), '([t[idx_t]])\n', (8149, 8161), False, 'import torch\n'), ((8198, 8218), 'torch.tensor', 'torch.tensor', (['[True]'], {}), '([True])\n', (8210, 8218), False, 'import torch\n'), ((4969, 5119), 'torchdiffeq.odeint_adjoint', 'odeintadjoint', ([], {'func': 'func', 'y0': 'x0', 't': 't', 'rtol': 'self.rtol', 'atol': 'self.atol', 'method': 'self.integrator_name', 'options': 'self.integrator_options'}), '(func=func, y0=x0, t=t, rtol=self.rtol, atol=self.atol, method\n =self.integrator_name, options=self.integrator_options, **self.kwargs)\n', (4982, 5119), True, 'from torchdiffeq import odeint_adjoint as odeintadjoint\n'), ((5248, 5391), 'torchdiffeq.odeint', 'odeint', ([], {'func': 'func', 'y0': 'x0', 't': 't', 'rtol': 'self.rtol', 'atol': 'self.atol', 'method': 'self.integrator_name', 'options': 'self.integrator_options'}), '(func=func, y0=x0, t=t, rtol=self.rtol, atol=self.atol, method=self.\n integrator_name, options=self.integrator_options, **self.kwargs)\n', (5254, 5391), False, 'from torchdiffeq import odeint\n'), ((7026, 7082), 'torch.linspace', 'torch.linspace', (['t_from', 't_to', '(self.nr_of_checkpoints + 2)'], {}), '(t_from, t_to, self.nr_of_checkpoints + 2)\n', (7040, 7082), False, 'import torch\n'), ((10962, 11058), 'torch.utils.checkpoint.checkpoint', 'checkpoint.checkpoint', (['integrator', 'current_integration_times', 'current_x0', 'self._dummy_tensor'], {}), '(integrator, current_integration_times, current_x0,\n self._dummy_tensor)\n', (10983, 11058), True, 'import torch.utils.checkpoint as checkpoint\n'), ((7237, 7297), 'torch.arange', 'torch.arange', (['t_from', 't_to', 'self.checkpointing_time_interval'], {}), '(t_from, t_to, self.checkpointing_time_interval)\n', (7249, 7297), False, 'import torch\n'), ((11307, 11405), 'torch.cat', 'torch.cat', (['(overall_integration_results, current_res[current_output_time_points, ...])'], {'dim': '(0)'}), '((overall_integration_results, current_res[\n current_output_time_points, ...]), dim=0)\n', (11316, 11405), False, 'import torch\n'), ((9426, 9451), 'torch.tensor', 'torch.tensor', (['[next_cp_t]'], {}), '([next_cp_t])\n', (9438, 9451), False, 'import torch\n'), ((9496, 9517), 'torch.tensor', 'torch.tensor', (['[False]'], {}), '([False])\n', (9508, 9517), False, 'import torch\n'), ((10051, 10076), 'torch.tensor', 'torch.tensor', (['[next_cp_t]'], {}), '([next_cp_t])\n', (10063, 10076), False, 'import torch\n'), ((10121, 10142), 'torch.tensor', 'torch.tensor', (['[False]'], {}), '([False])\n', (10133, 10142), False, 'import torch\n'), ((8760, 8782), 'torch.tensor', 'torch.tensor', (['[next_t]'], {}), '([next_t])\n', (8772, 8782), False, 'import torch\n'), ((8866, 8886), 'torch.tensor', 'torch.tensor', (['[True]'], {}), '([True])\n', (8878, 8886), False, 'import torch\n'), ((7458, 7478), 'torch.tensor', 'torch.tensor', (['[t_to]'], {}), '([t_to])\n', (7470, 7478), False, 'import torch\n'), ((9123, 9148), 'torch.tensor', 'torch.tensor', (['[next_cp_t]'], {}), '([next_cp_t])\n', (9135, 9148), False, 'import torch\n'), ((9232, 9253), 'torch.tensor', 'torch.tensor', (['[False]'], {}), '([False])\n', (9244, 9253), False, 'import torch\n'), ((9749, 9774), 'torch.tensor', 'torch.tensor', (['[next_cp_t]'], {}), '([next_cp_t])\n', (9761, 9774), False, 'import torch\n'), ((9858, 9878), 'torch.tensor', 'torch.tensor', (['[True]'], {}), '([True])\n', (9870, 9878), False, 'import torch\n')]
from django.conf import settings
from django.db import models
class Profile(models.Model):
"Generated Model"
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
related_name="profile_user",
)
phone_number = models.CharField(
max_length=256,
)
is_admin = models.BooleanField()
user_type = models.IntegerField()
| [
"django.db.models.IntegerField",
"django.db.models.CharField",
"django.db.models.BooleanField",
"django.db.models.ForeignKey"
] | [((126, 228), 'django.db.models.ForeignKey', 'models.ForeignKey', (['settings.AUTH_USER_MODEL'], {'on_delete': 'models.CASCADE', 'related_name': '"""profile_user"""'}), "(settings.AUTH_USER_MODEL, on_delete=models.CASCADE,\n related_name='profile_user')\n", (143, 228), False, 'from django.db import models\n'), ((275, 307), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)'}), '(max_length=256)\n', (291, 307), False, 'from django.db import models\n'), ((338, 359), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (357, 359), False, 'from django.db import models\n'), ((376, 397), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (395, 397), False, 'from django.db import models\n')] |
from dataclasses import dataclass
import datetime
import pytest
from jira_offline.utils.serializer import DeserializeError, DataclassSerializer
@dataclass
class Test(DataclassSerializer):
dt: datetime.date
def test_date_deserialize():
'''
Test date deserializes
'''
obj = Test.deserialize({'dt': '2018-09-24'})
assert isinstance(obj.dt, datetime.date)
assert obj.dt.year == 2018
assert obj.dt.month == 9
assert obj.dt.day == 24
def test_date_deserialize_roundtrip():
'''
Test date deserializes/serializes in a loss-less roundtrip
'''
json = Test.deserialize({'dt': '2018-09-24'}).serialize()
assert json['dt'] == '2018-09-24'
def test_date_serialize():
'''
Test date serializes
'''
json = Test(dt=datetime.date(2018, 9, 24)).serialize()
assert json['dt'] == '2018-09-24'
def test_date_serialize_roundtrip():
'''
Test date serializes/deserializes in a loss-less roundtrip
'''
obj = Test.deserialize(
Test(dt=datetime.date(2018, 9, 24)).serialize()
)
assert obj.dt.year == 2018
assert obj.dt.month == 9
assert obj.dt.day == 24
def test_date_bad_deserialize():
'''
Test bad date deserialize raises exception
'''
with pytest.raises(DeserializeError):
Test.deserialize({'dt': '2018-09-2x'})
| [
"pytest.raises",
"datetime.date"
] | [((1256, 1287), 'pytest.raises', 'pytest.raises', (['DeserializeError'], {}), '(DeserializeError)\n', (1269, 1287), False, 'import pytest\n'), ((777, 803), 'datetime.date', 'datetime.date', (['(2018)', '(9)', '(24)'], {}), '(2018, 9, 24)\n', (790, 803), False, 'import datetime\n'), ((1016, 1042), 'datetime.date', 'datetime.date', (['(2018)', '(9)', '(24)'], {}), '(2018, 9, 24)\n', (1029, 1042), False, 'import datetime\n')] |
import re
import unittest
from unittest.mock import ANY
import boto3
from cloudwanderer import CloudWanderer
from cloudwanderer.storage_connectors import MemoryStorageConnector
from ..helpers import GenericAssertionHelpers, get_default_mocker
from ..mocks import add_infra
class TestCloudWandererWriteResources(unittest.TestCase, GenericAssertionHelpers):
eu_west_2_resources = [
{
"urn": "urn:aws:.*:eu-west-2:ec2:instance:.*",
"attr": "BaseResource",
"VpcId": "vpc-.*",
"SubnetId": "subnet-.*",
"InstanceId": "i-.*",
}
]
us_east_1_resources = [
{
"urn": "urn:aws:.*:us-east-1:iam:role:.*",
"attr": "BaseResource",
"RoleName": "test-role",
"Path": re.escape("/"),
},
{
"urn": "urn:aws:.*:us-east-1:iam:role:.*",
"attr": "role_inline_policy_attachments",
"PolicyNames": ["test-role-policy"],
},
{
"urn": "urn:aws:.*:us-east-1:iam:role:.*",
"attr": "role_managed_policy_attachments",
"AttachedPolicies": [
{
"PolicyName": "APIGatewayServiceRolePolicy",
"PolicyArn": "arn:aws:iam::aws:policy/aws-service-role/APIGatewayServiceRolePolicy",
}
],
"IsTruncated": False,
},
{
"urn": "urn:aws:.*:us-east-1:iam:role_policy:.*",
"attr": "BaseResource",
"PolicyName": "test-role-policy",
"PolicyDocument": ANY,
},
{
# This is a us-east-1 resource because s3 buckets are discovered from us-east-1
# irrespective of their region.
"urn": "urn:aws:.*:eu-west-2:s3:bucket:.*",
"attr": "BaseResource",
"Name": "test-eu-west-2",
},
]
def setUp(self):
self.enabled_regions = ["eu-west-2", "us-east-1", "ap-east-1"]
get_default_mocker().start_general_mock(
restrict_regions=self.enabled_regions,
restrict_services=["ec2", "s3", "iam"],
limit_resources=[
"ec2:instance",
"s3:bucket",
"iam:group",
"iam:role",
],
)
add_infra(regions=self.enabled_regions)
self.storage_connector = MemoryStorageConnector()
self.wanderer = CloudWanderer(storage_connectors=[self.storage_connector])
def tearDown(self):
get_default_mocker().stop_general_mock()
def test_write_resources(self):
self.wanderer.write_resources()
for region_name in self.enabled_regions:
self.assert_dictionary_overlap(
self.storage_connector.read_all(),
[
{
"urn": f"urn:aws:.*:{region_name}:ec2:instance:.*",
"attr": "BaseResource",
"VpcId": "vpc-.*",
"SubnetId": "subnet-.*",
"InstanceId": "i-.*",
},
{
"urn": f"urn:aws:.*:{region_name}:s3:bucket:.*",
"attr": "BaseResource",
"Name": f"test-{region_name}",
},
],
)
if region_name == "us-east-1":
self.assert_dictionary_overlap(self.storage_connector.read_all(), self.us_east_1_resources)
else:
self.assert_no_dictionary_overlap(
self.storage_connector.read_all(),
[
{
"urn": f"urn:aws:.*:{region_name}:iam:role:.*",
"attr": "BaseResource",
"RoleName": "test-role",
"Path": re.escape("/"),
}
],
)
def test_write_resources_exclude_resources(self):
self.wanderer.write_resources(exclude_resources=["ec2:instance"])
for region_name in self.enabled_regions:
self.assert_no_dictionary_overlap(
self.storage_connector.read_all(),
[
{
"urn": f"urn:aws:.*:{region_name}:ec2:instance:.*",
"attr": "BaseResource",
"VpcId": "vpc-.*",
"SubnetId": "subnet-.*",
"InstanceId": "i-.*",
}
],
)
self.assert_dictionary_overlap(self.storage_connector.read_all(), self.us_east_1_resources)
def test_write_resources_eu_west_1(self):
self.wanderer.write_resources(
regions=["eu-west-2"],
)
self.assert_dictionary_overlap(self.storage_connector.read_all(), self.eu_west_2_resources)
self.assert_no_dictionary_overlap(self.storage_connector.read_all(), self.us_east_1_resources)
def test_write_resources_us_east_1(self):
self.wanderer.write_resources(regions=["us-east-1"])
self.assert_dictionary_overlap(self.storage_connector.read_all(), self.us_east_1_resources)
self.assert_no_dictionary_overlap(self.storage_connector.read_all(), self.eu_west_2_resources)
def test_write_resources_of_service_eu_west_1(self):
self.wanderer.write_resources(regions=["eu-west-2"], service_names=["ec2"])
self.wanderer.write_resources(regions=["eu-west-2"], service_names=["s3"])
self.assert_dictionary_overlap(self.storage_connector.read_all(), self.eu_west_2_resources)
self.assert_no_dictionary_overlap(self.storage_connector.read_all(), self.us_east_1_resources)
def test_write_resources_of_service_us_east_1(self):
self.wanderer.write_resources(service_names=["ec2"], regions=["us-east-1"])
self.wanderer.write_resources(service_names=["s3"], regions=["us-east-1"])
self.wanderer.write_resources(service_names=["iam"], regions=["us-east-1"])
self.assert_dictionary_overlap(self.storage_connector.read_all(), self.us_east_1_resources)
self.assert_no_dictionary_overlap(self.storage_connector.read_all(), self.eu_west_2_resources)
def test_write_resources_of_type_eu_west_1(self):
self.wanderer.write_resources(regions=["eu-west-2"], service_names=["s3"], resource_types=["bucket"])
self.wanderer.write_resources(regions=["eu-west-2"], service_names=["ec2"], resource_types=["instance"])
self.wanderer.write_resources(regions=["eu-west-2"], service_names=["iam"], resource_types=["role"])
self.assert_dictionary_overlap(self.storage_connector.read_all(), self.eu_west_2_resources)
self.assert_no_dictionary_overlap(self.storage_connector.read_all(), self.us_east_1_resources)
def test_write_resources_of_type_us_east_1(self):
self.wanderer.write_resources(service_names=["s3"], resource_types=["bucket"], regions=["us-east-1"])
self.wanderer.write_resources(service_names=["ec2"], resource_types=["instance"], regions=["us-east-1"])
self.wanderer.write_resources(service_names=["iam"], resource_types=["role"], regions=["us-east-1"])
self.assert_dictionary_overlap(self.storage_connector.read_all(), self.us_east_1_resources)
self.assert_no_dictionary_overlap(self.storage_connector.read_all(), self.eu_west_2_resources)
def test_cleanup_resources_of_type_us_east_1(self):
self.wanderer.write_resources(service_names=["iam"], resource_types=["role"], regions=["us-east-1"])
self.assert_dictionary_overlap(
self.storage_connector.read_all(),
[
{
"urn": "urn:aws:.*:us-east-1:iam:role:.*",
"attr": "role_managed_policy_attachments",
"AttachedPolicies": [
{
"PolicyName": "APIGatewayServiceRolePolicy",
"PolicyArn": "arn:aws:iam::aws:policy/aws-service-role/APIGatewayServiceRolePolicy",
}
],
"IsTruncated": False,
},
{
"urn": "urn:aws:.*:us-east-1:iam:role_policy:.*",
"attr": "BaseResource",
"PolicyName": "test-role-policy",
"PolicyDocument": ANY,
},
],
)
# Delete the role
iam_resource = boto3.resource("iam")
iam_resource.Role("test-role").detach_policy(
PolicyArn="arn:aws:iam::aws:policy/aws-service-role/APIGatewayServiceRolePolicy"
)
iam_resource.Role("test-role").Policy("test-role-policy").delete()
iam_resource.Role("test-role").delete()
self.wanderer.write_resources(service_names=["iam"], resource_types=["role"], regions=["us-east-1"])
self.assert_no_dictionary_overlap(
self.storage_connector.read_all(),
[
{
"urn": "urn:aws:.*:us-east-1:iam:role:.*",
"attr": "role_managed_policy_attachments",
"AttachedPolicies": [
{
"PolicyName": "APIGatewayServiceRolePolicy",
"PolicyArn": "arn:aws:iam::aws:policy/aws-service-role/APIGatewayServiceRolePolicy",
}
],
"IsTruncated": False,
},
{
"urn": "urn:aws:.*:us-east-1:iam:role_policy:.*",
"attr": "BaseResource",
"PolicyName": "test-role-policy",
"PolicyDocument": ANY,
},
],
)
| [
"cloudwanderer.storage_connectors.MemoryStorageConnector",
"boto3.resource",
"re.escape",
"cloudwanderer.CloudWanderer"
] | [((2416, 2440), 'cloudwanderer.storage_connectors.MemoryStorageConnector', 'MemoryStorageConnector', ([], {}), '()\n', (2438, 2440), False, 'from cloudwanderer.storage_connectors import MemoryStorageConnector\n'), ((2465, 2523), 'cloudwanderer.CloudWanderer', 'CloudWanderer', ([], {'storage_connectors': '[self.storage_connector]'}), '(storage_connectors=[self.storage_connector])\n', (2478, 2523), False, 'from cloudwanderer import CloudWanderer\n'), ((8628, 8649), 'boto3.resource', 'boto3.resource', (['"""iam"""'], {}), "('iam')\n", (8642, 8649), False, 'import boto3\n'), ((798, 812), 're.escape', 're.escape', (['"""/"""'], {}), "('/')\n", (807, 812), False, 'import re\n'), ((3941, 3955), 're.escape', 're.escape', (['"""/"""'], {}), "('/')\n", (3950, 3955), False, 'import re\n')] |
import argparse
import json
import math
from collections import Counter
def load_gold_stardard(gold_path):
gold_answers = dict()
with open(gold_path) as json_file:
data = json.load(json_file)
for ques in data:
gold_answers[ques['id']] = ques['relations']
print(f"\tgold answers: loaded {len(data)} questions!")
return gold_answers
def load_system_answers(system_path):
system_answers = dict()
with open(system_path) as json_file:
data = json.load(json_file)
for ques in data:
if 'relations' in ques:
system_answers[ques['id']] = ques['relations']
else:
print(f"Missing relations: {ques['id']}")
print(f"\tsystem answers: loaded {len(data)} questions!")
return system_answers
def calculate_f1(precision, recall):
if precision + recall == 0:
return 0
else:
return 2 * ((precision * recall) / (precision + recall))
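# Added note (illustrative): calculate_f1 is the harmonic mean of precision and
# recall with a zero guard, e.g. calculate_f1(0.5, 0.5) == 0.5 and
# calculate_f1(1.0, 0.0) == 0.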
def evaluate_dbpedia(gold_answers, system_answers):
count, total_p, total_r, total_f1 = 0, 0, 0, 0
for ques_id in gold_answers:
count += 1
# if an answer is not provided to a question, we just move on
if ques_id not in system_answers:
continue
gold_answer_list = gold_answers[ques_id]
system_relations = system_answers[ques_id].copy()
sys_rel_count, gold_rel_count, found_count, correct_count = len(system_relations), 0, 0, 0
# collect all gold relations
all_gold_rels = set()
for gold_rel_set in gold_answer_list:
all_gold_rels.update(gold_rel_set)
# mark all correct answers from system answers
for rel in system_relations:
if rel in all_gold_rels:
correct_count += 1
# check how many relation sets are covered in system answers. In ground truth, we have multiple correct
# relations for a given slot.
# For example, {"dbo:locatedInArea", "dbo:city", "dbo:isPartOf", "dbo:location", "dbo:region"}
for gold_rel_set in gold_answer_list:
gold_rel_count += 1
found_rel = False
for rel in gold_rel_set:
if rel in system_relations:
found_rel = True
system_relations.remove(rel)
break
if found_rel:
found_count += 1
# precision, recall and F1 calculation
precision = correct_count / sys_rel_count
recall = found_count / gold_rel_count
total_p += precision
total_r += recall
total_f1 += calculate_f1(precision, recall)
return total_p/count, total_r/count, total_f1/count
def evaluate_wikidata(gold_answers, system_answers):
count, total_p, total_r, total_f1 = 0, 0, 0, 0
for ques_id in gold_answers:
count += 1
# if an answer is not provided to a question, we just move on
if ques_id not in system_answers:
continue
gold_relations = gold_answers[ques_id]
system_relations = system_answers[ques_id]
if len(system_relations) == 0:
continue
precision = (sum((Counter(system_relations) & Counter(gold_relations)).values())) / len(system_relations)
recall = (sum((Counter(system_relations) & Counter(gold_relations)).values())) / len(gold_relations)
f1 = calculate_f1(precision, recall)
total_p += precision
total_r += recall
total_f1 += f1
return total_p/count, total_r/count, total_f1/count
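# --- Added illustration; not part of the original evaluator ---
# A tiny, hand-checkable example of the Wikidata scoring above: one question
# whose gold relations are ["P36", "P17"] and system relations ["P36", "P31"]
# shares a single relation, so precision = recall = 1/2 and F1 = 0.5.
# The property IDs are placeholders used only for illustration.
def _example_wikidata_scores():
    gold = {"q1": ["P36", "P17"]}
    system = {"q1": ["P36", "P31"]}
    return evaluate_wikidata(gold, system)  # -> (0.5, 0.5, 0.5)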
def arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--gt', type=str,
help='ground truth JSON file')
parser.add_argument('--so', type=str,
help='system output JSON file')
parser.add_argument('--kb', type=str,
help='Knowledge Base', default="dbpedia")
args = parser.parse_args()
return args
def main(args):
system_path = args.so
gt_path = args.gt
kb = args.kb
print(f"Config:\n\tGround truth: {gt_path}\n\tSystem path: {system_path}")
gold_answers = load_gold_stardard(gt_path)
system_answers = load_system_answers(system_path)
if kb == "dbpedia":
precision, recall, f1 = evaluate_dbpedia(gold_answers, system_answers)
elif kb == "wikidata":
precision, recall, f1 = evaluate_wikidata(gold_answers, system_answers)
else:
raise Exception(f"Invalid KB: {kb}")
print(f"\nResults:\n\tPrecision: {round(precision, 5)}\n\tRecall: {round(recall, 5)}\n\tF1: {round(f1, 5)}")
if __name__ == "__main__":
    main(arg_parser())
| [
"json.load",
"collections.Counter",
"argparse.ArgumentParser"
] | [((3576, 3601), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3599, 3601), False, 'import argparse\n'), ((188, 208), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (197, 208), False, 'import json\n'), ((492, 512), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (501, 512), False, 'import json\n'), ((3166, 3191), 'collections.Counter', 'Counter', (['system_relations'], {}), '(system_relations)\n', (3173, 3191), False, 'from collections import Counter\n'), ((3194, 3217), 'collections.Counter', 'Counter', (['gold_relations'], {}), '(gold_relations)\n', (3201, 3217), False, 'from collections import Counter\n'), ((3277, 3302), 'collections.Counter', 'Counter', (['system_relations'], {}), '(system_relations)\n', (3284, 3302), False, 'from collections import Counter\n'), ((3305, 3328), 'collections.Counter', 'Counter', (['gold_relations'], {}), '(gold_relations)\n', (3312, 3328), False, 'from collections import Counter\n')] |
# all functions related to user accounts
import hashlib
from models import *
from base64 import b64decode, b64encode
import json
import requests
from search_helpers import get_article_object
GET = requests.get
async def create_pmid(handler, user, repo_name, pmid, github_token):
""" Create a file for this PMID. """
p = int(pmid)
pmid_data = {
"message": "Add {0}.json".format(p),
"content": encode_for_github(
{})}
await handler.github_request(requests.put,
"repos/{0}/{1}/contents/{2}.json".format(
user,
repo_name,
p),
github_token,
pmid_data)
return True
async def get_or_create_pmid(handler, user, collection_name, pmid, github_token):
""" Get the contents of this PMID file, or create it if it doesn't
exist. """
p = int(pmid)
repo_name = get_repo_name_from_collection(collection_name)
try:
pmid_contents = await handler.github_request(
GET, "repos/{0}/{1}/contents/{2}.json".format(
user, repo_name, p), github_token)
return pmid_contents
except OSError as e:
# The article didn't already exist
await create_pmid(handler, user, repo_name, p, github_token)
pmid_contents = await handler.github_request(
requests.get, "repos/{0}/{1}/contents/{2}.json".format(
user, repo_name, p), github_token)
return pmid_contents
def encode_for_github(d):
""" Encode with base64 and utf-8. Take a dictionary. """
return b64encode(
json.dumps(
d,
indent=4,
sort_keys=True).encode("utf-8")).decode("utf-8")
def decode_from_github(s):
""" Reverse the encode_dict_for_github function.
Return a dictionary. """
return json.loads(b64decode(s).decode("utf-8"))
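# Added illustration: the two helpers above are inverses, so a dictionary
# survives the GitHub-style JSON/base64 round trip unchanged.
def _example_encoding_roundtrip():
    payload = {"pmid": 12345, "notes": "example"}
    assert decode_from_github(encode_for_github(payload)) == payload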
def get_repo_name_from_collection(name):
""" Convert a collection name to a repository name for GitHub. """
return "brainspell-neo-collection-" + name.replace(" ", "-")
def get_collection_from_repo_name(name):
""" Convert a repo name to the user-specified collection name. """
return name[len("brainspell-neo-collection-"):]
def get_github_username_from_api_key(api_key):
""" Fetch the GitHub username corresponding to a given API key. """
user = User.select().where((User.password == api_key))
user_obj = next(user.execute())
return user_obj.username
def valid_api_key(api_key):
""" Return whether an API key exists in our database. """
user = User.select().where((User.password == api_key))
return user.execute().count >= 1
def get_user_object_from_api_key(api_key):
""" Return a PeeWee user object from an API key. """
return User.select().where(User.password == api_key).execute()
def register_github_user(user_dict):
""" Add a GitHub user to our database. """
# if the user doesn't already exist
if (User.select().where(User.username ==
user_dict["login"]).execute().count == 0):
username = user_dict["login"]
email = user_dict["email"]
hasher = hashlib.sha1()
# password (a.k.a. API key) is a hash of the Github ID
hasher.update(str(user_dict["id"]).encode('utf-8'))
password = hasher.hexdigest()
User.create(username=username, emailaddress=email, password=password)
return True
else:
return False # user already exists
def add_collection_to_brainspell_database(
collection_name,
description,
api_key,
cold_run=True):
""" Create a collection in our database if it doesn't exist,
or return false if the collection already exists. """
if valid_api_key(api_key):
user = list(get_user_object_from_api_key(api_key))[0]
# get the dict of user collections
if not user.collections:
user_collections = {}
else:
# unfortunately, because malformatted JSON exists in our database,
# we have to use eval instead of using JSON.decode()
user_collections = eval(user.collections)
# if the collection doesn't already exist
if collection_name not in user_collections:
# create the collection
user_collections[collection_name] = {}
user_collections[collection_name]["description"] = str(description)
user_collections[collection_name]["pmids"] = []
if not cold_run:
q = User.update(
collections=json.dumps(user_collections)).where(
User.username == user.username)
q.execute()
return True
return False
def bulk_add_articles_to_brainspell_database_collection(
collection, pmids, api_key, cold_run=True):
""" Adds the PMIDs to collection_name, if such a collection exists under
the given user. Assumes that the collection exists. Does not add repeats.
Takes collection_name *without* "brainspell-collection".
Return False if an assumption is violated, True otherwise. """
user = get_user_object_from_api_key(api_key)
if user.count > 0:
user = list(user)[0]
if user.collections:
# assumes collections are well-formed JSON
target = json.loads(user.collections)
if collection not in target:
target[collection] = {
"description": "None",
"pmids": []
}
pmid_set = set(map(lambda x: str(x), target[collection]["pmids"]))
for pmid in pmids:
pmid_set.add(str(pmid))
target[collection]["pmids"] = list(pmid_set)
if not cold_run:
q = User.update(
collections=json.dumps(target)).where(
User.password == api_key)
q.execute()
return True
else:
return False # user has no collections; violates assumptions
return False # user does not exist
def remove_all_brainspell_database_collections(api_key):
""" Dangerous! Drops all of a user's Brainspell collections
from our local database. Does not affect GitHub.
Called from CollectionsEndpointHandler."""
if valid_api_key(api_key):
q = User.update(
collections=json.dumps({})).where(
User.password == api_key)
q.execute()
def get_brainspell_collections_from_api_key(api_key):
"""
Return a user's collections from Brainspell's database given an API key.
May be inconsistent with GitHub.
"""
response = {}
if valid_api_key(api_key):
user = list(get_user_object_from_api_key(api_key))[0]
if user.collections:
return json.loads(user.collections)
return response
def add_article_to_brainspell_database_collection(
collection, pmid, api_key, cold_run=True):
"""
Add a collection to our local database. Do not add to GitHub in this function.
Assumes that the collection already exists. Assumes that the user exists.
Takes collection_name *without* "brainspell-collection".
Returns False if the article is already in the collection, or if an assumption
is violated.
This is an O(N) operation with respect to the collection size.
If you're adding many articles, it's O(N^2). If you're adding multiple articles,
please use bulk_add_articles_to_brainspell_database_collection.
Called by AddToCollectionEndpointHandler.
"""
user = get_user_object_from_api_key(api_key)
if user.count > 0:
user = list(user)[0]
if user.collections:
# assumes collections are well-formed JSON
target = json.loads(user.collections)
if collection not in target:
target[collection] = {
"description": "None",
"pmids": []
}
pmids_list = set(
map(lambda x: str(x), target[collection]["pmids"]))
            # check whether the PMID is already in the collection
if str(pmid) not in pmids_list:
pmids_list.add(str(pmid))
target[collection]["pmids"] = list(pmids_list)
if not cold_run:
q = User.update(
collections=json.dumps(target)).where(
User.password == api_key)
q.execute()
return True
else:
return False # article already in collection
else:
return False # user has no collections; violates assumptions
return False # user does not exist
def remove_article_from_brainspell_database_collection(
collection, pmid, api_key, cold_run=True):
""" Remove an article from the Brainspell repo. Do not affect GitHub.
Takes collection_name *without* "brainspell-collection".
Similar implementation to add_article_to_brainspell_database_collection. """
user = get_user_object_from_api_key(api_key)
if user.count == 0:
return False
user = list(user)[0]
if not user.collections:
return False
# assumes collections are well-formed JSON
target = json.loads(user.collections)
if collection not in target:
return False
pmids_list = list(
map(lambda x: str(x), target[collection]["pmids"]))
if str(pmid) not in pmids_list:
return False
pmids_list.remove(str(pmid))
target[collection]["pmids"] = pmids_list
if not cold_run:
q = User.update(
collections=json.dumps(target)).where(
User.password == api_key)
q.execute()
return True
def cache_user_collections(api_key, collections_obj):
""" Force overwrite the existing user collection field with
the passed collections_object data. """
q = User.update(
collections=json.dumps(collections_obj)).where(
User.password == api_key)
q.execute()
def add_unmapped_article_to_cached_collections(api_key, pmid, collection_name):
query = list(
User.select(
User.collections).where(
User.password == api_key).execute())[0]
collections = json.loads(query.collections)
relevant_article = list(get_article_object(pmid))[0]
target_collection = [
x for x in collections if x['name'] == collection_name][0]
target_collection['unmapped_articles'].append({
'title': relevant_article.title,
'pmid': relevant_article.pmid,
'authors': relevant_article.authors,
'reference': relevant_article.reference,
})
cache_user_collections(api_key, collections)
| [
"json.loads",
"search_helpers.get_article_object",
"json.dumps",
"base64.b64decode",
"hashlib.sha1"
] | [((9468, 9496), 'json.loads', 'json.loads', (['user.collections'], {}), '(user.collections)\n', (9478, 9496), False, 'import json\n'), ((10466, 10495), 'json.loads', 'json.loads', (['query.collections'], {}), '(query.collections)\n', (10476, 10495), False, 'import json\n'), ((3290, 3304), 'hashlib.sha1', 'hashlib.sha1', ([], {}), '()\n', (3302, 3304), False, 'import hashlib\n'), ((5475, 5503), 'json.loads', 'json.loads', (['user.collections'], {}), '(user.collections)\n', (5485, 5503), False, 'import json\n'), ((6966, 6994), 'json.loads', 'json.loads', (['user.collections'], {}), '(user.collections)\n', (6976, 6994), False, 'import json\n'), ((7934, 7962), 'json.loads', 'json.loads', (['user.collections'], {}), '(user.collections)\n', (7944, 7962), False, 'import json\n'), ((10524, 10548), 'search_helpers.get_article_object', 'get_article_object', (['pmid'], {}), '(pmid)\n', (10542, 10548), False, 'from search_helpers import get_article_object\n'), ((1977, 1989), 'base64.b64decode', 'b64decode', (['s'], {}), '(s)\n', (1986, 1989), False, 'from base64 import b64decode, b64encode\n'), ((10148, 10175), 'json.dumps', 'json.dumps', (['collections_obj'], {}), '(collections_obj)\n', (10158, 10175), False, 'import json\n'), ((1734, 1773), 'json.dumps', 'json.dumps', (['d'], {'indent': '(4)', 'sort_keys': '(True)'}), '(d, indent=4, sort_keys=True)\n', (1744, 1773), False, 'import json\n'), ((6538, 6552), 'json.dumps', 'json.dumps', (['{}'], {}), '({})\n', (6548, 6552), False, 'import json\n'), ((9841, 9859), 'json.dumps', 'json.dumps', (['target'], {}), '(target)\n', (9851, 9859), False, 'import json\n'), ((4714, 4742), 'json.dumps', 'json.dumps', (['user_collections'], {}), '(user_collections)\n', (4724, 4742), False, 'import json\n'), ((5980, 5998), 'json.dumps', 'json.dumps', (['target'], {}), '(target)\n', (5990, 5998), False, 'import json\n'), ((8564, 8582), 'json.dumps', 'json.dumps', (['target'], {}), '(target)\n', (8574, 8582), False, 'import json\n')] |
import os
DOWNLOAD_IMAGES = True
MAX_PROCESS = os.cpu_count()
IMAGES_PATH = "tmp/"
PAGE_URL = "https://en.wikipedia.org/wiki/List_of_animal_names"
TABLE_HEADER_COLS = ["Animal",
"Scientific term",
"Young",
"Female",
"Male",
"Collateral adjective"]
TABLE_CLASS_NAME = "wikitable"
ADD_TH_TAG = lambda name: f"<tr>{name}</tr>"
HTML_TEMPLATE = lambda body: f"""
<!doctype html>
<html lang="en">
<head>
</head>
<body>
{body}
</body>
</html>
"""
| [
"os.cpu_count"
] | [((48, 62), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (60, 62), False, 'import os\n')] |
from typing import Optional, Any, Protocol
from attr import attrs, attrib
class TreeNode(Protocol):
key: float
y: float
parent: Optional["TreeNode"]
left: Optional["TreeNode"]
right: Optional["TreeNode"]
l_count: int
l_y: float
l_y_sq: float
r_count: int
r_y: float
r_y_sq: float
def add(self, val: float, y: float) -> None:
...
@attrs(auto_attribs=True, slots=True)
class NodeEBST(TreeNode):
key: float
y: float
parent: Optional[TreeNode] = None
left: Optional[TreeNode] = attrib(init=False, default=None)
right: Optional[TreeNode] = attrib(init=False, default=None)
l_count: int = attrib(init=False, default=1)
l_y: float = attrib(init=False)
l_y_sq: float = attrib(init=False)
r_count: int = attrib(init=False, default=0)
r_y: float = attrib(init=False, default=0)
r_y_sq: float = attrib(init=False, default=0)
def __attrs_post_init__(self):
self.l_y = self.y
self.l_y_sq = self.y**2
def add(self, val: float, y: float) -> None:
if val <= self.key:
self.l_count += 1
self.l_y += y
self.l_y_sq += y**2
if self.left is None and val != self.key:
self.left = NodeEBST(val, y, self)
elif val == self.key:
pass
else:
self.left.add(val,y)
else:
self.r_count += 1
self.r_y += y
self.r_y_sq += y**2
if self.right is None:
self.right = NodeEBST(val, y, self)
else:
self.right.add(val,y)
return
@attrs(auto_attribs=True, slots=True)
class EBSTree:
root: Optional[TreeNode] = None
def add(self, key: float, y: float) -> None:
if self.root is None:
self.root = NodeEBST(key, y)
else:
self.root.add(key, y)
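# Added usage sketch (illustrative, not part of the original module): keys and
# targets are inserted one at a time; each node keeps running sums of y and
# y**2 for the values falling to its left/right, which is what split-variance
# computations on an extended BST consume.
def _example_ebst():
    tree = EBSTree()
    for key, y in [(1.0, 2.0), (3.0, 4.0), (2.0, 3.0)]:
        tree.add(key, y)
    return tree.root.l_count, tree.root.r_count  # -> (1, 2)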
| [
"attr.attrib",
"attr.attrs"
] | [((393, 429), 'attr.attrs', 'attrs', ([], {'auto_attribs': '(True)', 'slots': '(True)'}), '(auto_attribs=True, slots=True)\n', (398, 429), False, 'from attr import attrs, attrib\n'), ((1660, 1696), 'attr.attrs', 'attrs', ([], {'auto_attribs': '(True)', 'slots': '(True)'}), '(auto_attribs=True, slots=True)\n', (1665, 1696), False, 'from attr import attrs, attrib\n'), ((554, 586), 'attr.attrib', 'attrib', ([], {'init': '(False)', 'default': 'None'}), '(init=False, default=None)\n', (560, 586), False, 'from attr import attrs, attrib\n'), ((619, 651), 'attr.attrib', 'attrib', ([], {'init': '(False)', 'default': 'None'}), '(init=False, default=None)\n', (625, 651), False, 'from attr import attrs, attrib\n'), ((671, 700), 'attr.attrib', 'attrib', ([], {'init': '(False)', 'default': '(1)'}), '(init=False, default=1)\n', (677, 700), False, 'from attr import attrs, attrib\n'), ((718, 736), 'attr.attrib', 'attrib', ([], {'init': '(False)'}), '(init=False)\n', (724, 736), False, 'from attr import attrs, attrib\n'), ((757, 775), 'attr.attrib', 'attrib', ([], {'init': '(False)'}), '(init=False)\n', (763, 775), False, 'from attr import attrs, attrib\n'), ((795, 824), 'attr.attrib', 'attrib', ([], {'init': '(False)', 'default': '(0)'}), '(init=False, default=0)\n', (801, 824), False, 'from attr import attrs, attrib\n'), ((842, 871), 'attr.attrib', 'attrib', ([], {'init': '(False)', 'default': '(0)'}), '(init=False, default=0)\n', (848, 871), False, 'from attr import attrs, attrib\n'), ((892, 921), 'attr.attrib', 'attrib', ([], {'init': '(False)', 'default': '(0)'}), '(init=False, default=0)\n', (898, 921), False, 'from attr import attrs, attrib\n')] |
import numpy as np
def generate_profile(npoints, length, alpha, window, h = 1., seed=None):
"""
Generates fractal fault profile with zero mean and a given amplitude/wavelength ratio
Inputs:
npoints = number of grid points
length = length of fault in physical domain
alpha = amplitude to wavelength ratio
window = length of minimum wavelength in grid points
seed = seed for random number generator
Returns:
heights of fault profile (array-like)
"""
npoints = int(npoints)
window = int(window)
length = float(length)
alpha = float(alpha)
h = float(h)
if not seed is None:
seed = int(seed)
prng = np.random.RandomState(seed)
phase = 2.*np.pi*prng.rand(npoints)
k = 2.*np.pi*np.fft.fftfreq(npoints,length/float(npoints-1))
amp = np.zeros(npoints)
nflt = npoints//window
if npoints%2 == 0:
nfreq = npoints//2+1
else:
nfreq = (npoints-1)//2+1
amp[1:] = (alpha*(2.*np.pi/np.abs(k[1:]))**(0.5*(1.+2.*h))*np.sqrt(np.pi/length)/2.*float(npoints))
amp[nflt+1:-nflt] = 0.
f = amp*np.exp(np.complex(0., 1.)*phase)
fund = np.fft.fft(prng.choice([-1., 1.])*alpha*length*np.sin(np.linspace(0., length, npoints)*np.pi/length))
f = np.real(np.fft.ifft(f+fund))
return f-f[0]-(f[-1]-f[0])/length*np.linspace(0., length, npoints)
def calc_diff(f, dx):
"""
Calculates derivative using 4th order finite differences
Inputs:
f = function
dx = grid spacing
Returns:
derivative (array-like)
"""
df = (np.roll(f,-3)/60.-np.roll(f,-2)*3./20.+np.roll(f,-1)*3./4.-np.roll(f,1)*3./4.+np.roll(f,2)*3./20.-np.roll(f,3)/60.)/dx
df[0] = (-21600./13649.*f[0]+81763./40947.*f[1]+131./27298.*f[2]-9143./13649.*f[3]+20539./81894.*f[4])/dx
df[1] = (-81763./180195.*f[0]+7357./36039.*f[2]+30637./72078.*f[3]-2328./12013.*f[4]+6611./360390.*f[5])/dx
df[2] = (-131./54220.*f[0]-7357./16266.*f[1]+645./2711.*f[3]+11237./32532.*f[4]-3487./27110.*f[5])/dx
df[3] = (9143./53590.*f[0]-30637./64308.*f[1]-645./5359.*f[2]+13733./32154.*f[4]-67./4660.*f[5]+72./5359.*f[6])/dx
df[4] = (-20539./236310.*f[0]+2328./7877.*f[1]-11237./47262.*f[2]-13733./23631.*f[3]+89387./118155.*f[5]-1296./7877.*f[6]+144./7877.*f[7])/dx
df[5] = (-6611./262806.*f[1]+3487./43801.*f[2]+1541./87602.*f[3]-89387./131403.*f[4]+32400./43801.*f[6]-6480./43801.*f[7]+720./43801.*f[8])/dx
df[-1] = -(-21600./13649.*f[-1]+81763./40947.*f[-2]+131./27298.*f[-3]-9143./13649.*f[-4]+20539./81894.*f[-5])/dx
df[-2] = -(-81763./180195.*f[-1]+7357./36039.*f[-3]+30637./72078.*f[-4]-2328./12013.*f[-5]+6611./360390.*f[-6])/dx
df[-3] = -(-131./54220.*f[-1]-7357./16266.*f[-2]+645./2711.*f[-4]+11237./32532.*f[-5]-3487./27110.*f[-6])/dx
df[-4] = -(9143./53590.*f[-1]-30637./64308.*f[-2]-645./5359.*f[-3]+13733./32154.*f[-5]-67./4660.*f[-6]+72./5359.*f[-7])/dx
df[-5] = -(-20539./236310.*f[-1]+2328./7877.*f[-2]-11237./47262.*f[-3]-13733./23631.*f[-4]+89387./118155.*f[-6]-1296./7877.*f[-7]+144./7877.*f[-8])/dx
df[-6] = -(-6611./262806.*f[-2]+3487./43801.*f[-3]+1541./87602.*f[-4]-89387./131403.*f[-5]+32400./43801.*f[-7]-6480./43801.*f[-8]+720./43801.*f[-9])/dx
return df
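# Added illustration: the interior stencil and one-sided boundary closures above
# are exact for linear profiles, e.g.
#   x = np.linspace(0., 1., 101); calc_diff(2.*x, x[1] - x[0])
# returns an array that is 2.0 everywhere up to round-off.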
def generate_normals_2d(x, y, direction):
"""
Returns components of normal vectors given coordinates x and y
x and y must be array-like of the same length
direction indicates whether the surface has a normal in the 'x' direction or 'y' direction
coordinates normal to direction must be evenly spaced
nx and ny are array-like and of the same length as x and y
"""
assert x.shape == y.shape, "x and y must have the same length"
assert len(x.shape) == 1 and len(y.shape) == 1, "x and y must be 1d arrays"
assert direction == 'x' or direction == 'y', "direction must be 'x' or 'y'"
if direction == 'x':
dx = y[2]-y[1]
assert(dx > 0.)
m = calc_diff(x, dx)
ny = -m/np.sqrt(1.+m**2)
nx = 1./np.sqrt(1.+m**2)
else:
dx = x[2]-x[1]
assert(dx > 0.)
m = calc_diff(y, dx)
nx = -m/np.sqrt(1.+m**2)
ny = 1./np.sqrt(1.+m**2)
return nx, ny
def rotate_xy2nt_2d(sxx, sxy, syy, n, orientation=None):
"""
Rotates stress components from xy to normal/tangential to given normal vector
Inputs:
stress components sxx, sxy, syy (negative in compression), can be arrays
n is normal vector for receiver fault (must have length 2)
orientation (optional) is a string indicating how you would like to compute the tangent vector
In all cases, the hypothetical second tangent vector is in the +z direction
"left" or "x" indicates that the tangent vector is defined by n x t1 = z (x is right handed
cross product, chosen so that faults with a purely +x normal will give a tangent in
the +y direction). This means that a positive Coulomb function leads to left lateral
slip on the fault.
"right" or "y" indicates that the tangent vector is defined by t1 x n = z (so that a normal
in the +y direction will give a tangent vector in the +x direction). This means that a
positive Coulomb function leads to right lateral slip on the fault.
None indicates that the "right" or "y" convention is used (default)
Returns:
normal and shear stress in rotated coordinates
"""
assert len(n) == 2, "normal vector must have length 2"
assert np.isclose(np.sqrt(n[0]**2+n[1]**2),1.), "normal vector must be normalized"
assert (orientation == "right" or orientation == "left" or
orientation == "x" or orientation == "y" or orientation == None)
m = tangent_2d(n, orientation)
sn = n[0]**2*sxx+2.*n[0]*n[1]*sxy+n[1]**2*syy
st = n[0]*m[0]*sxx+(m[0]*n[1]+n[0]*m[1])*sxy+n[1]*m[1]*syy
return sn, st
def tangent_2d(n, orientation=None):
"""
Returns vector orthogonal to input vector n (must have length 2)
orientation (optional) is a string indicating how you would like to compute the tangent vector
In all cases, the hypothetical second tangent vector is in the +z direction
"left" or "x" indicates that the tangent vector is defined by n x t1 = z (x is right handed
cross product, chosen so that faults with a purely +x normal will give a tangent in
the +y direction). This means that a positive Coulomb function leads to left lateral
slip on the fault.
"right" or "y" indicates that the tangent vector is defined by t1 x n = z (so that a normal
in the +y direction will give a tangent vector in the +x direction). This means that a
positive Coulomb function leads to right lateral slip on the fault.
None indicates that the "right" or "y" convention is used (default)
"""
assert len(n) == 2, "normal vector must be of length 2"
assert np.isclose(np.sqrt(n[0]**2+n[1]**2),1.), "normal vector must be normalized"
assert (orientation == "right" or orientation == "left" or
orientation == "x" or orientation == "y" or orientation == None)
m = np.empty(2)
if (orientation == "x" or orientation == "left"):
m[1] = n[0]/np.sqrt(n[0]**2+n[1]**2)
m[0] = -n[1]/np.sqrt(n[0]**2+n[1]**2)
else:
m[0] = n[1]/np.sqrt(n[0]**2+n[1]**2)
m[1] = -n[0]/np.sqrt(n[0]**2+n[1]**2)
return m
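# --- Added illustration; not part of the original module ---
# For a fault whose unit normal is the +y axis, the default "right"/"y"
# convention gives the tangent t = (1, 0); a stress state with sxy = 1 and
# syy = -1 then rotates to sn = -1 (compression) and st = 1 (right-lateral
# positive shear).
def _example_rotation():
    n = np.array([0., 1.])
    return rotate_xy2nt_2d(sxx=0., sxy=1., syy=-1., n=n)  # -> (-1.0, 1.0)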
| [
"numpy.abs",
"numpy.sqrt",
"numpy.roll",
"numpy.complex",
"numpy.zeros",
"numpy.linspace",
"numpy.empty",
"numpy.fft.ifft",
"numpy.random.RandomState"
] | [((676, 703), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (697, 703), True, 'import numpy as np\n'), ((819, 836), 'numpy.zeros', 'np.zeros', (['npoints'], {}), '(npoints)\n', (827, 836), True, 'import numpy as np\n'), ((7277, 7288), 'numpy.empty', 'np.empty', (['(2)'], {}), '(2)\n', (7285, 7288), True, 'import numpy as np\n'), ((1264, 1285), 'numpy.fft.ifft', 'np.fft.ifft', (['(f + fund)'], {}), '(f + fund)\n', (1275, 1285), True, 'import numpy as np\n'), ((5575, 5605), 'numpy.sqrt', 'np.sqrt', (['(n[0] ** 2 + n[1] ** 2)'], {}), '(n[0] ** 2 + n[1] ** 2)\n', (5582, 5605), True, 'import numpy as np\n'), ((7063, 7093), 'numpy.sqrt', 'np.sqrt', (['(n[0] ** 2 + n[1] ** 2)'], {}), '(n[0] ** 2 + n[1] ** 2)\n', (7070, 7093), True, 'import numpy as np\n'), ((1323, 1356), 'numpy.linspace', 'np.linspace', (['(0.0)', 'length', 'npoints'], {}), '(0.0, length, npoints)\n', (1334, 1356), True, 'import numpy as np\n'), ((3957, 3978), 'numpy.sqrt', 'np.sqrt', (['(1.0 + m ** 2)'], {}), '(1.0 + m ** 2)\n', (3964, 3978), True, 'import numpy as np\n'), ((3990, 4011), 'numpy.sqrt', 'np.sqrt', (['(1.0 + m ** 2)'], {}), '(1.0 + m ** 2)\n', (3997, 4011), True, 'import numpy as np\n'), ((4109, 4130), 'numpy.sqrt', 'np.sqrt', (['(1.0 + m ** 2)'], {}), '(1.0 + m ** 2)\n', (4116, 4130), True, 'import numpy as np\n'), ((4142, 4163), 'numpy.sqrt', 'np.sqrt', (['(1.0 + m ** 2)'], {}), '(1.0 + m ** 2)\n', (4149, 4163), True, 'import numpy as np\n'), ((7364, 7394), 'numpy.sqrt', 'np.sqrt', (['(n[0] ** 2 + n[1] ** 2)'], {}), '(n[0] ** 2 + n[1] ** 2)\n', (7371, 7394), True, 'import numpy as np\n'), ((7410, 7440), 'numpy.sqrt', 'np.sqrt', (['(n[0] ** 2 + n[1] ** 2)'], {}), '(n[0] ** 2 + n[1] ** 2)\n', (7417, 7440), True, 'import numpy as np\n'), ((7465, 7495), 'numpy.sqrt', 'np.sqrt', (['(n[0] ** 2 + n[1] ** 2)'], {}), '(n[0] ** 2 + n[1] ** 2)\n', (7472, 7495), True, 'import numpy as np\n'), ((7511, 7541), 'numpy.sqrt', 'np.sqrt', (['(n[0] ** 2 + n[1] ** 2)'], {}), '(n[0] ** 2 + n[1] ** 2)\n', (7518, 7541), True, 'import numpy as np\n'), ((1022, 1045), 'numpy.sqrt', 'np.sqrt', (['(np.pi / length)'], {}), '(np.pi / length)\n', (1029, 1045), True, 'import numpy as np\n'), ((1109, 1129), 'numpy.complex', 'np.complex', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (1119, 1129), True, 'import numpy as np\n'), ((1657, 1670), 'numpy.roll', 'np.roll', (['f', '(3)'], {}), '(f, 3)\n', (1664, 1670), True, 'import numpy as np\n'), ((1200, 1233), 'numpy.linspace', 'np.linspace', (['(0.0)', 'length', 'npoints'], {}), '(0.0, length, npoints)\n', (1211, 1233), True, 'import numpy as np\n'), ((1637, 1650), 'numpy.roll', 'np.roll', (['f', '(2)'], {}), '(f, 2)\n', (1644, 1650), True, 'import numpy as np\n'), ((990, 1003), 'numpy.abs', 'np.abs', (['k[1:]'], {}), '(k[1:])\n', (996, 1003), True, 'import numpy as np\n'), ((1618, 1631), 'numpy.roll', 'np.roll', (['f', '(1)'], {}), '(f, 1)\n', (1625, 1631), True, 'import numpy as np\n'), ((1559, 1573), 'numpy.roll', 'np.roll', (['f', '(-3)'], {}), '(f, -3)\n', (1566, 1573), True, 'import numpy as np\n'), ((1598, 1612), 'numpy.roll', 'np.roll', (['f', '(-1)'], {}), '(f, -1)\n', (1605, 1612), True, 'import numpy as np\n'), ((1577, 1591), 'numpy.roll', 'np.roll', (['f', '(-2)'], {}), '(f, -2)\n', (1584, 1591), True, 'import numpy as np\n')] |
from rich import print
from youtubesearchpython import SearchVideos, SearchPlaylists
from PyInquirer import style_from_dict, Token, prompt
from PyInquirer import Validator, ValidationError
from halo import Halo
import regex
import re
import sys
import pyperclip
#
spinner = Halo(text='Searching', spinner='dots')
style = style_from_dict({
Token.QuestionMark: '#E91E63 bold',
Token.Selected: '#8B64CE bold',
Token.Instruction: '#EBF2FA', # default
Token.Answer: '#2196f3 bold',
Token.Question: '#00ff00 bold',
})
# TODO search_video(query) in validator | and create two separate validators for video query and playlist query
class queryValidator(Validator):
def validate(self, document):
query = document.text
if len(query) >= 55:
raise ValidationError(
                message='Please be concise. Query should be less than 55 characters.',
cursor_position=len(document.text)) # Move cursor to end
elif query == "":
raise ValidationError(
message='Please enter something.',
cursor_position=len(document.text)) # Move cursor to end
class linkValidator(Validator):
def validate(self, document):
url = document.text
if urlvalidator(url):
return True
else:
raise ValidationError(
message='Please enter a valid youtube link',
cursor_position=len(document.text)) # Move cursor to end
def urlvalidator(url):
if regex.match(r'^(http(s)?:\/\/)?((w){3}.)?youtu(be|.be)?(\.com)?\/.+', url):
return True
else:
return False
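# Added examples (illustrative): the pattern above accepts the common YouTube
# URL shapes, e.g. urlvalidator("https://youtu.be/dQw4w9WgXcQ") and
# urlvalidator("www.youtube.com/watch?v=dQw4w9WgXcQ") return True, while
# urlvalidator("https://example.com/watch") returns False.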
def check_clipboard():
clipboard_content = pyperclip.paste()
if urlvalidator(clipboard_content):
return True
else:
return False
def search_query():
questions = [
{
'type': 'input',
'name': 'query',
'message': 'Search:',
'validate': queryValidator
}]
answers = prompt(questions, style=style)
if not answers:
pass
return answers
def search_video(query):
spinner.start()
data = SearchVideos(query, offset=1, mode="dict", max_results=8)
final_data = data.result()
if not final_data:
spinner.stop()
print("[bold red] Something went wrong. Try another query or check network connenctivity[bold red]")
else:
options = []
for i in final_data['search_result']:
temp_dict = {}
temp_dict.update({'name': i['title'], 'value': i['link']})
options.append(temp_dict)
spinner.stop()
return options
def search_playlist(query):
spinner.start()
data = SearchPlaylists(query, offset=1, mode='dict', max_results=8)
final_data = data.result()
if not final_data:
spinner.stop()
print("[bold red] Something went wrong. Try another query or check network connenctivity[/bold red]")
else:
options = []
for i in final_data['search_result']:
temp_dict = {}
temp_dict.update({'name': i['title'], 'value': i['link']})
options.append(temp_dict)
spinner.stop()
return options
def get_options(answers):
options = [
{
'name': 'Enter URL',
'value': 'manual'
},
{
'name': 'Search Video',
'value': 'video_search'
},
{
'name': 'Search Playlist',
'value': 'playlist_search'
}
]
if check_clipboard():
options[0] = {
'name': 'Enter other URL',
'value': 'manual'
}
options.insert(0, {
'name': 'URL form clipboard',
'value': 'clipboard',
})
return options
def start_1():
ques1 = [
{
'type': 'list',
'name': 'source',
'message': 'Select:',
'choices': get_options
},
{
'type': 'input',
'name': 'url',
'message': 'Enter Link:',
'validate': linkValidator,
'when': lambda ans1: ans1['source'] == 'manual'
},
{
'type': 'input',
'name': 'query',
'message': 'Search:',
'validate': queryValidator,
'when': lambda ans1: ans1['source'] == 'video_search' or ans1['source'] == 'playlist_search'
}]
ans1 = prompt(ques1, style=style)
if not ans1:
pass
if ans1['source'] == 'video_search':
query = ans1['query']
ques2 = {
'type': 'list',
'name': 'url',
'message': 'Results (select):',
'choices': search_video(query)
}
ans2 = prompt(ques2, style=style)
if not ans2:
pass
return(ans2['url'])
elif ans1['source'] == 'playlist_search':
query = ans1['query']
ques2 = {
'type': 'list',
'name': 'url',
'message': 'Results (select):',
'choices': search_playlist(query)
}
ans2 = prompt(ques2, style=style)
if not ans2:
pass
return(ans2['url'])
elif ans1['source'] == 'manual':
ans2 = {}
ans2['url'] = ans1['url']
return(ans2['url'])
elif ans1['source'] == 'clipboard':
ans2 = {}
ans2['url'] = pyperclip.paste()
return(ans2['url'])
| [
"halo.Halo",
"PyInquirer.prompt",
"youtubesearchpython.SearchVideos",
"regex.match",
"youtubesearchpython.SearchPlaylists",
"rich.print",
"pyperclip.paste",
"PyInquirer.style_from_dict"
] | [((275, 313), 'halo.Halo', 'Halo', ([], {'text': '"""Searching"""', 'spinner': '"""dots"""'}), "(text='Searching', spinner='dots')\n", (279, 313), False, 'from halo import Halo\n'), ((323, 508), 'PyInquirer.style_from_dict', 'style_from_dict', (["{Token.QuestionMark: '#E91E63 bold', Token.Selected: '#8B64CE bold', Token.\n Instruction: '#EBF2FA', Token.Answer: '#2196f3 bold', Token.Question:\n '#00ff00 bold'}"], {}), "({Token.QuestionMark: '#E91E63 bold', Token.Selected:\n '#8B64CE bold', Token.Instruction: '#EBF2FA', Token.Answer:\n '#2196f3 bold', Token.Question: '#00ff00 bold'})\n", (338, 508), False, 'from PyInquirer import style_from_dict, Token, prompt\n'), ((1522, 1599), 'regex.match', 'regex.match', (['"""^(http(s)?:\\\\/\\\\/)?((w){3}.)?youtu(be|.be)?(\\\\.com)?\\\\/.+"""', 'url'], {}), "('^(http(s)?:\\\\/\\\\/)?((w){3}.)?youtu(be|.be)?(\\\\.com)?\\\\/.+', url)\n", (1533, 1599), False, 'import regex\n'), ((1698, 1715), 'pyperclip.paste', 'pyperclip.paste', ([], {}), '()\n', (1713, 1715), False, 'import pyperclip\n'), ((2013, 2043), 'PyInquirer.prompt', 'prompt', (['questions'], {'style': 'style'}), '(questions, style=style)\n', (2019, 2043), False, 'from PyInquirer import style_from_dict, Token, prompt\n'), ((2156, 2213), 'youtubesearchpython.SearchVideos', 'SearchVideos', (['query'], {'offset': '(1)', 'mode': '"""dict"""', 'max_results': '(8)'}), "(query, offset=1, mode='dict', max_results=8)\n", (2168, 2213), False, 'from youtubesearchpython import SearchVideos, SearchPlaylists\n'), ((2724, 2784), 'youtubesearchpython.SearchPlaylists', 'SearchPlaylists', (['query'], {'offset': '(1)', 'mode': '"""dict"""', 'max_results': '(8)'}), "(query, offset=1, mode='dict', max_results=8)\n", (2739, 2784), False, 'from youtubesearchpython import SearchVideos, SearchPlaylists\n'), ((4485, 4511), 'PyInquirer.prompt', 'prompt', (['ques1'], {'style': 'style'}), '(ques1, style=style)\n', (4491, 4511), False, 'from PyInquirer import style_from_dict, Token, prompt\n'), ((2299, 2410), 'rich.print', 'print', (['"""[bold red] Something went wrong. Try another query or check network connenctivity[bold red]"""'], {}), "(\n '[bold red] Something went wrong. Try another query or check network connenctivity[bold red]'\n )\n", (2304, 2410), False, 'from rich import print\n'), ((2870, 2982), 'rich.print', 'print', (['"""[bold red] Something went wrong. Try another query or check network connenctivity[/bold red]"""'], {}), "(\n '[bold red] Something went wrong. Try another query or check network connenctivity[/bold red]'\n )\n", (2875, 2982), False, 'from rich import print\n'), ((4799, 4825), 'PyInquirer.prompt', 'prompt', (['ques2'], {'style': 'style'}), '(ques2, style=style)\n', (4805, 4825), False, 'from PyInquirer import style_from_dict, Token, prompt\n'), ((5158, 5184), 'PyInquirer.prompt', 'prompt', (['ques2'], {'style': 'style'}), '(ques2, style=style)\n', (5164, 5184), False, 'from PyInquirer import style_from_dict, Token, prompt\n'), ((5451, 5468), 'pyperclip.paste', 'pyperclip.paste', ([], {}), '()\n', (5466, 5468), False, 'import pyperclip\n')] |
from ciphers.caeser_cipher import CaeserCipher
from pytest import raises
class TestCaeser:
def setup_class(self):
self.default_cipher = CaeserCipher()
self.custom_cipher = CaeserCipher(rotation=1)
def test_caeser_cipher_encrypt_default_rotation(self):
input_string = "some random string"
output_string = self.default_cipher.encrypt(plain_text=input_string)
assert output_string == "4c0> e.#qc0 4(e^#["
def test_caeser_cipher_decrypt_default_rotation(self):
input_string = "4c0> e.#qc0 4(e^#["
output_string = self.default_cipher.decrypt(encrypted=input_string)
assert output_string == "some random string"
def test_caeser_cipher_encrypt_custom_rotation(self):
input_string = "some random string"
output_string = self.custom_cipher.encrypt(plain_text=input_string)
assert output_string == ">:.3 =~/2:. >?=7/5"
def test_caeser_cipher_decrypt_custom_rotation(self):
input_string = ">:.3 =~/2:. >?=7/5"
output_string = self.custom_cipher.decrypt(encrypted=input_string)
assert output_string == "some random string"
def test_get_rotation(self):
assert self.custom_cipher.rotation == 1
def test_set_rotation_invalid_input(self):
with raises(Exception) as value_error:
self.custom_cipher.rotation = "five"
assert str(value_error) == "Rotation must be an integer"
| [
"pytest.raises",
"ciphers.caeser_cipher.CaeserCipher"
] | [((150, 164), 'ciphers.caeser_cipher.CaeserCipher', 'CaeserCipher', ([], {}), '()\n', (162, 164), False, 'from ciphers.caeser_cipher import CaeserCipher\n'), ((194, 218), 'ciphers.caeser_cipher.CaeserCipher', 'CaeserCipher', ([], {'rotation': '(1)'}), '(rotation=1)\n', (206, 218), False, 'from ciphers.caeser_cipher import CaeserCipher\n'), ((1292, 1309), 'pytest.raises', 'raises', (['Exception'], {}), '(Exception)\n', (1298, 1309), False, 'from pytest import raises\n')] |
import re
from dvc.command.version import CmdVersion
from dvc.cli import parse_args
def test_run_in_repo(repo_dir, dvc):
cmd = CmdVersion(parse_args(["version"]))
ret = cmd.run_cmd()
assert ret == 0
def test_run_outside_of_repo(repo_dir):
cmd = CmdVersion(parse_args(["version"]))
ret = cmd.run_cmd()
assert ret == 0
def test_info(caplog):
cmd = CmdVersion(parse_args(["version"]))
cmd.run()
assert re.search(re.compile(r"DVC version: \d+\.\d+\.\d+"), caplog.text)
assert re.search(re.compile(r"Python version: \d\.\d\.\d"), caplog.text)
assert re.search(re.compile(r"Platform: .*"), caplog.text)
| [
"dvc.cli.parse_args",
"re.compile"
] | [((145, 168), 'dvc.cli.parse_args', 'parse_args', (["['version']"], {}), "(['version'])\n", (155, 168), False, 'from dvc.cli import parse_args\n'), ((277, 300), 'dvc.cli.parse_args', 'parse_args', (["['version']"], {}), "(['version'])\n", (287, 300), False, 'from dvc.cli import parse_args\n'), ((392, 415), 'dvc.cli.parse_args', 'parse_args', (["['version']"], {}), "(['version'])\n", (402, 415), False, 'from dvc.cli import parse_args\n'), ((453, 498), 're.compile', 're.compile', (['"""DVC version: \\\\d+\\\\.\\\\d+\\\\.\\\\d+"""'], {}), "('DVC version: \\\\d+\\\\.\\\\d+\\\\.\\\\d+')\n", (463, 498), False, 'import re\n'), ((530, 575), 're.compile', 're.compile', (['"""Python version: \\\\d\\\\.\\\\d\\\\.\\\\d"""'], {}), "('Python version: \\\\d\\\\.\\\\d\\\\.\\\\d')\n", (540, 575), False, 'import re\n'), ((607, 633), 're.compile', 're.compile', (['"""Platform: .*"""'], {}), "('Platform: .*')\n", (617, 633), False, 'import re\n')] |
import text_game_maker
from text_game_maker.audio import audio
from text_game_maker.utils import utils
from text_game_maker.game_objects.base import serialize as base_serialize
from text_game_maker.game_objects.base import deserialize as base_deserialize
craftables = {}
def serialize():
ret = {}
for name in craftables:
items, item = craftables[name]
ret[name] = [base_serialize(items), base_serialize(item)]
return ret
def deserialize(data, version):
d = {}
for key in data:
items, item = data[key]
d[key] = [base_deserialize(items, version), base_deserialize(item, version)]
craftables.clear()
craftables.update(d)
def add(items, item):
"""
Add new craftable item
:param [text_game_maker.game_objects.items.Item] items: list of ingredients
:param text_game_maker.game_objects.items.Item item: new item created by\
combining ingredients
"""
craftables[item.name] = [items, item]
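# Illustrative registration (hypothetical Item subclasses, shown only to
# document the expected call shape -- they are not defined in this module):
#     add([Stick(), Blade()], Knife())
# After this call, craft('knife', 'make', player) can combine a Stick and a
# Blade from the player's inventory into a Knife.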
def help_text():
"""
    Retrieve human-readable description of all added craftable items
:return: description of all added craftable items
:rtype: str
"""
ret = []
for name in craftables:
items, item = craftables[name]
item_names = utils.list_to_english([str(x) for x in items])
ret.append("%s: requires %s" % (item.name, item_names))
if ret:
return '\n'.join(ret)
return None
def _find_item(item, items):
for i in items:
if (i.name == item.name) and isinstance(i, item.__class__):
return i
return None
def _find_craftable(name):
items = []
item = None
if name in craftables:
items, item = craftables[name]
else:
for k in craftables:
if k.startswith(name) or k.endswith(name) or (k in name):
items, item = craftables[k]
break
return items, item
def _need_items(name, word, items):
names = [str(x) for x in items]
utils.save_sound(audio.FAILURE_SOUND)
utils.game_print("Can't %s %s. Need %s."
% (word, name, utils.list_to_english(names)))
def can_craft(name):
"""
Check if player has the ability to craft an item by name. Note this function
only checks if player has acquired the blueprint to craft an item, and does
not care whether the player has ingredients required to craft the item.
:param str name: item name
:return: True if player can craft the item, False otherwise
:rtype: bool
"""
_, item = _find_craftable(name)
return not (item is None)
def craft(name, word, player):
"""
Craft an item by name. Deletes ingredients from player's inventory and
places crafted item into player's inventory.
:param str name: name of the item to craft
:param str word: command/action word used by player
:param text_game_maker.player.player.Player player: player instance
:return: crafted item, or None if crafting fails
:rtype: text_game_maker.game_objects.items.Item
"""
items, item = _find_craftable(name)
if item is None:
utils.save_sound(audio.FAILURE_SOUND)
utils.game_print("Don't know how to %s %s" % (word, name))
return None
ingredients = []
player_items = []
player_items.extend(player.pockets.items)
if player.inventory:
player_items.extend(player.inventory.items)
for i in items:
ingredient = _find_item(i, player_items)
if ingredient is None:
_need_items(name, word, items)
return None
ingredients.append(ingredient)
for i in ingredients:
i.delete()
item.prep = "your " + item.name
item.add_to_player_inventory(player)
utils.save_sound(audio.NEW_ITEM_SOUND)
utils.game_print("Created %s." % item.name)
return item
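# Typical flow (illustrative, reusing the hypothetical 'knife' recipe from the
# add() example above):
#     if can_craft('knife'):
#         knife = craft('knife', 'make', player)
# craft() deletes the ingredient items from the player's inventory and returns
# the newly created item, or None if an ingredient is missing.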
| [
"text_game_maker.utils.utils.list_to_english",
"text_game_maker.utils.utils.game_print",
"text_game_maker.utils.utils.save_sound",
"text_game_maker.game_objects.base.deserialize",
"text_game_maker.game_objects.base.serialize"
] | [((1985, 2022), 'text_game_maker.utils.utils.save_sound', 'utils.save_sound', (['audio.FAILURE_SOUND'], {}), '(audio.FAILURE_SOUND)\n', (2001, 2022), False, 'from text_game_maker.utils import utils\n'), ((3732, 3770), 'text_game_maker.utils.utils.save_sound', 'utils.save_sound', (['audio.NEW_ITEM_SOUND'], {}), '(audio.NEW_ITEM_SOUND)\n', (3748, 3770), False, 'from text_game_maker.utils import utils\n'), ((3775, 3818), 'text_game_maker.utils.utils.game_print', 'utils.game_print', (["('Created %s.' % item.name)"], {}), "('Created %s.' % item.name)\n", (3791, 3818), False, 'from text_game_maker.utils import utils\n'), ((3103, 3140), 'text_game_maker.utils.utils.save_sound', 'utils.save_sound', (['audio.FAILURE_SOUND'], {}), '(audio.FAILURE_SOUND)\n', (3119, 3140), False, 'from text_game_maker.utils import utils\n'), ((3149, 3207), 'text_game_maker.utils.utils.game_print', 'utils.game_print', (['("Don\'t know how to %s %s" % (word, name))'], {}), '("Don\'t know how to %s %s" % (word, name))\n', (3165, 3207), False, 'from text_game_maker.utils import utils\n'), ((392, 413), 'text_game_maker.game_objects.base.serialize', 'base_serialize', (['items'], {}), '(items)\n', (406, 413), True, 'from text_game_maker.game_objects.base import serialize as base_serialize\n'), ((415, 435), 'text_game_maker.game_objects.base.serialize', 'base_serialize', (['item'], {}), '(item)\n', (429, 435), True, 'from text_game_maker.game_objects.base import serialize as base_serialize\n'), ((568, 600), 'text_game_maker.game_objects.base.deserialize', 'base_deserialize', (['items', 'version'], {}), '(items, version)\n', (584, 600), True, 'from text_game_maker.game_objects.base import deserialize as base_deserialize\n'), ((602, 633), 'text_game_maker.game_objects.base.deserialize', 'base_deserialize', (['item', 'version'], {}), '(item, version)\n', (618, 633), True, 'from text_game_maker.game_objects.base import deserialize as base_deserialize\n'), ((2095, 2123), 'text_game_maker.utils.utils.list_to_english', 'utils.list_to_english', (['names'], {}), '(names)\n', (2116, 2123), False, 'from text_game_maker.utils import utils\n')] |
# see https://cdn-learn.adafruit.com/downloads/pdf/neopixels-on-raspberry-pi.pdf
import board
import neopixel
class NeoPixel24:
pixels = None
def __init__(self):
self.pixels = neopixel.NeoPixel(board.D10, 24, brightness=0.3, auto_write=False, pixel_order=neopixel.GRB);
def fillColor(self, color):
print("neo color", color)
self.pixels.fill(color)
self.pixels.show()
def fillColors(self, col1, col2):
for i in range(24):
if (int(i/3) % 2) == 0:
self.pixels[i] = col1
else:
self.pixels[i] = col2
self.pixels.show()
def rotate(self):
first = self.pixels[0]
for i in range(0,23):
self.pixels[i] = self.pixels[i+1]
self.pixels[23] = first
self.pixels.show() | [
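# Example usage (illustrative sketch only; assumes a Raspberry Pi with a 24-LED
# ring wired to pin D10 as configured in __init__):
#     ring = NeoPixel24()
#     ring.fillColor((255, 0, 0))                 # solid colour on all pixels
#     ring.fillColors((0, 255, 0), (0, 0, 255))   # alternating 3-pixel blocks
#     ring.rotate()                               # shift the pattern by one pixel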
"neopixel.NeoPixel"
] | [((195, 291), 'neopixel.NeoPixel', 'neopixel.NeoPixel', (['board.D10', '(24)'], {'brightness': '(0.3)', 'auto_write': '(False)', 'pixel_order': 'neopixel.GRB'}), '(board.D10, 24, brightness=0.3, auto_write=False,\n pixel_order=neopixel.GRB)\n', (212, 291), False, 'import neopixel\n')] |
import csv
from external_rater import n, k, N, join_amazon_external_rater_fleiss_kappa
def create_consensus_dataset(dataset_file, external_rater_file, destination_file):
kappa, dict_counts = join_amazon_external_rater_fleiss_kappa(dataset_file, external_rater_file, n, N)
dataset = open(dataset_file, encoding='utf-8')
dataset_reader = csv.reader(dataset, delimiter=',')
line_count = 0
dict_content = {}
for row in dataset_reader:
if line_count == 0:
header = row
elif row:
if row[21] == '' and row[27] not in dict_content:
dict_content[row[27]] = row
line_count += 1
dataset.close()
sentence_list = []
blacklist = []
for sentence, counts in dict_counts.items():
for count in counts:
if count in [6, 7, 8] and count == counts[0]:
sentence_list.append(dict_content[sentence][:-1] + ['Yes, they share a direct/explicit relation in the sentence.'])
elif count in [6, 7, 8] and count == counts[1]:
sentence_list.append( dict_content[sentence][:-1] + ['No, they are separate entities with no correlation in the sentence.'])
elif count in [6, 7, 8] and count == counts[2]:
sentence_list.append(dict_content[sentence][:-1] + ['The entities seem to be illy marked, or something is wrong with the entities/sentence.'])
if counts[2] == 4 or counts[2] == 5:
blacklist.append(sentence)
elif 6 not in counts and 7 not in counts and 8 not in counts and counts[2] != 4 and counts[2] != 5:
sentence_list.append(dict_content[sentence][:-1] + ['No, they are separate entities with no correlation in the sentence.'])
output_file = open(destination_file, 'w', encoding='utf-8')
output_file_writer = csv.writer(output_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
output_file_writer.writerow(header)
for row in sentence_list:
output_file_writer.writerow(row)
return blacklist
#create_consensus_dataset('data/batch_results_30.csv', 'data/external_rater_results.tsv', 'data/batch_results_30_consensus.csv')
| [
"external_rater.join_amazon_external_rater_fleiss_kappa",
"csv.writer",
"csv.reader"
] | [((198, 283), 'external_rater.join_amazon_external_rater_fleiss_kappa', 'join_amazon_external_rater_fleiss_kappa', (['dataset_file', 'external_rater_file', 'n', 'N'], {}), '(dataset_file, external_rater_file, n, N\n )\n', (237, 283), False, 'from external_rater import n, k, N, join_amazon_external_rater_fleiss_kappa\n'), ((352, 386), 'csv.reader', 'csv.reader', (['dataset'], {'delimiter': '""","""'}), "(dataset, delimiter=',')\n", (362, 386), False, 'import csv\n'), ((1836, 1912), 'csv.writer', 'csv.writer', (['output_file'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_ALL'}), '(output_file, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_ALL)\n', (1846, 1912), False, 'import csv\n')] |
import fire
from matplotlib import pyplot as plt
import numpy as np
from scipy.stats import norm, laplace, uniform
# True weights for the underlying mixture
true_mixture_weights = [.3, .5, .2]
N = 100
k = 3
# This function generates N from the true mixture of distributions.
# When you use this function to plot your estimated densities,
# pass in the weights you obtain instead of the default parameters.
def generate_samples(mixture_weights):
np.random.seed(1)
# Parameters for the distributions in our mixture: N(3, 2^2), U(-1, 2), L(-2, 3)
samples = np.vstack([[np.random.normal(3, 2),
np.random.uniform(-1, 2),
np.random.laplace(-2, 3)] for _ in range(N)])
indices = np.random.choice(list(range(len(mixture_weights))), p=mixture_weights, size=N)
samples = np.take_along_axis(samples, indices[:, None], axis=1)
return samples.flatten()
# This function evaluates a set of univariate samples
# under the Gaussian, Uniform, and Laplace densities in the problem.
def evaluate_density(samples):
# Parameters for the distributions in our mixture: N(3, 2^2), U(-1, 2), L(-2, 3)
# Minor note: Scipy uniform.pdf expects the first argument as the start interval and the
# second as the length of interval, *not* the endpoint
densities = [[norm.pdf(x, 3, 2),
uniform.pdf(x, -1, 3),
laplace.pdf(x, -2, 3)] for x in samples]
densities = np.array(densities)
return densities
# This function takes as input the estimated mixture weights
# and generates a plot of both the estimated and true density
# by analytically evaluating the density at each point in a discretized
# interval [-20, 20]
def plot_estimated_and_true_density(estimated_mixture_weights):
discretized_x = np.linspace(-20, 20, num=100)
all_densities = evaluate_density(discretized_x)
estimated_densities = all_densities @ estimated_mixture_weights
true_densities = all_densities @ true_mixture_weights
plt.plot(discretized_x, estimated_densities, label="Estimated")
plt.plot(discretized_x, true_densities, label="True")
plt.title("True vs Estimated Density")
plt.legend()
plt.show()
samples = generate_samples(true_mixture_weights)
densities = evaluate_density(samples)
import cvxpy as cp
def main():
lamb = cp.Variable(k)
con = [lamb >= 0., cp.sum(lamb) == 1.]
obj = cp.Maximize(sum(cp.log(lamb @ row) for row in densities))
cp.Problem(constraints=con, objective=obj).solve()
plot_estimated_and_true_density(lamb.value)
print(lamb.value)
if __name__ == "__main__":
fire.Fire(main)
| [
"fire.Fire",
"numpy.array",
"numpy.take_along_axis",
"matplotlib.pyplot.plot",
"numpy.linspace",
"numpy.random.seed",
"numpy.random.laplace",
"numpy.random.normal",
"cvxpy.Problem",
"scipy.stats.laplace.pdf",
"scipy.stats.uniform.pdf",
"scipy.stats.norm.pdf",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"cvxpy.Variable",
"cvxpy.sum",
"numpy.random.uniform",
"cvxpy.log"
] | [((452, 469), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (466, 469), True, 'import numpy as np\n'), ((836, 889), 'numpy.take_along_axis', 'np.take_along_axis', (['samples', 'indices[:, None]'], {'axis': '(1)'}), '(samples, indices[:, None], axis=1)\n', (854, 889), True, 'import numpy as np\n'), ((1465, 1484), 'numpy.array', 'np.array', (['densities'], {}), '(densities)\n', (1473, 1484), True, 'import numpy as np\n'), ((1808, 1837), 'numpy.linspace', 'np.linspace', (['(-20)', '(20)'], {'num': '(100)'}), '(-20, 20, num=100)\n', (1819, 1837), True, 'import numpy as np\n'), ((2020, 2083), 'matplotlib.pyplot.plot', 'plt.plot', (['discretized_x', 'estimated_densities'], {'label': '"""Estimated"""'}), "(discretized_x, estimated_densities, label='Estimated')\n", (2028, 2083), True, 'from matplotlib import pyplot as plt\n'), ((2088, 2141), 'matplotlib.pyplot.plot', 'plt.plot', (['discretized_x', 'true_densities'], {'label': '"""True"""'}), "(discretized_x, true_densities, label='True')\n", (2096, 2141), True, 'from matplotlib import pyplot as plt\n'), ((2146, 2184), 'matplotlib.pyplot.title', 'plt.title', (['"""True vs Estimated Density"""'], {}), "('True vs Estimated Density')\n", (2155, 2184), True, 'from matplotlib import pyplot as plt\n'), ((2189, 2201), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2199, 2201), True, 'from matplotlib import pyplot as plt\n'), ((2206, 2216), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2214, 2216), True, 'from matplotlib import pyplot as plt\n'), ((2351, 2365), 'cvxpy.Variable', 'cp.Variable', (['k'], {}), '(k)\n', (2362, 2365), True, 'import cvxpy as cp\n'), ((2636, 2651), 'fire.Fire', 'fire.Fire', (['main'], {}), '(main)\n', (2645, 2651), False, 'import fire\n'), ((1330, 1347), 'scipy.stats.norm.pdf', 'norm.pdf', (['x', '(3)', '(2)'], {}), '(x, 3, 2)\n', (1338, 1347), False, 'from scipy.stats import norm, laplace, uniform\n'), ((1367, 1388), 'scipy.stats.uniform.pdf', 'uniform.pdf', (['x', '(-1)', '(3)'], {}), '(x, -1, 3)\n', (1378, 1388), False, 'from scipy.stats import norm, laplace, uniform\n'), ((1408, 1429), 'scipy.stats.laplace.pdf', 'laplace.pdf', (['x', '(-2)', '(3)'], {}), '(x, -2, 3)\n', (1419, 1429), False, 'from scipy.stats import norm, laplace, uniform\n'), ((2389, 2401), 'cvxpy.sum', 'cp.sum', (['lamb'], {}), '(lamb)\n', (2395, 2401), True, 'import cvxpy as cp\n'), ((2481, 2523), 'cvxpy.Problem', 'cp.Problem', ([], {'constraints': 'con', 'objective': 'obj'}), '(constraints=con, objective=obj)\n', (2491, 2523), True, 'import cvxpy as cp\n'), ((581, 603), 'numpy.random.normal', 'np.random.normal', (['(3)', '(2)'], {}), '(3, 2)\n', (597, 603), True, 'import numpy as np\n'), ((631, 655), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(2)'], {}), '(-1, 2)\n', (648, 655), True, 'import numpy as np\n'), ((683, 707), 'numpy.random.laplace', 'np.random.laplace', (['(-2)', '(3)'], {}), '(-2, 3)\n', (700, 707), True, 'import numpy as np\n'), ((2435, 2453), 'cvxpy.log', 'cp.log', (['(lamb @ row)'], {}), '(lamb @ row)\n', (2441, 2453), True, 'import cvxpy as cp\n')] |
import subprocess
import paths
import datetime
from functions_recorder import initialize_projector, record_vr_rig, plot_inputs_vr, load_csv
from functions_osc import create_and_send
from os.path import join
from time import sleep
from functions_GUI import get_filename_suffix, replace_name_part
# configure recording
# initialize projector
my_device = initialize_projector()
# get and format the current time
time_name = datetime.datetime.now().strftime("%m_%d_%Y_%H_%M_%S")
# assemble the main body of the output file name
csvName = join(paths.bonsai_out, time_name + r'_social_suffix.csv')
videoName = csvName.replace('.csv', '.avi')
# launch bonsai tracking
bonsai_process = subprocess.Popen([paths.bonsai_path, paths.bonsaiworkflowSOC_path,
"-p:csvName="""+csvName+"""""", "-p:videoName="""+videoName+"""""", "--start"])
# launch Unity tracking
unity_process = subprocess.Popen([paths.unitySOC_path])
# start recording
duration, current_path_sync = record_vr_rig(my_device, paths.sync_path, time_name, '_syncSOC')
# close the opened applications
create_and_send(paths.bonsai_ip, paths.bonsai_port, paths.bonsai_address, [1])
create_and_send(paths.unity_ip, paths.unity_port, paths.unity_address, [1])
sleep(2)
bonsai_process.kill()
# plot the timing
# load the frame_list
print(duration)
frame_list = load_csv(current_path_sync)
plot_inputs_vr(frame_list)
# ask the user for the suffix (animal, result, notes)
suffix = get_filename_suffix()
# add the suffix to all the file names
file_list = [csvName, videoName, current_path_sync]
failed_files, _ = replace_name_part(file_list, 'suffix', suffix)
print(failed_files)
# do the same for the unity files
unity_file = [paths.unity_temp_path]
failed_unity, _ = replace_name_part(unity_file, 'suffix', '_'.join((time_name, suffix)))
print(failed_unity)
| [
"functions_recorder.initialize_projector",
"functions_GUI.get_filename_suffix",
"functions_recorder.record_vr_rig",
"subprocess.Popen",
"functions_recorder.plot_inputs_vr",
"functions_recorder.load_csv",
"os.path.join",
"time.sleep",
"datetime.datetime.now",
"functions_osc.create_and_send",
"functions_GUI.replace_name_part"
] | [((354, 376), 'functions_recorder.initialize_projector', 'initialize_projector', ([], {}), '()\n', (374, 376), False, 'from functions_recorder import initialize_projector, record_vr_rig, plot_inputs_vr, load_csv\n'), ((531, 587), 'os.path.join', 'join', (['paths.bonsai_out', "(time_name + '_social_suffix.csv')"], {}), "(paths.bonsai_out, time_name + '_social_suffix.csv')\n", (535, 587), False, 'from os.path import join\n'), ((676, 823), 'subprocess.Popen', 'subprocess.Popen', (["[paths.bonsai_path, paths.bonsaiworkflowSOC_path, '-p:csvName=' + csvName +\n '', '-p:videoName=' + videoName + '', '--start']"], {}), "([paths.bonsai_path, paths.bonsaiworkflowSOC_path, \n '-p:csvName=' + csvName + '', '-p:videoName=' + videoName + '', '--start'])\n", (692, 823), False, 'import subprocess\n'), ((899, 938), 'subprocess.Popen', 'subprocess.Popen', (['[paths.unitySOC_path]'], {}), '([paths.unitySOC_path])\n', (915, 938), False, 'import subprocess\n'), ((988, 1052), 'functions_recorder.record_vr_rig', 'record_vr_rig', (['my_device', 'paths.sync_path', 'time_name', '"""_syncSOC"""'], {}), "(my_device, paths.sync_path, time_name, '_syncSOC')\n", (1001, 1052), False, 'from functions_recorder import initialize_projector, record_vr_rig, plot_inputs_vr, load_csv\n'), ((1086, 1164), 'functions_osc.create_and_send', 'create_and_send', (['paths.bonsai_ip', 'paths.bonsai_port', 'paths.bonsai_address', '[1]'], {}), '(paths.bonsai_ip, paths.bonsai_port, paths.bonsai_address, [1])\n', (1101, 1164), False, 'from functions_osc import create_and_send\n'), ((1166, 1241), 'functions_osc.create_and_send', 'create_and_send', (['paths.unity_ip', 'paths.unity_port', 'paths.unity_address', '[1]'], {}), '(paths.unity_ip, paths.unity_port, paths.unity_address, [1])\n', (1181, 1241), False, 'from functions_osc import create_and_send\n'), ((1243, 1251), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (1248, 1251), False, 'from time import sleep\n'), ((1346, 1373), 'functions_recorder.load_csv', 'load_csv', (['current_path_sync'], {}), '(current_path_sync)\n', (1354, 1373), False, 'from functions_recorder import initialize_projector, record_vr_rig, plot_inputs_vr, load_csv\n'), ((1374, 1400), 'functions_recorder.plot_inputs_vr', 'plot_inputs_vr', (['frame_list'], {}), '(frame_list)\n', (1388, 1400), False, 'from functions_recorder import initialize_projector, record_vr_rig, plot_inputs_vr, load_csv\n'), ((1465, 1486), 'functions_GUI.get_filename_suffix', 'get_filename_suffix', ([], {}), '()\n', (1484, 1486), False, 'from functions_GUI import get_filename_suffix, replace_name_part\n'), ((1597, 1643), 'functions_GUI.replace_name_part', 'replace_name_part', (['file_list', '"""suffix"""', 'suffix'], {}), "(file_list, 'suffix', suffix)\n", (1614, 1643), False, 'from functions_GUI import get_filename_suffix, replace_name_part\n'), ((432, 455), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (453, 455), False, 'import datetime\n')] |
"""
This file exists to contain all Django and Python compatibility issues.
In order to avoid circular references, nothing should be imported from
debug_toolbar.
"""
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
try:
from django.template.base import linebreak_iter # NOQA
except ImportError: # Django < 1.9
from django.views.debug import linebreak_iter # NOQA
try:
from django.template.engine import Engine
except ImportError: # Django < 1.8
Engine = None
from django.template.context import get_standard_processors # NOQA
from django.template.loader import find_template_loader # NOQA
def get_template_dirs():
"""Compatibility method to fetch the template directories."""
if Engine:
try:
engine = Engine.get_default()
except ImproperlyConfigured:
template_dirs = []
else:
template_dirs = engine.dirs
else: # Django < 1.8
template_dirs = settings.TEMPLATE_DIRS
return template_dirs
def get_template_loaders():
"""Compatibility method to fetch the template loaders."""
if Engine:
try:
engine = Engine.get_default()
except ImproperlyConfigured:
loaders = []
else:
loaders = engine.template_loaders
else: # Django < 1.8
loaders = [
find_template_loader(loader_name)
for loader_name in settings.TEMPLATE_LOADERS]
return loaders
def get_template_context_processors():
"""Compatibility method to fetch the template context processors."""
if Engine:
try:
engine = Engine.get_default()
except ImproperlyConfigured:
context_processors = []
else:
context_processors = engine.template_context_processors
else: # Django < 1.8
context_processors = get_standard_processors()
return context_processors
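# Example (illustrative): callers can stay version-agnostic by going through
# these helpers instead of touching Engine or settings directly, e.g.
#     template_dirs = get_template_dirs()
#     loaders = get_template_loaders()
#     processors = get_template_context_processors()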
| [
"django.template.loader.find_template_loader",
"django.template.context.get_standard_processors",
"django.template.engine.Engine.get_default"
] | [((1893, 1918), 'django.template.context.get_standard_processors', 'get_standard_processors', ([], {}), '()\n', (1916, 1918), False, 'from django.template.context import get_standard_processors\n'), ((805, 825), 'django.template.engine.Engine.get_default', 'Engine.get_default', ([], {}), '()\n', (823, 825), False, 'from django.template.engine import Engine\n'), ((1187, 1207), 'django.template.engine.Engine.get_default', 'Engine.get_default', ([], {}), '()\n', (1205, 1207), False, 'from django.template.engine import Engine\n'), ((1388, 1421), 'django.template.loader.find_template_loader', 'find_template_loader', (['loader_name'], {}), '(loader_name)\n', (1408, 1421), False, 'from django.template.loader import find_template_loader\n'), ((1662, 1682), 'django.template.engine.Engine.get_default', 'Engine.get_default', ([], {}), '()\n', (1680, 1682), False, 'from django.template.engine import Engine\n')] |
import numpy as np
def vee(ss):
size = ss.shape[0]*ss.shape[1]
ss_round = np.around(ss, decimals=10)
if size == 4:
if (ss_round[0,1]==ss_round[1,0]):
vec = ss[0,1]
elif size == 9:
if (ss_round[2,1]==-ss_round[1,2] and ss_round[0,2]==-ss_round[2,0] and ss_round[1,0]==-ss_round[0,1]):
vec = np.array([[ss[2,1]],[ss[0,2]],[ss[1,0]]])
return vec | [
"numpy.array",
"numpy.around"
] | [((83, 109), 'numpy.around', 'np.around', (['ss'], {'decimals': '(10)'}), '(ss, decimals=10)\n', (92, 109), True, 'import numpy as np\n'), ((347, 393), 'numpy.array', 'np.array', (['[[ss[2, 1]], [ss[0, 2]], [ss[1, 0]]]'], {}), '([[ss[2, 1]], [ss[0, 2]], [ss[1, 0]]])\n', (355, 393), True, 'import numpy as np\n')] |
from ravestate.context import Context
import sys
#from hanging_threads import start_monitoring
#monitoring_thread = start_monitoring()
ctx = Context(*sys.argv[1:])
ctx.run()
| [
"ravestate.context.Context"
] | [((143, 165), 'ravestate.context.Context', 'Context', (['*sys.argv[1:]'], {}), '(*sys.argv[1:])\n', (150, 165), False, 'from ravestate.context import Context\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" Piader v2
"""
from future import standard_library
standard_library.install_aliases()
from builtins import object
import queue
import event_server
import local_key
import time
import views.home as home_view
import views.options as options_view
import views.game as game_view
import views.gameover as gameover_view
import views.scoreboard as scoreboard_view
import configuration as cfg
from lcdmanager import display
class Piader(object):
"""Piader main class"""
option = {
'game_tick': 0.5,
'game_on': True,
'score': 0,
'objects': [],
'gui_current_tab': 'home'
}
views = {}
def __init__(self, game_manager, score_manager=None):
"""init class"""
self.game_manager = game_manager
self.score_manager = score_manager
if self.game_manager.width < 6:
raise ValueError("Width must be larger than 5")
if self.game_manager.height < 4:
raise ValueError("Height must be larger than 3")
self.cfg = cfg.Configuration()
self.queue = queue.Queue()
self.event_server = event_server.EventServerThread(self.queue)
self.display = display.Display(0.5, True)
self.display.add(self.game_manager, 'one')
if self.score_manager:
self.display.add(self.score_manager, 'two')
self.local_keyboard = local_key.Keyboard()
self.views['home'] = home_view.Home(self.game_manager, self)
self.views['options'] = options_view.Options(self.game_manager, self)
self.views['game'] = game_view.Game(self.game_manager, self)
self.views['gameover'] = gameover_view.Gameover(self.game_manager, self)
if self.score_manager:
self.scoreboard_view = scoreboard_view.Scoreboard(self.score_manager, self)
def main_loop(self):
"""main loop"""
self.event_server.start()
self.display.start()
self.set_tab('home')
try:
while self.option['game_on']:
start = time.time()
self.local_keyboard.read()
action = self._get_action()
self.views[self.option['gui_current_tab']].loop(action)
if self.score_manager:
self.scoreboard_view.loop(action)
end = time.time()
if end - start < self.option['game_tick']:
t_delta = end - start
time.sleep(max(0, self.option['game_tick'] - t_delta))
finally:
self.local_keyboard.shutdown()
self.event_server.join()
self.display.join()
def _get_action(self):
"""get event and return it"""
try:
event = self.queue.get(True, 0.05)
except queue.Empty:
event = None
return event
def quit_game(self):
"""quit game"""
self.option['game_on'] = False
def set_tab(self, tab):
"""change views"""
for view in self.views:
self.views[view].hide()
self.views[tab].show()
self.option['gui_current_tab'] = tab
| [
"local_key.Keyboard",
"lcdmanager.display.Display",
"views.scoreboard.Scoreboard",
"views.gameover.Gameover",
"future.standard_library.install_aliases",
"views.home.Home",
"event_server.EventServerThread",
"views.options.Options",
"views.game.Game",
"queue.Queue",
"time.time",
"configuration.Configuration"
] | [((97, 131), 'future.standard_library.install_aliases', 'standard_library.install_aliases', ([], {}), '()\n', (129, 131), False, 'from future import standard_library\n'), ((1067, 1086), 'configuration.Configuration', 'cfg.Configuration', ([], {}), '()\n', (1084, 1086), True, 'import configuration as cfg\n'), ((1108, 1121), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (1119, 1121), False, 'import queue\n'), ((1150, 1192), 'event_server.EventServerThread', 'event_server.EventServerThread', (['self.queue'], {}), '(self.queue)\n', (1180, 1192), False, 'import event_server\n'), ((1216, 1242), 'lcdmanager.display.Display', 'display.Display', (['(0.5)', '(True)'], {}), '(0.5, True)\n', (1231, 1242), False, 'from lcdmanager import display\n'), ((1411, 1431), 'local_key.Keyboard', 'local_key.Keyboard', ([], {}), '()\n', (1429, 1431), False, 'import local_key\n'), ((1461, 1500), 'views.home.Home', 'home_view.Home', (['self.game_manager', 'self'], {}), '(self.game_manager, self)\n', (1475, 1500), True, 'import views.home as home_view\n'), ((1533, 1578), 'views.options.Options', 'options_view.Options', (['self.game_manager', 'self'], {}), '(self.game_manager, self)\n', (1553, 1578), True, 'import views.options as options_view\n'), ((1608, 1647), 'views.game.Game', 'game_view.Game', (['self.game_manager', 'self'], {}), '(self.game_manager, self)\n', (1622, 1647), True, 'import views.game as game_view\n'), ((1681, 1728), 'views.gameover.Gameover', 'gameover_view.Gameover', (['self.game_manager', 'self'], {}), '(self.game_manager, self)\n', (1703, 1728), True, 'import views.gameover as gameover_view\n'), ((1795, 1847), 'views.scoreboard.Scoreboard', 'scoreboard_view.Scoreboard', (['self.score_manager', 'self'], {}), '(self.score_manager, self)\n', (1821, 1847), True, 'import views.scoreboard as scoreboard_view\n'), ((2069, 2080), 'time.time', 'time.time', ([], {}), '()\n', (2078, 2080), False, 'import time\n'), ((2356, 2367), 'time.time', 'time.time', ([], {}), '()\n', (2365, 2367), False, 'import time\n')] |
__author__ = "<NAME>"
__copyright__ = "--"
import numpy as np
import tensorflow as tf
class Capsule(object):
def __init__(self, in_dim, r_dim, g_dim):
self.in_dim = in_dim
self.r_dim = r_dim
self.g_dim = g_dim
def get_fc_var(self, in_size, out_size, name):
# TODO
# Store this variable in CPU instead of GPU when multiple GPUs
# with tf.device('/cpu:0')
initial_value = tf.truncated_normal([in_size, out_size], .0, .001)
weights = tf.get_variable(name=name + "_weights", initializer=initial_value)
bias_initial_value = tf.truncated_normal([out_size], .0, .001)
biases = tf.get_variable(name=name + "_biases", initializer=bias_initial_value)
return weights, biases
def fc_layer(self, bottom, in_size, out_size, name):
with tf.variable_scope(name):
weights, biases = self.get_fc_var(in_size, out_size, name)
fc = tf.nn.bias_add(tf.matmul(bottom, weights), biases)
return fc
def build(self, X_in, extra_in):
rec = tf.sigmoid(self.fc_layer(X_in, self.in_dim, self.r_dim, 'recog_layer_pre_act'), 'recog_layer')
xy_vec = self.fc_layer(rec, self.r_dim, 2, 'xy_prediction')
pro = tf.sigmoid(self.fc_layer(rec, self.r_dim, 1, 'probability_lin'), 'probability_prediction')
probability_vec = tf.tile(pro, (1, self.in_dim))
xy_extend = tf.add(xy_vec, extra_in)
gen = tf.sigmoid(self.fc_layer(xy_extend, 2, self.g_dim, 'gen_pre_act'), 'gen_layer')
out = self.fc_layer(gen, self.g_dim, self.in_dim, 'out_prediction')
return tf.multiply(out, probability_vec)
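# Illustrative wiring (an assumption for documentation -- the original file does
# not show how the capsule is used). In TF1-style graph mode one might do:
#     caps = Capsule(in_dim=784, r_dim=64, g_dim=64)
#     x_in = tf.placeholder(tf.float32, [None, 784])   # flattened input batch
#     shift = tf.placeholder(tf.float32, [None, 2])    # extra x/y translation
#     recon = caps.build(x_in, shift)                  # -> [None, 784] reconstruction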
| [
"tensorflow.tile",
"tensorflow.get_variable",
"tensorflow.variable_scope",
"tensorflow.multiply",
"tensorflow.add",
"tensorflow.matmul",
"tensorflow.truncated_normal"
] | [((445, 497), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[in_size, out_size]', '(0.0)', '(0.001)'], {}), '([in_size, out_size], 0.0, 0.001)\n', (464, 497), True, 'import tensorflow as tf\n'), ((514, 580), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': "(name + '_weights')", 'initializer': 'initial_value'}), "(name=name + '_weights', initializer=initial_value)\n", (529, 580), True, 'import tensorflow as tf\n'), ((611, 654), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[out_size]', '(0.0)', '(0.001)'], {}), '([out_size], 0.0, 0.001)\n', (630, 654), True, 'import tensorflow as tf\n'), ((670, 740), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': "(name + '_biases')", 'initializer': 'bias_initial_value'}), "(name=name + '_biases', initializer=bias_initial_value)\n", (685, 740), True, 'import tensorflow as tf\n'), ((1384, 1414), 'tensorflow.tile', 'tf.tile', (['pro', '(1, self.in_dim)'], {}), '(pro, (1, self.in_dim))\n', (1391, 1414), True, 'import tensorflow as tf\n'), ((1436, 1460), 'tensorflow.add', 'tf.add', (['xy_vec', 'extra_in'], {}), '(xy_vec, extra_in)\n', (1442, 1460), True, 'import tensorflow as tf\n'), ((1648, 1681), 'tensorflow.multiply', 'tf.multiply', (['out', 'probability_vec'], {}), '(out, probability_vec)\n', (1659, 1681), True, 'import tensorflow as tf\n'), ((845, 868), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (862, 868), True, 'import tensorflow as tf\n'), ((973, 999), 'tensorflow.matmul', 'tf.matmul', (['bottom', 'weights'], {}), '(bottom, weights)\n', (982, 999), True, 'import tensorflow as tf\n')] |
import string
import random
from model.contact import Contact
import os.path
import jsonpickle
import sys
import getopt
try:
    opts, args=getopt.getopt(sys.argv[1:],"n:f:",["number of contacts","file"])
except getopt.GetoptError as err:
    print(err)
sys.exit(2)
n=5
f="data/contacts.json"
for o,a in opts:
if o== "-n":
n=int(a)
elif o=="-f":
f=a
def random_str(prefix,maxlen):
symbols= string.ascii_letters + string.digits+' '*100
return prefix+"".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
test_data=[Contact(firstname=random_str('firstname',15), middlename='middle', lastname="lastname", nickname='nickname', title='title',
company="sdd", address="we", homephone="500", mobilephone="433", workphone="443", fax="3",
byear="1988", ayear="1988",address2="retert", phone2="rr", notes="e",
droplist="//div[@id='content']/form/select[1]//option[4]",
droplist2="//div[@id='content']/form/select[2]//option[8]",
droplist3="//div[@id='content']/form/select[3]//option[3]",droplist4="//div[@id='content']/form/select[4]//option[8]") for i in range(n)]
file=os.path.join(os.path.dirname(os.path.abspath(__file__)),"..",f)
with open(file,"w") as out:
jsonpickle.set_encoder_options('json',indent=2)
out.write(jsonpickle.encode(test_data))
| [
"jsonpickle.set_encoder_options",
"getopt.getopt",
"random.choice",
"random.randrange",
"getopt.usage",
"sys.exit",
"jsonpickle.encode"
] | [((142, 209), 'getopt.getopt', 'getopt.getopt', (['sys.argv[1:]', '"""n:f"""', "['numbers of contacts', 'file']"], {}), "(sys.argv[1:], 'n:f', ['numbers of contacts', 'file'])\n", (155, 209), False, 'import getopt\n'), ((1426, 1474), 'jsonpickle.set_encoder_options', 'jsonpickle.set_encoder_options', (['"""json"""'], {'indent': '(2)'}), "('json', indent=2)\n", (1456, 1474), False, 'import jsonpickle\n'), ((245, 259), 'getopt.usage', 'getopt.usage', ([], {}), '()\n', (257, 259), False, 'import getopt\n'), ((264, 275), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (272, 275), False, 'import sys\n'), ((1488, 1516), 'jsonpickle.encode', 'jsonpickle.encode', (['test_data'], {}), '(test_data)\n', (1505, 1516), False, 'import jsonpickle\n'), ((503, 525), 'random.choice', 'random.choice', (['symbols'], {}), '(symbols)\n', (516, 525), False, 'import random\n'), ((541, 565), 'random.randrange', 'random.randrange', (['maxlen'], {}), '(maxlen)\n', (557, 565), False, 'import random\n')] |
import numpy as np
from qsim import Circuit, Executor, Operation
# subclass Executor
class CustomExecutor(Executor):
"""
Custom quantum operation executor for external backend, for example, based on GPU.
"""
def __init__(self, initial_state: np.ndarray):
# TODO: implement custom logic for state initialization
super().__init__(initial_state=initial_state)
def apply_operation(self, operation: Operation):
# TODO: implement custom logic for operation execution
super().apply_operation(operation=operation)
# create circuit
c = Circuit(qubit_count=2)
# inject custom backend
c.executor = CustomExecutor
# now use the circuit as usual
c.initialize([1, 0])
c.h.on(0)
state = c.execute()
print(f'one-hot vector state: {state}')
| [
"qsim.Circuit"
] | [((584, 606), 'qsim.Circuit', 'Circuit', ([], {'qubit_count': '(2)'}), '(qubit_count=2)\n', (591, 606), False, 'from qsim import Circuit, Executor, Operation\n')] |
import time
import gym
import numpy as np
class AutoMonitor(gym.Wrapper):
def __init__(self, env):
super().__init__(env)
# These metrics are never reset:
self._episodes = 0
self._steps = 0
# These metrics are reset when an episode ends:
self._length = None
self._return = None
# Metric histories for averaging
self._all_lengths = []
self._all_returns = []
# Initial time reference point
self._start_time = time.time()
# Print the header
self._print('episode', 'timestep', 'length', 'return', 'avg_length',
'avg_return', 'hours', sep=',', flush=True)
def step(self, action):
observation, reward, done, info = super().step(action)
self._on_step(reward)
if done:
self._on_done()
return observation, reward, done, info
def reset(self, **kwargs):
self._on_reset()
return super().reset(**kwargs)
def _on_step(self, reward):
self._steps += 1
self._length += 1
self._return += reward
def _on_done(self):
self._episodes += 1
self._all_lengths.append(self._length)
self._all_returns.append(self._return)
hours = (time.time() - self._start_time) / 3600
avg_length = np.mean(self._all_lengths[-100:])
avg_return = np.mean(self._all_returns[-100:])
self._print(self._episodes, self._steps, self._length, self._return, avg_length,
avg_return, '{:.3f}'.format(hours), sep=',', flush=True)
def _on_reset(self):
self._length = 0
self._return = 0.0
def _print(self, *args, **kwargs):
print("AM", end=':')
print(*args, **kwargs)
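# Minimal usage sketch (illustrative; assumes a registered Gym environment such
# as CartPole-v1 and the classic 4-tuple step API used above):
#     env = AutoMonitor(gym.make("CartPole-v1"))
#     env.reset()
#     done = False
#     while not done:
#         _, _, done, _ = env.step(env.action_space.sample())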
| [
"numpy.mean",
"time.time"
] | [((512, 523), 'time.time', 'time.time', ([], {}), '()\n', (521, 523), False, 'import time\n'), ((1343, 1376), 'numpy.mean', 'np.mean', (['self._all_lengths[-100:]'], {}), '(self._all_lengths[-100:])\n', (1350, 1376), True, 'import numpy as np\n'), ((1398, 1431), 'numpy.mean', 'np.mean', (['self._all_returns[-100:]'], {}), '(self._all_returns[-100:])\n', (1405, 1431), True, 'import numpy as np\n'), ((1283, 1294), 'time.time', 'time.time', ([], {}), '()\n', (1292, 1294), False, 'import time\n')] |
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import scipy.stats
import functools
import pandas as pd
import numpy as np
import pymc3
import os
from scratch import log_likelihood
from scratch import metropolis
run_scratch = True
save_scratch_trace = True
run_pymc3 = False
# Make a straight line
y = [500, 10]
x = np.linspace(0, 20)
y_obs = np.array(y[0] + y[1]*x, dtype=int)
y_obs_noise = np.random.poisson(y_obs)
# General MCMC parameters
niter = 30000
nburn = 1000
if run_scratch:
####################################
########### From scratch ###########
####################################
print('Running linear regression from scratch example.')
# set up MCMC
prior = [scipy.stats.uniform(250, 600),
scipy.stats.uniform(0, 20)]
start = [prior_i.rvs() for prior_i in prior]
def target(parameters, x, y_obs):
""" Calculates the product of the likelihood and prior """
y = parameters[0] + parameters[1]*x
l = log_likelihood.log_poisson_likelihood(y, y_obs)
p = np.sum([np.log(prior_i.pdf(p)) for prior_i, p in zip(prior, parameters)])
return l + p
def proposal(parameters, proposal_jump=[10, 0.5]):
"""
Generate a new proposal, or "guess" for the MCMC to try next.
The new proposed value is picked from a Normal, centered on
the old value given by p. The proposal_jump array specifies
the standard deviation of the possible jumps from the prior
parameter value.
"""
new_vals = np.array([scipy.stats.norm(loc=p_i, scale=jump_i).rvs()
for p_i, jump_i in zip(parameters, proposal_jump)])
return new_vals
target_line = functools.partial(target, x=x, y_obs=y_obs_noise)
# Run the MCMC sampler
scratch_chain = metropolis.log_metroplis(start, target_line, proposal,
niter, nburn=nburn)
if save_scratch_trace:
if not os.path.exists('./data/'): # Check if data directory exists.
os.makedirs('./data/')
print('Made a ./data/ directory')
df = pd.DataFrame(scratch_chain, columns=['y0', 'y1'])
df.to_csv('./data/linear_regression_scratch_trace.csv', index=False)
if run_pymc3:
####################################
########### Using pymc3 ############
####################################
print('Running linear regression using pymc3 example.')
with pymc3.Model() as model:
# Define priors
y0 = pymc3.Uniform('y0', 250, 600)
y1 = pymc3.Uniform('y1', 0, 20)
# Define likelihood
likelihood = pymc3.Poisson('f(y)', mu=y0 + y1*x, observed=y_obs_noise)
# Inference
pymc_chain = pymc3.sample(draws=niter, cores=None, tune=nburn)
####################################
######## Plot the results ##########
####################################
if run_scratch:
fig = plt.figure(constrained_layout=True, figsize=(8, 5))
gs = gridspec.GridSpec(nrows=2, ncols=4, figure=fig)
ax = np.array([
[fig.add_subplot(gs[0, 0]), fig.add_subplot(gs[0, 1])],
[fig.add_subplot(gs[1, 0]), fig.add_subplot(gs[1, 1])]
])
bx = fig.add_subplot(gs[:, 2:])
# Plot the chain and posteriors
ax[0, 0].plot(scratch_chain[:, 0]); ax[1, 0].plot(scratch_chain[:, 1])
ax[0, 1].hist(scratch_chain[:, 0], density=True)
ax[1, 1].hist(scratch_chain[:, 1], density=True)
# Overlay a vertical line over the true value
ax[0,1].axvline(y[0], c='k')
ax[1,1].axvline(y[1], c='k')
# Now plot the true line, superposed by the 95% credible interval
# estimated from the posterior.
ci = np.nan*np.ones((x.shape[0], 2))
for i, x_i in enumerate(x):
y_scatter = scratch_chain[:, 0] + x_i*scratch_chain[:, 1]
ci[i, :] = np.quantile(y_scatter, (0.025, 0.975))
bx.plot(x, y[0] + y[1]*x, 'b', label='True')
bx.scatter(x, y_obs_noise, c='r', label='True+noise')
bx.fill_between(x, ci[:, 0], ci[:, 1], color='g', alpha=0.3, label='95% CI')
    # Lots of plot adjustments and labeling
fig.suptitle(f'Linear regression from scratch\n'
f'f(x) = y[0] + x*y[1] | true values y[0] = {y[0]}, y[1] = {y[1]}')
ax[0,0].set(ylabel='y[0]', title='y[0] chain')
ax[1,0].set(ylabel='y[1]', xlabel='Iteration', title='y[1] chain')
ax[0,1].set(ylabel='probability', title='y[0] posterior')
ax[1,1].set(ylabel='probability', title='y[1] posterior')
bx.set(xlabel='x', ylabel='f(x)')
bx.legend()
if run_pymc3: # Plot the pymc3 results for confirmation.
    # Use the built-in traceplot functionality to visualize the posteriors.
lines = [('y0', {}, [y[0]]), ('y1', {}, [y[1]])] # This API is very cumbersome!
pymc3.traceplot(pymc_chain[100:], lines=lines)
    # Now make a plot, similar to the from-scratch plot above, of the family
    # of lines picked from the posterior.
plt.figure(figsize=(7, 7))
plt.plot(x, y_obs_noise, 'x', label='True+noise')
pymc3.plot_posterior_predictive_glm(pymc_chain, samples=100, eval=x,
lm=lambda x, sample: sample['y0'] + sample['y1']*x,
label='posterior predictive regression lines')
plt.plot(x, y[0] + y[1]*x, label='True', lw=3., c='y')
plt.title('Posterior predictive regression lines')
plt.legend(loc=0)
plt.xlabel('x')
plt.ylabel('y')
# Lastly, make a pairplot, i.e. a corner plot
pymc3.plot_joint(pymc_chain, figsize=(5, 5), kind="hexbin")
plt.show() | [
"matplotlib.pyplot.ylabel",
"numpy.array",
"pymc3.sample",
"pymc3.plot_posterior_predictive_glm",
"os.path.exists",
"numpy.random.poisson",
"pymc3.Uniform",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.gridspec.GridSpec",
"pandas.DataFrame",
"pymc3.plot_joint",
"numpy.ones",
"scratch.log_likelihood.log_poisson_likelihood",
"scratch.metropolis.log_metroplis",
"matplotlib.pyplot.title",
"pymc3.Model",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"os.makedirs",
"pymc3.traceplot",
"matplotlib.pyplot.figure",
"numpy.quantile",
"functools.partial",
"pymc3.Poisson"
] | [((342, 360), 'numpy.linspace', 'np.linspace', (['(0)', '(20)'], {}), '(0, 20)\n', (353, 360), True, 'import numpy as np\n'), ((369, 405), 'numpy.array', 'np.array', (['(y[0] + y[1] * x)'], {'dtype': 'int'}), '(y[0] + y[1] * x, dtype=int)\n', (377, 405), True, 'import numpy as np\n'), ((418, 442), 'numpy.random.poisson', 'np.random.poisson', (['y_obs'], {}), '(y_obs)\n', (435, 442), True, 'import numpy as np\n'), ((5611, 5621), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5619, 5621), True, 'import matplotlib.pyplot as plt\n'), ((1755, 1804), 'functools.partial', 'functools.partial', (['target'], {'x': 'x', 'y_obs': 'y_obs_noise'}), '(target, x=x, y_obs=y_obs_noise)\n', (1772, 1804), False, 'import functools\n'), ((1853, 1927), 'scratch.metropolis.log_metroplis', 'metropolis.log_metroplis', (['start', 'target_line', 'proposal', 'niter'], {'nburn': 'nburn'}), '(start, target_line, proposal, niter, nburn=nburn)\n', (1877, 1927), False, 'from scratch import metropolis\n'), ((2967, 3018), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'constrained_layout': '(True)', 'figsize': '(8, 5)'}), '(constrained_layout=True, figsize=(8, 5))\n', (2977, 3018), True, 'import matplotlib.pyplot as plt\n'), ((3028, 3075), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', ([], {'nrows': '(2)', 'ncols': '(4)', 'figure': 'fig'}), '(nrows=2, ncols=4, figure=fig)\n', (3045, 3075), True, 'import matplotlib.gridspec as gridspec\n'), ((4832, 4878), 'pymc3.traceplot', 'pymc3.traceplot', (['pymc_chain[100:]'], {'lines': 'lines'}), '(pymc_chain[100:], lines=lines)\n', (4847, 4878), False, 'import pymc3\n'), ((5002, 5028), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 7)'}), '(figsize=(7, 7))\n', (5012, 5028), True, 'import matplotlib.pyplot as plt\n'), ((5033, 5082), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_obs_noise', '"""x"""'], {'label': '"""True+noise"""'}), "(x, y_obs_noise, 'x', label='True+noise')\n", (5041, 5082), True, 'import matplotlib.pyplot as plt\n'), ((5087, 5265), 'pymc3.plot_posterior_predictive_glm', 'pymc3.plot_posterior_predictive_glm', (['pymc_chain'], {'samples': '(100)', 'eval': 'x', 'lm': "(lambda x, sample: sample['y0'] + sample['y1'] * x)", 'label': '"""posterior predictive regression lines"""'}), "(pymc_chain, samples=100, eval=x, lm=lambda\n x, sample: sample['y0'] + sample['y1'] * x, label=\n 'posterior predictive regression lines')\n", (5122, 5265), False, 'import pymc3\n'), ((5323, 5380), 'matplotlib.pyplot.plot', 'plt.plot', (['x', '(y[0] + y[1] * x)'], {'label': '"""True"""', 'lw': '(3.0)', 'c': '"""y"""'}), "(x, y[0] + y[1] * x, label='True', lw=3.0, c='y')\n", (5331, 5380), True, 'import matplotlib.pyplot as plt\n'), ((5383, 5433), 'matplotlib.pyplot.title', 'plt.title', (['"""Posterior predictive regression lines"""'], {}), "('Posterior predictive regression lines')\n", (5392, 5433), True, 'import matplotlib.pyplot as plt\n'), ((5438, 5455), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0)'}), '(loc=0)\n', (5448, 5455), True, 'import matplotlib.pyplot as plt\n'), ((5460, 5475), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (5470, 5475), True, 'import matplotlib.pyplot as plt\n'), ((5480, 5495), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (5490, 5495), True, 'import matplotlib.pyplot as plt\n'), ((5550, 5609), 'pymc3.plot_joint', 'pymc3.plot_joint', (['pymc_chain'], {'figsize': '(5, 5)', 'kind': '"""hexbin"""'}), "(pymc_chain, figsize=(5, 5), kind='hexbin')\n", (5566, 5609), 
False, 'import pymc3\n'), ((1013, 1060), 'scratch.log_likelihood.log_poisson_likelihood', 'log_likelihood.log_poisson_likelihood', (['y', 'y_obs'], {}), '(y, y_obs)\n', (1050, 1060), False, 'from scratch import log_likelihood\n'), ((2163, 2212), 'pandas.DataFrame', 'pd.DataFrame', (['scratch_chain'], {'columns': "['y0', 'y1']"}), "(scratch_chain, columns=['y0', 'y1'])\n", (2175, 2212), True, 'import pandas as pd\n'), ((2498, 2511), 'pymc3.Model', 'pymc3.Model', ([], {}), '()\n', (2509, 2511), False, 'import pymc3\n'), ((2559, 2588), 'pymc3.Uniform', 'pymc3.Uniform', (['"""y0"""', '(250)', '(600)'], {}), "('y0', 250, 600)\n", (2572, 2588), False, 'import pymc3\n'), ((2602, 2628), 'pymc3.Uniform', 'pymc3.Uniform', (['"""y1"""', '(0)', '(20)'], {}), "('y1', 0, 20)\n", (2615, 2628), False, 'import pymc3\n'), ((2679, 2738), 'pymc3.Poisson', 'pymc3.Poisson', (['"""f(y)"""'], {'mu': '(y0 + y1 * x)', 'observed': 'y_obs_noise'}), "('f(y)', mu=y0 + y1 * x, observed=y_obs_noise)\n", (2692, 2738), False, 'import pymc3\n'), ((2779, 2828), 'pymc3.sample', 'pymc3.sample', ([], {'draws': 'niter', 'cores': 'None', 'tune': 'nburn'}), '(draws=niter, cores=None, tune=nburn)\n', (2791, 2828), False, 'import pymc3\n'), ((3760, 3784), 'numpy.ones', 'np.ones', (['(x.shape[0], 2)'], {}), '((x.shape[0], 2))\n', (3767, 3784), True, 'import numpy as np\n'), ((3902, 3940), 'numpy.quantile', 'np.quantile', (['y_scatter', '(0.025, 0.975)'], {}), '(y_scatter, (0.025, 0.975))\n', (3913, 3940), True, 'import numpy as np\n'), ((2008, 2033), 'os.path.exists', 'os.path.exists', (['"""./data/"""'], {}), "('./data/')\n", (2022, 2033), False, 'import os\n'), ((2081, 2103), 'os.makedirs', 'os.makedirs', (['"""./data/"""'], {}), "('./data/')\n", (2092, 2103), False, 'import os\n')] |
from django.conf.urls import url
from django.urls import path
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns=[
path('',views.home, name = 'Home'),
path('search/',views.search_photo, name = 'search_photo'),
    path('photo/<int:photo_id>/',views.photo,name ='photo')
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
| [
"django.conf.urls.static.static",
"django.urls.path"
] | [((177, 210), 'django.urls.path', 'path', (['""""""', 'views.home'], {'name': '"""Home"""'}), "('', views.home, name='Home')\n", (181, 210), False, 'from django.urls import path\n'), ((217, 273), 'django.urls.path', 'path', (['"""search/"""', 'views.search_photo'], {'name': '"""search_photo"""'}), "('search/', views.search_photo, name='search_photo')\n", (221, 273), False, 'from django.urls import path\n'), ((280, 337), 'django.urls.path', 'path', (['"""photo/(\\\\d<photo_id>+)"""', 'views.photo'], {'name': '"""photo"""'}), "('photo/(\\\\d<photo_id>+)', views.photo, name='photo')\n", (284, 337), False, 'from django.urls import path\n'), ((377, 438), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (383, 438), False, 'from django.conf.urls.static import static\n')] |
import os
import logging
import objectpath
from datetime import datetime, timedelta
from indra.statements import Concept, Event, Influence, TimeContext, \
RefContext, WorldContext, Evidence, QualitativeDelta, MovementContext, \
Migration, QuantitativeState
logger = logging.getLogger(__name__)
# List out relation types and their default (implied) polarities.
polarities = {'causation': 1, 'precondition': 1, 'catalyst': 1,
'mitigation': -1, 'prevention': -1,
'temporallyPrecedes': None}
class HumeJsonLdProcessor(object):
"""This processor extracts INDRA Statements from Hume JSON-LD output.
Parameters
----------
json_dict : dict
A JSON dictionary containing the Hume extractions in JSON-LD format.
Attributes
----------
tree : objectpath.Tree
The objectpath Tree object representing the extractions.
statements : list[indra.statements.Statement]
A list of INDRA Statements that were extracted by the processor.
"""
def __init__(self, json_dict):
self.tree = objectpath.Tree(json_dict)
self.statements = []
self.document_dict = {}
self.concept_dict = {}
self.relation_dict = {}
self.eid_stmt_dict = {}
self.extractions_by_id = {}
self._get_documents()
self.relation_subj_obj_ids = []
self._get_extractions_by_id()
def _get_extractions_by_id(self):
self.extractions_by_id = {
extr['@id']: extr for extr in
self.tree.execute("$.extractions[(@.@type is 'Extraction')]")
if '@id' in extr}
def extract_relations(self):
relations = self._find_relations()
for relation_type, relation in relations:
# Extract concepts and contexts.
subj = self._get_event_and_context(relation, arg_type='source')
obj = self._get_event_and_context(relation, arg_type='destination')
if not subj.concept or not obj.concept:
continue
# Apply the naive polarity from the type of statement. For the
# purpose of the multiplication here, if obj.delta.polarity is
# None to begin with, we assume it is positive
obj_pol = obj.delta.polarity
obj_pol = obj_pol if obj_pol is not None else 1
rel_pol = polarities[relation_type]
obj.delta.polarity = rel_pol * obj_pol if rel_pol else None
evidence = self._get_evidence(relation, get_states(relation))
st = Influence(subj, obj, evidence=evidence)
self.eid_stmt_dict[relation['@id']] = st
self.statements.append(st)
def extract_events(self):
events = self._find_events()
for event in events:
evidence = self._get_evidence(event, get_states(event))
stmt = self._get_event_and_context(event, eid=event['@id'],
evidence=evidence)
self.eid_stmt_dict[event['@id']] = stmt
self.statements.append(stmt)
def _find_events(self):
"""Find standalone events and return them in a list."""
# First populate self.concept_dict and self.relations_subj_obj_ids
if not self.relation_dict or not self.concept_dict or \
not self.relation_subj_obj_ids:
self._find_relations()
# Check if events are part of relations
events = []
for e in self.concept_dict.values():
label_set = set(e.get('labels', []))
if 'Event' in label_set:
if e['@id'] not in self.relation_subj_obj_ids:
events.append(e)
if not events:
logger.debug('No standalone events found.')
else:
logger.debug('%d standalone events found.' % len(events))
return events
def _find_relations(self):
"""Find all relevant relation elements and return them in a list."""
# Get relations from extractions
relations = []
for eid, e in self.extractions_by_id.items():
label_set = set(e.get('labels', []))
# If this is a DirectedRelation
if 'DirectedRelation' in label_set:
self.relation_dict[eid] = e
subtype = e.get('subtype')
if any(t in subtype for t in polarities.keys()):
relations.append((subtype, e))
# Save IDs of relation's subject and object
if e['arguments']:
for a in e['arguments']:
if a['type'] == 'source' or \
a['type'] == 'destination':
self.relation_subj_obj_ids.append(
a['value']['@id'])
# If this is an Event or an Entity
if {'Event', 'Entity'} & label_set:
self.concept_dict[e['@id']] = e
if not relations and not self.relation_dict:
logger.debug("No relations found.")
else:
logger.debug('%d relations of types %s found'
% (len(relations), ', '.join(polarities.keys())))
logger.debug('%d relations in dict.' % len(self.relation_dict))
logger.debug('%d concepts found.' % len(self.concept_dict))
return relations
def _get_documents(self):
"""Populate sentences attribute with a dict keyed by document id."""
documents = self.tree.execute("$.documents")
for doc in documents:
sentences = {s['@id']: s['text'] for s in doc.get('sentences', [])}
self.document_dict[doc['@id']] = {'sentences': sentences,
'location': doc['location']}
def _make_world_context(self, entity):
"""Get place and time info from the json for this entity."""
loc_context = None
time_context = None
# Look for time and place contexts.
for argument in entity["arguments"]:
if argument["type"] in {"has_location", "has_origin_location",
"has_destination_location",
"has_intermediate_location"}:
entity_id = argument["value"]["@id"]
loc_entity = self.concept_dict[entity_id]
loc_context = _resolve_geo(loc_entity)
if argument["type"] in {"has_time", "has_start_time",
"has_end_time"}:
entity_id = argument["value"]["@id"]
temporal_entity = self.concept_dict[entity_id]
time_context = _resolve_time(temporal_entity)
# Put context together
context = None
if loc_context or time_context:
context = WorldContext(time=time_context, geo_location=loc_context)
return context
def _make_movement_context(self, entity):
movement_locations = list()
time_context = None
# Use None for quantitative_state if no information found, default
# value will be assigned when creating a Statement
quantitative_state = None
for argument in entity['arguments']:
entity_id = argument["value"]["@id"]
hume_entity = self.concept_dict[entity_id]
if argument['type'] in {"has_actor", "has_affected_actor",
"has_active_actor"}:
for count in hume_entity.get('counts', list()):
quantitative_state = QuantitativeState(
entity="person", value=count['value'],
unit=count['unit'], modifier=count['modifier'])
if argument['type'] == "has_origin_location":
movement_locations.append(
{'location': _resolve_geo(hume_entity), 'role': 'origin'})
if argument['type'] == 'has_destination_location':
movement_locations.append(
{'location': _resolve_geo(hume_entity),
'role': 'destination'})
if argument['type'] in {"has_time", "has_start_time",
"has_end_time"}:
time_context = _resolve_time(hume_entity)
return MovementContext(locations=movement_locations,
time=time_context), quantitative_state
def _make_concept(self, entity):
"""Return Concept from a Hume entity."""
# Use the canonical name as the name of the Concept by default
name = self._sanitize(entity['canonicalName'])
# But if there is a trigger head text, we prefer that since
# it almost always results in a cleaner name
# This is removed for now since the head word seems to be too
# minimal for some concepts, e.g. it gives us only "security"
# for "food security".
"""
trigger = entity.get('trigger')
if trigger is not None:
head_text = trigger.get('head text')
if head_text is not None:
name = head_text
"""
# Save raw text and Hume scored groundings as db_refs
db_refs = self._get_grounding(entity)
concept = Concept(name, db_refs=db_refs)
metadata = {arg['type']: arg['value']['@id']
for arg in entity['arguments']}
return concept, metadata
def _get_bounds(self, ref_dicts):
minb = None
maxb = None
for ref_dict in ref_dicts:
bounds = ref_dict.pop('BOUNDS', None)
if bounds:
minb = min(bounds[0], minb if minb is not None else bounds[0])
maxb = max(bounds[1], maxb if maxb is not None else bounds[1])
return minb, maxb
def _get_event_and_context(self, event, eid=None, arg_type=None,
evidence=None):
"""Return an INDRA Event based on an event entry."""
if not eid:
eid = _choose_id(event, arg_type)
ev = self.concept_dict[eid]
concept, metadata = self._make_concept(ev)
is_migration_event = False
hume_grounding = {x[0] for x in concept.db_refs['WM']}
for grounding_en in hume_grounding:
if "wm/concept/causal_factor/social_and_political/migration" in \
grounding_en:
is_migration_event = True
if is_migration_event:
movement_context, quantitative_state = (
self._make_movement_context(ev))
event_obj = Migration(concept, delta=quantitative_state,
context=movement_context, evidence=evidence)
else:
ev_delta = QualitativeDelta(
polarity=get_polarity(ev), adjectives=None)
context = self._make_world_context(ev)
event_obj = Event(concept, delta=ev_delta, context=context,
evidence=evidence)
return event_obj
def _get_text_and_bounds(self, provenance):
# First try looking up the full sentence through provenance
doc_id = provenance['document']['@id']
sent_id = provenance['sentence']
text = self.document_dict[doc_id]['sentences'][sent_id]
text = self._sanitize(text)
if 'sentenceCharPositions' in provenance:
bounds = [provenance['sentenceCharPositions'][k]
for k in ['start', 'end']]
else:
bounds = []
return text, bounds
def _get_evidence(self, event, adjectives):
"""Return the Evidence object for the INDRA Statement."""
provenance = event.get('provenance')
# First try looking up the full sentence through provenance
text, bounds = self._get_text_and_bounds(provenance[0])
annotations = {
'found_by': event.get('rule'),
'provenance': provenance,
'event_type': os.path.basename(event.get('type')),
'adjectives': adjectives,
'bounds': bounds
}
ev = Evidence(source_api='hume', text=text, annotations=annotations)
return [ev]
def _get_grounding(self, entity):
"""Return Hume grounding."""
db_refs = {'TEXT': entity['text']}
groundings = entity.get('grounding')
if not groundings:
return db_refs
# Get rid of leading slash
groundings = [(x['ontologyConcept'][1:], x['value']) for x in
groundings]
grounding_entries = sorted(list(set(groundings)),
key=lambda x: (x[1], x[0].count('/'), x[0]),
reverse=True)
# We could get an empty list here in which case we don't add the
# grounding
if grounding_entries:
db_refs['WM'] = grounding_entries
return db_refs
@staticmethod
def _sanitize(text):
"""Return sanitized Hume text field for human readability."""
# TODO: any cleanup needed here?
if text is None:
return None
text = text.replace('\n', ' ')
return text
class HumeJsonLdProcessorCompositional(HumeJsonLdProcessor):
def _get_grounding(self, entity):
"""Return Hume grounding."""
db_refs = {}
txt = entity.get('text')
if txt:
db_refs['TEXT'] = txt
groundings = entity.get('grounding')
if not groundings:
return db_refs
# Get rid of leading slash
groundings = [(x['ontologyConcept'][1:], x['value']) for x in
groundings]
grounding_entries = sorted(list(set(groundings)),
key=lambda x: (x[1], x[0].count('/'), x[0]),
reverse=True)
if 'mentions' in entity:
prov = entity['mentions'][0]['provenance'][0]
else:
prov = entity['provenance'][0]
_, bounds = self._get_text_and_bounds(prov)
db_refs['BOUNDS'] = bounds
# We could get an empty list here in which case we don't add the
# grounding
if grounding_entries:
db_refs['WM'] = grounding_entries
return db_refs
def _get_event_and_context(self, event, eid=None, arg_type=None,
evidence=None):
"""Return an INDRA Event based on an event entry."""
if not eid:
eid = _choose_id(event, arg_type)
ev = self.concept_dict[eid]
concept, metadata = self._make_concept(ev)
# is_migration_event = False
property_id = _choose_id(event, 'has_property')
theme_id = _choose_id(event, 'has_theme')
property = self.extractions_by_id[property_id] \
if property_id else None
theme = self.extractions_by_id[theme_id] \
if theme_id else None
process_grounding = concept.db_refs
theme_grounding = self._get_grounding(theme) if theme else {}
property_grounding = self._get_grounding(property) if property else {}
minb, maxb = self._get_bounds([theme_grounding, process_grounding,
property_grounding])
event_sentence, _ = self._get_text_and_bounds(event['provenance'][0])
doc_id = event['provenance'][0]['document']['@id']
sent_id = event['provenance'][0]['sentence']
# If we successfully got within-sentence coordinates, we can use the
# entity text from there and overwrite the concept name as well as
# the context grounding TEXT entry
if minb is not None and maxb is not None:
entity_text = \
self.document_dict[doc_id]['sentences'][sent_id][minb:maxb+1]
concept.name = entity_text
concept.db_refs['TEXT'] = entity_text
process_grounding_wm = process_grounding.get('WM')
theme_grounding_wm = theme_grounding.get('WM')
property_grounding_wm = property_grounding.get('WM')
# FIXME: what do we do if there are multiple entries in
# theme/property grounding?
#assert process_grounding_wm is None or len(process_grounding_wm) == 1
assert property_grounding_wm is None or len(property_grounding_wm) == 1
assert theme_grounding_wm is None or len(theme_grounding_wm) == 1
property_grounding_wm = property_grounding_wm[0] \
if property_grounding_wm else None
theme_grounding_wm = theme_grounding_wm[0] \
if theme_grounding_wm else None
process_grounding_wm = process_grounding_wm[0] \
if process_grounding_wm else None
# For some reason the event's grounding is sometimes duplicated as
# property grounding (e.g., price), in this case we treat the grounding
# as a property
if process_grounding_wm and property_grounding_wm and \
process_grounding_wm[0] == property_grounding_wm[0]:
process_grounding_wm = None
# First case: we have a theme so we apply the property and the process
# to it
if theme_grounding:
compositional_grounding = [[theme_grounding_wm,
property_grounding_wm,
process_grounding_wm, None]]
# Second case: we don't have a theme so we take the process as the theme
# and apply any property to it
elif process_grounding_wm:
compositional_grounding = [[process_grounding_wm,
property_grounding_wm,
None, None]]
elif property_grounding_wm:
compositional_grounding = [[property_grounding_wm,
None, None, None]]
assert compositional_grounding[0][0]
concept.db_refs['WM'] = compositional_grounding
# Migrations turned off for now
#for grounding_en in process_grounding:
# if "wm/concept/causal_factor/social_and_political/migration" in \
# grounding_en:
# is_migration_event = True
#if is_migration_event:
# movement_context, quantitative_state = (
# self._make_movement_context(ev))
# event_obj = Migration(concept, delta=quantitative_state,
# context=movement_context, evidence=evidence)
#else:
ev_delta = QualitativeDelta(
polarity=get_polarity(ev))
context = self._make_world_context(ev)
event_obj = Event(concept, delta=ev_delta, context=context,
evidence=evidence)
return event_obj
def _choose_id(event, arg_type):
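    """Return the @id of the first argument of the given type, or None."""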
args = event.get('arguments', [])
obj_tag = [arg for arg in args if arg['type'] == arg_type]
if obj_tag:
obj_id = obj_tag[0]['value']['@id']
else:
obj_id = None
return obj_id
def get_states(event):
ret_list = []
if 'states' in event:
for state_property in event['states']:
if state_property['type'] != 'polarity':
ret_list.append(state_property['text'])
return ret_list
def get_polarity(event):
pol_map = {'Positive': 1, 'Negative': -1}
if 'states' in event:
for state_property in event['states']:
if state_property['type'] == 'polarity':
return pol_map[state_property['text']]
return None
def _resolve_geo(hume_loc_entity):
place = hume_loc_entity.get('canonicalName', hume_loc_entity.get('text'))
geo_id = hume_loc_entity.get('geoname_id', None)
if geo_id is not None:
return RefContext(name=place, db_refs={"GEOID": geo_id})
else:
return RefContext(place)
def _resolve_time(hume_temporal_entity):
if 'mentions' in hume_temporal_entity:
text = hume_temporal_entity['mentions'][0]['text']
else:
text = hume_temporal_entity['text']
if len(hume_temporal_entity.get("timeInterval", [])) < 1:
return TimeContext(text=text)
time = hume_temporal_entity["timeInterval"][0]
start = datetime.strptime(time['start'], '%Y-%m-%dT%H:%M')
end = datetime.strptime(time['end'], '%Y-%m-%dT%H:%M')
end = end + timedelta(minutes=1)
duration = int((end - start).total_seconds())
return TimeContext(text=text, start=start, end=end,
duration=duration)
| [
"logging.getLogger",
"indra.statements.MovementContext",
"indra.statements.Influence",
"datetime.datetime.strptime",
"indra.statements.Event",
"indra.statements.Concept",
"indra.statements.QuantitativeState",
"indra.statements.WorldContext",
"objectpath.Tree",
"indra.statements.Evidence",
"indra.statements.RefContext",
"indra.statements.TimeContext",
"indra.statements.Migration",
"datetime.timedelta"
] | [((278, 305), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (295, 305), False, 'import logging\n'), ((20273, 20323), 'datetime.datetime.strptime', 'datetime.strptime', (["time['start']", '"""%Y-%m-%dT%H:%M"""'], {}), "(time['start'], '%Y-%m-%dT%H:%M')\n", (20290, 20323), False, 'from datetime import datetime, timedelta\n'), ((20334, 20382), 'datetime.datetime.strptime', 'datetime.strptime', (["time['end']", '"""%Y-%m-%dT%H:%M"""'], {}), "(time['end'], '%Y-%m-%dT%H:%M')\n", (20351, 20382), False, 'from datetime import datetime, timedelta\n'), ((20481, 20544), 'indra.statements.TimeContext', 'TimeContext', ([], {'text': 'text', 'start': 'start', 'end': 'end', 'duration': 'duration'}), '(text=text, start=start, end=end, duration=duration)\n', (20492, 20544), False, 'from indra.statements import Concept, Event, Influence, TimeContext, RefContext, WorldContext, Evidence, QualitativeDelta, MovementContext, Migration, QuantitativeState\n'), ((1079, 1105), 'objectpath.Tree', 'objectpath.Tree', (['json_dict'], {}), '(json_dict)\n', (1094, 1105), False, 'import objectpath\n'), ((9319, 9349), 'indra.statements.Concept', 'Concept', (['name'], {'db_refs': 'db_refs'}), '(name, db_refs=db_refs)\n', (9326, 9349), False, 'from indra.statements import Concept, Event, Influence, TimeContext, RefContext, WorldContext, Evidence, QualitativeDelta, MovementContext, Migration, QuantitativeState\n'), ((12168, 12231), 'indra.statements.Evidence', 'Evidence', ([], {'source_api': '"""hume"""', 'text': 'text', 'annotations': 'annotations'}), "(source_api='hume', text=text, annotations=annotations)\n", (12176, 12231), False, 'from indra.statements import Concept, Event, Influence, TimeContext, RefContext, WorldContext, Evidence, QualitativeDelta, MovementContext, Migration, QuantitativeState\n'), ((18729, 18795), 'indra.statements.Event', 'Event', (['concept'], {'delta': 'ev_delta', 'context': 'context', 'evidence': 'evidence'}), '(concept, delta=ev_delta, context=context, evidence=evidence)\n', (18734, 18795), False, 'from indra.statements import Concept, Event, Influence, TimeContext, RefContext, WorldContext, Evidence, QualitativeDelta, MovementContext, Migration, QuantitativeState\n'), ((19818, 19867), 'indra.statements.RefContext', 'RefContext', ([], {'name': 'place', 'db_refs': "{'GEOID': geo_id}"}), "(name=place, db_refs={'GEOID': geo_id})\n", (19828, 19867), False, 'from indra.statements import Concept, Event, Influence, TimeContext, RefContext, WorldContext, Evidence, QualitativeDelta, MovementContext, Migration, QuantitativeState\n'), ((19893, 19910), 'indra.statements.RefContext', 'RefContext', (['place'], {}), '(place)\n', (19903, 19910), False, 'from indra.statements import Concept, Event, Influence, TimeContext, RefContext, WorldContext, Evidence, QualitativeDelta, MovementContext, Migration, QuantitativeState\n'), ((20187, 20209), 'indra.statements.TimeContext', 'TimeContext', ([], {'text': 'text'}), '(text=text)\n', (20198, 20209), False, 'from indra.statements import Concept, Event, Influence, TimeContext, RefContext, WorldContext, Evidence, QualitativeDelta, MovementContext, Migration, QuantitativeState\n'), ((20399, 20419), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (20408, 20419), False, 'from datetime import datetime, timedelta\n'), ((2556, 2595), 'indra.statements.Influence', 'Influence', (['subj', 'obj'], {'evidence': 'evidence'}), '(subj, obj, evidence=evidence)\n', (2565, 2595), False, 'from indra.statements import Concept, 
Event, Influence, TimeContext, RefContext, WorldContext, Evidence, QualitativeDelta, MovementContext, Migration, QuantitativeState\n'), ((6876, 6933), 'indra.statements.WorldContext', 'WorldContext', ([], {'time': 'time_context', 'geo_location': 'loc_context'}), '(time=time_context, geo_location=loc_context)\n', (6888, 6933), False, 'from indra.statements import Concept, Event, Influence, TimeContext, RefContext, WorldContext, Evidence, QualitativeDelta, MovementContext, Migration, QuantitativeState\n'), ((8356, 8420), 'indra.statements.MovementContext', 'MovementContext', ([], {'locations': 'movement_locations', 'time': 'time_context'}), '(locations=movement_locations, time=time_context)\n', (8371, 8420), False, 'from indra.statements import Concept, Event, Influence, TimeContext, RefContext, WorldContext, Evidence, QualitativeDelta, MovementContext, Migration, QuantitativeState\n'), ((10645, 10738), 'indra.statements.Migration', 'Migration', (['concept'], {'delta': 'quantitative_state', 'context': 'movement_context', 'evidence': 'evidence'}), '(concept, delta=quantitative_state, context=movement_context,\n evidence=evidence)\n', (10654, 10738), False, 'from indra.statements import Concept, Event, Influence, TimeContext, RefContext, WorldContext, Evidence, QualitativeDelta, MovementContext, Migration, QuantitativeState\n'), ((10959, 11025), 'indra.statements.Event', 'Event', (['concept'], {'delta': 'ev_delta', 'context': 'context', 'evidence': 'evidence'}), '(concept, delta=ev_delta, context=context, evidence=evidence)\n', (10964, 11025), False, 'from indra.statements import Concept, Event, Influence, TimeContext, RefContext, WorldContext, Evidence, QualitativeDelta, MovementContext, Migration, QuantitativeState\n'), ((7619, 7727), 'indra.statements.QuantitativeState', 'QuantitativeState', ([], {'entity': '"""person"""', 'value': "count['value']", 'unit': "count['unit']", 'modifier': "count['modifier']"}), "(entity='person', value=count['value'], unit=count['unit'],\n modifier=count['modifier'])\n", (7636, 7727), False, 'from indra.statements import Concept, Event, Influence, TimeContext, RefContext, WorldContext, Evidence, QualitativeDelta, MovementContext, Migration, QuantitativeState\n')] |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Pre-existing BD
Revision ID: bda3c34581e0
Revises: <PASSWORD>
Create Date: 2020-08-05 14:18:11.909757
"""
# revision identifiers, used by Alembic.
revision = 'bda3c34581e0'
down_revision = '<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
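    # Add a nullable bridge_domain_dn column for recording a pre-existing bridge domain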
op.add_column('apic_aim_network_extensions',
sa.Column('bridge_domain_dn', sa.String(1024),
nullable=True))
def downgrade():
pass
| [
"sqlalchemy.String"
] | [((943, 958), 'sqlalchemy.String', 'sa.String', (['(1024)'], {}), '(1024)\n', (952, 958), True, 'import sqlalchemy as sa\n')] |
#!/usr/bin/env python
import pybullet as p
import time
import pybullet_data
import numpy as np
import sys
sys.path.append('../../../')
from spotmicro.util import pybullet_data as pd
physicsClient = p.connect(p.GUI) # or p.DIRECT for non-graphical version
p.setAdditionalSearchPath(pybullet_data.getDataPath()) # optionally
p.setGravity(0, 0, -9.81)
# p.setTimeStep(1./240.) # slow, accurate
p.setRealTimeSimulation(0) # we want to be faster than real time :)
planeId = p.loadURDF("plane.urdf")
StartPos = [0, 0, 0.3]
StartOrientation = p.getQuaternionFromEuler([0, 0, 0])
p.resetDebugVisualizerCamera(cameraDistance=0.8,
cameraYaw=45,
cameraPitch=-30,
cameraTargetPosition=[0, 0, 0])
boxId = p.loadURDF(pd.getDataPath() + "/assets/urdf/spot.urdf",
StartPos,
StartOrientation,
flags=p.URDF_USE_SELF_COLLISION_EXCLUDE_PARENT)
numj = p.getNumJoints(boxId)
numb = p.getNumBodies()
Pos, Orn = p.getBasePositionAndOrientation(boxId)
print(Pos, Orn)
print("Number of joints {}".format(numj))
print("Number of links {}".format(numb))
joint = []
movingJoints = [
6,
7,
8, # FL
10,
11,
12, # FR
15,
16,
17, # BL
19,
20,
21 # BR
]
maxVelocity = 100
mode = p.POSITION_CONTROL
p.setJointMotorControlArray(bodyUniqueId=boxId,
jointIndices=movingJoints,
controlMode=p.POSITION_CONTROL,
targetPositions=np.zeros(12),
targetVelocities=np.zeros(12),
forces=np.ones(12) * np.inf)
counter = 0
angle1 = -np.pi / 2.0
angle2 = 0.0
angle = angle1
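# Toggle the four wrist joints (FWrists) between angle1 and angle2 every 1000 simulation steps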
for i in range(100000000):
counter += 1
if counter % 1000 == 0:
p.setJointMotorControlArray(
bodyUniqueId=boxId,
jointIndices=[8, 12, 17, 21], # FWrists
controlMode=p.POSITION_CONTROL,
targetPositions=np.ones(4) * angle,
targetVelocities=np.zeros(4),
forces=np.ones(4) * 0.15)
counter = 0
if angle == angle1:
angle = angle2
else:
angle = angle1
p.stepSimulation()
p.disconnect() | [
"pybullet.getNumBodies",
"pybullet.resetDebugVisualizerCamera",
"pybullet_data.getDataPath",
"numpy.ones",
"pybullet.getBasePositionAndOrientation",
"pybullet.connect",
"pybullet.getNumJoints",
"pybullet.setGravity",
"pybullet.getQuaternionFromEuler",
"pybullet.disconnect",
"numpy.zeros",
"spotmicro.util.pybullet_data.getDataPath",
"pybullet.stepSimulation",
"pybullet.setRealTimeSimulation",
"sys.path.append",
"pybullet.loadURDF"
] | [((109, 137), 'sys.path.append', 'sys.path.append', (['"""../../../"""'], {}), "('../../../')\n", (124, 137), False, 'import sys\n'), ((202, 218), 'pybullet.connect', 'p.connect', (['p.GUI'], {}), '(p.GUI)\n', (211, 218), True, 'import pybullet as p\n'), ((329, 354), 'pybullet.setGravity', 'p.setGravity', (['(0)', '(0)', '(-9.81)'], {}), '(0, 0, -9.81)\n', (341, 354), True, 'import pybullet as p\n'), ((403, 429), 'pybullet.setRealTimeSimulation', 'p.setRealTimeSimulation', (['(0)'], {}), '(0)\n', (426, 429), True, 'import pybullet as p\n'), ((482, 506), 'pybullet.loadURDF', 'p.loadURDF', (['"""plane.urdf"""'], {}), "('plane.urdf')\n", (492, 506), True, 'import pybullet as p\n'), ((549, 584), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (573, 584), True, 'import pybullet as p\n'), ((585, 701), 'pybullet.resetDebugVisualizerCamera', 'p.resetDebugVisualizerCamera', ([], {'cameraDistance': '(0.8)', 'cameraYaw': '(45)', 'cameraPitch': '(-30)', 'cameraTargetPosition': '[0, 0, 0]'}), '(cameraDistance=0.8, cameraYaw=45, cameraPitch=\n -30, cameraTargetPosition=[0, 0, 0])\n', (613, 701), True, 'import pybullet as p\n'), ((989, 1010), 'pybullet.getNumJoints', 'p.getNumJoints', (['boxId'], {}), '(boxId)\n', (1003, 1010), True, 'import pybullet as p\n'), ((1018, 1034), 'pybullet.getNumBodies', 'p.getNumBodies', ([], {}), '()\n', (1032, 1034), True, 'import pybullet as p\n'), ((1046, 1084), 'pybullet.getBasePositionAndOrientation', 'p.getBasePositionAndOrientation', (['boxId'], {}), '(boxId)\n', (1077, 1084), True, 'import pybullet as p\n'), ((2282, 2296), 'pybullet.disconnect', 'p.disconnect', ([], {}), '()\n', (2294, 2296), True, 'import pybullet as p\n'), ((286, 313), 'pybullet_data.getDataPath', 'pybullet_data.getDataPath', ([], {}), '()\n', (311, 313), False, 'import pybullet_data\n'), ((2263, 2281), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (2279, 2281), True, 'import pybullet as p\n'), ((803, 819), 'spotmicro.util.pybullet_data.getDataPath', 'pd.getDataPath', ([], {}), '()\n', (817, 819), True, 'from spotmicro.util import pybullet_data as pd\n'), ((1582, 1594), 'numpy.zeros', 'np.zeros', (['(12)'], {}), '(12)\n', (1590, 1594), True, 'import numpy as np\n'), ((1641, 1653), 'numpy.zeros', 'np.zeros', (['(12)'], {}), '(12)\n', (1649, 1653), True, 'import numpy as np\n'), ((1690, 1701), 'numpy.ones', 'np.ones', (['(12)'], {}), '(12)\n', (1697, 1701), True, 'import numpy as np\n'), ((2092, 2103), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (2100, 2103), True, 'import numpy as np\n'), ((2043, 2053), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (2050, 2053), True, 'import numpy as np\n'), ((2124, 2134), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (2131, 2134), True, 'import numpy as np\n')] |
"""Code for preprocessing Cora/Citeseer/Pubmed datasets."""
import os
from gensim.models import doc2vec
import networkx as nx
import node2vec as n2v
import numpy as np
import torch
from tqdm.auto import tqdm
class InitialEdgeFeatureExtractor:
"""Extracts cos sim of node vectors, word ratios and concat of Doc2vec."""
def __init__(self):
"""Inits InitialEdgeFeatureExtractor."""
self._cos = torch.nn.CosineSimilarity()
def extract(
self,
g,
raw_node_features,
d2v_node_features,
node_classes,
verbose,
):
"""Implements feature extraction."""
original_edges = sorted(g.edges())
max_cls = node_classes.max().item()
edge_fts = {}
edge_cls = []
for u, v in tqdm(original_edges,
desc='Extracting edge features',
disable=not verbose,
leave=False):
raw_x_u = raw_node_features[u].float()
x_u = d2v_node_features[u].float()
c_u = node_classes[u].item()
raw_x_v = raw_node_features[v].float()
x_v = d2v_node_features[v].float()
c_v = node_classes[v].item()
# Add original edge
edge_fts[(u, v)] = self._mk_features(
raw_x_u=raw_x_u, raw_x_v=raw_x_v,
x_u=x_u, x_v=x_v,
is_original_edge=1,
)
edge_cls.append(c_u if c_u == c_v else max_cls + 1)
# Add reversed edge (for aggregation models)
edge_fts[(v, u)] = self._mk_features(
raw_x_u=raw_x_v, raw_x_v=raw_x_u,
x_u=x_v, x_v=x_u,
is_original_edge=0,
)
H, edge2idx = self._to_edge_feature_tensor(edge_fts)
return original_edges, edge_cls, H, edge2idx
def _mk_features(self, raw_x_u, raw_x_v, x_u, x_v, is_original_edge):
return [
self._cos_sim(raw_x_u, raw_x_v),
self._unique_word_ratio(raw_x_u),
self._unique_word_ratio(raw_x_v),
*self._concat(x_u, x_v),
is_original_edge,
]
def _cos_sim(self, a, b):
return self._cos(a.unsqueeze(0), b.unsqueeze(0)).item()
@staticmethod
def _unique_word_ratio(a):
return torch.sum(a).item() / a.size(0)
@staticmethod
def _concat(a, b):
return torch.cat([a, b], dim=-1).tolist()
@staticmethod
def _to_edge_feature_tensor(ef: dict):
edges = sorted(ef.keys())
idxs = range(len(ef.keys()))
edge2idx = dict(zip(edges, idxs))
idx2edge = dict(zip(idxs, edges))
H = torch.tensor([ef[idx2edge[idx]] for idx in idxs], dtype=torch.float)
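        # Prepend an all-zero padding row: edge indices shift up by one and the dummy edge (-1, -1) maps to index 0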
H = torch.cat([torch.zeros(1, H.size(1)), H], dim=0)
edge2idx = {k: v + 1 for k, v in edge2idx.items()}
edge2idx[(-1, -1)] = 0
return H, edge2idx
class Node2vecExtractor:
"""Extracts node features using Node2vec."""
def __init__(self, dim):
"""Inits Node2vecExtractor."""
self._dim = dim
def extract(self, g):
"""Implements feature extraction."""
emb = n2v.Node2Vec(
graph=g,
dimensions=self._dim,
workers=8,
p=4, q=1,
quiet=True,
).fit()
nodes = sorted(g.nodes())
idxs = range(g.number_of_nodes())
node2idx = dict(zip(nodes, idxs))
idx2node = dict(zip(idxs, nodes))
M = torch.tensor([emb.wv[str(idx2node[idx])] for idx in idxs])
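        # Index 0 is reserved as an all-zero padding entry for the dummy node -1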
M = torch.cat([torch.zeros(1, M.size(1)), M], dim=0)
node2idx = {k: v + 1 for k, v in node2idx.items()}
node2idx[-1] = 0
return {'M': M, 'node2idx': node2idx}
def read_node_info(path):
"""Reads node features and classes from raw file."""
raw = {}
with open(path, 'r') as fin:
for line in fin.readlines():
row = line.strip().split('\t')
node_id = row[0]
fts = [int(f) for f in row[1:-1]]
cls = row[-1]
raw[node_id] = {'fts': fts, 'cls': cls}
unique_cls = set(v['cls'] for v in raw.values())
orig2new_cls = dict(zip(
sorted(unique_cls),
range(len(unique_cls))
))
for node_info in raw.values():
node_info['cls'] = orig2new_cls[node_info['cls']]
return raw
def read_graph(path):
"""Reads the citation links and builds graphs from it."""
cites = []
with open(path, 'r') as fin:
for line in tqdm(fin.readlines(), desc='Read raw graph', leave=False):
e = line.strip().split('\t')
u, v = e[0], e[1]
if u == v: # Remove self-loops
continue
if (v, u) in cites:
continue
cites.append((u, v))
g = nx.DiGraph()
g.add_edges_from(cites)
giant_component_nodes = next(nx.connected_components(G=g.to_undirected()))
g = g.subgraph(nodes=giant_component_nodes)
return g
def remove_unknown_nodes(g, node_info):
"""Removes nodes from graph that are not present in `content` file."""
g_cpy = g.copy()
node_to_remove = [
node
for node in g_cpy.nodes()
if node not in node_info.keys()
]
g_cpy.remove_nodes_from(node_to_remove)
while True:
zero_deg_nodes = [node for node, deg in g_cpy.degree() if deg == 0]
if not zero_deg_nodes:
break
g_cpy.remove_nodes_from(zero_deg_nodes)
return g_cpy
def read_raw_data(path):
"""Reads the raw graph data and cleans up unknown nodes."""
raw_graph = read_graph(path=os.path.join(path, 'cites'))
raw_node_info = read_node_info(path=os.path.join(path, 'content'))
g = remove_unknown_nodes(g=raw_graph, node_info=raw_node_info)
orig2new_nodes = dict(zip(
sorted(g.nodes()),
range(g.number_of_nodes())
))
new2orig_nodes = dict(zip(
range(g.number_of_nodes()),
sorted(g.nodes()),
))
g = nx.relabel_nodes(g, mapping=orig2new_nodes)
node_fts = []
node_cls = []
for node_id in sorted(new2orig_nodes.keys()):
orig_node_id = new2orig_nodes[node_id]
node_fts.append(raw_node_info[orig_node_id]['fts'])
node_cls.append(raw_node_info[orig_node_id]['cls'])
node_fts = torch.tensor(node_fts)
node_cls = torch.tensor(node_cls)
return g, node_fts, node_cls
def to_doc2vec_emb(node_fts, dim, epochs):
"""Computes Doc2vec embeddings from node BoW features."""
documents = [
doc2vec.TaggedDocument(
words=[str(idx) for idx in range(len(doc)) if doc[idx] == 1],
tags=[f'Doc_{doc_idx}'],
)
for doc_idx, doc in enumerate(node_fts.tolist())
]
d2v = doc2vec.Doc2Vec(dm=0, vector_size=dim, workers=8)
d2v.build_vocab(documents=documents)
d2v.train(documents=documents, total_examples=len(documents), epochs=epochs)
vecs = torch.tensor([
d2v.docvecs[f'Doc_{idx}']
for idx in range(node_fts.size(0))
])
return vecs
def sample_train_val_test(X, y, num_cls, train_size, val_size, test_size):
"""Samples train/val/test splits."""
# Train
X_train, y_train = [], []
for cls in range(num_cls):
xc, yc = X[y == cls], y[y == cls]
idxs = np.random.choice(
range(xc.shape[0]),
size=train_size,
replace=False,
)
X_train.extend([tuple(x) for x in xc[idxs].tolist()])
y_train.extend(yc[idxs].tolist())
# Val
rest_idxs = [i for i in range(X.shape[0]) if tuple(X[i]) not in X_train]
val_idxs = np.random.choice(rest_idxs, size=val_size, replace=False)
X_val = [tuple(x) for x in X[val_idxs].tolist()]
y_val = y[val_idxs].tolist()
# Test
rest_idxs = [
i for i in range(X.shape[0])
if tuple(X[i]) not in X_train and tuple(X[i]) not in X_val
]
test_idxs = np.random.choice(rest_idxs, size=test_size, replace=False)
X_test = [tuple(x) for x in X[test_idxs].tolist()]
y_test = y[test_idxs].tolist()
return {
'train': {
'X': X_train,
'y': y_train,
},
'val': {
'X': X_val,
'y': y_val,
},
'test': {
'X': X_test,
'y': y_test,
}
}
def read_cora_citeseer_pubmed(
path: str,
node_dim: int,
doc2vec_kwargs: dict,
split_sizes: dict,
num_datasets: int,
verbose: bool = False,
):
"""Reads and preprocesses Cora dataset."""
g, raw_node_fts, node_cls = read_raw_data(path)
# Convert node BoW features to Doc2vec embeddings
d2v_node_fts = to_doc2vec_emb(
node_fts=raw_node_fts,
dim=doc2vec_kwargs['dim'],
epochs=doc2vec_kwargs['epochs'],
)
# Extract edge and new node features
edges, edge_labels, H, edge2idx = InitialEdgeFeatureExtractor().extract(
g=g,
raw_node_features=raw_node_fts,
d2v_node_features=d2v_node_fts,
node_classes=node_cls,
verbose=verbose,
)
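    # Edge labels span 0..max_cls plus one extra class for mixed-class edges, hence max + 2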
num_cls = (node_cls.max() + 2).item()
# Sample multiple datasets
Xy, graphs = [], []
M = []
for _ in tqdm(range(num_datasets), desc='Datasets'):
# Train/val/test split
tr_val_te = sample_train_val_test(
X=np.array(edges),
y=np.array(edge_labels),
num_cls=num_cls,
train_size=split_sizes['train'],
val_size=split_sizes['validation'],
test_size=split_sizes['test'],
)
Xy.append(tr_val_te)
# Remove test edges from graph (inductive)
g_cpy = g.copy()
g_cpy.remove_edges_from(tr_val_te['test']['X'])
g_cpy.remove_nodes_from([n for n, d in g_cpy.degree() if d == 0])
graphs.append(g_cpy)
# Compute Node2vec features
M.append(Node2vecExtractor(dim=node_dim).extract(g_cpy))
M_test = Node2vecExtractor(dim=node_dim).extract(g)
return {
# Dataset independent
'original_graph': g,
'H': H,
'edge2idx': edge2idx,
'num_cls': num_cls,
'dims': {'node': node_dim, 'edge': H.size(1)},
'num_datasets': num_datasets,
# Dataset dependent
'Xy': Xy,
'graphs': graphs,
'M': M,
'M_test': M_test,
}
| [
"networkx.relabel_nodes",
"torch.nn.CosineSimilarity",
"numpy.random.choice",
"networkx.DiGraph",
"os.path.join",
"torch.tensor",
"numpy.array",
"torch.sum",
"tqdm.auto.tqdm",
"gensim.models.doc2vec.Doc2Vec",
"torch.cat",
"node2vec.Node2Vec"
] | [((4857, 4869), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (4867, 4869), True, 'import networkx as nx\n'), ((6052, 6095), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['g'], {'mapping': 'orig2new_nodes'}), '(g, mapping=orig2new_nodes)\n', (6068, 6095), True, 'import networkx as nx\n'), ((6367, 6389), 'torch.tensor', 'torch.tensor', (['node_fts'], {}), '(node_fts)\n', (6379, 6389), False, 'import torch\n'), ((6405, 6427), 'torch.tensor', 'torch.tensor', (['node_cls'], {}), '(node_cls)\n', (6417, 6427), False, 'import torch\n'), ((6813, 6862), 'gensim.models.doc2vec.Doc2Vec', 'doc2vec.Doc2Vec', ([], {'dm': '(0)', 'vector_size': 'dim', 'workers': '(8)'}), '(dm=0, vector_size=dim, workers=8)\n', (6828, 6862), False, 'from gensim.models import doc2vec\n'), ((7685, 7742), 'numpy.random.choice', 'np.random.choice', (['rest_idxs'], {'size': 'val_size', 'replace': '(False)'}), '(rest_idxs, size=val_size, replace=False)\n', (7701, 7742), True, 'import numpy as np\n'), ((7986, 8044), 'numpy.random.choice', 'np.random.choice', (['rest_idxs'], {'size': 'test_size', 'replace': '(False)'}), '(rest_idxs, size=test_size, replace=False)\n', (8002, 8044), True, 'import numpy as np\n'), ((419, 446), 'torch.nn.CosineSimilarity', 'torch.nn.CosineSimilarity', ([], {}), '()\n', (444, 446), False, 'import torch\n'), ((787, 878), 'tqdm.auto.tqdm', 'tqdm', (['original_edges'], {'desc': '"""Extracting edge features"""', 'disable': '(not verbose)', 'leave': '(False)'}), "(original_edges, desc='Extracting edge features', disable=not verbose,\n leave=False)\n", (791, 878), False, 'from tqdm.auto import tqdm\n'), ((2690, 2758), 'torch.tensor', 'torch.tensor', (['[ef[idx2edge[idx]] for idx in idxs]'], {'dtype': 'torch.float'}), '([ef[idx2edge[idx]] for idx in idxs], dtype=torch.float)\n', (2702, 2758), False, 'import torch\n'), ((5672, 5699), 'os.path.join', 'os.path.join', (['path', '"""cites"""'], {}), "(path, 'cites')\n", (5684, 5699), False, 'import os\n'), ((5742, 5771), 'os.path.join', 'os.path.join', (['path', '"""content"""'], {}), "(path, 'content')\n", (5754, 5771), False, 'import os\n'), ((2424, 2449), 'torch.cat', 'torch.cat', (['[a, b]'], {'dim': '(-1)'}), '([a, b], dim=-1)\n', (2433, 2449), False, 'import torch\n'), ((3195, 3271), 'node2vec.Node2Vec', 'n2v.Node2Vec', ([], {'graph': 'g', 'dimensions': 'self._dim', 'workers': '(8)', 'p': '(4)', 'q': '(1)', 'quiet': '(True)'}), '(graph=g, dimensions=self._dim, workers=8, p=4, q=1, quiet=True)\n', (3207, 3271), True, 'import node2vec as n2v\n'), ((9390, 9405), 'numpy.array', 'np.array', (['edges'], {}), '(edges)\n', (9398, 9405), True, 'import numpy as np\n'), ((9421, 9442), 'numpy.array', 'np.array', (['edge_labels'], {}), '(edge_labels)\n', (9429, 9442), True, 'import numpy as np\n'), ((2335, 2347), 'torch.sum', 'torch.sum', (['a'], {}), '(a)\n', (2344, 2347), False, 'import torch\n')] |
import logging
from models.game import Game
from app import db
from constants import SERVER_ERROR
logger = logging.getLogger(__name__)
legal_words = []
with open("models/legal_words.txt") as file:
legal_words = {word.strip() for word in file.readlines()}
logger.info('populating')
class GameController:
MAX_GUESSES = 6
MAX_RETRIES = 10
GREEN_SQUARE = '\U0001F7E9'
YELLOW_SQUARE = '\U0001F7E8'
BLACK_SQUARE = '\U00002B1B'
def __init__(self, chat_id):
self.chat_id = chat_id
self.game = self.retrieve_game()
def retrieve_game(self, counter=0):
try:
return Game.query.filter_by(chat_id=self.chat_id).order_by(Game.id.desc()).first()
except Exception as e:
logger.error(e)
db.session.close()
counter += 1
if counter < self.MAX_RETRIES:
return self.retrieve_game(counter)
else:
raise e
def try_create_game(self, answer, setter_chat_id, setter_username) -> str:
if self.is_game_ongoing():
return 'There is an ongoing game already!'
elif not self.is_answer_legal(answer):
            return 'Please set a valid word! Words must be between 4 and 6 letters and present in the dictionary.'
elif not self.create_game(answer, setter_chat_id, setter_username):
return SERVER_ERROR
return f'{setter_username} has started Wordle with Friends! \nThe word is {len(answer)} letters long. \nUse /guess [word] to guess. \nYou have 6 tries.'
def try_guessing(self, word, guesser_username) -> str:
word = word.lower()
if not self.is_game_ongoing():
return 'There is no ongoing game! Start a new one with /start.'
if not self.is_guess_legal(word):
return f'"{word}" is invalid! Your guess must be a legal word of {len(self.game.answer)} letters! Use /guess to try again.'
if not self.add_guess(word, guesser_username):
return SERVER_ERROR
return self.display_past_guesses()
def is_game_ongoing(self) -> bool:
if not self.game:
return False
guesses = self.game.get_guesses()
return not guesses or (len(guesses) < self.MAX_GUESSES and guesses[-1]['guess'] != self.game.answer)
def display_past_guesses(self) -> str:
if not self.game:
return 'No games have been played. Start a new one with /start, or email <EMAIL> with a bug report.'
guesses = self.game.get_guesses()
if not guesses:
return 'There have been no guesses so far. Use /guess to guess.'
row = []
for i in range(len(guesses)):
row.append(f"""<code>{' '.join([c for c in guesses[i]['guess'].upper()])}</code>""")
row.append(self.format_guess_result(guesses[i]['guess']))
row.append(f"({guesses[i]['by']}: {i+1}/{self.MAX_GUESSES})")
history = f"Game started by {self.game.setter_username}\n" + "\n".join(row)
if guesses[-1]['guess'].lower() == self.game.answer:
history += "\nCongratulations! Use /start to play again! #wordlewithfriends"
elif len(guesses) == self.MAX_GUESSES:
history += f"\nBetter luck next time! The answer was {self.game.answer.upper()}. Use /start to start another game! #wordlewithfriends"
else:
history += f"\n{self.format_keyboard()}"
return history
def format_guess_result(self, guess: str) -> str:
answer = self.game.answer
has_char_in_answer_been_found = [False for _ in guess]
guess_result = [self.BLACK_SQUARE for _ in guess]
# Check green squares first
for i in range(len(guess)):
if answer[i] == guess[i]:
guess_result[i] = self.GREEN_SQUARE
has_char_in_answer_been_found[i] = True
        # Check for yellow squares
for i in range(len(guess)):
# If it's not in answer or has already been filled, skip
if guess_result[i] == self.GREEN_SQUARE or guess[i] not in answer:
continue
# Search
for j in range(len(answer)):
# There is a character in answer corresponding to guess
if not has_char_in_answer_been_found[j] and guess[i] == answer[j]:
has_char_in_answer_been_found[j] = True
guess_result[i] = self.YELLOW_SQUARE
break
return ' '.join(guess_result)
def format_keyboard(self):
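        """Render the keyboard markup: underline letters already placed or present, blank out eliminated ones."""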
if not self.game or not self.game.get_guesses():
return ''
guesses = self.game.get_guesses()
answer = self.game.answer
# 0 for wrong letter, 1 for wrong position, 2 for right position
output_dict = {}
for guess in guesses:
word = guess['guess']
for i in range(len(word)):
c = word[i]
if c == answer[i]:
output_dict[c] = 2
elif c in answer:
output_dict[c] = 2 if output_dict.get(c) == 2 else 1
else:
output_dict[c] = 0
keyboard_rows = ['qwertyuiop', 'asdfghjkl', 'zxcvbnm']
output = []
for row in keyboard_rows:
output_row = []
for c in row:
if c in output_dict:
if output_dict[c] == 2:
result = f"<u><b>{c}</b></u>"
elif output_dict[c] == 1:
result = f"<u><i>{c}</i></u>"
else:
result = " "
else:
result = c
output_row.append(result)
output.append(' '.join(output_row))
return '\n'.join(output).upper()
def create_game(self, answer: str, setter_chat_id: str, setter_username: str, counter=0) -> bool:
try:
if self.game:
self.game = None
game = Game(chat_id=self.chat_id, answer=answer.lower(),
setter_chat_id=setter_chat_id, setter_username=setter_username)
db.session.add(game)
db.session.commit()
self.game = game
return True
except Exception as e:
logger.error(e)
db.session.rollback()
counter += 1
if counter < self.MAX_RETRIES:
return self.create_game(answer, setter_chat_id, setter_username, counter)
return False
def add_guess(self, guess: str, guesser_username: str, counter=0) -> bool:
try:
self.game.add_guess(guess, guesser_username)
db.session.commit()
return True
except Exception as e:
logger.error(e)
db.session.rollback()
counter += 1
if counter < self.MAX_RETRIES:
return self.add_guess(guess, guesser_username, counter)
return False
def is_guess_legal(self, guess: str) -> bool:
answer = self.game.answer
return type(guess) == str and len(guess) == len(answer) and guess.lower() in legal_words
@classmethod
def is_answer_legal(cls, answer: str) -> bool:
return type(answer) == str and 4 <= len(answer) <= 6 and answer.lower() in legal_words
| [
"logging.getLogger",
"app.db.session.commit",
"app.db.session.close",
"app.db.session.add",
"models.game.Game.query.filter_by",
"models.game.Game.id.desc",
"app.db.session.rollback"
] | [((108, 135), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (125, 135), False, 'import logging\n'), ((6187, 6207), 'app.db.session.add', 'db.session.add', (['game'], {}), '(game)\n', (6201, 6207), False, 'from app import db\n'), ((6220, 6239), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (6237, 6239), False, 'from app import db\n'), ((6731, 6750), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (6748, 6750), False, 'from app import db\n'), ((779, 797), 'app.db.session.close', 'db.session.close', ([], {}), '()\n', (795, 797), False, 'from app import db\n'), ((6364, 6385), 'app.db.session.rollback', 'db.session.rollback', ([], {}), '()\n', (6383, 6385), False, 'from app import db\n'), ((6846, 6867), 'app.db.session.rollback', 'db.session.rollback', ([], {}), '()\n', (6865, 6867), False, 'from app import db\n'), ((684, 698), 'models.game.Game.id.desc', 'Game.id.desc', ([], {}), '()\n', (696, 698), False, 'from models.game import Game\n'), ((632, 674), 'models.game.Game.query.filter_by', 'Game.query.filter_by', ([], {'chat_id': 'self.chat_id'}), '(chat_id=self.chat_id)\n', (652, 674), False, 'from models.game import Game\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 30 17:09:46 2020
@author: Daniel
"""
from sly import Parser
from sly.yacc import SlyLogger
from lexer import Lex
from tree import Op, Func, Int, Str, Var, DeclaredVar
from tree import Compare, Wrapper, DeclaredFunc, OtherFunc
import error
import exec as e
import io
class Parse(Parser):
log = SlyLogger(io.StringIO())
debugfile = 'parser.out'
start = 'stmt'
tokens = Lex.tokens
precedence = (
('nonassoc', LT, GT, LE, GE, EQ, NE),
('left', PLUS, MINUS),
('left', TIMES, DIVIDE),
('right', 'UMINUS')
)
def __init__(self):
self.parse_list = []
# Grammar rules and actions
@_('expr PLUS expr')
def expr(self, p):
return Op('+', p.expr0, p.expr1)
@_('expr MINUS expr')
def expr(self, p):
return Op('-', p.expr0, p.expr1)
@_('expr TIMES expr')
def expr(self, p):
return Op('*', p.expr0, p.expr1)
@_('expr DIVIDE expr')
def expr(self, p):
return Op('/', p.expr0, p.expr1)
@_('expr LE expr')
@_('expr GE expr')
@_('expr NE expr')
@_('expr EQ expr')
@_('expr LT expr')
@_('expr GT expr')
@_('expr AND expr')
@_('expr OR expr')
def expr(self, p):
return Compare(p.expr0, p[1], p.expr1)
@_('NUMBER')
def expr(self, p):
return Int(p.NUMBER)
@_('STRING')
def expr(self, p):
return Str(p.STRING)
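    # The "." rules below fold dotted names such as module.attr into plain strings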
@_('ID "." ID')
def expr(self, p):
return p.ID0 + "." + p.ID1
@_('ID "." expr')
def expr(self, p):
#print(type(p.expr))
if type(p.expr) != str:
print(p.lineno)
error.Raise(error.syntaxerror, "Variable name cannot be a non function or keyword")
return p.ID + "." + p.expr
@_('expr "." expr')
def expr(self, p):
if type(p.expr0) != str or type(p.expr1) != str:
error.Raise(error.syntaxerror, "Variable name cannot be a non function or keyword")
return p.expr0 + "." + p.expr1
@_('expr "." ID')
def expr(self, p):
if type(p.expr) != str:
error.Raise(error.syntaxerror, "Variable name cannot be a non function or keyword")
return p.expr + "." + p.ID
@_('ID')
def expr(self, p):
return DeclaredVar(p.ID)
@_('ID "(" param ")"')
@_('ID "(" expr ")"')
def expr(self, p):
if type(p[2]) != list:
d = [p[2].value]
else:
d = list(p[2])
return DeclaredFunc(p.ID, d)
@_('ID "(" ")"')
def expr(self, p):
return DeclaredFunc(p.ID, [])
@_('ID "=" expr')
@_('expr "=" expr')
@_('PRINT "(" expr ")"')
def stmt(self, p):
#print(p.expr)
if p[1] == "(":
return Func(print, [p.expr], builtin=True)
else:
return Var(p[0], p[2])
@_('RETURN expr')
def stmt(self, p):
return OtherFunc("return", p.expr)
@_('PRINT')
@_('INPUT')
def expr(self, p):
if p[0] == "print":
f = print
else:
f = input
return Wrapper("func", ["txt"], [Func(f, ["txt"], builtin=True)])
@_('INPUT "(" expr ")"')
def expr(self, p):
return Func(e.Input, [p.expr], builtin=True, returns=True)
@_('OBJECT')
def expr(self, p):
return {}
@_('WHILE expr "{" stmt "}"')
def stmt(self, p):
return Wrapper("while", p.expr, p.stmt)
@_('IF expr "{" stmt "}"')
def stmt(self, p):
return Wrapper("if", p.expr, p.stmt)
@_('IF expr "{" stmt "}" ELSE "{" stmt "}"')
def stmt(self, p):
return Wrapper("if", p.expr, p.stmt0, ["else"], [p.stmt1])
@_('FUNC ID param "{" stmt "}"')
@_('FUNC ID expr "{" stmt "}"')
def stmt(self, p):
if type(p[2]) != list and type(p[2]) != Str:
error.Raise(error.syntaxerror, "Func only takes strings as parameters")
elif type(p[2]) == Str:
d = [p[2].value]
else:
d = list(p[2])
return Wrapper("func", d, p.stmt, [p.ID])
@_('FUNC ID "{" stmt "}"')
def stmt(self, p):
return Wrapper("func", [], p.stmt, [p.ID])
@_('expr "," expr')
def param(self, p):
return [p.expr0, p.expr1]
@_('expr "," param')
def param(self, p):
d = [p.expr]
d.extend(p.param)
return d
@_('param "," param')
def param(self, p):
d = p.param0
d.extend(p.param1)
return d
@_('stmt ";" stmt')
def stmt(self, p):
l = [p.stmt0]
l.extend(p.stmt1)
return l
@_('stmt ";"')
def stmt(self, p):
l = [p.stmt]
return l
@_('expr ";" stmt')
def stmt(self, p):
l = [p.expr]
l.extend(p.stmt)
return l
@_('expr ";"')
def stmt(self, p):
l = [p.expr]
return l
@_('MINUS expr %prec UMINUS')
def expr(self, p):
return Int(-p.expr.value)
@_('"(" expr ")"')
def expr(self, p):
return p.expr
def error(self, p):
if p:
error.Raise(error.syntaxerror, "Syntax error at token " + p.type + " line " + str(p.lineno))
else:
error.Raise(error.eoferror, "Syntax error at EOF") | [
"tree.DeclaredFunc",
"tree.Compare",
"tree.Str",
"tree.Op",
"tree.DeclaredVar",
"tree.OtherFunc",
"tree.Var",
"tree.Func",
"error.Raise",
"tree.Wrapper",
"io.StringIO",
"tree.Int"
] | [((357, 370), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (368, 370), False, 'import io\n'), ((772, 797), 'tree.Op', 'Op', (['"""+"""', 'p.expr0', 'p.expr1'], {}), "('+', p.expr0, p.expr1)\n", (774, 797), False, 'from tree import Op, Func, Int, Str, Var, DeclaredVar\n'), ((863, 888), 'tree.Op', 'Op', (['"""-"""', 'p.expr0', 'p.expr1'], {}), "('-', p.expr0, p.expr1)\n", (865, 888), False, 'from tree import Op, Func, Int, Str, Var, DeclaredVar\n'), ((954, 979), 'tree.Op', 'Op', (['"""*"""', 'p.expr0', 'p.expr1'], {}), "('*', p.expr0, p.expr1)\n", (956, 979), False, 'from tree import Op, Func, Int, Str, Var, DeclaredVar\n'), ((1046, 1071), 'tree.Op', 'Op', (['"""/"""', 'p.expr0', 'p.expr1'], {}), "('/', p.expr0, p.expr1)\n", (1048, 1071), False, 'from tree import Op, Func, Int, Str, Var, DeclaredVar\n'), ((1300, 1331), 'tree.Compare', 'Compare', (['p.expr0', 'p[1]', 'p.expr1'], {}), '(p.expr0, p[1], p.expr1)\n', (1307, 1331), False, 'from tree import Compare, Wrapper, DeclaredFunc, OtherFunc\n'), ((1388, 1401), 'tree.Int', 'Int', (['p.NUMBER'], {}), '(p.NUMBER)\n', (1391, 1401), False, 'from tree import Op, Func, Int, Str, Var, DeclaredVar\n'), ((1462, 1475), 'tree.Str', 'Str', (['p.STRING'], {}), '(p.STRING)\n', (1465, 1475), False, 'from tree import Op, Func, Int, Str, Var, DeclaredVar\n'), ((2342, 2359), 'tree.DeclaredVar', 'DeclaredVar', (['p.ID'], {}), '(p.ID)\n', (2353, 2359), False, 'from tree import Op, Func, Int, Str, Var, DeclaredVar\n'), ((2557, 2578), 'tree.DeclaredFunc', 'DeclaredFunc', (['p.ID', 'd'], {}), '(p.ID, d)\n', (2569, 2578), False, 'from tree import Compare, Wrapper, DeclaredFunc, OtherFunc\n'), ((2643, 2665), 'tree.DeclaredFunc', 'DeclaredFunc', (['p.ID', '[]'], {}), '(p.ID, [])\n', (2655, 2665), False, 'from tree import Compare, Wrapper, DeclaredFunc, OtherFunc\n'), ((2985, 3012), 'tree.OtherFunc', 'OtherFunc', (['"""return"""', 'p.expr'], {}), "('return', p.expr)\n", (2994, 3012), False, 'from tree import Compare, Wrapper, DeclaredFunc, OtherFunc\n'), ((3305, 3356), 'tree.Func', 'Func', (['e.Input', '[p.expr]'], {'builtin': '(True)', 'returns': '(True)'}), '(e.Input, [p.expr], builtin=True, returns=True)\n', (3309, 3356), False, 'from tree import Op, Func, Int, Str, Var, DeclaredVar\n'), ((3497, 3529), 'tree.Wrapper', 'Wrapper', (['"""while"""', 'p.expr', 'p.stmt'], {}), "('while', p.expr, p.stmt)\n", (3504, 3529), False, 'from tree import Compare, Wrapper, DeclaredFunc, OtherFunc\n'), ((3604, 3633), 'tree.Wrapper', 'Wrapper', (['"""if"""', 'p.expr', 'p.stmt'], {}), "('if', p.expr, p.stmt)\n", (3611, 3633), False, 'from tree import Compare, Wrapper, DeclaredFunc, OtherFunc\n'), ((3726, 3777), 'tree.Wrapper', 'Wrapper', (['"""if"""', 'p.expr', 'p.stmt0', "['else']", '[p.stmt1]'], {}), "('if', p.expr, p.stmt0, ['else'], [p.stmt1])\n", (3733, 3777), False, 'from tree import Compare, Wrapper, DeclaredFunc, OtherFunc\n'), ((4133, 4167), 'tree.Wrapper', 'Wrapper', (['"""func"""', 'd', 'p.stmt', '[p.ID]'], {}), "('func', d, p.stmt, [p.ID])\n", (4140, 4167), False, 'from tree import Compare, Wrapper, DeclaredFunc, OtherFunc\n'), ((4237, 4272), 'tree.Wrapper', 'Wrapper', (['"""func"""', '[]', 'p.stmt', '[p.ID]'], {}), "('func', [], p.stmt, [p.ID])\n", (4244, 4272), False, 'from tree import Compare, Wrapper, DeclaredFunc, OtherFunc\n'), ((5079, 5097), 'tree.Int', 'Int', (['(-p.expr.value)'], {}), '(-p.expr.value)\n', (5082, 5097), False, 'from tree import Op, Func, Int, Str, Var, DeclaredVar\n'), ((1710, 1797), 'error.Raise', 'error.Raise', (['error.syntaxerror', 
'"""Variable name cannot be a non function or keyword"""'], {}), "(error.syntaxerror,\n 'Variable name cannot be a non function or keyword')\n", (1721, 1797), False, 'import error\n'), ((1950, 2037), 'error.Raise', 'error.Raise', (['error.syntaxerror', '"""Variable name cannot be a non function or keyword"""'], {}), "(error.syntaxerror,\n 'Variable name cannot be a non function or keyword')\n", (1961, 2037), False, 'import error\n'), ((2167, 2254), 'error.Raise', 'error.Raise', (['error.syntaxerror', '"""Variable name cannot be a non function or keyword"""'], {}), "(error.syntaxerror,\n 'Variable name cannot be a non function or keyword')\n", (2178, 2254), False, 'import error\n'), ((2835, 2870), 'tree.Func', 'Func', (['print', '[p.expr]'], {'builtin': '(True)'}), '(print, [p.expr], builtin=True)\n', (2839, 2870), False, 'from tree import Op, Func, Int, Str, Var, DeclaredVar\n'), ((2904, 2919), 'tree.Var', 'Var', (['p[0]', 'p[2]'], {}), '(p[0], p[2])\n', (2907, 2919), False, 'from tree import Op, Func, Int, Str, Var, DeclaredVar\n'), ((3944, 4015), 'error.Raise', 'error.Raise', (['error.syntaxerror', '"""Func only takes strings as parameters"""'], {}), "(error.syntaxerror, 'Func only takes strings as parameters')\n", (3955, 4015), False, 'import error\n'), ((5345, 5395), 'error.Raise', 'error.Raise', (['error.eoferror', '"""Syntax error at EOF"""'], {}), "(error.eoferror, 'Syntax error at EOF')\n", (5356, 5395), False, 'import error\n'), ((3200, 3230), 'tree.Func', 'Func', (['f', "['txt']"], {'builtin': '(True)'}), "(f, ['txt'], builtin=True)\n", (3204, 3230), False, 'from tree import Op, Func, Int, Str, Var, DeclaredVar\n')] |
from unittest import TestCase
import unittest.mock as mock
from orion.handlers.users_handler import UsersHandler
from orion.models.location import Location
class TestUsersHandler(TestCase):
def setUp(self):
self.mock_ctx = mock.MagicMock()
def test_metadata(self):
handler = UsersHandler(ctx=self.mock_ctx)
self.assertEqual(handler.methods, ['GET'])
self.assertEqual(handler.path, '/api/users')
def test_results_valid(self):
self.mock_ctx.db.session.query().distinct().all.return_value = [
mock.MagicMock(user='user1', device='device1'),
mock.MagicMock(user='user1', device='device2'),
mock.MagicMock(user='user1', device='device3'),
mock.MagicMock(user='user2', device='device4'),
mock.MagicMock(user='user2', device='device5'),
]
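        # The handler should group these five devices under their two owning users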
handler = UsersHandler(ctx=self.mock_ctx)
resp, status = handler.run()
args, _ = self.mock_ctx.db.session.query.call_args
self.assertTrue(resp['success'])
self.assertEqual(status, 200)
self.assertEqual(
resp['data'],
[
{'user': 'user1', 'devices': ['device1', 'device2', 'device3']},
{'user': 'user2', 'devices': ['device4', 'device5']},
],
)
def test_results_empty(self):
self.mock_ctx.db.session.query().distinct().all.return_value = []
handler = UsersHandler(ctx=self.mock_ctx)
resp, status = handler.run()
args, _ = self.mock_ctx.db.session.query.call_args
self.assertTrue(resp['success'])
self.assertEqual(status, 200)
self.assertEqual(resp['data'], [])
self.assertEqual(args, (Location.user, Location.device))
| [
"orion.handlers.users_handler.UsersHandler",
"unittest.mock.MagicMock"
] | [((239, 255), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (253, 255), True, 'import unittest.mock as mock\n'), ((304, 335), 'orion.handlers.users_handler.UsersHandler', 'UsersHandler', ([], {'ctx': 'self.mock_ctx'}), '(ctx=self.mock_ctx)\n', (316, 335), False, 'from orion.handlers.users_handler import UsersHandler\n'), ((878, 909), 'orion.handlers.users_handler.UsersHandler', 'UsersHandler', ([], {'ctx': 'self.mock_ctx'}), '(ctx=self.mock_ctx)\n', (890, 909), False, 'from orion.handlers.users_handler import UsersHandler\n'), ((1456, 1487), 'orion.handlers.users_handler.UsersHandler', 'UsersHandler', ([], {'ctx': 'self.mock_ctx'}), '(ctx=self.mock_ctx)\n', (1468, 1487), False, 'from orion.handlers.users_handler import UsersHandler\n'), ((561, 607), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'user': '"""user1"""', 'device': '"""device1"""'}), "(user='user1', device='device1')\n", (575, 607), True, 'import unittest.mock as mock\n'), ((621, 667), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'user': '"""user1"""', 'device': '"""device2"""'}), "(user='user1', device='device2')\n", (635, 667), True, 'import unittest.mock as mock\n'), ((681, 727), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'user': '"""user1"""', 'device': '"""device3"""'}), "(user='user1', device='device3')\n", (695, 727), True, 'import unittest.mock as mock\n'), ((741, 787), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'user': '"""user2"""', 'device': '"""device4"""'}), "(user='user2', device='device4')\n", (755, 787), True, 'import unittest.mock as mock\n'), ((801, 847), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'user': '"""user2"""', 'device': '"""device5"""'}), "(user='user2', device='device5')\n", (815, 847), True, 'import unittest.mock as mock\n')] |
from setuptools import setup
setup(name='kalipto',
version='0.1.1',
description='A steganographic chat over social media',
url='https://github.com/reale/kalipto',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
packages=['kalipto'],
install_requires=['gossipy','stegano','twitter','wget'],
entry_points = {
'console_scripts': ['kalipto=kalipto.cmdline:main'],
},
zip_safe=False)
| [
"setuptools.setup"
] | [((30, 410), 'setuptools.setup', 'setup', ([], {'name': '"""kalipto"""', 'version': '"""0.1.1"""', 'description': '"""A steganographic chat over social media"""', 'url': '"""https://github.com/reale/kalipto"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'packages': "['kalipto']", 'install_requires': "['gossipy', 'stegano', 'twitter', 'wget']", 'entry_points': "{'console_scripts': ['kalipto=kalipto.cmdline:main']}", 'zip_safe': '(False)'}), "(name='kalipto', version='0.1.1', description=\n 'A steganographic chat over social media', url=\n 'https://github.com/reale/kalipto', author='<NAME>', author_email=\n '<EMAIL>', license='MIT', packages=['kalipto'], install_requires=[\n 'gossipy', 'stegano', 'twitter', 'wget'], entry_points={\n 'console_scripts': ['kalipto=kalipto.cmdline:main']}, zip_safe=False)\n", (35, 410), False, 'from setuptools import setup\n')] |
#!/usr/bin/env python3
# coding: utf-8
from wxpy import *
import platform
console_qr=(False if platform.system() == 'Windows' else True)
bot = Bot('bot.pkl', console_qr=console_qr)
bot.enable_puid('wxpy_puid.pkl')
def search_group_puid(puid):
try:
return bot.groups().search(puid=puid)[0]
pass
except:
return "查无此群"
pass
def search_user_puid(puid):
try:
return bot.friends().search(puid=puid)[0]
pass
except:
return "查无此人"
pass
embed() | [
"platform.system"
] | [((98, 115), 'platform.system', 'platform.system', ([], {}), '()\n', (113, 115), False, 'import platform\n')] |
"""Pure GRASP solver"""
from random import sample
from timeit import default_timer
from typing import List, Optional, Tuple
import numpy as np
from tsp_heuristics.heuristics.local_search import local_search_algo
from tsp_heuristics.heuristics.utils import get_tour_distance as compute_permutation_distance
def solve_tsp_grasp(
distance_matrix: np.ndarray,
start_position: int = 0,
alpha: float = 0.5,
perturbation_scheme: str = "two_opt",
max_iterations: int = 1,
max_processing_time: Optional[float] = None,
log_file: Optional[str] = None,
) -> Tuple[List, float, float]:
"""Solve a TSP problem with a GRASP heuristic
"""
max_processing_time = max_processing_time or np.inf
tic = default_timer()
best_Tour = setup(distance_matrix, None)[0]
i = 0
while i <= max_iterations:
intial_Tour = constructive_phase(distance_matrix, alpha, start_position)
optimized_Tour = local_search_algo(intial_Tour, distance_matrix,method='first improvement')[0]
f_best_tour = compute_permutation_distance(best_Tour, distance_matrix)
f_optimized_tour = compute_permutation_distance(optimized_Tour, distance_matrix)
if f_best_tour > f_optimized_tour:
best_Tour = optimized_Tour
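        # stop early if the processing-time budget has been exhausted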
if default_timer() - tic > max_processing_time:
i = max_iterations + 1
break
i += 1
return best_Tour, compute_permutation_distance(best_Tour, distance_matrix), (default_timer() - tic)
def constructive_phase(
distance_matrix: np.ndarray,
alpha: float,
start: int = 0
) -> List:
Tour = [start]
for _ in range(distance_matrix.shape[0] - 1):
min_index = get_maxmin_index_from_row(distance_matrix, Tour[-1], Tour, 'min')
max_index = get_maxmin_index_from_row(distance_matrix, Tour[-1], Tour, 'max')
f_min = distance_matrix[Tour[-1]][min_index]
f_max = distance_matrix[Tour[-1]][max_index]
        # List of Restricted Candidates (LRC)
LRC_index = np.array(range(len(distance_matrix[Tour[-1]])))
LRC_condition = distance_matrix[Tour[-1]] <= f_min + alpha*(f_max - f_min)
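        # exclude the current city itself from the candidate list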
LRC_condition[Tour[-1]] = False
LRC_index = LRC_index[LRC_condition]
new_city_index = np.random.choice(LRC_index, 1, replace=False)[0]
Tour.append(new_city_index)
return Tour
def get_maxmin_index_from_row(
distance_matrix: np.ndarray,
row: int,
previous_indexes: List,
type: str,
)-> int:
"""Get the minimum/maximum element in the adjusted row array from a distance matrix.
We adjust the row array in order to never get the "previous_indexes" list of indexes.
"""
distance_matrix = distance_matrix.copy()
arr = distance_matrix[row].astype(float)
aux_list = range(arr.shape[0])
aux_list_2 = []
for i in aux_list:
if i in previous_indexes:
aux_list_2.append(True)
else:
aux_list_2.append(False)
previous_indexes_bool = aux_list_2
if type == 'max':
arr[previous_indexes_bool] = -1
target_index = np.argmax(arr)
if type == 'min':
        arr[previous_indexes_bool] = np.inf
target_index = np.argmin(arr)
return target_index
def setup(
distance_matrix: np.ndarray, x0: Optional[List] = None
) -> Tuple[List[int], float]:
"""Return initial solution and its objective value
Parameters
----------
distance_matrix
Distance matrix of shape (n x n) with the (i, j) entry indicating the
distance from node i to j
x0
Permutation of nodes from 0 to n - 1 indicating the starting solution.
If not provided, a random list is created.
Returns
-------
x0
Permutation with initial solution. If ``x0`` was provided, it is the
same list
fx0
Objective value of x0
"""
if not x0:
n = distance_matrix.shape[0] # number of nodes
x0 = [0] + sample(range(1, n), n - 1) # ensure 0 is the first node
fx0 = compute_permutation_distance(x0, distance_matrix)
return x0, fx0
| [
"tsp_heuristics.heuristics.utils.get_tour_distance",
"numpy.random.choice",
"timeit.default_timer",
"numpy.argmax",
"tsp_heuristics.heuristics.local_search.local_search_algo",
"numpy.argmin"
] | [((730, 745), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (743, 745), False, 'from timeit import default_timer\n'), ((4124, 4173), 'tsp_heuristics.heuristics.utils.get_tour_distance', 'compute_permutation_distance', (['x0', 'distance_matrix'], {}), '(x0, distance_matrix)\n', (4152, 4173), True, 'from tsp_heuristics.heuristics.utils import get_tour_distance as compute_permutation_distance\n'), ((1050, 1106), 'tsp_heuristics.heuristics.utils.get_tour_distance', 'compute_permutation_distance', (['best_Tour', 'distance_matrix'], {}), '(best_Tour, distance_matrix)\n', (1078, 1106), True, 'from tsp_heuristics.heuristics.utils import get_tour_distance as compute_permutation_distance\n'), ((1134, 1195), 'tsp_heuristics.heuristics.utils.get_tour_distance', 'compute_permutation_distance', (['optimized_Tour', 'distance_matrix'], {}), '(optimized_Tour, distance_matrix)\n', (1162, 1195), True, 'from tsp_heuristics.heuristics.utils import get_tour_distance as compute_permutation_distance\n'), ((1451, 1507), 'tsp_heuristics.heuristics.utils.get_tour_distance', 'compute_permutation_distance', (['best_Tour', 'distance_matrix'], {}), '(best_Tour, distance_matrix)\n', (1479, 1507), True, 'from tsp_heuristics.heuristics.utils import get_tour_distance as compute_permutation_distance\n'), ((3185, 3199), 'numpy.argmax', 'np.argmax', (['arr'], {}), '(arr)\n', (3194, 3199), True, 'import numpy as np\n'), ((3289, 3303), 'numpy.argmin', 'np.argmin', (['arr'], {}), '(arr)\n', (3298, 3303), True, 'import numpy as np\n'), ((941, 1016), 'tsp_heuristics.heuristics.local_search.local_search_algo', 'local_search_algo', (['intial_Tour', 'distance_matrix'], {'method': '"""first improvement"""'}), "(intial_Tour, distance_matrix, method='first improvement')\n", (958, 1016), False, 'from tsp_heuristics.heuristics.local_search import local_search_algo\n'), ((1510, 1525), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (1523, 1525), False, 'from timeit import default_timer\n'), ((2337, 2382), 'numpy.random.choice', 'np.random.choice', (['LRC_index', '(1)'], {'replace': '(False)'}), '(LRC_index, 1, replace=False)\n', (2353, 2382), True, 'import numpy as np\n'), ((1307, 1322), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (1320, 1322), False, 'from timeit import default_timer\n')] |
#!/usr/bin/env python
"""
Show a VelocityPlot to interactively modify lines in an AbsSystem
"""
import sys
import pdb
import warnings
# Script to run XAbsSysGui from the command line or ipython
def main(*args, **kwargs):
""" Runs the XAbsSysGui on input files
"""
import argparse
parser = argparse.ArgumentParser(description='Parse for XAbsSys')
parser.add_argument("spec_file", type=str, help="Spectral file")
parser.add_argument("abssys_file", type=str, help="AbsSys file (JSON)")
parser.add_argument("-outfile", type=str, help="Output filename")
parser.add_argument("-llist", type=str, help="Name of LineList")
#parser.add_argument("-exten", type=int, help="FITS extension")
parser.add_argument("--specdb", help="Spectral file is a SPECDB database", action="store_true")
parser.add_argument("--group", type=str, help="SPECDB group name")
parser.add_argument("--un_norm", help="Spectrum is NOT normalized", action="store_true")
parser.add_argument("--chk_z", help="Check the z limits of your components? [default=False]",
action="store_true")
pargs = parser.parse_args()
from qtpy.QtWidgets import QApplication
from linetools.guis.xabssysgui import XAbsSysGui
from linetools.isgm.io import abssys_from_json
from IPython import embed
# Normalized?
norm = True
if pargs.un_norm:
norm = False
# Extension
#exten = (pargs.exten if hasattr(pargs, 'exten') else 0)
# Read spec keywords
rsp_kwargs = {}
# Line list
if pargs.llist is not None:
from linetools.lists.linelist import LineList
llist = LineList(pargs.llist)
else:
llist = None
# Read AbsSystem
from linetools.isgm.abssystem import GenericAbsSystem
abs_sys = GenericAbsSystem.from_json(pargs.abssys_file)#, chk_vel=False)
if not pargs.chk_z:
warnings.warn("Not checking your system's velocity limits. This is the Default but be so warned.")
abs_sys = GenericAbsSystem.from_json(pargs.abssys_file, chk_z=pargs.chk_z)
if len(abs_sys.list_of_abslines()) == 0:
warnings.warn("No absorption lines given. I hope you intended that to be the case!")
app = QApplication(sys.argv)
# Load spectrum using specdb?
if pargs.specdb:
# Instantiate
from specdb.specdb import SpecDB
from specdb import group_utils
sdb = SpecDB(db_file=pargs.spec_file)
# Grab spectrum
if pargs.group is not None:
groups = [pargs.group]
else:
groups = None
spec, meta = sdb.spectra_from_coord(abs_sys.coord, groups=groups)
if spec.nspec > 1:
group_utils.show_group_meta(meta, idkey=sdb.idkey, show_all_keys=False)
raise ValueError("Retreived more than 1 spectrum. Choose your GROUP with --group=")
spec_file = pargs.spec_file+'_{:s}'.format(meta['GROUP'][0])
else:
spec = pargs.spec_file
spec_file = pargs.spec_file
# Save spectrum filename to AbsSystem
abs_sys.spec_file = spec_file
# Run
gui = XAbsSysGui(spec, abs_sys, norm=norm, llist=llist, outfil=pargs.outfile)
gui.show()
app.exec_()
| [
"qtpy.QtWidgets.QApplication",
"argparse.ArgumentParser",
"linetools.isgm.abssystem.GenericAbsSystem.from_json",
"specdb.specdb.SpecDB",
"specdb.group_utils.show_group_meta",
"warnings.warn",
"linetools.lists.linelist.LineList",
"linetools.guis.xabssysgui.XAbsSysGui"
] | [((309, 365), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Parse for XAbsSys"""'}), "(description='Parse for XAbsSys')\n", (332, 365), False, 'import argparse\n'), ((1806, 1851), 'linetools.isgm.abssystem.GenericAbsSystem.from_json', 'GenericAbsSystem.from_json', (['pargs.abssys_file'], {}), '(pargs.abssys_file)\n', (1832, 1851), False, 'from linetools.isgm.abssystem import GenericAbsSystem\n'), ((2015, 2079), 'linetools.isgm.abssystem.GenericAbsSystem.from_json', 'GenericAbsSystem.from_json', (['pargs.abssys_file'], {'chk_z': 'pargs.chk_z'}), '(pargs.abssys_file, chk_z=pargs.chk_z)\n', (2041, 2079), False, 'from linetools.isgm.abssystem import GenericAbsSystem\n'), ((2230, 2252), 'qtpy.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (2242, 2252), False, 'from qtpy.QtWidgets import QApplication\n'), ((3117, 3188), 'linetools.guis.xabssysgui.XAbsSysGui', 'XAbsSysGui', (['spec', 'abs_sys'], {'norm': 'norm', 'llist': 'llist', 'outfil': 'pargs.outfile'}), '(spec, abs_sys, norm=norm, llist=llist, outfil=pargs.outfile)\n', (3127, 3188), False, 'from linetools.guis.xabssysgui import XAbsSysGui\n'), ((1659, 1680), 'linetools.lists.linelist.LineList', 'LineList', (['pargs.llist'], {}), '(pargs.llist)\n', (1667, 1680), False, 'from linetools.lists.linelist import LineList\n'), ((1901, 2010), 'warnings.warn', 'warnings.warn', (['"""Not checking your system\'s velocity limits. This is the Default but be so warned."""'], {}), '(\n "Not checking your system\'s velocity limits. This is the Default but be so warned."\n )\n', (1914, 2010), False, 'import warnings\n'), ((2133, 2223), 'warnings.warn', 'warnings.warn', (['"""No absorption lines given. I hope you intended that to be the case!"""'], {}), "(\n 'No absorption lines given. I hope you intended that to be the case!')\n", (2146, 2223), False, 'import warnings\n'), ((2425, 2456), 'specdb.specdb.SpecDB', 'SpecDB', ([], {'db_file': 'pargs.spec_file'}), '(db_file=pargs.spec_file)\n', (2431, 2456), False, 'from specdb.specdb import SpecDB\n'), ((2705, 2776), 'specdb.group_utils.show_group_meta', 'group_utils.show_group_meta', (['meta'], {'idkey': 'sdb.idkey', 'show_all_keys': '(False)'}), '(meta, idkey=sdb.idkey, show_all_keys=False)\n', (2732, 2776), False, 'from specdb import group_utils\n')] |
# injector.py
#
# <NAME> 2017
#
#
# Contains classes and methods for injection and recovery testing
#
#
import spectroseti.output
__author__ = 'nate'
import definitions as defs
import apf as apf
import apfdefinitions as apfdefs
#import output
import numpy as np
from tqdm import tqdm
import random
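# Peak-normalized Gaussian line profile (FWHM roughly 4 pixels) used to shape the injected laser signal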
gaussian_kernel_4 = np.array([0.02169646, 0.08608803, 0.2514724, 0.54092311, 0.85695116,
1., 0.85957485, 0.54424083, 0.25379, 0.0871478, 0.02203095])
class Injector:
fwhm = 4.2 # CHECK THIS
injected_lasers = None
injected = False
spectrum = None
spectrum_backup = None
def __init__(self):
pass
def setparams(self,fwhm=4.2,):
self.fwhm=fwhm
def set_spectrum(self,spectrum):
self.spectrum = spectrum
#deep copy spectrum for backup
#Typecheck spectrum here
# Generates flux levels and locations, stores in injected_lasers
def gen_lasers(self,per_order = 10, fluxes = [], autodetermine_flux = True):
if autodetermine_flux:
if self.spectrum:
                # compute flux levels from spectrum
# base this on order medians?
pass
else:
fluxes = [10, 20, 50, 100, 500, 1000, 5000]
acc=[]
#this could be cleaner
for i in range(79):
for j in range(per_order):
                # each entry is [order index, pixel position, injected flux level]
acc.append([i,random.randint(500,4300), random.choice(fluxes)])
self.injected_lasers = acc
def inject_lasers(self):
#check if the lasers have been generated
if self.injected_lasers and self.spectrum:
for laser in self.injected_lasers:
kernel = gen_laser_profile(power=laser[2])
#Update this so that the backup remains
low = laser[1]-6
high = laser[1]+6
print(self.spectrum.counts[laser[0],low:high])
self.spectrum.counts[laser[0],low:high] = \
self.spectrum.counts[laser[0],low:high] + kernel
print(self.spectrum.counts[laser[0], low:high])
print('Next')
self.injected=True
else:
print('Either spectrum is not set or lasers are not chosen')
# Test for recovery of simulated signals
def recovery(self, deblaze='both', bypass_threshold=True, repetitions=100, per_order=10,):
pass
# This should take deblazing parameters, repetitions
# Should run the target without any injection, collect positives, mask out these locations
    # Should also mask out the locations of stellar emission lines
# Should also mask out night sky lines?
# Run, record
# Inject, record injections
# Run, compare recovery, inc overlapping injections
# log to a file or to terminal
# How to speed this up? It will be very hard to run thousands of trials at a minute each
# One possibility is to inject the lasers at the end, pretending that it didn't affect deblazing?
# This would allow us to run a bunch of lasers after deblazing, and threshold have been determined.
# in fact it makes more sense to add lasers at this stage (post - deblazing) due to flattening
'''
So the flow is:
deblaze savitzky
opt: store (deep copy) spectrum
deblaze meanshift
run search
opt: run search on savitzky spectrum
'''
def gen_laser_profile(power=1.0):
kernel = np.zeros(12)
pixel_offset_weight = random.random()
# Assign most of the counts to one pixel, the rest next door
kernel[:-1] = kernel[:-1] + gaussian_kernel_4 * power * pixel_offset_weight
kernel[1:] = kernel[1:] + gaussian_kernel_4 * power * (1.0 - pixel_offset_weight)
return kernel | [
"random.choice",
"numpy.array",
"numpy.zeros",
"random.random",
"random.randint"
] | [((324, 459), 'numpy.array', 'np.array', (['[0.02169646, 0.08608803, 0.2514724, 0.54092311, 0.85695116, 1.0, 0.85957485,\n 0.54424083, 0.25379, 0.0871478, 0.02203095]'], {}), '([0.02169646, 0.08608803, 0.2514724, 0.54092311, 0.85695116, 1.0, \n 0.85957485, 0.54424083, 0.25379, 0.0871478, 0.02203095])\n', (332, 459), True, 'import numpy as np\n'), ((3598, 3610), 'numpy.zeros', 'np.zeros', (['(12)'], {}), '(12)\n', (3606, 3610), True, 'import numpy as np\n'), ((3637, 3652), 'random.random', 'random.random', ([], {}), '()\n', (3650, 3652), False, 'import random\n'), ((1474, 1499), 'random.randint', 'random.randint', (['(500)', '(4300)'], {}), '(500, 4300)\n', (1488, 1499), False, 'import random\n'), ((1500, 1521), 'random.choice', 'random.choice', (['fluxes'], {}), '(fluxes)\n', (1513, 1521), False, 'import random\n')] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
""" Example with use of pyqt read 3d image
"""
import json
from loguru import logger
import itertools
import numpy as np
import matplotlib.pyplot as plt
import io3d
import sed3
ann = {}
for item in itertools.product(['sliver07', '3Dircadb1'], ['data3d'], range(1, 2)):
print(item)
datap1 = io3d.read_dataset(*item)
improj = np.sum(datap1["data3d"], axis=1)
plt.imshow(improj)
pts = plt.ginput(n=2, show_clicks=True)
plt.close()
print(pts)
ann[item] = pts
| [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.ginput",
"matplotlib.pyplot.close",
"numpy.sum",
"io3d.read_dataset"
] | [((348, 372), 'io3d.read_dataset', 'io3d.read_dataset', (['*item'], {}), '(*item)\n', (365, 372), False, 'import io3d\n'), ((386, 418), 'numpy.sum', 'np.sum', (["datap1['data3d']"], {'axis': '(1)'}), "(datap1['data3d'], axis=1)\n", (392, 418), True, 'import numpy as np\n'), ((423, 441), 'matplotlib.pyplot.imshow', 'plt.imshow', (['improj'], {}), '(improj)\n', (433, 441), True, 'import matplotlib.pyplot as plt\n'), ((452, 485), 'matplotlib.pyplot.ginput', 'plt.ginput', ([], {'n': '(2)', 'show_clicks': '(True)'}), '(n=2, show_clicks=True)\n', (462, 485), True, 'import matplotlib.pyplot as plt\n'), ((490, 501), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (499, 501), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 2 20:34:47 2021
@author: <NAME>
Correction on Gaussian fit of PSF in non-paraxial case
Formula from Zhang, 2007: https://doi.org/10.1364/AO.46.001819
"""
import numpy as np
# Variables
n = 1
alpha = np.arcsin(0.5) / n
#%% Radial correction
# Computes sigma = factor * wavelength. Make dimensionless by dividing by wavelength
wavelength = 1
k = 2 * np.pi / wavelength
# Formula from Zhang2007 paper
# Dimensionless because of deviding by wavelength
# So it computes a factor according to sigma = factor * wavelength
def non_paraxial_sigma_radial(a, n, k):
# n refractive index
# k wavenumber
# a maximum half angle of light cone
fraction = (4 - 7 * np.cos(a)**(3/2) + 3 * np.cos(a)**(7/2)) / (7 * (1 - np.cos(a)**(3/2)))
sigma = fraction**(-0.5) / (n * k)
return sigma
factor_radial = non_paraxial_sigma_radial(alpha, n, k)
print(factor_radial)
#%% Axial correction
# uses eq. from table 3 of same paper Zhang 2007
def non_paraxial_sigma_axial(a, n, k):
cos = np.cos(a)
numerator = 5 * np.sqrt(7) * (1 - cos**1.5)
denominator = np.sqrt(6) * n * k * (4 * cos**5 - 25 * cos**3.5 + 42 * cos**2.5 - 25 * cos**1.5 + 4)**.5
sigma = numerator / denominator
return sigma
factor_axial = non_paraxial_sigma_axial(alpha, n, k)
# We want to convert to the Rayleigh range
sigma_axial = factor_axial * 820
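# 820 is presumably the wavelength in nm, so sigma_axial is expressed in nm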
rayleigh = sigma_axial * np.sqrt(2 * np.log(2))
| [
"numpy.sqrt",
"numpy.log",
"numpy.arcsin",
"numpy.cos"
] | [((253, 267), 'numpy.arcsin', 'np.arcsin', (['(0.5)'], {}), '(0.5)\n', (262, 267), True, 'import numpy as np\n'), ((1060, 1069), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (1066, 1069), True, 'import numpy as np\n'), ((1095, 1105), 'numpy.sqrt', 'np.sqrt', (['(7)'], {}), '(7)\n', (1102, 1105), True, 'import numpy as np\n'), ((1469, 1478), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1475, 1478), True, 'import numpy as np\n'), ((1141, 1151), 'numpy.sqrt', 'np.sqrt', (['(6)'], {}), '(6)\n', (1148, 1151), True, 'import numpy as np\n'), ((755, 764), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (761, 764), True, 'import numpy as np\n'), ((786, 795), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (792, 795), True, 'import numpy as np\n'), ((732, 741), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (738, 741), True, 'import numpy as np\n')] |
import numba as nb
import numpy as np
def demean_embs(id2pointer: nb.typed.Dict, embeddings: np.ndarray) -> np.ndarray:
"""
Aggregate word embeddings by averaging on phrase-level.
:param id2pointer nb.typed.dict: token id to word embedding pointers
:param embeddings np.ndarray: word embedding matrix
"""
N = len(id2pointer)
embs = []
for i in range(N):
emb = embeddings[id2pointer[i]]
if emb.shape[0] > 1:
emb = emb.mean(0, keepdims=True)
embs.append(emb)
embs = np.vstack(embs)
embs /= np.linalg.norm(embs, axis=1, ord=2, keepdims=True)
return embs
@nb.njit(parallel=True)
def preranking(src: np.ndarray, trg: np.ndarray, k: int) -> np.ndarray:
"""
Prerank using cosine similarity of (averaged) word embeddings.
    L2-normalization is already performed (vectorized) in 'demean_embs'.
:param src np.ndarray: demeaned source language phrase embeddings
:param trg np.ndarray: demeaned target language phrase embeddings
:param k int: top-k candidates to retain
"""
N = src.shape[0]
argsort = np.empty((N, k), dtype=nb.int64)
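    # rows are L2-normalized, so the dot product is cosine similarity; keep the indices of the k most similar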
for i in nb.prange(N):
argsort[i] = (src[i] @ trg.T).argsort()[-k:]
return argsort
| [
"numba.njit",
"numpy.empty",
"numpy.vstack",
"numpy.linalg.norm",
"numba.prange"
] | [((635, 657), 'numba.njit', 'nb.njit', ([], {'parallel': '(True)'}), '(parallel=True)\n', (642, 657), True, 'import numba as nb\n'), ((538, 553), 'numpy.vstack', 'np.vstack', (['embs'], {}), '(embs)\n', (547, 553), True, 'import numpy as np\n'), ((566, 616), 'numpy.linalg.norm', 'np.linalg.norm', (['embs'], {'axis': '(1)', 'ord': '(2)', 'keepdims': '(True)'}), '(embs, axis=1, ord=2, keepdims=True)\n', (580, 616), True, 'import numpy as np\n'), ((1102, 1134), 'numpy.empty', 'np.empty', (['(N, k)'], {'dtype': 'nb.int64'}), '((N, k), dtype=nb.int64)\n', (1110, 1134), True, 'import numpy as np\n'), ((1148, 1160), 'numba.prange', 'nb.prange', (['N'], {}), '(N)\n', (1157, 1160), True, 'import numba as nb\n')] |
from emails import full_path
import pandas as pd
from send_emails import login_pitt, open_outlook, new_message
id_df = pd.read_csv('/users/madke/downloads/AAPECS Recontacts - Sheet3 (1).csv')
full_info = pd.read_csv('/users/madke/downloads/AAPECS Recontacts - Sheet1 (1).csv')
ids = id_df.merge(full_info, left_on='ID', right_on='ID', how='left')
print(ids.head())
subject = 'Mental Health and Covid-19 Follow-up'
def main(df):
results = full_path(df)
driver = login_pitt()
driver = open_outlook(driver)
for index, row in results.iterrows():
driver = new_message(driver, row['email'], row['template'], subject)
if __name__ == "__main__":
main(ids)
| [
"send_emails.open_outlook",
"pandas.read_csv",
"emails.full_path",
"send_emails.new_message",
"send_emails.login_pitt"
] | [((120, 192), 'pandas.read_csv', 'pd.read_csv', (['"""/users/madke/downloads/AAPECS Recontacts - Sheet3 (1).csv"""'], {}), "('/users/madke/downloads/AAPECS Recontacts - Sheet3 (1).csv')\n", (131, 192), True, 'import pandas as pd\n'), ((205, 277), 'pandas.read_csv', 'pd.read_csv', (['"""/users/madke/downloads/AAPECS Recontacts - Sheet1 (1).csv"""'], {}), "('/users/madke/downloads/AAPECS Recontacts - Sheet1 (1).csv')\n", (216, 277), True, 'import pandas as pd\n'), ((445, 458), 'emails.full_path', 'full_path', (['df'], {}), '(df)\n', (454, 458), False, 'from emails import full_path\n'), ((472, 484), 'send_emails.login_pitt', 'login_pitt', ([], {}), '()\n', (482, 484), False, 'from send_emails import login_pitt, open_outlook, new_message\n'), ((498, 518), 'send_emails.open_outlook', 'open_outlook', (['driver'], {}), '(driver)\n', (510, 518), False, 'from send_emails import login_pitt, open_outlook, new_message\n'), ((578, 637), 'send_emails.new_message', 'new_message', (['driver', "row['email']", "row['template']", 'subject'], {}), "(driver, row['email'], row['template'], subject)\n", (589, 637), False, 'from send_emails import login_pitt, open_outlook, new_message\n')] |
from typing import Optional
import asyncio
import pytest
from hypothesis import given
from hypothesis.strategies import builds, composite, data, integers, lists, none, one_of
from pipelines import Job, MultiQueue, Pipe
# Test build pipeline list
# Test pipeline list
# Property: a strategy builds a job tree, and the number of pipes built from it
# should match the tree structure (see _number_of_pipes below)
# Pipe is number of jobs - 1
# Assumption no orphan nodes
#
class MockJob(Job):
async def start(
self, in_q: Optional[asyncio.Queue], out_q: Optional[MultiQueue]
) -> None:
print("Running")
@composite
def job_tree(draw):
parent = draw(builds(MockJob))
root = parent
depth = draw(integers(min_value=1, max_value=10))
for i in range(depth):
children = draw(lists(builds(MockJob)))
for c in children:
parent.set_downstream(c)
if children:
parent = children[0]
return root
def _number_of_pipes(job):
"""
This should be number of jobs plus
all end nodes.
"""
pipe_count = 0
children = job.children
while children:
pipe_count += 1
child = children.pop()
if child.children == []:
pipe_count += 1
children.extend(child.children)
if pipe_count == 0:
return 1
return pipe_count
@pytest.mark.asyncio
@given(job_tree())
async def test_build_test_pipeline(s):
pipe_list = s._build_ordered_pipe_list(s, None, [])
assert len(pipe_list) == _number_of_pipes(s)
@composite
def pipe(draw):
job = draw(builds(MockJob))
queue = draw(one_of(builds(asyncio.Queue), none()))
multi_queue = draw(builds(MultiQueue))
return Pipe(parent=job, queue=queue, subscribed_queues=multi_queue)
def _num_tasks(pipes):
task_count = 0
for i, p in enumerate(pipes):
if i == 0:
task_count += 1
continue
if not p.queue:
continue
task_count += 1
return task_count
def _num_queues(pipes):
queue_count = 0
for i, p in enumerate(pipes):
if i == 0:
continue
if p.queue:
queue_count += 1
for q in p.subscribed_queues.queues:
queue_count += 1
return queue_count
# This should test queue length and task length
# Which should match worker length
@pytest.mark.asyncio
@given(lists(pipe(), min_size=1), integers(min_value=0), data())
async def test__build_tasks(pipes, workers, data):
job = data.draw(builds(MockJob))
job.workers = workers
tasks, queues = job._build_tasks(pipes)
assert len(tasks) == _num_tasks(pipes)
assert len(queues) == _num_queues(pipes)
@pytest.mark.asyncio
async def test_single_job():
j = MockJob()
await j.execute_jobs()
# Test build tasks
| [
"hypothesis.strategies.builds",
"hypothesis.strategies.none",
"hypothesis.strategies.integers",
"hypothesis.strategies.data",
"pipelines.Pipe"
] | [((1671, 1731), 'pipelines.Pipe', 'Pipe', ([], {'parent': 'job', 'queue': 'queue', 'subscribed_queues': 'multi_queue'}), '(parent=job, queue=queue, subscribed_queues=multi_queue)\n', (1675, 1731), False, 'from pipelines import Job, MultiQueue, Pipe\n'), ((2384, 2405), 'hypothesis.strategies.integers', 'integers', ([], {'min_value': '(0)'}), '(min_value=0)\n', (2392, 2405), False, 'from hypothesis.strategies import builds, composite, data, integers, lists, none, one_of\n'), ((2407, 2413), 'hypothesis.strategies.data', 'data', ([], {}), '()\n', (2411, 2413), False, 'from hypothesis.strategies import builds, composite, data, integers, lists, none, one_of\n'), ((626, 641), 'hypothesis.strategies.builds', 'builds', (['MockJob'], {}), '(MockJob)\n', (632, 641), False, 'from hypothesis.strategies import builds, composite, data, integers, lists, none, one_of\n'), ((678, 713), 'hypothesis.strategies.integers', 'integers', ([], {'min_value': '(1)', 'max_value': '(10)'}), '(min_value=1, max_value=10)\n', (686, 713), False, 'from hypothesis.strategies import builds, composite, data, integers, lists, none, one_of\n'), ((1544, 1559), 'hypothesis.strategies.builds', 'builds', (['MockJob'], {}), '(MockJob)\n', (1550, 1559), False, 'from hypothesis.strategies import builds, composite, data, integers, lists, none, one_of\n'), ((1640, 1658), 'hypothesis.strategies.builds', 'builds', (['MultiQueue'], {}), '(MultiQueue)\n', (1646, 1658), False, 'from hypothesis.strategies import builds, composite, data, integers, lists, none, one_of\n'), ((2486, 2501), 'hypothesis.strategies.builds', 'builds', (['MockJob'], {}), '(MockJob)\n', (2492, 2501), False, 'from hypothesis.strategies import builds, composite, data, integers, lists, none, one_of\n'), ((1585, 1606), 'hypothesis.strategies.builds', 'builds', (['asyncio.Queue'], {}), '(asyncio.Queue)\n', (1591, 1606), False, 'from hypothesis.strategies import builds, composite, data, integers, lists, none, one_of\n'), ((1608, 1614), 'hypothesis.strategies.none', 'none', ([], {}), '()\n', (1612, 1614), False, 'from hypothesis.strategies import builds, composite, data, integers, lists, none, one_of\n'), ((772, 787), 'hypothesis.strategies.builds', 'builds', (['MockJob'], {}), '(MockJob)\n', (778, 787), False, 'from hypothesis.strategies import builds, composite, data, integers, lists, none, one_of\n')] |
from rest_framework.serializers import ModelSerializer
from rest_framework import serializers
from ..models import Lecture
class LectureCreateSerializer(ModelSerializer):
class Meta:
model = Lecture
fields = ['lect_id', 'course_id', 'lect_no', 'start_time', 'end_time',
'comment', 'updated', 'created', 'classroom']
class LectureListSerializer(ModelSerializer):
class Meta:
model = Lecture
fields = ['lect_id', 'course_id', 'lect_no', 'start_time', 'end_time',
'isAttendanceTaken', 'comment', 'updated', 'created',
'classroom']
class CalendarDatesSerializer(serializers.Serializer):
lect_id = serializers.CharField(read_only=True)
course_id = serializers.CharField(read_only=True)
lect_no = serializers.CharField(read_only=True)
start_time = serializers.CharField(read_only=True)
end_time = serializers.CharField(read_only=True)
comment = serializers.CharField(read_only=True)
updated = serializers.CharField(read_only=True)
created = serializers.CharField(read_only=True)
    is_present = serializers.BooleanField(default=False)
"rest_framework.serializers.CharField",
"rest_framework.serializers.BooleanField"
] | [((699, 736), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (720, 736), False, 'from rest_framework import serializers\n'), ((753, 790), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (774, 790), False, 'from rest_framework import serializers\n'), ((805, 842), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (826, 842), False, 'from rest_framework import serializers\n'), ((860, 897), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (881, 897), False, 'from rest_framework import serializers\n'), ((913, 950), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (934, 950), False, 'from rest_framework import serializers\n'), ((965, 1002), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (986, 1002), False, 'from rest_framework import serializers\n'), ((1017, 1054), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (1038, 1054), False, 'from rest_framework import serializers\n'), ((1069, 1106), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (1090, 1106), False, 'from rest_framework import serializers\n'), ((1122, 1161), 'rest_framework.serializers.BooleanField', 'serializers.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1146, 1161), False, 'from rest_framework import serializers\n')] |
#!/usr/bin/env python
# Copyright (c) <NAME>.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import pandas as pd
import os
with open("data/pwkp.test.orig") as f:
original_content = f.readlines()
original_content = [t.strip() for t in original_content]
# original_content_low = [sent.lower() for sent in original_content]
# PWKP
dataframe = pd.read_excel("data/pwkp_data.ods", engine="odf",header=[0, 1],)
#print(dataframe.columns)
current_system = ""
for i,row in dataframe.iterrows():
system = row["System", "Unnamed: 0_level_1"]
if type(system) == str:
current_system = system.strip().split(" (")[0]
dataframe.loc[i, [["System", "Unnamed: 0_level_1"]]] = current_system
n = 0
m = 0
dataframe["original"] = ""
dataframe["simplification"] = ""
systems = sorted(list(set(dataframe["System", "Unnamed: 0_level_1"])))
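# Align each system's tokenized output with the original sentences, row by row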
for system in systems:
if os.path.exists("data/"+system+".tok"):
with open("data/"+system+".tok") as f:
content = f.readlines()
current_original_content = original_content
elif os.path.exists("data/"+system+".tok.low"):
with open("data/"+system+".tok.low") as f:
content = f.readlines()
current_original_content = original_content # todo: _low add lowered sentences
else:
current_original_content = None
print("no data found for system", system)
continue
content = [t.strip() for t in content if t != "\n"]
if len(content) != len(dataframe[dataframe["System", "Unnamed: 0_level_1"] == system]):
print(system, len(content),len(dataframe[dataframe["System", "Unnamed: 0_level_1"] == system]))
for i, index in enumerate(dataframe[dataframe["System", "Unnamed: 0_level_1"] == system].index):
dataframe.loc[index, "simplification"] = content[i].strip()
dataframe.loc[index, "original"] = current_original_content[i].strip()
dataframe.to_csv("data/pwkp_with_text.csv")
def invert_rating(rating):
if rating == 3:
return 1
elif rating == 2:
return 2
elif rating == 1:
return 3
else:
return None
dataframe = pd.read_csv("data/pwkp_with_text.csv", header=[0,1])
new_dataframe = pd.DataFrame(columns = ["original", "simplification", "sentence_id", "sample_id", "system_name", "aspect", "rater_id", "rating"])
annotator_column = sorted(list(set([name for name,level in dataframe.columns if "Annotator" in name])))
aspect_mapping = {"a": "fluency", "d": "structural_simplicity", "e": "meaning"} # "b": "information_gain", "c": "information_loss",
# "b": "information_gain", "c": "information_loss",
# G: Qa: Is the output grammatical?
# MPa: Qb: Does the output add information, compared to the input?
# MPb: Qc: Does the output remove important information, compared to the input?
# S: Qd: Is the output simpler than the input, ignoring the complexity of the words?
j, i = 0, 0
n = 0
for i, row in dataframe.iterrows():
if type(row["original","Unnamed: 23_level_1"]) == str and type(row["simplification","Unnamed: 24_level_1"]) == str and \
not pd.isna(row["original","Unnamed: 23_level_1"]) and not pd.isna(row["simplification", "Unnamed: 24_level_1"]):
for annotator in annotator_column:
for rating in aspect_mapping:
if rating == "e":
rating_score = (invert_rating(row[annotator, "c"])+invert_rating(row[annotator, "b"]))/2
else:
rating_score = row[annotator, rating]
new_dataframe.loc[j] = [row["original","Unnamed: 23_level_1"], row["simplification", "Unnamed: 24_level_1"],
row["Sentence", "Unnamed: 1_level_1"], str(row["Sentence", "Unnamed: 1_level_1"])+"_"+row["System", "Unnamed: 0_level_1"],
row["System", "Unnamed: 0_level_1"], aspect_mapping[rating], annotator, rating_score]
j +=1
if row["original","Unnamed: 23_level_1"] == row["simplification", "Unnamed: 24_level_1"]:
n += 1
print(i,j,n)
new_dataframe.to_csv("data/pwkp_ratings.csv")
| [
"os.path.exists",
"pandas.read_csv",
"pandas.read_excel",
"pandas.DataFrame",
"pandas.isna"
] | [((454, 518), 'pandas.read_excel', 'pd.read_excel', (['"""data/pwkp_data.ods"""'], {'engine': '"""odf"""', 'header': '[0, 1]'}), "('data/pwkp_data.ods', engine='odf', header=[0, 1])\n", (467, 518), True, 'import pandas as pd\n'), ((2239, 2292), 'pandas.read_csv', 'pd.read_csv', (['"""data/pwkp_with_text.csv"""'], {'header': '[0, 1]'}), "('data/pwkp_with_text.csv', header=[0, 1])\n", (2250, 2292), True, 'import pandas as pd\n'), ((2308, 2439), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['original', 'simplification', 'sentence_id', 'sample_id', 'system_name',\n 'aspect', 'rater_id', 'rating']"}), "(columns=['original', 'simplification', 'sentence_id',\n 'sample_id', 'system_name', 'aspect', 'rater_id', 'rating'])\n", (2320, 2439), True, 'import pandas as pd\n'), ((980, 1021), 'os.path.exists', 'os.path.exists', (["('data/' + system + '.tok')"], {}), "('data/' + system + '.tok')\n", (994, 1021), False, 'import os\n'), ((1167, 1212), 'os.path.exists', 'os.path.exists', (["('data/' + system + '.tok.low')"], {}), "('data/' + system + '.tok.low')\n", (1181, 1212), False, 'import os\n'), ((3223, 3270), 'pandas.isna', 'pd.isna', (["row['original', 'Unnamed: 23_level_1']"], {}), "(row['original', 'Unnamed: 23_level_1'])\n", (3230, 3270), True, 'import pandas as pd\n'), ((3278, 3331), 'pandas.isna', 'pd.isna', (["row['simplification', 'Unnamed: 24_level_1']"], {}), "(row['simplification', 'Unnamed: 24_level_1'])\n", (3285, 3331), True, 'import pandas as pd\n')] |
from django.contrib.postgres.fields import ArrayField
from django.db import connection
from django.db.models import FloatField
from django.utils.html import escape
from postgres_stats.aggregates import Percentile
from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from core.models import (
Data,
DataLabel,
DataPrediction,
IRRLog,
Label,
Model,
Project,
ProjectPermissions,
TrainingSet,
)
from core.permissions import IsAdminOrCreator
from core.utils.util import irr_heatmap_data, perc_agreement_table_data
from core.utils.utils_model import cohens_kappa, fleiss_kappa
@api_view(["GET"])
@permission_classes((IsAdminOrCreator,))
def label_distribution(request, project_pk):
"""This function finds and returns the number of each label per user. This is used
by a graph on the front end admin page.
Args:
request: The POST request
project_pk: Primary key of the project
Returns:
a dictionary of the amount of labels per person per type
"""
project = Project.objects.get(pk=project_pk)
labels = [label for label in project.labels.all()]
users = []
users.append(project.creator)
users.extend([perm.profile for perm in project.projectpermissions_set.all()])
# Sort labels by the count
labels.sort(
key=lambda label: DataLabel.objects.filter(label=label).count(), reverse=True
)
dataset = []
    # Chart the five most frequent labels individually; remaining labels are grouped as "other labels" below
for label in labels[0:5]:
temp_values = []
for u in users:
label_count = DataLabel.objects.filter(profile=u, label=label).count()
if label_count > 0:
temp_values.append({"x": u.__str__(), "y": label_count})
if temp_values != []:
dataset.append({"key": label.name, "values": temp_values})
other_values = []
for u in users:
other_count = 0
for label in labels[5:]:
other_count += DataLabel.objects.filter(profile=u, label=label).count()
if other_count > 0:
other_values.append({"x": u.__str__(), "y": other_count})
if other_values != []:
dataset.append({"key": "other labels", "values": other_values})
return Response(dataset)
@api_view(["GET"])
@permission_classes((IsAdminOrCreator,))
def label_timing(request, project_pk):
"""This function finds and returns the requested label time metrics. This is used by
the graphs on the admin page to show how long each labeler is taking.
Args:
request: The POST request
project_pk: Primary key of the project
Returns:
a dictionary of label timing information.
"""
project = Project.objects.get(pk=project_pk)
users = []
users.append(project.creator)
users.extend([perm.profile for perm in project.projectpermissions_set.all()])
dataset = []
yDomain = 0
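    # For each labeler, compute the 5th/25th/50th/75th/95th percentiles of time_to_label for a box plot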
for u in users:
result = DataLabel.objects.filter(
data__project=project_pk, profile=u
).aggregate(
quartiles=Percentile(
"time_to_label",
[0.05, 0.25, 0.5, 0.75, 0.95],
continuous=False,
output_field=ArrayField(FloatField()),
)
)
if result["quartiles"]:
if result["quartiles"][4] > yDomain:
yDomain = result["quartiles"][4] + 10
temp = {
"label": u.__str__(),
"values": {
"Q1": result["quartiles"][1],
"Q2": result["quartiles"][2],
"Q3": result["quartiles"][3],
"whisker_low": result["quartiles"][0],
"whisker_high": result["quartiles"][4],
},
}
dataset.append(temp)
return Response({"data": dataset, "yDomain": yDomain})
@api_view(["GET"])
@permission_classes((IsAdminOrCreator,))
def model_metrics(request, project_pk):
"""This function finds and returns the requested metrics.
    This is used by the graphs on the front end admin page.
Args:
request: The POST request
project_pk: Primary key of the project
Returns:
a dictionary of model metric information
"""
metric = request.GET.get("metric", "accuracy")
project = Project.objects.get(pk=project_pk)
models = Model.objects.filter(project=project).order_by("training_set__set_number")
if metric == "accuracy":
values = []
for model in models:
values.append({"x": model.training_set.set_number, "y": model.cv_accuracy})
dataset = [{"key": "Accuracy", "values": values}]
else:
labels = {str(label.pk): label.name for label in project.labels.all()}
dataset = []
for label in labels:
values = []
for model in models:
current_metric = model.cv_metrics[metric][label]
values.append({"x": model.training_set.set_number, "y": current_metric})
dataset.append({"key": labels[label], "values": values})
return Response(dataset)
@api_view(["GET"])
@permission_classes((IsAdminOrCreator,))
def data_coded_table(request, project_pk):
"""This returns the labeled data.
Args:
request: The POST request
project_pk: Primary key of the project
Returns:
data: a list of data information
"""
project = Project.objects.get(pk=project_pk)
data_objs = DataLabel.objects.filter(data__project=project, data__irr_ind=False)
data = []
for d in data_objs:
temp = {
"Text": escape(d.data.text),
"Label": d.label.name,
"Coder": d.profile.__str__(),
}
data.append(temp)
return Response({"data": data})
@api_view(["GET"])
@permission_classes((IsAdminOrCreator,))
def data_predicted_table(request, project_pk):
"""This returns the predictions for the unlabeled data.
Args:
request: The POST request
project_pk: Primary key of the project
Returns:
data: a list of data information
"""
project = Project.objects.get(pk=project_pk)
previous_run = project.get_current_training_set().set_number - 1
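    # For each datum, select the label with the maximum predicted probability from the previous training run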
sql = """
SELECT d.{data_text_col}, l.{label_name_col}, dp.{pred_prob_col}
FROM (
SELECT {pred_data_id_col}, MAX({pred_prob_col}) AS max_prob
FROM {pred_table}
GROUP BY {pred_data_id_col}
) as tmp
LEFT JOIN {pred_table} as dp
ON dp.{pred_data_id_col} = tmp.{pred_data_id_col} AND dp.{pred_prob_col} = tmp.max_prob
LEFT JOIN {label_table} as l
ON l.{label_pk_col} = dp.{pred_label_id_col}
LEFT JOIN {data_table} as d
ON d.{data_pk_col} = dp.{pred_data_id_col}
LEFT JOIN {model_table} as m
ON m.{model_pk_col} = dp.{pred_model_id_col}
LEFT JOIN {trainingset_table} as ts
ON ts.{trainingset_pk_col} = m.{model_trainingset_id_col}
WHERE ts.{trainingset_setnumber_col} = {previous_run} AND d.{data_project_id_col} = {project_pk}
""".format(
data_text_col=Data._meta.get_field("text").column,
label_name_col=Label._meta.get_field("name").column,
pred_prob_col=DataPrediction._meta.get_field("predicted_probability").column,
pred_data_id_col=DataPrediction._meta.get_field("data").column,
pred_table=DataPrediction._meta.db_table,
label_table=Label._meta.db_table,
label_pk_col=Label._meta.pk.name,
pred_label_id_col=DataPrediction._meta.get_field("label").column,
data_table=Data._meta.db_table,
data_pk_col=Data._meta.pk.name,
model_table=Model._meta.db_table,
model_pk_col=Model._meta.pk.name,
pred_model_id_col=DataPrediction._meta.get_field("model").column,
trainingset_table=TrainingSet._meta.db_table,
trainingset_pk_col=TrainingSet._meta.pk.name,
model_trainingset_id_col=Model._meta.get_field("training_set").column,
trainingset_setnumber_col=TrainingSet._meta.get_field("set_number").column,
previous_run=previous_run,
data_project_id_col=Data._meta.get_field("project").column,
project_pk=project.pk,
)
with connection.cursor() as c:
c.execute(sql)
data_objs = c.fetchall()
data = []
for d in data_objs:
temp = {"Text": escape(d[0]), "Label": d[1], "Probability": d[2]}
data.append(temp)
return Response({"data": data})
@api_view(["GET"])
@permission_classes((IsAdminOrCreator,))
def get_irr_metrics(request, project_pk):
"""This function takes the current coded IRR and calculates several reliability
metrics.
Args:
request: The POST request
project_pk: Primary key of the project
Returns:
{}
"""
# need to take the IRRLog and pull out exactly the max_labelers amount
# of labels for each datum
project = Project.objects.get(pk=project_pk)
try:
if project.num_users_irr > 2:
kappa, perc_agreement = fleiss_kappa(project)
else:
kappa, perc_agreement = cohens_kappa(project)
kappa = round(kappa, 3)
perc_agreement = str(round(perc_agreement, 5) * 100) + "%"
except ValueError:
kappa = "No irr data processed"
perc_agreement = "No irr data processed"
return Response({"kappa": kappa, "percent agreement": perc_agreement})
@api_view(["GET"])
@permission_classes((IsAdminOrCreator,))
def perc_agree_table(request, project_pk):
"""Finds the percent agreement between each pair of coders to be displayed on the
IRR page as a table."""
project = Project.objects.get(pk=project_pk)
irr_data = set(
IRRLog.objects.filter(data__project=project).values_list("data", flat=True)
)
if len(irr_data) == 0:
return Response({"data": []})
user_agree = perc_agreement_table_data(project)
return Response({"data": user_agree})
@api_view(["GET"])
@permission_classes((IsAdminOrCreator,))
def heat_map_data(request, project_pk):
"""Calculates the data for the heat map of irr data and returns the correct one for
the pair of coders given.
Args:
request: the GET request with the pk of the two users
project_pk: the Primary key of the project
Returns:
a list of dictionaries of form {label1, label2, count}
"""
project = Project.objects.get(pk=project_pk)
heatmap_data = irr_heatmap_data(project)
labels = list(
Label.objects.all().filter(project=project).values_list("name", flat=True)
)
labels.append("Skip")
coders = []
profiles = ProjectPermissions.objects.filter(project=project)
coders.append({"name": str(project.creator), "pk": project.creator.pk})
for p in profiles:
coders.append({"name": str(p.profile), "pk": p.profile.pk})
return Response({"data": heatmap_data, "labels": labels, "coders": coders})
| [
"core.utils.utils_model.cohens_kappa",
"core.models.Label.objects.all",
"core.models.Label._meta.get_field",
"core.models.Model._meta.get_field",
"django.db.models.FloatField",
"core.models.Data._meta.get_field",
"rest_framework.decorators.api_view",
"core.models.DataLabel.objects.filter",
"core.utils.utils_model.fleiss_kappa",
"core.models.Project.objects.get",
"core.models.TrainingSet._meta.get_field",
"core.utils.util.perc_agreement_table_data",
"django.utils.html.escape",
"core.models.ProjectPermissions.objects.filter",
"rest_framework.decorators.permission_classes",
"core.models.Model.objects.filter",
"rest_framework.response.Response",
"django.db.connection.cursor",
"core.models.DataPrediction._meta.get_field",
"core.models.IRRLog.objects.filter",
"core.utils.util.irr_heatmap_data"
] | [((670, 687), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (678, 687), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((689, 728), 'rest_framework.decorators.permission_classes', 'permission_classes', (['(IsAdminOrCreator,)'], {}), '((IsAdminOrCreator,))\n', (707, 728), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((2282, 2299), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (2290, 2299), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((2301, 2340), 'rest_framework.decorators.permission_classes', 'permission_classes', (['(IsAdminOrCreator,)'], {}), '((IsAdminOrCreator,))\n', (2319, 2340), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((3901, 3918), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (3909, 3918), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((3920, 3959), 'rest_framework.decorators.permission_classes', 'permission_classes', (['(IsAdminOrCreator,)'], {}), '((IsAdminOrCreator,))\n', (3938, 3959), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((5155, 5172), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (5163, 5172), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((5174, 5213), 'rest_framework.decorators.permission_classes', 'permission_classes', (['(IsAdminOrCreator,)'], {}), '((IsAdminOrCreator,))\n', (5192, 5213), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((5834, 5851), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (5842, 5851), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((5853, 5892), 'rest_framework.decorators.permission_classes', 'permission_classes', (['(IsAdminOrCreator,)'], {}), '((IsAdminOrCreator,))\n', (5871, 5892), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((8507, 8524), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (8515, 8524), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((8526, 8565), 'rest_framework.decorators.permission_classes', 'permission_classes', (['(IsAdminOrCreator,)'], {}), '((IsAdminOrCreator,))\n', (8544, 8565), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((9452, 9469), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (9460, 9469), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((9471, 9510), 'rest_framework.decorators.permission_classes', 'permission_classes', (['(IsAdminOrCreator,)'], {}), '((IsAdminOrCreator,))\n', (9489, 9510), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((9991, 10008), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (9999, 10008), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((10010, 10049), 'rest_framework.decorators.permission_classes', 'permission_classes', (['(IsAdminOrCreator,)'], {}), '((IsAdminOrCreator,))\n', (10028, 10049), False, 'from rest_framework.decorators import api_view, permission_classes\n'), ((1097, 1131), 'core.models.Project.objects.get', 'Project.objects.get', ([], {'pk': 'project_pk'}), 
'(pk=project_pk)\n', (1116, 1131), False, 'from core.models import Data, DataLabel, DataPrediction, IRRLog, Label, Model, Project, ProjectPermissions, TrainingSet\n'), ((2261, 2278), 'rest_framework.response.Response', 'Response', (['dataset'], {}), '(dataset)\n', (2269, 2278), False, 'from rest_framework.response import Response\n'), ((2720, 2754), 'core.models.Project.objects.get', 'Project.objects.get', ([], {'pk': 'project_pk'}), '(pk=project_pk)\n', (2739, 2754), False, 'from core.models import Data, DataLabel, DataPrediction, IRRLog, Label, Model, Project, ProjectPermissions, TrainingSet\n'), ((3850, 3897), 'rest_framework.response.Response', 'Response', (["{'data': dataset, 'yDomain': yDomain}"], {}), "({'data': dataset, 'yDomain': yDomain})\n", (3858, 3897), False, 'from rest_framework.response import Response\n'), ((4354, 4388), 'core.models.Project.objects.get', 'Project.objects.get', ([], {'pk': 'project_pk'}), '(pk=project_pk)\n', (4373, 4388), False, 'from core.models import Data, DataLabel, DataPrediction, IRRLog, Label, Model, Project, ProjectPermissions, TrainingSet\n'), ((5134, 5151), 'rest_framework.response.Response', 'Response', (['dataset'], {}), '(dataset)\n', (5142, 5151), False, 'from rest_framework.response import Response\n'), ((5463, 5497), 'core.models.Project.objects.get', 'Project.objects.get', ([], {'pk': 'project_pk'}), '(pk=project_pk)\n', (5482, 5497), False, 'from core.models import Data, DataLabel, DataPrediction, IRRLog, Label, Model, Project, ProjectPermissions, TrainingSet\n'), ((5515, 5583), 'core.models.DataLabel.objects.filter', 'DataLabel.objects.filter', ([], {'data__project': 'project', 'data__irr_ind': '(False)'}), '(data__project=project, data__irr_ind=False)\n', (5539, 5583), False, 'from core.models import Data, DataLabel, DataPrediction, IRRLog, Label, Model, Project, ProjectPermissions, TrainingSet\n'), ((5806, 5830), 'rest_framework.response.Response', 'Response', (["{'data': data}"], {}), "({'data': data})\n", (5814, 5830), False, 'from rest_framework.response import Response\n'), ((6168, 6202), 'core.models.Project.objects.get', 'Project.objects.get', ([], {'pk': 'project_pk'}), '(pk=project_pk)\n', (6187, 6202), False, 'from core.models import Data, DataLabel, DataPrediction, IRRLog, Label, Model, Project, ProjectPermissions, TrainingSet\n'), ((8479, 8503), 'rest_framework.response.Response', 'Response', (["{'data': data}"], {}), "({'data': data})\n", (8487, 8503), False, 'from rest_framework.response import Response\n'), ((8950, 8984), 'core.models.Project.objects.get', 'Project.objects.get', ([], {'pk': 'project_pk'}), '(pk=project_pk)\n', (8969, 8984), False, 'from core.models import Data, DataLabel, DataPrediction, IRRLog, Label, Model, Project, ProjectPermissions, TrainingSet\n'), ((9385, 9448), 'rest_framework.response.Response', 'Response', (["{'kappa': kappa, 'percent agreement': perc_agreement}"], {}), "({'kappa': kappa, 'percent agreement': perc_agreement})\n", (9393, 9448), False, 'from rest_framework.response import Response\n'), ((9682, 9716), 'core.models.Project.objects.get', 'Project.objects.get', ([], {'pk': 'project_pk'}), '(pk=project_pk)\n', (9701, 9716), False, 'from core.models import Data, DataLabel, DataPrediction, IRRLog, Label, Model, Project, ProjectPermissions, TrainingSet\n'), ((9911, 9945), 'core.utils.util.perc_agreement_table_data', 'perc_agreement_table_data', (['project'], {}), '(project)\n', (9936, 9945), False, 'from core.utils.util import irr_heatmap_data, perc_agreement_table_data\n'), ((9957, 
9987), 'rest_framework.response.Response', 'Response', (["{'data': user_agree}"], {}), "({'data': user_agree})\n", (9965, 9987), False, 'from rest_framework.response import Response\n'), ((10430, 10464), 'core.models.Project.objects.get', 'Project.objects.get', ([], {'pk': 'project_pk'}), '(pk=project_pk)\n', (10449, 10464), False, 'from core.models import Data, DataLabel, DataPrediction, IRRLog, Label, Model, Project, ProjectPermissions, TrainingSet\n'), ((10485, 10510), 'core.utils.util.irr_heatmap_data', 'irr_heatmap_data', (['project'], {}), '(project)\n', (10501, 10510), False, 'from core.utils.util import irr_heatmap_data, perc_agreement_table_data\n'), ((10676, 10726), 'core.models.ProjectPermissions.objects.filter', 'ProjectPermissions.objects.filter', ([], {'project': 'project'}), '(project=project)\n', (10709, 10726), False, 'from core.models import Data, DataLabel, DataPrediction, IRRLog, Label, Model, Project, ProjectPermissions, TrainingSet\n'), ((10906, 10974), 'rest_framework.response.Response', 'Response', (["{'data': heatmap_data, 'labels': labels, 'coders': coders}"], {}), "({'data': heatmap_data, 'labels': labels, 'coders': coders})\n", (10914, 10974), False, 'from rest_framework.response import Response\n'), ((8246, 8265), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (8263, 8265), False, 'from django.db import connection\n'), ((9870, 9892), 'rest_framework.response.Response', 'Response', (["{'data': []}"], {}), "({'data': []})\n", (9878, 9892), False, 'from rest_framework.response import Response\n'), ((4402, 4439), 'core.models.Model.objects.filter', 'Model.objects.filter', ([], {'project': 'project'}), '(project=project)\n', (4422, 4439), False, 'from core.models import Data, DataLabel, DataPrediction, IRRLog, Label, Model, Project, ProjectPermissions, TrainingSet\n'), ((5660, 5679), 'django.utils.html.escape', 'escape', (['d.data.text'], {}), '(d.data.text)\n', (5666, 5679), False, 'from django.utils.html import escape\n'), ((8391, 8403), 'django.utils.html.escape', 'escape', (['d[0]'], {}), '(d[0])\n', (8397, 8403), False, 'from django.utils.html import escape\n'), ((9069, 9090), 'core.utils.utils_model.fleiss_kappa', 'fleiss_kappa', (['project'], {}), '(project)\n', (9081, 9090), False, 'from core.utils.utils_model import cohens_kappa, fleiss_kappa\n'), ((9141, 9162), 'core.utils.utils_model.cohens_kappa', 'cohens_kappa', (['project'], {}), '(project)\n', (9153, 9162), False, 'from core.utils.utils_model import cohens_kappa, fleiss_kappa\n'), ((2958, 3019), 'core.models.DataLabel.objects.filter', 'DataLabel.objects.filter', ([], {'data__project': 'project_pk', 'profile': 'u'}), '(data__project=project_pk, profile=u)\n', (2982, 3019), False, 'from core.models import Data, DataLabel, DataPrediction, IRRLog, Label, Model, Project, ProjectPermissions, TrainingSet\n'), ((7123, 7151), 'core.models.Data._meta.get_field', 'Data._meta.get_field', (['"""text"""'], {}), "('text')\n", (7143, 7151), False, 'from core.models import Data, DataLabel, DataPrediction, IRRLog, Label, Model, Project, ProjectPermissions, TrainingSet\n'), ((7183, 7212), 'core.models.Label._meta.get_field', 'Label._meta.get_field', (['"""name"""'], {}), "('name')\n", (7204, 7212), False, 'from core.models import Data, DataLabel, DataPrediction, IRRLog, Label, Model, Project, ProjectPermissions, TrainingSet\n'), ((7243, 7298), 'core.models.DataPrediction._meta.get_field', 'DataPrediction._meta.get_field', (['"""predicted_probability"""'], {}), "('predicted_probability')\n", (7273, 
7298), False, 'from core.models import Data, DataLabel, DataPrediction, IRRLog, Label, Model, Project, ProjectPermissions, TrainingSet\n'), ((7332, 7370), 'core.models.DataPrediction._meta.get_field', 'DataPrediction._meta.get_field', (['"""data"""'], {}), "('data')\n", (7362, 7370), False, 'from core.models import Data, DataLabel, DataPrediction, IRRLog, Label, Model, Project, ProjectPermissions, TrainingSet\n'), ((7539, 7578), 'core.models.DataPrediction._meta.get_field', 'DataPrediction._meta.get_field', (['"""label"""'], {}), "('label')\n", (7569, 7578), False, 'from core.models import Data, DataLabel, DataPrediction, IRRLog, Label, Model, Project, ProjectPermissions, TrainingSet\n'), ((7777, 7816), 'core.models.DataPrediction._meta.get_field', 'DataPrediction._meta.get_field', (['"""model"""'], {}), "('model')\n", (7807, 7816), False, 'from core.models import Data, DataLabel, DataPrediction, IRRLog, Label, Model, Project, ProjectPermissions, TrainingSet\n'), ((7966, 8003), 'core.models.Model._meta.get_field', 'Model._meta.get_field', (['"""training_set"""'], {}), "('training_set')\n", (7987, 8003), False, 'from core.models import Data, DataLabel, DataPrediction, IRRLog, Label, Model, Project, ProjectPermissions, TrainingSet\n'), ((8046, 8087), 'core.models.TrainingSet._meta.get_field', 'TrainingSet._meta.get_field', (['"""set_number"""'], {}), "('set_number')\n", (8073, 8087), False, 'from core.models import Data, DataLabel, DataPrediction, IRRLog, Label, Model, Project, ProjectPermissions, TrainingSet\n'), ((8159, 8190), 'core.models.Data._meta.get_field', 'Data._meta.get_field', (['"""project"""'], {}), "('project')\n", (8179, 8190), False, 'from core.models import Data, DataLabel, DataPrediction, IRRLog, Label, Model, Project, ProjectPermissions, TrainingSet\n'), ((9745, 9789), 'core.models.IRRLog.objects.filter', 'IRRLog.objects.filter', ([], {'data__project': 'project'}), '(data__project=project)\n', (9766, 9789), False, 'from core.models import Data, DataLabel, DataPrediction, IRRLog, Label, Model, Project, ProjectPermissions, TrainingSet\n'), ((1605, 1653), 'core.models.DataLabel.objects.filter', 'DataLabel.objects.filter', ([], {'profile': 'u', 'label': 'label'}), '(profile=u, label=label)\n', (1629, 1653), False, 'from core.models import Data, DataLabel, DataPrediction, IRRLog, Label, Model, Project, ProjectPermissions, TrainingSet\n'), ((1995, 2043), 'core.models.DataLabel.objects.filter', 'DataLabel.objects.filter', ([], {'profile': 'u', 'label': 'label'}), '(profile=u, label=label)\n', (2019, 2043), False, 'from core.models import Data, DataLabel, DataPrediction, IRRLog, Label, Model, Project, ProjectPermissions, TrainingSet\n'), ((1393, 1430), 'core.models.DataLabel.objects.filter', 'DataLabel.objects.filter', ([], {'label': 'label'}), '(label=label)\n', (1417, 1430), False, 'from core.models import Data, DataLabel, DataPrediction, IRRLog, Label, Model, Project, ProjectPermissions, TrainingSet\n'), ((10538, 10557), 'core.models.Label.objects.all', 'Label.objects.all', ([], {}), '()\n', (10555, 10557), False, 'from core.models import Data, DataLabel, DataPrediction, IRRLog, Label, Model, Project, ProjectPermissions, TrainingSet\n'), ((3241, 3253), 'django.db.models.FloatField', 'FloatField', ([], {}), '()\n', (3251, 3253), False, 'from django.db.models import FloatField\n')] |
import pytest
from eth_utils import encode_hex, remove_0x_prefix
from ethereum.tester import keys
import os
import json
from microraiden.utils import privkey_to_addr
from microraiden.constants import (
CONTRACTS_ABI_JSON,
CHANNEL_MANAGER_ABI_NAME,
TOKEN_ABI_NAME,
)
@pytest.fixture
def proxy_ssl():
return False
@pytest.fixture
def test_dir(request):
return request.fspath.dirname
@pytest.fixture
def proxy_ssl_certs(test_dir):
return os.path.join(test_dir + '/data/key.pem'), os.path.join(test_dir + '/data/cert.pem')
@pytest.fixture(scope='session')
def use_tester(request):
return request.config.getoption('use_tester')
@pytest.fixture(scope='session')
def clean_channels(request):
return request.config.getoption('clean_channels')
@pytest.fixture
def api_endpoint():
"""address of a paywall proxy"""
return 'localhost'
@pytest.fixture
def api_endpoint_port():
"""port the paywall proxy listens on"""
return 5000
@pytest.fixture
def api_endpoint_address(api_endpoint, api_endpoint_port):
return api_endpoint + ":" + str(api_endpoint_port)
@pytest.fixture
def init_contract_address():
return "0x" + "a" * 40
@pytest.fixture(scope='session')
def deployer_privkey():
return remove_0x_prefix(encode_hex(keys[3]))
@pytest.fixture(scope='session')
def deployer_address(deployer_privkey):
return privkey_to_addr(deployer_privkey)
@pytest.fixture(scope='session')
def contract_abi_path():
return os.path.join(os.path.dirname(os.path.dirname(__file__)), "../" + CONTRACTS_ABI_JSON)
@pytest.fixture(scope='session')
def contract_abis(contract_abi_path):
with open(contract_abi_path) as abi_file:
return json.load(abi_file)
@pytest.fixture(scope='session')
def channel_manager_abi(contract_abis):
return contract_abis[CHANNEL_MANAGER_ABI_NAME]['abi']
@pytest.fixture(scope='session')
def channel_manager_bytecode(contract_abis):
return contract_abis[CHANNEL_MANAGER_ABI_NAME]['bytecode']
@pytest.fixture(scope='session')
def token_abi(contract_abis):
return contract_abis[TOKEN_ABI_NAME]['abi']
@pytest.fixture(scope='session')
def token_bytecode(contract_abis):
return contract_abis[TOKEN_ABI_NAME]['bytecode']
@pytest.fixture(scope='session')
def kovan_block_time():
return 4
@pytest.fixture
def state_db_path(tmpdir):
return ':memory:'
# db = tmpdir.join("state.db")
# return db.strpath
| [
"os.path.join",
"microraiden.utils.privkey_to_addr",
"json.load",
"os.path.dirname",
"eth_utils.encode_hex",
"pytest.fixture"
] | [((553, 584), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (567, 584), False, 'import pytest\n'), ((663, 694), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (677, 694), False, 'import pytest\n'), ((1188, 1219), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1202, 1219), False, 'import pytest\n'), ((1296, 1327), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1310, 1327), False, 'import pytest\n'), ((1416, 1447), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1430, 1447), False, 'import pytest\n'), ((1572, 1603), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1586, 1603), False, 'import pytest\n'), ((1726, 1757), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1740, 1757), False, 'import pytest\n'), ((1859, 1890), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1873, 1890), False, 'import pytest\n'), ((2002, 2033), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (2016, 2033), False, 'import pytest\n'), ((2115, 2146), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (2129, 2146), False, 'import pytest\n'), ((2238, 2269), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (2252, 2269), False, 'import pytest\n'), ((1379, 1412), 'microraiden.utils.privkey_to_addr', 'privkey_to_addr', (['deployer_privkey'], {}), '(deployer_privkey)\n', (1394, 1412), False, 'from microraiden.utils import privkey_to_addr\n'), ((466, 506), 'os.path.join', 'os.path.join', (["(test_dir + '/data/key.pem')"], {}), "(test_dir + '/data/key.pem')\n", (478, 506), False, 'import os\n'), ((508, 549), 'os.path.join', 'os.path.join', (["(test_dir + '/data/cert.pem')"], {}), "(test_dir + '/data/cert.pem')\n", (520, 549), False, 'import os\n'), ((1272, 1291), 'eth_utils.encode_hex', 'encode_hex', (['keys[3]'], {}), '(keys[3])\n', (1282, 1291), False, 'from eth_utils import encode_hex, remove_0x_prefix\n'), ((1703, 1722), 'json.load', 'json.load', (['abi_file'], {}), '(abi_file)\n', (1712, 1722), False, 'import json\n'), ((1513, 1538), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1528, 1538), False, 'import os\n')] |
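Since the fixtures above only wire configuration together, a hedged sketch of a test module that consumes them may help; the test names and assertions are assumptions, not part of the original suite:
# Illustrative tests consuming the conftest fixtures above; names and
# assertions are assumptions, not taken from the original project.
from microraiden.utils import privkey_to_addr
def test_abis_are_loaded(channel_manager_abi, token_abi):
    # Compiled contract ABIs are plain lists of entry dicts.
    assert isinstance(channel_manager_abi, list)
    assert isinstance(token_abi, list)
def test_deployer_address_matches_key(deployer_privkey, deployer_address):
    assert privkey_to_addr(deployer_privkey) == deployer_address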
from conans import ConanFile, CMake, tools
from conans.tools import download, unzip, check_sha256
import os, shutil
class SpirvtoolsConan(ConanFile):
name = 'spirv-tools'
_revision = 'c430a41ae32c24bfc0ea87aac1bb19526caafb4e'
version = '2018.0'
license = 'Apache 2.0'
url = 'https://github.com/koeleck/conan-packages/tree/master/spirv-tools'
description = 'SPIRV-Tools package for conan'
settings = 'os', 'compiler', 'build_type', 'arch'
options = {'fPIC': [True, False]}
default_options = 'fPIC=False'
generators = 'cmake'
def config_options(self):
if self.settings.compiler == 'Visual Studio':
self.options.remove('fPIC')
def source(self):
download('https://github.com/KhronosGroup/SPIRV-Tools/archive/v{}.zip'.format(self.version), 'spirv-tools.zip')
check_sha256('spirv-tools.zip', '55fd0d446a43839abf5fef4ee01fde938b509fe7574dcae66792e1011fbf1a3a')
unzip('spirv-tools.zip')
os.unlink('spirv-tools.zip')
shutil.move('SPIRV-Tools-{}'.format(self.version), 'spirv-tools')
# SPIRV headers
spirv_headers_revision = 'ce309203d7eceaf908bea8862c27f3e0749f7d00'
download('https://github.com/KhronosGroup/SPIRV-Headers/archive/{}.zip'.format(spirv_headers_revision), 'spirv-headers.zip')
check_sha256('spirv-headers.zip', '5747e7851b3559bb19e35cc2c9262bb3fb2c279b908737fa24e48e4ab9cc9db5')
unzip('spirv-headers.zip')
os.unlink('spirv-headers.zip')
shutil.move('SPIRV-Headers-{}'.format(spirv_headers_revision), 'spirv-headers')
# This small hack might be useful to guarantee proper /MT /MD linkage in MSVC
# if the packaged project doesn't have variables to set it properly
tools.replace_in_file('spirv-tools/CMakeLists.txt', 'project(spirv-tools)', '''project(spirv-tools)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup()''')
def build(self):
cmake = CMake(self)
cmake.definitions['SPIRV_SKIP_EXECUTABLES'] = True
cmake.definitions['SPIRV-Headers_SOURCE_DIR'] = '{}/spirv-headers'.format(self.source_folder)
if not self.settings.compiler == 'Visual Studio':
cmake.definitions['CMAKE_POSITION_INDEPENDENT_CODE'] = self.options.fPIC
cmake.configure(source_folder='spirv-tools')
cmake.build()
def package(self):
self.copy('license*', dst='.', src='{}/spirv-tools'.format(self.source_folder), ignore_case=True, keep_path=False)
self.copy('*', dst='include', src='{}/spirv-tools/include'.format(self.source_folder))
# glslang needs this header (when optimizer is enabled)
self.copy('message.h', dst='include/spirv-tools', src='{}/spirv-tools/source'.format(self.source_folder), keep_path=False)
self.copy('*.lib', dst='lib', keep_path=False)
self.copy('*.dll', dst='bin', keep_path=False)
self.copy('*.so', dst='lib', keep_path=False)
self.copy('*.dylib', dst='lib', keep_path=False)
self.copy('*.a', dst='lib', keep_path=False)
with open('{}/revision.txt'.format(self.package_folder), 'w') as rev_file:
rev_file.write(self._revision)
def package_info(self):
self.cpp_info.libs = ['SPIRV-Tools', 'SPIRV-Tools-link', 'SPIRV-Tools-opt']
self.user_info.revision = self._revision
| [
"conans.tools.replace_in_file",
"conans.tools.check_sha256",
"conans.CMake",
"conans.tools.unzip",
"os.unlink"
] | [((842, 945), 'conans.tools.check_sha256', 'check_sha256', (['"""spirv-tools.zip"""', '"""55fd0d446a43839abf5fef4ee01fde938b509fe7574dcae66792e1011fbf1a3a"""'], {}), "('spirv-tools.zip',\n '55fd0d446a43839abf5fef4ee01fde938b509fe7574dcae66792e1011fbf1a3a')\n", (854, 945), False, 'from conans.tools import download, unzip, check_sha256\n'), ((950, 974), 'conans.tools.unzip', 'unzip', (['"""spirv-tools.zip"""'], {}), "('spirv-tools.zip')\n", (955, 974), False, 'from conans.tools import download, unzip, check_sha256\n'), ((983, 1011), 'os.unlink', 'os.unlink', (['"""spirv-tools.zip"""'], {}), "('spirv-tools.zip')\n", (992, 1011), False, 'import os, shutil\n'), ((1328, 1433), 'conans.tools.check_sha256', 'check_sha256', (['"""spirv-headers.zip"""', '"""5747e7851b3559bb19e35cc2c9262bb3fb2c279b908737fa24e48e4ab9cc9db5"""'], {}), "('spirv-headers.zip',\n '5747e7851b3559bb19e35cc2c9262bb3fb2c279b908737fa24e48e4ab9cc9db5')\n", (1340, 1433), False, 'from conans.tools import download, unzip, check_sha256\n'), ((1438, 1464), 'conans.tools.unzip', 'unzip', (['"""spirv-headers.zip"""'], {}), "('spirv-headers.zip')\n", (1443, 1464), False, 'from conans.tools import download, unzip, check_sha256\n'), ((1473, 1503), 'os.unlink', 'os.unlink', (['"""spirv-headers.zip"""'], {}), "('spirv-headers.zip')\n", (1482, 1503), False, 'import os, shutil\n'), ((1763, 1945), 'conans.tools.replace_in_file', 'tools.replace_in_file', (['"""spirv-tools/CMakeLists.txt"""', '"""project(spirv-tools)"""', '"""project(spirv-tools)\ninclude(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)\nconan_basic_setup()"""'], {}), '(\'spirv-tools/CMakeLists.txt\', \'project(spirv-tools)\',\n """project(spirv-tools)\ninclude(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)\nconan_basic_setup()"""\n )\n', (1784, 1945), False, 'from conans import ConanFile, CMake, tools\n'), ((1975, 1986), 'conans.CMake', 'CMake', (['self'], {}), '(self)\n', (1980, 1986), False, 'from conans import ConanFile, CMake, tools\n')] |
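For context, a consumer recipe could depend on this package roughly as sketched below; the `@koeleck/stable` user/channel suffix and the option value are assumptions (the recipe above does not fix them), and `deps_user_info` is the Conan 1.x mechanism for reading the exported `revision`:
# Hypothetical Conan 1.x consumer; the user/channel suffix is assumed.
from conans import ConanFile
class ConsumerConan(ConanFile):
    settings = 'os', 'compiler', 'build_type', 'arch'
    requires = 'spirv-tools/2018.0@koeleck/stable'
    default_options = {'spirv-tools:fPIC': True}
    generators = 'cmake'
    def build(self):
        # The packaged git revision is exposed via user_info in package_info().
        self.output.info('SPIRV-Tools revision: %s'
                         % self.deps_user_info['spirv-tools'].revision)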
from selfdrive.car.tesla.teslacan import TeslaCAN
from selfdrive.car.tesla.HUD_module import HUDController
from selfdrive.car.tesla.LONG_module import LONGController
from selfdrive.car.modules.CFG_module import load_bool_param
from opendbc.can.packer import CANPacker
from selfdrive.car.tesla.values import DBC, CAR, CarControllerParams, CAN_CHASSIS, CAN_AUTOPILOT, CAN_EPAS, CruiseButtons
import cereal.messaging as messaging
from common.numpy_fast import clip, interp
class CarController():
def __init__(self, dbc_name, CP, VM):
self.CP = CP
self.last_angle = 0
self.packer = CANPacker(dbc_name)
self.pt_packer = None
if DBC[CP.carFingerprint]['pt']:
self.pt_packer = CANPacker(DBC[CP.carFingerprint]['pt'])
self.tesla_can = TeslaCAN(dbc_name, self.packer, self.pt_packer)
self.prev_das_steeringControl_counter = -1
self.long_control_counter = 0
#initialize modules
self.hud_controller = HUDController(CP,self.packer,self.tesla_can)
pedalcan = 2
if load_bool_param("TinklaPedalCanZero", False):
pedalcan = 0
self.long_controller = LONGController(CP,self.packer,self.tesla_can,pedalcan)
self.cruiseDelayFrame = 0
self.prevCruiseEnabled = False
self.lP = messaging.sub_sock('longitudinalPlan')
self.rS = messaging.sub_sock('radarState')
self.mD = messaging.sub_sock('modelV2')
self.long_control_counter = 0
#def update(self, c, enabled, CS, frame, actuators, cruise_cancel):
def update(self, c, enabled, CS, frame, actuators, cruise_cancel, pcm_speed, pcm_override, hud_alert, audible_alert,
left_line, right_line, lead, left_lane_depart, right_lane_depart):
if frame % 100 == 0:
CS.autoresumeAcc = load_bool_param("TinklaAutoResumeACC",False)
can_sends = []
#add 1 second delay logic to wait for AP which has a status at 2Hz
if self.CP.carFingerprint != CAR.PREAP_MODELS:
if CS.cruiseEnabled:
if not self.prevCruiseEnabled:
self.cruiseDelayFrame = frame
if frame - self.cruiseDelayFrame > 30:
CS.cruiseDelay = True
else:
self.cruiseDelayFrame = 0
CS.cruiseDelay = False
self.prevCruiseEnabled = CS.cruiseEnabled
    #receive messages from the subscribed sockets
long_plan = messaging.recv_one_or_none(self.lP)
radar_state = messaging.recv_one_or_none(self.rS)
model_data = messaging.recv_one_or_none(self.mD)
if not enabled:
self.v_target = CS.out.vEgo
self.a_target = 1
# Cancel when openpilot is not enabled anymore and no autopilot
# BB: do we need to do this? AP/Tesla does not behave this way
# LKAS can be disabled by steering and ACC remains engaged
    #TODO: we need more logic around this for AP0
if not enabled and bool(CS.out.cruiseState.enabled) and not CS.enableHumanLongControl:
cruise_cancel = True
if ((frame % 10) == 0 and cruise_cancel):
stlk_counter = ((CS.msg_stw_actn_req['MC_STW_ACTN_RQ'] + 1) % 16)
can_sends.insert(0,self.tesla_can.create_action_request(CS.msg_stw_actn_req, CruiseButtons.CANCEL, CAN_CHASSIS[self.CP.carFingerprint],stlk_counter))
if (self.CP.carFingerprint in [CAR.AP1_MODELS,CAR.AP2_MODELS]):
can_sends.insert(1,self.tesla_can.create_action_request(CS.msg_stw_actn_req, CruiseButtons.CANCEL, CAN_AUTOPILOT[self.CP.carFingerprint],stlk_counter))
#now process controls
if enabled and not CS.human_control:
apply_angle = actuators.steeringAngleDeg
# Angular rate limit based on speed
steer_up = (self.last_angle * apply_angle > 0. and abs(apply_angle) > abs(self.last_angle))
rate_limit = CarControllerParams.RATE_LIMIT_UP if steer_up else CarControllerParams.RATE_LIMIT_DOWN
max_angle_diff = interp(CS.out.vEgo, rate_limit.speed_points, rate_limit.max_angle_diff_points)
apply_angle = clip(apply_angle, (self.last_angle - max_angle_diff), (self.last_angle + max_angle_diff))
# To not fault the EPS
apply_angle = clip(apply_angle, (CS.out.steeringAngleDeg - 20), (CS.out.steeringAngleDeg + 20))
else:
apply_angle = CS.out.steeringAngleDeg
self.last_angle = apply_angle
if enabled or (self.CP.carFingerprint == CAR.PREAP_MODELS):
can_sends.append(self.tesla_can.create_steering_control(apply_angle, enabled and not CS.human_control and not CS.out.cruiseState.standstill, CAN_EPAS[self.CP.carFingerprint], 1))
#update LONG Control module
can_messages = self.long_controller.update(enabled, CS, frame, actuators, cruise_cancel,pcm_speed,pcm_override, long_plan,radar_state)
if len(can_messages) > 0:
can_sends[0:0] = can_messages
#update HUD Integration module
can_messages = self.hud_controller.update(enabled, CS, frame, actuators, cruise_cancel, hud_alert, audible_alert,
left_line, right_line, lead, left_lane_depart, right_lane_depart,CS.human_control,radar_state,CS.lat_plan,apply_angle,model_data)
if len(can_messages) > 0:
can_sends.extend(can_messages)
new_actuators = actuators.copy()
new_actuators.steeringAngleDeg = apply_angle
return new_actuators, can_sends
| [
"selfdrive.car.tesla.LONG_module.LONGController",
"selfdrive.car.tesla.HUD_module.HUDController",
"cereal.messaging.recv_one_or_none",
"common.numpy_fast.interp",
"selfdrive.car.modules.CFG_module.load_bool_param",
"common.numpy_fast.clip",
"opendbc.can.packer.CANPacker",
"cereal.messaging.sub_sock",
"selfdrive.car.tesla.teslacan.TeslaCAN"
] | [((595, 614), 'opendbc.can.packer.CANPacker', 'CANPacker', (['dbc_name'], {}), '(dbc_name)\n', (604, 614), False, 'from opendbc.can.packer import CANPacker\n'), ((762, 809), 'selfdrive.car.tesla.teslacan.TeslaCAN', 'TeslaCAN', (['dbc_name', 'self.packer', 'self.pt_packer'], {}), '(dbc_name, self.packer, self.pt_packer)\n', (770, 809), False, 'from selfdrive.car.tesla.teslacan import TeslaCAN\n'), ((947, 993), 'selfdrive.car.tesla.HUD_module.HUDController', 'HUDController', (['CP', 'self.packer', 'self.tesla_can'], {}), '(CP, self.packer, self.tesla_can)\n', (960, 993), False, 'from selfdrive.car.tesla.HUD_module import HUDController\n'), ((1016, 1060), 'selfdrive.car.modules.CFG_module.load_bool_param', 'load_bool_param', (['"""TinklaPedalCanZero"""', '(False)'], {}), "('TinklaPedalCanZero', False)\n", (1031, 1060), False, 'from selfdrive.car.modules.CFG_module import load_bool_param\n'), ((1108, 1165), 'selfdrive.car.tesla.LONG_module.LONGController', 'LONGController', (['CP', 'self.packer', 'self.tesla_can', 'pedalcan'], {}), '(CP, self.packer, self.tesla_can, pedalcan)\n', (1122, 1165), False, 'from selfdrive.car.tesla.LONG_module import LONGController\n'), ((1245, 1283), 'cereal.messaging.sub_sock', 'messaging.sub_sock', (['"""longitudinalPlan"""'], {}), "('longitudinalPlan')\n", (1263, 1283), True, 'import cereal.messaging as messaging\n'), ((1299, 1331), 'cereal.messaging.sub_sock', 'messaging.sub_sock', (['"""radarState"""'], {}), "('radarState')\n", (1317, 1331), True, 'import cereal.messaging as messaging\n'), ((1347, 1376), 'cereal.messaging.sub_sock', 'messaging.sub_sock', (['"""modelV2"""'], {}), "('modelV2')\n", (1365, 1376), True, 'import cereal.messaging as messaging\n'), ((2271, 2306), 'cereal.messaging.recv_one_or_none', 'messaging.recv_one_or_none', (['self.lP'], {}), '(self.lP)\n', (2297, 2306), True, 'import cereal.messaging as messaging\n'), ((2325, 2360), 'cereal.messaging.recv_one_or_none', 'messaging.recv_one_or_none', (['self.rS'], {}), '(self.rS)\n', (2351, 2360), True, 'import cereal.messaging as messaging\n'), ((2378, 2413), 'cereal.messaging.recv_one_or_none', 'messaging.recv_one_or_none', (['self.mD'], {}), '(self.mD)\n', (2404, 2413), True, 'import cereal.messaging as messaging\n'), ((701, 740), 'opendbc.can.packer.CANPacker', 'CANPacker', (["DBC[CP.carFingerprint]['pt']"], {}), "(DBC[CP.carFingerprint]['pt'])\n", (710, 740), False, 'from opendbc.can.packer import CANPacker\n'), ((1741, 1786), 'selfdrive.car.modules.CFG_module.load_bool_param', 'load_bool_param', (['"""TinklaAutoResumeACC"""', '(False)'], {}), "('TinklaAutoResumeACC', False)\n", (1756, 1786), False, 'from selfdrive.car.modules.CFG_module import load_bool_param\n'), ((3753, 3831), 'common.numpy_fast.interp', 'interp', (['CS.out.vEgo', 'rate_limit.speed_points', 'rate_limit.max_angle_diff_points'], {}), '(CS.out.vEgo, rate_limit.speed_points, rate_limit.max_angle_diff_points)\n', (3759, 3831), False, 'from common.numpy_fast import clip, interp\n'), ((3852, 3941), 'common.numpy_fast.clip', 'clip', (['apply_angle', '(self.last_angle - max_angle_diff)', '(self.last_angle + max_angle_diff)'], {}), '(apply_angle, self.last_angle - max_angle_diff, self.last_angle +\n max_angle_diff)\n', (3856, 3941), False, 'from common.numpy_fast import clip, interp\n'), ((3992, 4069), 'common.numpy_fast.clip', 'clip', (['apply_angle', '(CS.out.steeringAngleDeg - 20)', '(CS.out.steeringAngleDeg + 20)'], {}), '(apply_angle, CS.out.steeringAngleDeg - 20, CS.out.steeringAngleDeg + 20)\n', (3996, 4069), False, 'from 
common.numpy_fast import clip, interp\n')] |
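The steering path above clamps how fast the commanded angle may move as a function of speed. Below is a standalone sketch of that clamping using numpy stand-ins for `interp`/`clip`; the breakpoint tables are invented for illustration and are not the real `CarControllerParams` values:
# Standalone illustration of the speed-dependent angle rate limit; the
# breakpoints below are invented, not the real CarControllerParams tables.
import numpy as np
def limit_angle(desired_deg, last_deg, v_ego,
                speed_points=(0.0, 15.0, 30.0), max_diff_points=(5.0, 1.5, 0.4)):
    max_diff = np.interp(v_ego, speed_points, max_diff_points)
    return float(np.clip(desired_deg, last_deg - max_diff, last_deg + max_diff))
print(limit_angle(desired_deg=10.0, last_deg=2.0, v_ego=30.0))  # 2.4: only 0.4 deg of travel allowed at 30 m/s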
# -*- coding: utf-8 -*-
"""
Author : <NAME>
Contact: <EMAIL>
Date : 2020/7/13 14:48
    Desc   : A helper class for reading configuration files
"""
from configparser import ConfigParser
class Conf(object):
def __init__(self, conf_file):
self.conf_obj = ConfigParser(comment_prefixes=";")
self.conf_obj.read(conf_file)
def getstring(self, section, key, default=""):
try:
return self.conf_obj.get(section, key)
except:
return default
def getboolean(self, section, key, default=False):
try:
return self.conf_obj.getboolean(section, key)
except:
return default
def getint(self, section, key, default=0):
try:
return self.conf_obj.getint(section, key)
except:
return default
def getfloat(self, section, key, default=0.0):
try:
return self.conf_obj.getfloat(section, key)
except:
return default
def set(self, section, key, value="", default=0):
try:
return self.conf_obj.set(section, key, value)
except:
return default
def write(self, conf_file):
with open(conf_file, 'w+') as wf:
self.conf_obj.write(wf)
| [
"configparser.ConfigParser"
] | [((228, 262), 'configparser.ConfigParser', 'ConfigParser', ([], {'comment_prefixes': '""";"""'}), "(comment_prefixes=';')\n", (240, 262), False, 'from configparser import ConfigParser\n')] |
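A short usage sketch for the wrapper above, assuming the Conf class is importable; the file name, the 'server' section and its keys are invented:
# Illustrative usage; 'app.ini', the 'server' section and its keys are made up.
conf = Conf('app.ini')
host = conf.getstring('server', 'host', default='127.0.0.1')
port = conf.getint('server', 'port', default=8080)
debug = conf.getboolean('server', 'debug', default=False)
conf.set('server', 'port', '9090')  # silently returns the default if the section is missing
conf.write('app.ini')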
import re
import sys
import torch
def replace(vars1, vars2):
for key in vars2:
if key in vars1:
sys.stdout.write("copy variable %s\n" % key)
vars1[key].copy_(vars2[key])
def replace2(vars1, vars2):
for key in vars2:
if key == "source_embedding":
key1 = "target_embedding"
sys.stdout.write("copy variable %s\n" % key1)
vars1[key1].copy_(vars2[key])
elif key == "bias":
key1 = "autoenc_bias"
sys.stdout.write("copy variable %s\n" % key1)
vars1[key1].copy_(vars2[key])
else:
key1 = key.replace("encoder", "autoenc")
if key1 in vars1:
sys.stdout.write("copy variable %s\n" % key1)
vars1[key1].copy_(vars2[key])
continue
if __name__ == "__main__":
states_1 = torch.load(sys.argv[1], map_location="cpu")
states_2 = torch.load(sys.argv[2], map_location="cpu")
states_3 = torch.load(sys.argv[3], map_location="cpu")
replace(states_1["model"], states_2["model"])
replace2(states_1["model"], states_3["model"])
torch.save(states_1, sys.argv[1])
| [
"torch.load",
"torch.save",
"sys.stdout.write"
] | [((873, 916), 'torch.load', 'torch.load', (['sys.argv[1]'], {'map_location': '"""cpu"""'}), "(sys.argv[1], map_location='cpu')\n", (883, 916), False, 'import torch\n'), ((932, 975), 'torch.load', 'torch.load', (['sys.argv[2]'], {'map_location': '"""cpu"""'}), "(sys.argv[2], map_location='cpu')\n", (942, 975), False, 'import torch\n'), ((991, 1034), 'torch.load', 'torch.load', (['sys.argv[3]'], {'map_location': '"""cpu"""'}), "(sys.argv[3], map_location='cpu')\n", (1001, 1034), False, 'import torch\n'), ((1142, 1175), 'torch.save', 'torch.save', (['states_1', 'sys.argv[1]'], {}), '(states_1, sys.argv[1])\n', (1152, 1175), False, 'import torch\n'), ((122, 166), 'sys.stdout.write', 'sys.stdout.write', (["('copy variable %s\\n' % key)"], {}), "('copy variable %s\\n' % key)\n", (138, 166), False, 'import sys\n'), ((348, 393), 'sys.stdout.write', 'sys.stdout.write', (["('copy variable %s\\n' % key1)"], {}), "('copy variable %s\\n' % key1)\n", (364, 393), False, 'import sys\n'), ((510, 555), 'sys.stdout.write', 'sys.stdout.write', (["('copy variable %s\\n' % key1)"], {}), "('copy variable %s\\n' % key1)\n", (526, 555), False, 'import sys\n'), ((712, 757), 'sys.stdout.write', 'sys.stdout.write', (["('copy variable %s\\n' % key1)"], {}), "('copy variable %s\\n' % key1)\n", (728, 757), False, 'import sys\n')] |
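The script is invoked with three checkpoint paths and overwrites the first one in place. A toy sketch of what `replace` does follows; the keys and shapes are invented:
# Toy demonstration of replace(): keys present in both dicts are copied in
# place via Tensor.copy_(). The keys and shapes here are invented.
import torch
vars1 = {"source_embedding": torch.zeros(2, 3), "bias": torch.zeros(3)}
vars2 = {"source_embedding": torch.ones(2, 3), "unrelated": torch.ones(1)}
for key in vars2:
    if key in vars1:
        vars1[key].copy_(vars2[key])
assert torch.equal(vars1["source_embedding"], torch.ones(2, 3))
assert torch.equal(vars1["bias"], torch.zeros(3))  # untouched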
#!/usr/bin/env python
# coding=utf-8
from __future__ import unicode_literals
__author__ = "<NAME>"
__date__ = "2012"
import sys
import codecs
import logging
LOG_NAME = 'PyTreex'
LOGFORMAT = '%(asctime)-15s %(message)s'
logger = logging.getLogger(LOG_NAME)
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(logging.Formatter(LOGFORMAT))
logger.addHandler(handler)
def log_info(message):
"Print an information message"
logging.getLogger(LOG_NAME).info('PYTREEX-INFO: ' + message)
def log_warn(message):
"Print a warning message"
logging.getLogger(LOG_NAME).warn('PYTREEX-WARN: ' + message)
def log_fatal(message, exc=Exception()):
"Print a fatal error message, then raise exception"
logging.getLogger(LOG_NAME).warn('PYTREEX-FATAL: ' + message)
raise exc
| [
"logging.getLogger",
"logging.Formatter",
"logging.StreamHandler"
] | [((233, 260), 'logging.getLogger', 'logging.getLogger', (['LOG_NAME'], {}), '(LOG_NAME)\n', (250, 260), False, 'import logging\n'), ((271, 304), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stderr'], {}), '(sys.stderr)\n', (292, 304), False, 'import logging\n'), ((326, 354), 'logging.Formatter', 'logging.Formatter', (['LOGFORMAT'], {}), '(LOGFORMAT)\n', (343, 354), False, 'import logging\n'), ((447, 474), 'logging.getLogger', 'logging.getLogger', (['LOG_NAME'], {}), '(LOG_NAME)\n', (464, 474), False, 'import logging\n'), ((567, 594), 'logging.getLogger', 'logging.getLogger', (['LOG_NAME'], {}), '(LOG_NAME)\n', (584, 594), False, 'import logging\n'), ((730, 757), 'logging.getLogger', 'logging.getLogger', (['LOG_NAME'], {}), '(LOG_NAME)\n', (747, 757), False, 'import logging\n')] |
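Assuming the module above is imported, typical use looks like the sketch below; the messages and the exception are invented:
# Illustrative calls; messages and the exception type are invented.
log_info('loading scenario file')
log_warn('no output path given, writing to stdout')
try:
    log_fatal('cannot open input file', IOError('missing.treex'))
except IOError as err:
    print('caught after the PYTREEX-FATAL line was logged:', err)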
"""
Copyright 2016 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import subprocess
import sys
import time
processes = 0
dictChannelKeys = {}
dictChannelsin = {}
dictProcesses = {}
def addInstance(channel, processes):
    if channel in dictChannelKeys:
        return processes
    opener = "open" if sys.platform == "darwin" else "python"
    process = subprocess.Popen([opener, "StrongLegsBot.py", channel, "info"])
    dictProcesses["%s" % processes] = process.pid
    dictChannelKeys["%s" % channel] = str(processes)
    # Integers are passed by value, so return the new count instead of
    # incrementing a local copy that is thrown away.
    return processes + 1
def deleteInstance(channel, processes):
    pid = dictProcesses[dictChannelKeys[channel]]
    os.system("taskkill /pid %s /f" % str(pid))
    return processes - 1
channels_file = open("channels.txt", "r")
for channel in channels_file.readlines():
time.sleep(1)
    processes = addInstance(channel.strip("\n"), processes)
user_input = None
while user_input != "end":
try:
user_input = input("End?")
except Exception as e:
print(str(e))
break
| [
"subprocess.Popen",
"time.sleep"
] | [((861, 924), 'subprocess.Popen', 'subprocess.Popen', (["[opener, 'StrongLegsBot.py', channel, 'info']"], {}), "([opener, 'StrongLegsBot.py', channel, 'info'])\n", (877, 924), False, 'import subprocess\n'), ((1297, 1310), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1307, 1310), False, 'import time\n')] |
#! /usr/bin/env python3
#
# Reduce a translation file -- generally, a Timezone translation -- by
# dropping untranslated strings. An untranslated string is one that
# has an empty translation **and** is marked unfinished.
#
# This is mostly useful to cut down the size of the source file:
# far and away most of the zones are not translated, and it's just a
# handful of places that get special treatment.
from xml.dom.minidom import parse
import sys
valid = True
dom = parse(sys.argv[1])
for n in dom.getElementsByTagName("translation"):
attrs = n.attributes.keys()
drop = True
if "type" not in attrs:
drop = False
elif "type" in attrs and n.attributes["type"].value != "unfinished":
# In the samples I've seen, only "unfinished" is a valid type;
# once something has been translated, the attribute vanishes (see
# the if branch, above).
print("WARNING ''{!s}'' unknown type".format(n.attributes["type"].value))
drop = False
valid = False
else:
t = n.firstChild
if t is None:
# Unfinished and empty
drop = True
else:
        # Keep entries that already contain text, even when still marked
        # unfinished; only empty *and* unfinished translations are dropped.
        drop = not bool(t.data)
if drop:
message = n.parentNode
message.parentNode.removeChild(message)
message.unlink()
if valid:
for line in dom.toxml().split("\n"):
if line.strip():
print(line)
| [
"xml.dom.minidom.parse"
] | [((472, 490), 'xml.dom.minidom.parse', 'parse', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (477, 490), False, 'from xml.dom.minidom import parse\n')] |
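The script is meant to be run on a single .ts file and prints the reduced XML to stdout. Below is a toy check of the dropping criterion stated in the header (empty translation and type="unfinished"); the document content is invented:
# Toy check of the criterion from the header comment; the XML is invented.
from xml.dom.minidom import parseString
doc = parseString(
    '<TS><context>'
    '<message><translation type="unfinished"></translation></message>'
    '<message><translation>Kalender</translation></message>'
    '</context></TS>')
for node in doc.getElementsByTagName('translation'):
    unfinished = node.getAttribute('type') == 'unfinished'
    empty = node.firstChild is None or not node.firstChild.data.strip()
    print('drop' if unfinished and empty else 'keep')  # -> drop, keep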
# -*- coding: utf-8 -*-
# @Time : 2020-02-07 11:26
# @Author : binger
name = "logger_app"
version_info = (0, 0, 1, 20061114)
__version__ = ".".join([str(v) for v in version_info])
__description__ = 'A simple extension on top of the standard logging module'
from .model import LoggerApp, register_formatter_tag_mapper, FormatterRule
import time
def get_flask_unique_request_id(use_md5=True, save=True):
"""
    Get a unique id for the current Flask request (one id per routed request).
    :param use_md5: whether to hash the id with md5
    :param save: whether to cache the id on flask.g
:return:
"""
from flask import g
request_id = getattr(g, "_link_id_", None)
if not request_id:
request_id = id(g._get_current_object())
if use_md5:
import hashlib
m = hashlib.md5(
str(request_id).encode("utf-8")
)
m.update(str(time.time()).encode("utf-8"))
request_id = m.hexdigest()
if save:
g._link_id_ = request_id
return request_id
| [
"flask.g._get_current_object",
"time.time"
] | [((597, 620), 'flask.g._get_current_object', 'g._get_current_object', ([], {}), '()\n', (618, 620), False, 'from flask import g\n'), ((785, 796), 'time.time', 'time.time', ([], {}), '()\n', (794, 796), False, 'import time\n')] |
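A sketch of using the helper inside a request handler; the route, the import path and the logging call are assumptions, only `get_flask_unique_request_id` itself comes from the package:
# Hypothetical Flask route; the route name and logging calls are assumptions.
from flask import Flask, jsonify
from logger_app import get_flask_unique_request_id
app = Flask(__name__)
@app.route('/ping')
def ping():
    link_id = get_flask_unique_request_id()  # also cached on flask.g when save=True
    app.logger.info('handling request %s', link_id)
    return jsonify(request_id=link_id)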
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility methods for breaking matrices into useful pieces."""
from typing import Set # pylint: disable=unused-import
from typing import Callable, List, Tuple, TypeVar
import numpy as np
from cirq.linalg import combinators
from cirq.linalg import diagonalize
from cirq.linalg import predicates
from cirq.linalg.tolerance import Tolerance
T = TypeVar('T')
def _group_similar(items: List[T],
comparer: Callable[[T, T], bool]) -> List[List[T]]:
"""Combines similar items into groups.
Args:
items: The list of items to group.
comparer: Determines if two items are similar.
Returns:
A list of groups of items.
"""
groups = [] # type: List[List[T]]
used = set() # type: Set[int]
for i in range(len(items)):
if i not in used:
group = [items[i]]
for j in range(i + 1, len(items)):
if j not in used and comparer(items[i], items[j]):
used.add(j)
group.append(items[j])
groups.append(group)
return groups
def _perp_eigendecompose(matrix: np.ndarray, tolerance: Tolerance
) -> Tuple[np.array, List[np.ndarray]]:
"""An eigendecomposition that ensures eigenvectors are perpendicular.
numpy.linalg.eig doesn't guarantee that eigenvectors from the same
eigenspace will be perpendicular. This method uses Gram-Schmidt to recover
a perpendicular set. It further checks that all eigenvectors are
perpendicular and raises an ArithmeticError otherwise.
Args:
matrix: The matrix to decompose.
tolerance: Thresholds for determining whether eigenvalues are from the
same eigenspace and whether eigenvectors are perpendicular.
Returns:
The eigenvalues and column eigenvectors. The i'th eigenvalue is
associated with the i'th column eigenvector.
Raises:
ArithmeticError: Failed to find perpendicular eigenvectors.
"""
vals, cols = np.linalg.eig(np.mat(matrix))
vecs = [cols[:, i] for i in range(len(cols))]
# Group by similar eigenvalue.
n = len(vecs)
groups = _group_similar(
list(range(n)),
lambda k1, k2: tolerance.all_close(vals[k1], vals[k2]))
# Remove overlap between eigenvectors with the same eigenvalue.
for g in groups:
q, _ = np.linalg.qr(np.concatenate([vecs[i] for i in g], axis=1))
for i in range(len(g)):
vecs[g[i]] = q[:, i]
# Ensure no eigenvectors overlap.
for i in range(len(vecs)):
for j in range(i + 1, len(vecs)):
if not tolerance.all_near_zero(np.dot(np.conj(vecs[i].T), vecs[j])):
raise ArithmeticError('Eigenvectors overlap.')
return vals, vecs
def map_eigenvalues(
matrix: np.ndarray,
func: Callable[[complex], complex],
tolerance: Tolerance = Tolerance.DEFAULT
) -> np.ndarray:
"""Applies a function to the eigenvalues of a matrix.
Given M = sum_k a_k |v_k><v_k|, returns f(M) = sum_k f(a_k) |v_k><v_k|.
Args:
matrix: The matrix to modify with the function.
func: The function to apply to the eigenvalues of the matrix.
tolerance: Thresholds used when separating eigenspaces.
Returns:
The transformed matrix.
"""
vals, vecs = _perp_eigendecompose(matrix, tolerance)
pieces = [np.outer(vec, np.conj(vec.T)) for vec in vecs]
out_vals = np.vectorize(func)(vals.astype(complex))
total = np.zeros(shape=matrix.shape)
for piece, val in zip(pieces, out_vals):
total = np.add(total, piece * val)
return total
def kron_factor_4x4_to_2x2s(
matrix: np.ndarray,
tolerance: Tolerance = Tolerance.DEFAULT
) -> Tuple[complex, np.ndarray, np.ndarray]:
"""Splits a 4x4 matrix U = kron(A, B) into A, B, and a global factor.
Requires the matrix to be the kronecker product of two 2x2 unitaries.
Requires the matrix to have a non-zero determinant.
Args:
matrix: The 4x4 unitary matrix to factor.
tolerance: Acceptable numeric error thresholds.
Returns:
A scalar factor and a pair of 2x2 unit-determinant matrices. The
kronecker product of all three is equal to the given matrix.
Raises:
ValueError:
The given matrix can't be tensor-factored into 2x2 pieces.
"""
# Use the entry with the largest magnitude as a reference point.
a, b = max(
((i, j) for i in range(4) for j in range(4)),
key=lambda t: abs(matrix[t]))
# Extract sub-factors touching the reference cell.
f1 = np.zeros((2, 2), dtype=np.complex128)
f2 = np.zeros((2, 2), dtype=np.complex128)
for i in range(2):
for j in range(2):
f1[(a >> 1) ^ i, (b >> 1) ^ j] = matrix[a ^ (i << 1), b ^ (j << 1)]
f2[(a & 1) ^ i, (b & 1) ^ j] = matrix[a ^ i, b ^ j]
# Rescale factors to have unit determinants.
f1 /= (np.sqrt(np.linalg.det(f1)) or 1)
f2 /= (np.sqrt(np.linalg.det(f2)) or 1)
# Determine global phase.
g = matrix[a, b] / (f1[a >> 1, b >> 1] * f2[a & 1, b & 1])
if np.real(g) < 0:
f1 *= -1
g = -g
restored = g * combinators.kron(f1, f2)
if np.any(np.isnan(restored)) or not tolerance.all_close(restored, matrix):
raise ValueError("Can't factor into kronecker product.")
return g, f1, f2
def so4_to_magic_su2s(
mat: np.ndarray,
tolerance: Tolerance = Tolerance.DEFAULT
) -> Tuple[np.ndarray, np.ndarray]:
"""Finds 2x2 special-unitaries A, B where mat = Mag.H @ kron(A, B) @ Mag.
Mag is the magic basis matrix:
1 0 0 i
0 i 1 0
0 i -1 0 (times sqrt(0.5) to normalize)
1 0 0 -i
Args:
mat: A real 4x4 orthogonal matrix.
tolerance: Per-matrix-entry tolerance on equality.
Returns:
A pair (A, B) of matrices in SU(2) such that Mag.H @ kron(A, B) @ Mag
is approximately equal to the given matrix.
Raises:
ValueError: Bad matrix.
ArithmeticError: Failed to perform the decomposition to desired
tolerance.
"""
if mat.shape != (4, 4) or not predicates.is_special_orthogonal(mat,
tolerance):
raise ValueError('mat must be 4x4 special orthogonal.')
magic = np.array([[1, 0, 0, 1j],
[0, 1j, 1, 0],
[0, 1j, -1, 0],
[1, 0, 0, -1j]]) * np.sqrt(0.5)
ab = combinators.dot(magic, mat, np.conj(magic.T))
_, a, b = kron_factor_4x4_to_2x2s(ab, tolerance)
# Check decomposition against desired tolerance.
reconstructed = combinators.dot(np.conj(magic.T),
combinators.kron(a, b),
magic)
if not tolerance.all_close(reconstructed, mat):
raise ArithmeticError('Failed to decompose to desired tolerance.')
return a, b
def kak_canonicalize_vector(
x: float, y: float, z: float
) -> Tuple[complex,
Tuple[np.ndarray, np.ndarray],
Tuple[float, float, float],
Tuple[np.ndarray, np.ndarray]]:
"""Canonicalizes an XX/YY/ZZ interaction by swap/negate/shift-ing axes.
Args:
x: The strength of the XX interaction.
y: The strength of the YY interaction.
z: The strength of the ZZ interaction.
Returns:
A nested tuple (g, (a1, a0), (x2, y2, z2), (b1, b0)) containing:
0. A global phase factor.
1. Post-non-local-operation matrices for the second/first qubit.
2. The canonicalized XX/YY/ZZ weights.
3. Pre-non-local-operation matrices for the second/first qubit.
Guarantees that the canonicalized x2, y2, z2 satisfy:
0 ≤ abs(z2) ≤ y2 ≤ x2 ≤ π/4
z2 ≠ -π/4
Guarantees that the implied output matrix:
g · (a1 ⊗ a0) · exp(i·(x2·XX + y2·YY + z2·ZZ)) · (b1 ⊗ b0)
is approximately equal to the implied input matrix:
exp(i·(x·XX + y·YY + z·ZZ))
"""
phase = [complex(1)] # Accumulated global phase.
left = [np.eye(2)] * 2 # Per-qubit left factors.
right = [np.eye(2)] * 2 # Per-qubit right factors.
v = [x, y, z] # Remaining XX/YY/ZZ interaction vector.
# These special-unitary matrices flip the X, Y, and Z axes respectively.
flippers = [
np.array([[0, 1], [1, 0]]) * 1j,
np.array([[0, -1j], [1j, 0]]) * 1j,
np.array([[1, 0], [0, -1]]) * 1j
]
    # Each of these special-unitary matrices swaps the roles of two axes.
# The matrix at index k swaps the *other two* axes (e.g. swappers[1] is a
# Hadamard operation that swaps X and Z).
swappers = [
np.array([[1, -1j], [1j, -1]]) * 1j * np.sqrt(0.5),
np.array([[1, 1], [1, -1]]) * 1j * np.sqrt(0.5),
np.array([[0, 1 - 1j], [1 + 1j, 0]]) * 1j * np.sqrt(0.5)
]
# Shifting strength by ½π is equivalent to local ops (e.g. exp(i½π XX)∝XX).
def shift(k, step):
v[k] += step * np.pi / 2
phase[0] *= 1j**step
right[0] = combinators.dot(flippers[k]**(step % 4), right[0])
right[1] = combinators.dot(flippers[k]**(step % 4), right[1])
    # Two negations are equivalent to temporarily flipping along the other axis.
def negate(k1, k2):
v[k1] *= -1
v[k2] *= -1
phase[0] *= -1
s = flippers[3 - k1 - k2] # The other axis' flipper.
left[1] = combinators.dot(left[1], s)
right[1] = combinators.dot(s, right[1])
# Swapping components is equivalent to temporarily swapping the two axes.
def swap(k1, k2):
v[k1], v[k2] = v[k2], v[k1]
s = swappers[3 - k1 - k2] # The other axis' swapper.
left[0] = combinators.dot(left[0], s)
left[1] = combinators.dot(left[1], s)
right[0] = combinators.dot(s, right[0])
right[1] = combinators.dot(s, right[1])
# Shifts an axis strength into the range (-π/4, π/4].
def canonical_shift(k):
while v[k] <= -np.pi / 4:
shift(k, +1)
while v[k] > np.pi / 4:
shift(k, -1)
# Sorts axis strengths into descending order by absolute magnitude.
def sort():
if abs(v[0]) < abs(v[1]):
swap(0, 1)
if abs(v[1]) < abs(v[2]):
swap(1, 2)
if abs(v[0]) < abs(v[1]):
swap(0, 1)
# Get all strengths to (-¼π, ¼π] in descending order by absolute magnitude.
canonical_shift(0)
canonical_shift(1)
canonical_shift(2)
sort()
# Move all negativity into z.
if v[0] < 0:
negate(0, 2)
if v[1] < 0:
negate(1, 2)
canonical_shift(2)
return (
phase[0],
(left[1], left[0]),
(v[0], v[1], v[2]),
(right[1], right[0]),
)
def kak_decomposition(
mat: np.ndarray,
tolerance: Tolerance = Tolerance.DEFAULT
) -> Tuple[complex,
Tuple[np.ndarray, np.ndarray],
Tuple[float, float, float],
Tuple[np.ndarray, np.ndarray]]:
"""Decomposes a 2-qubit unitary into 1-qubit ops and XX/YY/ZZ interactions.
Args:
mat: The 4x4 unitary matrix to decompose.
tolerance: Per-matrix-entry tolerance on equality.
Returns:
A nested tuple (g, (a1, a0), (x, y, z), (b1, b0)) containing:
0. A global phase factor.
        1. The pre-operation matrices to apply to the second/first qubit.
2. The XX/YY/ZZ weights of the non-local operation.
        3. The post-operation matrices to apply to the second/first qubit.
Guarantees that the x2, y2, z2 are canonicalized to satisfy:
0 ≤ abs(z) ≤ y ≤ x ≤ π/4
z ≠ -π/4
Guarantees that the input matrix should approximately equal:
g · (a1 ⊗ a0) · exp(i·(x·XX + y·YY + z·ZZ)) · (b1 ⊗ b0)
Raises:
ValueError: Bad matrix.
ArithmeticError: Failed to perform the decomposition.
References:
'An Introduction to Cartan's KAK Decomposition for QC Programmers'
https://arxiv.org/abs/quant-ph/0507171
"""
magic = np.array([[1, 0, 0, 1j],
[0, 1j, 1, 0],
[0, 1j, -1, 0],
[1, 0, 0, -1j]]) * np.sqrt(0.5)
gamma = np.array([[1, 1, 1, 1],
[1, 1, -1, -1],
[-1, 1, -1, 1],
[1, -1, -1, 1]]) * 0.25
# Diagonalize in magic basis.
left, d, right = (
diagonalize.bidiagonalize_unitary_with_special_orthogonals(
combinators.dot(np.conj(magic.T), mat, magic),
tolerance))
# Recover pieces.
a1, a0 = so4_to_magic_su2s(left.T, tolerance)
b1, b0 = so4_to_magic_su2s(right.T, tolerance)
w, x, y, z = gamma.dot(np.vstack(np.angle(d))).flatten()
g = np.exp(1j * w)
# Canonicalize.
g2, (c1, c0), (x2, y2, z2), (d1, d0) = kak_canonicalize_vector(x, y, z)
return (
g * g2,
(a1.dot(c1), a0.dot(c0)),
(x2, y2, z2),
(d1.dot(b1), d0.dot(b0))
)
| [
"numpy.mat",
"numpy.eye",
"numpy.sqrt",
"numpy.add",
"cirq.linalg.predicates.is_special_orthogonal",
"numpy.conj",
"numpy.linalg.det",
"numpy.exp",
"numpy.real",
"numpy.zeros",
"numpy.array",
"cirq.linalg.combinators.dot",
"numpy.isnan",
"numpy.concatenate",
"numpy.angle",
"cirq.linalg.combinators.kron",
"numpy.vectorize",
"typing.TypeVar"
] | [((934, 946), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (941, 946), False, 'from typing import Callable, List, Tuple, TypeVar\n'), ((4080, 4108), 'numpy.zeros', 'np.zeros', ([], {'shape': 'matrix.shape'}), '(shape=matrix.shape)\n', (4088, 4108), True, 'import numpy as np\n'), ((5200, 5237), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {'dtype': 'np.complex128'}), '((2, 2), dtype=np.complex128)\n', (5208, 5237), True, 'import numpy as np\n'), ((5247, 5284), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {'dtype': 'np.complex128'}), '((2, 2), dtype=np.complex128)\n', (5255, 5284), True, 'import numpy as np\n'), ((13498, 13514), 'numpy.exp', 'np.exp', (['(1.0j * w)'], {}), '(1.0j * w)\n', (13504, 13514), True, 'import numpy as np\n'), ((2597, 2611), 'numpy.mat', 'np.mat', (['matrix'], {}), '(matrix)\n', (2603, 2611), True, 'import numpy as np\n'), ((4026, 4044), 'numpy.vectorize', 'np.vectorize', (['func'], {}), '(func)\n', (4038, 4044), True, 'import numpy as np\n'), ((4170, 4196), 'numpy.add', 'np.add', (['total', '(piece * val)'], {}), '(total, piece * val)\n', (4176, 4196), True, 'import numpy as np\n'), ((5718, 5728), 'numpy.real', 'np.real', (['g'], {}), '(g)\n', (5725, 5728), True, 'import numpy as np\n'), ((5786, 5810), 'cirq.linalg.combinators.kron', 'combinators.kron', (['f1', 'f2'], {}), '(f1, f2)\n', (5802, 5810), False, 'from cirq.linalg import combinators\n'), ((6976, 7061), 'numpy.array', 'np.array', (['[[1, 0, 0, 1.0j], [0, 1.0j, 1, 0], [0, 1.0j, -1, 0], [1, 0, 0, -1.0j]]'], {}), '([[1, 0, 0, 1.0j], [0, 1.0j, 1, 0], [0, 1.0j, -1, 0], [1, 0, 0, -1.0j]]\n )\n', (6984, 7061), True, 'import numpy as np\n'), ((7111, 7123), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (7118, 7123), True, 'import numpy as np\n'), ((7161, 7177), 'numpy.conj', 'np.conj', (['magic.T'], {}), '(magic.T)\n', (7168, 7177), True, 'import numpy as np\n'), ((7322, 7338), 'numpy.conj', 'np.conj', (['magic.T'], {}), '(magic.T)\n', (7329, 7338), True, 'import numpy as np\n'), ((7376, 7398), 'cirq.linalg.combinators.kron', 'combinators.kron', (['a', 'b'], {}), '(a, b)\n', (7392, 7398), False, 'from cirq.linalg import combinators\n'), ((9763, 9815), 'cirq.linalg.combinators.dot', 'combinators.dot', (['(flippers[k] ** (step % 4))', 'right[0]'], {}), '(flippers[k] ** (step % 4), right[0])\n', (9778, 9815), False, 'from cirq.linalg import combinators\n'), ((9833, 9885), 'cirq.linalg.combinators.dot', 'combinators.dot', (['(flippers[k] ** (step % 4))', 'right[1]'], {}), '(flippers[k] ** (step % 4), right[1])\n', (9848, 9885), False, 'from cirq.linalg import combinators\n'), ((10132, 10159), 'cirq.linalg.combinators.dot', 'combinators.dot', (['left[1]', 's'], {}), '(left[1], s)\n', (10147, 10159), False, 'from cirq.linalg import combinators\n'), ((10179, 10207), 'cirq.linalg.combinators.dot', 'combinators.dot', (['s', 'right[1]'], {}), '(s, right[1])\n', (10194, 10207), False, 'from cirq.linalg import combinators\n'), ((10425, 10452), 'cirq.linalg.combinators.dot', 'combinators.dot', (['left[0]', 's'], {}), '(left[0], s)\n', (10440, 10452), False, 'from cirq.linalg import combinators\n'), ((10471, 10498), 'cirq.linalg.combinators.dot', 'combinators.dot', (['left[1]', 's'], {}), '(left[1], s)\n', (10486, 10498), False, 'from cirq.linalg import combinators\n'), ((10518, 10546), 'cirq.linalg.combinators.dot', 'combinators.dot', (['s', 'right[0]'], {}), '(s, right[0])\n', (10533, 10546), False, 'from cirq.linalg import combinators\n'), ((10566, 10594), 'cirq.linalg.combinators.dot', 'combinators.dot', 
(['s', 'right[1]'], {}), '(s, right[1])\n', (10581, 10594), False, 'from cirq.linalg import combinators\n'), ((12796, 12881), 'numpy.array', 'np.array', (['[[1, 0, 0, 1.0j], [0, 1.0j, 1, 0], [0, 1.0j, -1, 0], [1, 0, 0, -1.0j]]'], {}), '([[1, 0, 0, 1.0j], [0, 1.0j, 1, 0], [0, 1.0j, -1, 0], [1, 0, 0, -1.0j]]\n )\n', (12804, 12881), True, 'import numpy as np\n'), ((12931, 12943), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (12938, 12943), True, 'import numpy as np\n'), ((12956, 13028), 'numpy.array', 'np.array', (['[[1, 1, 1, 1], [1, 1, -1, -1], [-1, 1, -1, 1], [1, -1, -1, 1]]'], {}), '([[1, 1, 1, 1], [1, 1, -1, -1], [-1, 1, -1, 1], [1, -1, -1, 1]])\n', (12964, 13028), True, 'import numpy as np\n'), ((2952, 2996), 'numpy.concatenate', 'np.concatenate', (['[vecs[i] for i in g]'], {'axis': '(1)'}), '([vecs[i] for i in g], axis=1)\n', (2966, 2996), True, 'import numpy as np\n'), ((3978, 3992), 'numpy.conj', 'np.conj', (['vec.T'], {}), '(vec.T)\n', (3985, 3992), True, 'import numpy as np\n'), ((5548, 5565), 'numpy.linalg.det', 'np.linalg.det', (['f1'], {}), '(f1)\n', (5561, 5565), True, 'import numpy as np\n'), ((5592, 5609), 'numpy.linalg.det', 'np.linalg.det', (['f2'], {}), '(f2)\n', (5605, 5609), True, 'import numpy as np\n'), ((5825, 5843), 'numpy.isnan', 'np.isnan', (['restored'], {}), '(restored)\n', (5833, 5843), True, 'import numpy as np\n'), ((6782, 6830), 'cirq.linalg.predicates.is_special_orthogonal', 'predicates.is_special_orthogonal', (['mat', 'tolerance'], {}), '(mat, tolerance)\n', (6814, 6830), False, 'from cirq.linalg import predicates\n'), ((8784, 8793), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (8790, 8793), True, 'import numpy as np\n'), ((8839, 8848), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (8845, 8848), True, 'import numpy as np\n'), ((9045, 9071), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (9053, 9071), True, 'import numpy as np\n'), ((9086, 9119), 'numpy.array', 'np.array', (['[[0, -1.0j], [1.0j, 0]]'], {}), '([[0, -1.0j], [1.0j, 0]])\n', (9094, 9119), True, 'import numpy as np\n'), ((9130, 9157), 'numpy.array', 'np.array', (['[[1, 0], [0, -1]]'], {}), '([[1, 0], [0, -1]])\n', (9138, 9157), True, 'import numpy as np\n'), ((9435, 9447), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (9442, 9447), True, 'import numpy as np\n'), ((9492, 9504), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (9499, 9504), True, 'import numpy as np\n'), ((9558, 9570), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (9565, 9570), True, 'import numpy as np\n'), ((13250, 13266), 'numpy.conj', 'np.conj', (['magic.T'], {}), '(magic.T)\n', (13257, 13266), True, 'import numpy as np\n'), ((9397, 9431), 'numpy.array', 'np.array', (['[[1, -1.0j], [1.0j, -1]]'], {}), '([[1, -1.0j], [1.0j, -1]])\n', (9405, 9431), True, 'import numpy as np\n'), ((9457, 9484), 'numpy.array', 'np.array', (['[[1, 1], [1, -1]]'], {}), '([[1, 1], [1, -1]])\n', (9465, 9484), True, 'import numpy as np\n'), ((9514, 9554), 'numpy.array', 'np.array', (['[[0, 1 - 1.0j], [1 + 1.0j, 0]]'], {}), '([[0, 1 - 1.0j], [1 + 1.0j, 0]])\n', (9522, 9554), True, 'import numpy as np\n'), ((13466, 13477), 'numpy.angle', 'np.angle', (['d'], {}), '(d)\n', (13474, 13477), True, 'import numpy as np\n'), ((3225, 3243), 'numpy.conj', 'np.conj', (['vecs[i].T'], {}), '(vecs[i].T)\n', (3232, 3243), True, 'import numpy as np\n')] |
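A numeric sanity check for the helpers above, assuming the functions are importable (shown as if they are already in scope); H and X are the usual single-qubit gates and the expected results follow from the docstrings:
# Sketch: round-trip a tensor-product unitary through the helpers above.
# Assumes kron_factor_4x4_to_2x2s and kak_decomposition are in scope.
import numpy as np
X = np.array([[0, 1], [1, 0]], dtype=complex)
H = np.sqrt(0.5) * np.array([[1, 1], [1, -1]], dtype=complex)
U = np.kron(H, X)
g, f1, f2 = kron_factor_4x4_to_2x2s(U)
assert np.allclose(g * np.kron(f1, f2), U)   # exact factorization
assert np.allclose(np.linalg.det(f1), 1)       # unit-determinant pieces
_, _, (x, y, z), _ = kak_decomposition(U)
print(np.round([x, y, z], 6))  # ~[0, 0, 0]: a purely local operation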
from flask import Flask, render_template
from flask import request, jsonify
import time
import matplotlib
import yt
import numpy as np
matplotlib.use('agg')
app = Flask(__name__)
app.secret_key = 's3cr3t'
app.debug = True
@app.route('/')
def index():
return render_template('/index.html')
@app.route('/background_process')
def background_process():
#read the RGB values from the UI
redValues = np.asarray(eval(request.args.get('redValues')))
greenValues = np.asarray(eval(request.args.get('greenValues')))
blueValues = np.asarray(eval(request.args.get('blueValues')))
#create a random image name
img_name = int(round(time.time() * 1000))
assignValues(redValues, greenValues, blueValues, str(img_name))
#send back to the UI the image name to display
return jsonify(result= img_name)
def assignValues(redVal, greenVal, blueVal, resultname):
ctf = yt.ColorTransferFunction( (-10.0, -5.0) )
ctf.add_layers(8)
#assign the recieved RGB values to ColorTransferFunction
ctf.red.y = redVal
ctf.green.y = greenVal
ctf.blue.y = blueVal
#plot the img
ctf.plot("static/img/"+resultname+".png")
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000) | [
"flask.render_template",
"flask.request.args.get",
"flask.Flask",
"matplotlib.use",
"yt.ColorTransferFunction",
"time.time",
"flask.jsonify"
] | [((137, 158), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (151, 158), False, 'import matplotlib\n'), ((166, 181), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (171, 181), False, 'from flask import Flask, render_template\n'), ((267, 297), 'flask.render_template', 'render_template', (['"""/index.html"""'], {}), "('/index.html')\n", (282, 297), False, 'from flask import Flask, render_template\n'), ((810, 834), 'flask.jsonify', 'jsonify', ([], {'result': 'img_name'}), '(result=img_name)\n', (817, 834), False, 'from flask import request, jsonify\n'), ((921, 960), 'yt.ColorTransferFunction', 'yt.ColorTransferFunction', (['(-10.0, -5.0)'], {}), '((-10.0, -5.0))\n', (945, 960), False, 'import yt\n'), ((429, 458), 'flask.request.args.get', 'request.args.get', (['"""redValues"""'], {}), "('redValues')\n", (445, 458), False, 'from flask import request, jsonify\n'), ((495, 526), 'flask.request.args.get', 'request.args.get', (['"""greenValues"""'], {}), "('greenValues')\n", (511, 526), False, 'from flask import request, jsonify\n'), ((562, 592), 'flask.request.args.get', 'request.args.get', (['"""blueValues"""'], {}), "('blueValues')\n", (578, 592), False, 'from flask import request, jsonify\n'), ((653, 664), 'time.time', 'time.time', ([], {}), '()\n', (662, 664), False, 'import time\n')] |
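A hedged client-side sketch for the endpoint above. The arrays are passed as query-string parameters that the server `eval()`s, so they must be Python list literals; the 256-sample length is an assumption about the transfer function's default binning, and the values are invented:
# Hypothetical client; the sample count and RGB values are assumptions.
import requests
n = 256
params = {
    'redValues': str([i / n for i in range(n)]),
    'greenValues': str([0.5 * i / n for i in range(n)]),
    'blueValues': str([1.0 - i / n for i in range(n)]),
}
resp = requests.get('http://localhost:5000/background_process', params=params)
print(resp.json()['result'])  # name of the PNG written under static/img/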
from boxwise_flask.db import db
from peewee import CharField, IntegerField
class SizeRange(db.Model):
label = CharField(null=True)
seq = IntegerField(null=True)
class Meta:
table_name = "sizegroup"
| [
"peewee.CharField",
"peewee.IntegerField"
] | [((116, 136), 'peewee.CharField', 'CharField', ([], {'null': '(True)'}), '(null=True)\n', (125, 136), False, 'from peewee import CharField, IntegerField\n'), ((147, 170), 'peewee.IntegerField', 'IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (159, 170), False, 'from peewee import CharField, IntegerField\n')] |
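A small query sketch for the model above; it assumes the database bound in boxwise_flask.db is connected, and the label value is invented:
# Hypothetical queries; assumes the bound database is connected and migrated.
adult = SizeRange.get_or_none(SizeRange.label == 'Adult')
for size_range in SizeRange.select().order_by(SizeRange.seq):
    print(size_range.label, size_range.seq)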
from typing import Any, Union, Optional
import discord
from discord_slash import SlashContext, cog_ext
from discord.ext import commands
from ..property_manager import FirestorePropertyManager, Property, InvalidKeyError, InvalidValueError, BooleanProperty, ListProperty
from ..utils import send_maybe_hidden
class Settings(commands.Cog):
PROPERTY_COMMAND_DESCRIPTION = 'Change the bot\'s properties for a channel or server. Use this to change the bot prefix, default text-to-speech language, etc.'
PROPERTY_NAME_OPTION = {
'name': 'name',
'description': 'Property name.',
'type': 3,
'required': True,
'choices': [
{
'name': 'text_to_speech',
'value': 'text_to_speech'
},
{
'name': 'language',
'value': 'language'
},
{
'name': 'prefix',
'value': 'prefix'
},
{
'name': 'show_definition_source',
'value': 'show_definition_source'
},
{
'name': 'dictionary_apis',
'value': 'dictionary_apis'
},
{
'name': 'auto_translate',
'value': 'auto_translate'
}
]
}
def __init__(self):
self._scoped_property_manager = FirestorePropertyManager([
Property(
'prefix',
default='.',
description='The bot\'s prefix. This can be one or more characters. If you forget the prefix, just mention the bot and it will show you the current prefix.'
),
Property(
'text_to_speech',
choices=['force', 'flag', 'disable'],
default='flag',
description='Choices:\n'
'`force`: All definition requests will use text-to-speech.\n'
'`flag`: You must use the flag to use text-to-speech.\n'
'`disable`: Text-to-speech is disabled.'
),
Property(
'language',
default='en',
description='The language to use when displaying definitions and speaking. This can be a two-letter language code or a language name.'
),
BooleanProperty(
'show_definition_source',
default=False,
description='Choices:\n'
'`true`: The bot will show the definition source at the end of each definition.\n'
'`false`: The bot will not show the definition source.'
),
ListProperty(
'dictionary_apis',
default=['unofficial_google', 'owlbot', 'merriam_webster_collegiate', 'merriam_webster_medical', 'rapid_words'],
choices=['owlbot', 'unofficial_google', 'merriam_webster_medical', 'merriam_webster_collegiate', 'rapid_words'],
description='A comma-separated list of dictionary APIs to use in order of preference.\n'
'Choices:\n'
'`unofficial_google`, `owlbot`, `merriam_webster_collegiate`, `merriam_webster_medical`, `rapid_words`'
),
BooleanProperty(
'auto_translate',
default=False,
description='Choices:\n'
'`true`: Automatically translate words before looking up their definition.\n'
'`false`: Don\'t translate words before looking up their definition.'
)
])
@property
def scoped_property_manager(self):
return self._scoped_property_manager
@commands.group(
name='settings',
aliases=['p'],
help=PROPERTY_COMMAND_DESCRIPTION,
usage='(list <scope> | set <scope> <name> <value> | remove <scope> <name>)'
)
async def settings(self, context):
if context.invoked_subcommand is None:
raise commands.errors.ArgumentParsingError()
@settings.command(name='set')
async def set(self, context, scope_name: str, key: str, value: str):
await self._set(context, scope_name, key, value)
@cog_ext.cog_subcommand(
base='settings',
name='set',
description='Set a property.',
options=[
{
'name': 'scope',
'description': 'Property scope.',
'type': 3,
'required': True,
'choices': [
{
'name': 'guild',
'value': 'guild'
},
{
'name': 'channel',
'value': 'channel'
}
]
},
PROPERTY_NAME_OPTION,
{
'name': 'value',
'description': 'Property value.',
'type': 3,
'required': True
}
]
)
async def slash_set(self, context: SlashContext, scope: str, name: str, value: str):
await self._set(context, scope, name, value)
async def _set(self, context: Union[commands.Context, SlashContext], scope_name: str, key: str, value: str):
scope = self._get_scope_from_name(scope_name, context)
if scope is None:
if isinstance(context, SlashContext):
await context.defer(hidden=True)
await send_maybe_hidden(context, f'Invalid scope: `{scope_name}`! Must be either `guild` or `channel`.')
return
try:
self._scoped_property_manager.set(key, value, scope)
if isinstance(context, SlashContext):
await context.defer()
await context.send(f'Successfully set `{key}` to `{value}` in `{scope_name}`.')
except InvalidKeyError as e:
if isinstance(context, SlashContext):
await context.defer(hidden=True)
await send_maybe_hidden(context, f'Invalid key `{e.key}`')
except InvalidValueError as e:
if isinstance(context, SlashContext):
await context.defer(hidden=True)
await send_maybe_hidden(context, f'Invalid value `{e.value}` for key `{e.key}`.')
@settings.command(name='list')
async def list(self, context: commands.Context, scope_name: Optional[str] = 'all'):
await self._list(context, scope_name)
@cog_ext.cog_subcommand(
base='settings',
name='list',
description='Shows a list of guild or server settings.',
options=[
{
'name': 'scope',
'description': 'Property scope.',
'type': 3,
'choices': [
{
'name': 'all',
'value': 'all'
},
{
'name': 'guild',
'value': 'guild'
},
{
'name': 'channel',
'value': 'channel'
}
]
}
]
)
async def slash_list(self, context: SlashContext, scope: Optional[str] = 'all'):
await context.defer(hidden=True)
await self._list(context, scope)
def get_all(self, scope):
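        # guild/DM scopes report every property (falling back to defaults); channel scopes only report explicit overrides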
properties = {}
for p in self._scoped_property_manager.properties:
if isinstance(scope, (discord.Guild, discord.DMChannel)):
properties[p] = self._scoped_property_manager.get(p.key, scope)
elif isinstance(scope, discord.TextChannel):
value = self._scoped_property_manager.get(p.key, scope, recursive=False)
if value is not None:
properties[p] = value
return properties
async def _list(self, context: Union[commands.Context, SlashContext], scope_name: str = 'all'):
if scope_name == 'all':
reply = ''
for scope_name in ('guild', 'channel'):
scope = self._get_scope_from_name(scope_name, context)
if scope is not None:
properties = self.get_all(scope)
if len(properties) > 0:
reply += '\n' + self._print_properties(properties, scope)
await send_maybe_hidden(context, reply)
else:
scope = self._get_scope_from_name(scope_name, context)
if scope is None:
await send_maybe_hidden(context, f'Invalid scope: `{scope_name}`! Must be either `guild` or `channel`.')
return
properties = self.get_all(scope)
await send_maybe_hidden(context, self._print_properties(properties, scope))
@settings.command(name='remove')
async def remove(self, context: commands.Context, scope_name: str, key: str):
await self._remove(context, scope_name, key)
@cog_ext.cog_subcommand(
base='settings',
name='remove',
description='Remove a property.',
options=[
{
'name': 'scope',
'description': 'Property scope.',
'type': 3,
'required': True,
'choices': [
{
'name': 'guild',
'value': 'guild'
},
{
'name': 'channel',
'value': 'channel'
}
]
},
PROPERTY_NAME_OPTION
]
)
async def slash_remove(self, context: SlashContext, scope: str, name: str):
await self._remove(context, scope, name)
async def _remove(self, context: Union[commands.Context, SlashContext], scope_name: str, key: str):
scope = self._get_scope_from_name(scope_name, context)
if scope is None:
if isinstance(context, SlashContext):
await context.defer(hidden=True)
await send_maybe_hidden(context, f'Invalid scope: `{scope_name}`! Must be either `guild` or `channel`.')
return
try:
self._scoped_property_manager.remove(key, scope)
except InvalidKeyError:
await send_maybe_hidden(context, 'Invalid property name!')
return
await context.send(f'Successfully removed `{key}` from `{scope_name}`.')
@staticmethod
def _get_scope_from_name(scope_name: str, context: commands.Context):
try:
if scope_name == 'channel':
return context.channel
elif scope_name == 'guild':
return context.channel.guild
except AttributeError:
return None
return None
@staticmethod
def _print_properties(properties: {Property: Any}, scope: Union[discord.Guild, discord.TextChannel, discord.DMChannel]) -> str:
reply = ''
if isinstance(scope, discord.Guild):
reply += '__**Server Settings**__\n'
reply += 'These settings affect every channel in your server, unless they are overridden with a channel-specific setting.\n\n'
elif isinstance(scope, discord.TextChannel):
reply += '__**Channel Settings**__\n'
reply += 'These settings only affect this channel and take priority over server settings.\n\n'
elif isinstance(scope, discord.DMChannel):
reply += '__**DM Settings**__\n'
reply += 'Use `help settings` to see more info about settings.\n\n'
for p in sorted(properties, key=lambda x: x.key):
reply += f'**{p.key}**: `{p.to_string(properties[p])}`\n'
if len(properties) == 0:
reply += 'No properties set'
return reply
| [
"discord.ext.commands.group",
"discord_slash.cog_ext.cog_subcommand",
"discord.ext.commands.errors.ArgumentParsingError"
] | [((3827, 3995), 'discord.ext.commands.group', 'commands.group', ([], {'name': '"""settings"""', 'aliases': "['p']", 'help': 'PROPERTY_COMMAND_DESCRIPTION', 'usage': '"""(list <scope> | set <scope> <name> <value> | remove <scope> <name>)"""'}), "(name='settings', aliases=['p'], help=\n PROPERTY_COMMAND_DESCRIPTION, usage=\n '(list <scope> | set <scope> <name> <value> | remove <scope> <name>)')\n", (3841, 3995), False, 'from discord.ext import commands\n'), ((4338, 4728), 'discord_slash.cog_ext.cog_subcommand', 'cog_ext.cog_subcommand', ([], {'base': '"""settings"""', 'name': '"""set"""', 'description': '"""Set a property."""', 'options': "[{'name': 'scope', 'description': 'Property scope.', 'type': 3, 'required':\n True, 'choices': [{'name': 'guild', 'value': 'guild'}, {'name':\n 'channel', 'value': 'channel'}]}, PROPERTY_NAME_OPTION, {'name':\n 'value', 'description': 'Property value.', 'type': 3, 'required': True}]"}), "(base='settings', name='set', description=\n 'Set a property.', options=[{'name': 'scope', 'description':\n 'Property scope.', 'type': 3, 'required': True, 'choices': [{'name':\n 'guild', 'value': 'guild'}, {'name': 'channel', 'value': 'channel'}]},\n PROPERTY_NAME_OPTION, {'name': 'value', 'description':\n 'Property value.', 'type': 3, 'required': True}])\n", (4360, 4728), False, 'from discord_slash import SlashContext, cog_ext\n'), ((6616, 6940), 'discord_slash.cog_ext.cog_subcommand', 'cog_ext.cog_subcommand', ([], {'base': '"""settings"""', 'name': '"""list"""', 'description': '"""Shows a list of guild or server settings."""', 'options': "[{'name': 'scope', 'description': 'Property scope.', 'type': 3, 'choices':\n [{'name': 'all', 'value': 'all'}, {'name': 'guild', 'value': 'guild'},\n {'name': 'channel', 'value': 'channel'}]}]"}), "(base='settings', name='list', description=\n 'Shows a list of guild or server settings.', options=[{'name': 'scope',\n 'description': 'Property scope.', 'type': 3, 'choices': [{'name': 'all',\n 'value': 'all'}, {'name': 'guild', 'value': 'guild'}, {'name':\n 'channel', 'value': 'channel'}]}])\n", (6638, 6940), False, 'from discord_slash import SlashContext, cog_ext\n'), ((9153, 9463), 'discord_slash.cog_ext.cog_subcommand', 'cog_ext.cog_subcommand', ([], {'base': '"""settings"""', 'name': '"""remove"""', 'description': '"""Remove a property."""', 'options': "[{'name': 'scope', 'description': 'Property scope.', 'type': 3, 'required':\n True, 'choices': [{'name': 'guild', 'value': 'guild'}, {'name':\n 'channel', 'value': 'channel'}]}, PROPERTY_NAME_OPTION]"}), "(base='settings', name='remove', description=\n 'Remove a property.', options=[{'name': 'scope', 'description':\n 'Property scope.', 'type': 3, 'required': True, 'choices': [{'name':\n 'guild', 'value': 'guild'}, {'name': 'channel', 'value': 'channel'}]},\n PROPERTY_NAME_OPTION])\n", (9175, 9463), False, 'from discord_slash import SlashContext, cog_ext\n'), ((4128, 4166), 'discord.ext.commands.errors.ArgumentParsingError', 'commands.errors.ArgumentParsingError', ([], {}), '()\n', (4164, 4166), False, 'from discord.ext import commands\n')] |
# thumbget.py from somerandompiggo's superutils under the MIT License
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
wholepage = False # when True, the window is resized to the full page height so the whole page is captured; sites with infinite scroll seem to crash this, so it is disabled by default
def getpage():
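    # load the target page, optionally resize the window to the full document height, and save a screenshot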
driver.get("https://en.wikipedia.org/wiki/Main_Page")
driver.refresh()
if (wholepage == False):
height = 720
elif (wholepage == True):
height = driver.execute_script("return document.body.parentNode.scrollHeight")
driver.set_window_size(original_size['width'], height)
driver.save_screenshot("page.png") # end filename
driver.close()
options = Options()
options.add_argument("--headless") # this makes geckodriver headless meaning that no window pops up to screenshot. comment out this line to remove
driver = webdriver.Firefox(options=options,executable_path="/usr/bin/geckodriver")
driver.set_window_size(1280, 720)
original_size = driver.get_window_size()
getpage()
| [
"selenium.webdriver.firefox.options.Options",
"selenium.webdriver.Firefox"
] | [((734, 743), 'selenium.webdriver.firefox.options.Options', 'Options', ([], {}), '()\n', (741, 743), False, 'from selenium.webdriver.firefox.options import Options\n'), ((900, 974), 'selenium.webdriver.Firefox', 'webdriver.Firefox', ([], {'options': 'options', 'executable_path': '"""/usr/bin/geckodriver"""'}), "(options=options, executable_path='/usr/bin/geckodriver')\n", (917, 974), False, 'from selenium import webdriver\n')] |
"""
(c) Copyright JC 2018-2020 All Rights Reserved
-----------------------------------------------------------------------------
File Name :
Description :
Author : JC
Email : <EMAIL>
GitHub      : https://github.com/lsdlab
-----------------------------------------------------------------------------
"""
import factory
from faker import Faker
fake = Faker("zh_CN")
class AddressFactory(factory.django.DjangoModelFactory):
class Meta:
model = 'profiles.Address'
django_get_or_create = ('id', )
name = factory.Sequence(lambda n: 'name{}'.format(n))
mobile = fake.phone_number()
address = fake.text()
| [
"faker.Faker"
] | [((386, 400), 'faker.Faker', 'Faker', (['"""zh_CN"""'], {}), "('zh_CN')\n", (391, 400), False, 'from faker import Faker\n')] |
from dataclasses import dataclass
from typing import Optional
from logzero import setup_logger
from app import logging_setup, https
# Setting up logger object
log = setup_logger(name="models", **logging_setup)
class GQL:
class PageInfo:
has_next_page = False
end_cursor = "null"
def __init__(self, headers, endpoint="https://api.github.com/graphql"):
self.paging = GQL.PageInfo()
self.__endpoint = endpoint
self.__headers = headers
self.__query = ""
self.__query_template = ""
self.__query_results = {}
# self.__template_name = ""
self.__template_path = "app/queries"
self.__template_variables = dict(
AFTER_CURSOR=f"after: {self.paging.end_cursor}",
STARS_FILTER=f"stars:>1"
)
@property
def endpoint(self):
return self.__endpoint
@property
def headers(self):
return self.__headers
def set_headers(self, headers):
if type(headers) is dict:
self.__headers = headers
else:
log.error(f"{self.__class__}.set_headers(): headers must be a dict")
@property
def query(self):
return self.__query
def set_query(self, query):
if type(query) is str:
self.__query = query
else:
log.error(f"{self.__class__}.set_query(): query must be a str")
@property
def query_results(self):
return self.__query_results
def set_query_results(self, results_json):
if type(results_json) is dict:
if "pageInfo" in results_json["data"]["search"]:
paging = results_json["data"]["search"]["pageInfo"]
self.paging.has_next_page = paging["hasNextPage"]
self.paging.end_cursor = f'"{paging["endCursor"]}"'
self.__query_results = results_json["data"]["search"]
else:
log.error(
f"{self.__class__}.set_query_results(): "
f"results_json value must be a dict"
)
@property
def query_template(self):
return self.__query_template
@property
def template_path(self):
return self.__template_path
@property
def template_variables(self):
return self.__template_variables
def set_template_variables(self, **kwargs):
self.__template_variables = kwargs
def load_query(self, template_name):
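        # read the GraphQL query template from disk and substitute its <VARIABLE> placeholders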
template = self._load_query_template(template_name, self.__template_path)
self.set_query(self._setup_query(template, self.template_variables))
def reload_query(self):
if self.query_template:
self.set_query(
self._setup_query(self.query_template, self.template_variables)
)
def _load_query_template(self, template_name, template_path):
with open(f"{template_path}/{template_name}") as f:
self.__query_template = f.read()
return self.query_template
@staticmethod
def _setup_query(query_template, var_dict):
query = query_template
for key in var_dict.keys():
query = query.replace(f"<{key}>", var_dict[key])
return query
def run_query(self, retry=2, raw_response=False):
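        # POST the query with retries; a 403 response means GitHub's abuse-detection mechanism was triggered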
for i in range(-1, retry):
response = https.post(
url=self.endpoint, headers=self.headers, json=dict(query=self.query)
)
if raw_response:
return response
if "X-RateLimit-Remaining" in response.headers:
log.debug(
f"{self.__class__}.run_query({self.__hash__()}): "
f"X-RateLimit-Remaining={response.headers['X-RateLimit-Remaining']}"
)
else:
log.debug(
f"{self.__class__}.run_query({self.__hash__()}): "
f"response[{response.status_code}].text={response.text}"
)
if response.status_code == 200:
self.set_query_results(response.json())
return self.query_results
elif response.status_code == 403:
log.debug(
f"{self.__class__}.run_query({self.__hash__()}): "
f"self.query={self.query}"
)
raise ConnectionRefusedError(
f"Triggered API abuse mechanism! (hash={self.__hash__()})"
)
log.warning(
f"Query attempt #{i + 2} failed (status_code={response.status_code})"
)
log.error(f"Giving up on query (hash={self.__hash__()})")
log.debug(
f"{self.__class__}.run_query({self.__hash__()}): self.query={self.query})"
)
def next_page(self):
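        # advance the pagination cursor and re-run the query; returns False when there are no more pages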
if not self.paging.has_next_page:
return False
self.template_variables["AFTER_CURSOR"] = f"after:{self.paging.end_cursor}"
self.reload_query()
return self.run_query()
@dataclass
class Repo:
id: str = ""
ssh_url: str = ""
created_at: str = ""
updated_at: str = ""
license_info_name: str = ""
is_fork: Optional[bool] = None
is_in_organization: Optional[bool] = None
stargazers_total_count: int = -1
watchers_total_count: int = -1
forks_total_count: int = -1
releases_total_count: int = -1
commit_comments_total_count: int = -1
collaborators_total_count: int = -1
collaborators_direct_count: int = -1
collaborators_outside_count: int = -1
pull_requests_total_count: int = -1
pull_requests_open_count: int = -1
issues_total_count: int = -1
issues_open_count: int = -1
def setup_via_json(self, json):
"""
json param must correspond to GQL query
"""
if type(json) is dict:
self.id = json["id"]
self.ssh_url = json["sshUrl"]
self.created_at = json["createdAt"]
self.updated_at = json["updatedAt"]
self.is_fork = json["isFork"]
self.is_in_organization = json["isInOrganization"]
if json["licenseInfo"]:
self.license_info_name = json["licenseInfo"]["name"]
self.stargazers_total_count = json["stargazers"]["totalCount"]
self.watchers_total_count = json["watchers"]["totalCount"] if json["watchers"] else 0
self.forks_total_count = json["forks"]["totalCount"] if json["forks"] else 0
self.releases_total_count = json["releases"]["totalCount"] if json["releases"] else 0
self.commit_comments_total_count = json["commitComments"]["totalCount"] if json["commitComments"] else 0
self.collaborators_total_count = json["collaborators"]["totalCount"] if json["collaborators"] else 0
self.collaborators_direct_count = json["collaboratorsDirect"]["totalCount"] if json["collaboratorsDirect"] else 0
self.collaborators_outside_count = json["collaboratorsOutside"]["totalCount"] if json["collaboratorsOutside"] else 0
self.pull_requests_total_count = json["pullRequests"]["totalCount"] if json["pullRequests"] else 0
self.pull_requests_open_count = json["pullRequestsOpen"]["totalCount"] if json["pullRequestsOpen"] else 0
self.issues_total_count = json["issues"]["totalCount"] if json["issues"] else 0
self.issues_open_count = json["issuesOpen"]["totalCount"] if json["issuesOpen"] else 0
else:
log.error(f"{self.__class__}.setup_via_json(): json must be a dict")
def export_repo_info_as_json(self):
return dict(
id=self.id,
ssh_url=self.ssh_url,
created_at=self.created_at,
updated_at=self.updated_at,
stars=self.stargazers_total_count,
license=self.license_info_name,
is_fork=self.is_fork,
in_org=self.is_in_organization,
watchers=self.watchers_total_count,
forks=self.forks_total_count,
releases=self.releases_total_count,
commit_comments=self.commit_comments_total_count,
collaborators=self.collaborators_total_count,
collab_direct=self.collaborators_direct_count,
collab_outside=self.collaborators_outside_count,
prs=self.pull_requests_total_count,
prs_open=self.pull_requests_open_count,
issues=self.issues_total_count,
issues_open=self.issues_open_count,
)
class Repository:
def __init__(self):
self.__id = ""
self.__owner = ""
self.__name = ""
self.__url = ""
self.__created_at = ""
self.__updated_at = ""
self.__primary_language_name = ""
# self.__license = None
self.__license_info_name = ""
# self.__stargazers = []
self.__stargazers_total_count = -1
# self.__watchers = []
self.__watchers_total_count = -1
# self.__forks = []
self.__forks_total_count = -1
# self.__releases = []
self.__releases_total_count = -1
self.__pull_requests = []
self.__pull_requests_total_count = -1
self.__pull_requests_open_count = -1
# self.__issues = []
self.__issues_total_count = -1
self.__issues_open_count = -1
self.__issues_open_old_count = -1
@property
def id(self):
return self.__id
def set_id(self, id):
self.__id = id
@property
def owner(self):
return self.__owner
def set_owner(self, owner):
if type(owner) is str:
self.__owner = owner
else:
log.error(f"{self.__class__}.set_owner(): owner must be a str")
@property
def name(self):
return self.__name
def set_name(self, name):
if type(name) is str:
self.__name = name
else:
log.error(f"{self.__class__}.set_name(): name must be a str")
@property
def name_with_owner(self):
return f"{self.owner}/{self.name}"
@property
def url(self):
return self.__url
def set_url(self, url):
if type(url) is str:
self.__url = url
else:
log.error(f"{self.__class__}.set_url(): url must be a str")
@property
def created_at(self):
return self.__created_at
def set_created_at(self, created_at):
self.__created_at = created_at
@property
def updated_at(self):
return self.__updated_at
def set_updated_at(self, updated_at):
self.__updated_at = updated_at
@property
def primary_language_name(self):
return self.__primary_language_name
def set_primary_language_name(self, language_name):
if type(language_name) is str:
self.__primary_language_name = language_name
else:
log.error(
f"{self.__class__}.set_primary_language_name(): "
f"language_name must be a str"
)
@property
def license_info_name(self):
return self.__license_info_name
def set_license_info_name(self, license_name):
if type(license_name) is str:
self.__license_info_name = license_name
else:
log.error(
f"{self.__class__}.set_license_info_name():"
f"license_name must be a str"
)
@property
def stargazers_total_count(self):
return self.__stargazers_total_count
def set_stargazers_total_count(self, stargazers_count):
if type(stargazers_count) is int and stargazers_count >= 0:
self.__stargazers_total_count = stargazers_count
else:
log.error(
f"{self.__class__}.set_stargazers_total_count(): "
f"stargazers_count must be an int >=0"
)
@property
def forks_total_count(self):
return self.__forks_total_count
def set_forks_total_count(self, forks_count):
if type(forks_count) is int and forks_count >= 0:
self.__forks_total_count = forks_count
else:
log.error(
f"{self.__class__}.set_forks_total_count(): "
f"forks_count must be an int >=0"
)
@property
def releases_total_count(self):
return self.__releases_total_count
def set_releases_total_count(self, releases_count):
if type(releases_count) is int and releases_count >= 0:
self.__releases_total_count = releases_count
else:
log.error(
f"{self.__class__}.set_releases_total_count(): "
f"releases_count must be an int >=0"
)
@property
def pull_requests(self):
return self.__pull_requests
def set_pull_requests(self, pr_list):
self.__pull_requests = pr_list
@property
def pull_requests_total_count(self):
return self.__pull_requests_total_count
def set_pull_requests_total_count(self, pull_requests_count):
if type(pull_requests_count) is int and pull_requests_count >= 0:
self.__pull_requests_total_count = pull_requests_count
else:
log.error(
f"{self.__class__}.set_pull_requests_total_count(): "
f"pull_requests_count must be an int >=0"
)
@property
def pull_requests_open_count(self):
return self.__pull_requests_open_count
def set_pull_requests_open_count(self, pull_requests_count):
if type(pull_requests_count) is int and pull_requests_count >= 0:
self.__pull_requests_open_count = pull_requests_count
else:
log.error(
f"{self.__class__}.set_pull_requests_open_count(): "
f"pull_requests_count must be an int >=0"
)
@property
def issues_total_count(self):
return self.__issues_total_count
def set_issues_total_count(self, issues_count):
if type(issues_count) is int and issues_count >= 0:
self.__issues_total_count = issues_count
else:
log.error(
f"{self.__class__}.set_issues_total_count(): "
f"issues_count must be an int >=0"
)
@property
def issues_open_count(self):
return self.__issues_open_count
def set_issues_open_count(self, issues_count):
if type(issues_count) is int and issues_count >= 0:
self.__issues_open_count = issues_count
else:
log.error(
f"{self.__class__}.set_issues_open_count(): "
f"issues_count must be an int >=0"
)
@property
def issues_open_old_count(self):
return self.__issues_open_old_count
def set_issues_open_old_count(self, issues_count):
if type(issues_count) is int and issues_count >= 0:
self.__issues_open_old_count = issues_count
else:
log.error(
f"{self.__class__}.set_issues_open_old_count(): "
f"issues_count must be an int >=0"
)
def setup_via_json(self, json):
"""
json param must correspond to queries/top_python_repositories.gql GQL query
"""
if type(json) is dict:
self.set_owner(json["nameWithOwner"].split("/")[0])
self.set_name(json["nameWithOwner"].split("/")[1])
self.set_url(json["url"])
self.set_stargazers_total_count(json["stargazers"]["totalCount"])
# if json["primaryLanguage"]:
# self.set_primary_language_name(json["primaryLanguage"]["name"])
else:
log.error(f"{self.__class__}.setup_via_json(): json must be a dict")
def export_repo_info_as_json(self):
return dict(
repo=self.name_with_owner,
url=self.url,
stars=self.stargazers_total_count,
)
def repository(node_json):
repo = Repository()
repo.setup_via_json(node_json)
return repo
def repo_dataclass(node_json):
repo = Repo()
repo.setup_via_json(node_json)
return repo
| [
"logzero.setup_logger"
] | [((176, 220), 'logzero.setup_logger', 'setup_logger', ([], {'name': '"""models"""'}), "(name='models', **logging_setup)\n", (188, 220), False, 'from logzero import setup_logger\n')] |
from airflow import DAG
from airflow.operators.python_operator import PythonOperator, BranchPythonOperator
from airflow.operators.dummy_operator import DummyOperator
import logging
import datetime
import twint
#directly import
# from sensors.gcs_bq_custom_sensor import GoogleCloudStorageBigQueryUpdateSensor
#using airflow plugin
from airflow.sensors import GoogleCloudStorageBigQueryUpdateSensor
from airflow.contrib.operators import gcs_to_bq
from google.cloud import storage
from google.oauth2 import service_account
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': datetime.datetime(2020, 1, 1),
'email_on_failure': False,
'email_on_retry': False,
'retries': 1
}
def scrapeYesterdayTwitter(bucket_name, project, credentials_path: str=None, **kwargs):
"""setting up the google credentials"""
credentials = service_account.Credentials.from_service_account_file(credentials_path) if credentials_path else None
storage_client = storage.Client(project=project, credentials=credentials)
bucket = storage_client.bucket(bucket_name)
#get_yesterday
yesterday = datetime.datetime.today() - datetime.timedelta(days=1)
#setting up twitter scraper
tweetConfig = twint.Config()
searchTerm = "coronavirus"
tweetConfig.Search = searchTerm
tweetConfig.Since = f"{yesterday.strftime('%Y-%m-%d')} 00:00:00"
tweetConfig.Until = f"{datetime.datetime.today().strftime('%Y-%m-%d')} 00:00:00"
tweetConfig.Lang = "en"
tweetConfig.Verified = True
#storing the result in the pandas dataframe
tweetConfig.Pandas = True
tweetConfig.Limit = 100
tweetConfig.Stats = False
tweetConfig.Hide_output = True
twint.run.Search(tweetConfig)
Tweets_df = twint.storage.panda.Tweets_df
filename = f"tweet-{searchTerm}-{yesterday.strftime('%Y-%m-%d')}"
bucket.blob('{}/{}.csv'.format("airflowTweet", filename)).upload_from_string(Tweets_df.to_csv(), 'text/csv')
blob = bucket.get_blob('{}/{}.csv'.format("airflowTweet", filename))
blob.metadata = {'updatedTime': datetime.datetime.now()}
blob.patch()
logging.info('{}/{}.csv has been uploaded.'.format("airflowTweet", filename))
def checkingYesterdayTweet(bucket_name, project, credentials_path, **kwargs):
"""
If the data CSV file was saved, it triggers the All_jobs_end task;
    else it sets off the tweeter-yesterday-scraper task.
"""
credentials = service_account.Credentials.from_service_account_file(credentials_path) if credentials_path else None
storage_client = storage.Client(project=project, credentials=credentials)
bucket_name = "storage-ark2"
bucket = storage_client.get_bucket(bucket_name)
yesterday = datetime.datetime.today() - datetime.timedelta(days=1)
searchTerm = "coronavirus"
filename = f"tweet-{searchTerm}-{yesterday.strftime('%Y-%m-%d')}"
    if bucket.blob('{}/{}.csv'.format("airflowTweet", filename)).exists():
logging.info('this file exist: {}/{}.csv'.format("airflowTweet", filename))
return "All_jobs_end"
logging.info('this file does not exist: {}/{}.csv'.format("airflowTweet", filename))
return "tweeter-yesterday-scraper"
dag = DAG('UpdateYesterdayTweet2BigQuery', default_args=default_args, schedule_interval="@daily", catchup=False)
with dag:
check_modified_date_sensor = GoogleCloudStorageBigQueryUpdateSensor(
task_id='check_modified_date_sensor',
project='project-ark2',
credentials_path='/usr/local/airflow/dags/project-ark2-b9b253cd02fa.json',
timeout=60*60*24, # timeout in 1 day
poke_interval=60*60*1, # checking files every 1 hours
)
GCS_to_BQ = gcs_to_bq.GoogleCloudStorageToBigQueryOperator(
task_id='gcs_to_bq',
bucket='storage-ark2',
source_objects=['airflowTweet/*.csv'],
destination_project_dataset_table='tweetScraper.tweet',
skip_leading_rows=1,
source_format='CSV',
create_disposition='CREATE_IF_NEEDED',
write_disposition='WRITE_TRUNCATE',
bigquery_conn_id='google_cloud_default',
allow_quoted_newlines=True, #allows for newlines
allow_jagged_rows=True, #allows for missing values
autodetect=True
)
dummy_shut_down = DummyOperator(
task_id='All_jobs_end')
checkingYesterdayTweet = BranchPythonOperator(
task_id='branching',
python_callable=checkingYesterdayTweet,
op_kwargs={'bucket_name': 'storage-ark2',
'project': 'project-ark2',
'credentials_path': '/usr/local/airflow/dags/project-ark2-b9b253cd02fa.json'},
provide_context=True)
scrapeYesterdayData = PythonOperator(
task_id="tweeter-yesterday-scraper",
python_callable=scrapeYesterdayTwitter,
provide_context=True,
op_kwargs={'bucket_name': 'storage-ark2',
'project': 'project-ark2',
'credentials_path': '/usr/local/airflow/dags/project-ark2-b9b253cd02fa.json'},
)
[checkingYesterdayTweet, check_modified_date_sensor] >> GCS_to_BQ >> dummy_shut_down | [
"datetime.datetime",
"google.cloud.storage.Client",
"twint.Config",
"airflow.sensors.GoogleCloudStorageBigQueryUpdateSensor",
"google.oauth2.service_account.Credentials.from_service_account_file",
"airflow.operators.dummy_operator.DummyOperator",
"airflow.operators.python_operator.PythonOperator",
"twint.run.Search",
"datetime.datetime.now",
"airflow.contrib.operators.gcs_to_bq.GoogleCloudStorageToBigQueryOperator",
"airflow.DAG",
"datetime.datetime.today",
"datetime.timedelta",
"airflow.operators.python_operator.BranchPythonOperator"
] | [((3172, 3282), 'airflow.DAG', 'DAG', (['"""UpdateYesterdayTweet2BigQuery"""'], {'default_args': 'default_args', 'schedule_interval': '"""@daily"""', 'catchup': '(False)'}), "('UpdateYesterdayTweet2BigQuery', default_args=default_args,\n schedule_interval='@daily', catchup=False)\n", (3175, 3282), False, 'from airflow import DAG\n'), ((613, 642), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (630, 642), False, 'import datetime\n'), ((997, 1053), 'google.cloud.storage.Client', 'storage.Client', ([], {'project': 'project', 'credentials': 'credentials'}), '(project=project, credentials=credentials)\n', (1011, 1053), False, 'from google.cloud import storage\n'), ((1243, 1257), 'twint.Config', 'twint.Config', ([], {}), '()\n', (1255, 1257), False, 'import twint\n'), ((1715, 1744), 'twint.run.Search', 'twint.run.Search', (['tweetConfig'], {}), '(tweetConfig)\n', (1731, 1744), False, 'import twint\n'), ((2567, 2623), 'google.cloud.storage.Client', 'storage.Client', ([], {'project': 'project', 'credentials': 'credentials'}), '(project=project, credentials=credentials)\n', (2581, 2623), False, 'from google.cloud import storage\n'), ((3323, 3561), 'airflow.sensors.GoogleCloudStorageBigQueryUpdateSensor', 'GoogleCloudStorageBigQueryUpdateSensor', ([], {'task_id': '"""check_modified_date_sensor"""', 'project': '"""project-ark2"""', 'credentials_path': '"""/usr/local/airflow/dags/project-ark2-b9b253cd02fa.json"""', 'timeout': '(60 * 60 * 24)', 'poke_interval': '(60 * 60 * 1)'}), "(task_id='check_modified_date_sensor',\n project='project-ark2', credentials_path=\n '/usr/local/airflow/dags/project-ark2-b9b253cd02fa.json', timeout=60 * \n 60 * 24, poke_interval=60 * 60 * 1)\n", (3361, 3561), False, 'from airflow.sensors import GoogleCloudStorageBigQueryUpdateSensor\n'), ((3654, 4092), 'airflow.contrib.operators.gcs_to_bq.GoogleCloudStorageToBigQueryOperator', 'gcs_to_bq.GoogleCloudStorageToBigQueryOperator', ([], {'task_id': '"""gcs_to_bq"""', 'bucket': '"""storage-ark2"""', 'source_objects': "['airflowTweet/*.csv']", 'destination_project_dataset_table': '"""tweetScraper.tweet"""', 'skip_leading_rows': '(1)', 'source_format': '"""CSV"""', 'create_disposition': '"""CREATE_IF_NEEDED"""', 'write_disposition': '"""WRITE_TRUNCATE"""', 'bigquery_conn_id': '"""google_cloud_default"""', 'allow_quoted_newlines': '(True)', 'allow_jagged_rows': '(True)', 'autodetect': '(True)'}), "(task_id='gcs_to_bq', bucket=\n 'storage-ark2', source_objects=['airflowTweet/*.csv'],\n destination_project_dataset_table='tweetScraper.tweet',\n skip_leading_rows=1, source_format='CSV', create_disposition=\n 'CREATE_IF_NEEDED', write_disposition='WRITE_TRUNCATE',\n bigquery_conn_id='google_cloud_default', allow_quoted_newlines=True,\n allow_jagged_rows=True, autodetect=True)\n", (3700, 4092), False, 'from airflow.contrib.operators import gcs_to_bq\n'), ((4240, 4277), 'airflow.operators.dummy_operator.DummyOperator', 'DummyOperator', ([], {'task_id': '"""All_jobs_end"""'}), "(task_id='All_jobs_end')\n", (4253, 4277), False, 'from airflow.operators.dummy_operator import DummyOperator\n'), ((4317, 4585), 'airflow.operators.python_operator.BranchPythonOperator', 'BranchPythonOperator', ([], {'task_id': '"""branching"""', 'python_callable': 'checkingYesterdayTweet', 'op_kwargs': "{'bucket_name': 'storage-ark2', 'project': 'project-ark2',\n 'credentials_path':\n '/usr/local/airflow/dags/project-ark2-b9b253cd02fa.json'}", 'provide_context': '(True)'}), "(task_id='branching', python_callable=\n 
checkingYesterdayTweet, op_kwargs={'bucket_name': 'storage-ark2',\n 'project': 'project-ark2', 'credentials_path':\n '/usr/local/airflow/dags/project-ark2-b9b253cd02fa.json'},\n provide_context=True)\n", (4337, 4585), False, 'from airflow.operators.python_operator import PythonOperator, BranchPythonOperator\n'), ((4669, 4943), 'airflow.operators.python_operator.PythonOperator', 'PythonOperator', ([], {'task_id': '"""tweeter-yesterday-scraper"""', 'python_callable': 'scrapeYesterdayTwitter', 'provide_context': '(True)', 'op_kwargs': "{'bucket_name': 'storage-ark2', 'project': 'project-ark2',\n 'credentials_path':\n '/usr/local/airflow/dags/project-ark2-b9b253cd02fa.json'}"}), "(task_id='tweeter-yesterday-scraper', python_callable=\n scrapeYesterdayTwitter, provide_context=True, op_kwargs={'bucket_name':\n 'storage-ark2', 'project': 'project-ark2', 'credentials_path':\n '/usr/local/airflow/dags/project-ark2-b9b253cd02fa.json'})\n", (4683, 4943), False, 'from airflow.operators.python_operator import PythonOperator, BranchPythonOperator\n'), ((874, 945), 'google.oauth2.service_account.Credentials.from_service_account_file', 'service_account.Credentials.from_service_account_file', (['credentials_path'], {}), '(credentials_path)\n', (927, 945), False, 'from google.oauth2 import service_account\n'), ((1138, 1163), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (1161, 1163), False, 'import datetime\n'), ((1166, 1192), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1184, 1192), False, 'import datetime\n'), ((2086, 2109), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2107, 2109), False, 'import datetime\n'), ((2443, 2514), 'google.oauth2.service_account.Credentials.from_service_account_file', 'service_account.Credentials.from_service_account_file', (['credentials_path'], {}), '(credentials_path)\n', (2496, 2514), False, 'from google.oauth2 import service_account\n'), ((2725, 2750), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (2748, 2750), False, 'import datetime\n'), ((2753, 2779), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (2771, 2779), False, 'import datetime\n'), ((1421, 1446), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (1444, 1446), False, 'import datetime\n')] |
import asyncio
from collections import namedtuple
from typing import List
from jobs.base import BaseFetcher, INotified
from jobs.defipulse_job import DefiPulseKeeper
from lib.cooldown import OnceADay, Cooldown
from lib.datetime import parse_timespan_to_seconds, now_ts, HOUR, MINUTE, DAY, parse_time, \
is_time_to_do
from lib.depcont import DepContainer
from lib.texts import MessageType
from lib.utils import circular_shuffled_iterator
from localization import LocalizationManager
from models.models import CoinPriceInfo, PriceReport, PriceHistoricalTriplet, DefiPulseEntry, PriceATH
PriceAndDate = namedtuple('PriceAndDate', ('timestamp', 'price'))
class PriceFetcher(BaseFetcher):
ALPHA_GECKO_NAME = 'alpha-finance'
COIN_RANK_GECKO = "https://api.coingecko.com/api/v3/coins/{coin}?" \
"localization=false&tickers=false&market_data=false&" \
"community_data=false&developer_data=false&sparkline=false"
COIN_PRICE_GECKO = "https://api.coingecko.com/api/v3/simple/price?" \
"ids={coin}&vs_currencies=usd%2Cbtc&include_market_cap=true&include_24hr_change=true"
COIN_PRICE_HISTORY_GECKO = "https://api.coingecko.com/api/v3/coins/{coin}/market_chart/range?" \
"vs_currency=usd&from={t_from}&to={t_to}"
def __init__(self, deps: DepContainer):
cfg = deps.cfg.data_source.coin_gecko
super().__init__(deps, parse_timespan_to_seconds(cfg.fetch_period))
async def fetch(self) -> PriceReport:
self.logger.info('start job')
now = now_ts()
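        # fetch the coin rank, spot price and three historical windows (1h, 24h and 7d ago) concurrently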
rank, price_data, p_1h, p_24h, p_7d = await asyncio.gather(
self._fetch_rank(),
self._fetch_price(),
self._fetch_price_history(t_from=now - HOUR - MINUTE * 5, t_to=now - HOUR + MINUTE * 5),
self._fetch_price_history(t_from=now - DAY - MINUTE * 15, t_to=now - DAY + MINUTE * 15),
self._fetch_price_history(t_from=now - DAY * 7 - HOUR, t_to=now - DAY * 7 + HOUR),
)
price_data: CoinPriceInfo
price_data.rank = rank
return PriceReport(
price_and_cap=price_data,
price_change=PriceHistoricalTriplet(
price_7d=p_7d[0].price,
price_24h=p_24h[0].price,
price_1h=p_1h[0].price
),
defipulse=DefiPulseEntry(),
price_ath=PriceATH(),
is_ath=False
)
async def _fetch_price(self):
url = self.COIN_PRICE_GECKO.format(coin=self.ALPHA_GECKO_NAME)
async with self.deps.session.get(url) as reps:
response_j = await reps.json()
result = CoinPriceInfo(**response_j.get(self.ALPHA_GECKO_NAME, {}))
self.logger.info(f'got gecko current price {self.ALPHA_GECKO_NAME!r}: {result}')
return result
async def _fetch_rank(self) -> int:
url = self.COIN_RANK_GECKO.format(coin=self.ALPHA_GECKO_NAME)
async with self.deps.session.get(url) as reps:
response_j = await reps.json()
rank = int(response_j.get('market_cap_rank', 0))
self.logger.info(f'got gecko rank {self.ALPHA_GECKO_NAME!r} -> #{rank}')
return rank
async def _fetch_price_history(self, t_from, t_to) -> List[PriceAndDate]:
url = self.COIN_PRICE_HISTORY_GECKO.format(coin=self.ALPHA_GECKO_NAME, t_from=t_from, t_to=t_to)
async with self.deps.session.get(url) as reps:
response_j = await reps.json()
prices = response_j.get('prices', [])
self.logger.info(f'got gecko price range {self.ALPHA_GECKO_NAME!r} from {t_from} to {t_to}')
return [PriceAndDate(*p) for p in prices]
class PriceHandler(INotified):
KEY_ATH = 'alpha:price:ath'
def __init__(self, deps: DepContainer):
self.deps = deps
self.cfg = deps.cfg.notifications.price
self.stickers = self.cfg.ath.stickers
self.notification_period = parse_timespan_to_seconds(self.cfg.period)
self.ath_sticker_iter = circular_shuffled_iterator(self.stickers)
self.daily_once = OnceADay(self.deps.db, 'PriceDaily')
self.regular_price_cd = Cooldown(self.deps.db, 'regular_price_notification', self.notification_period)
async def get_prev_ath(self) -> PriceATH:
try:
await self.deps.db.get_redis()
ath_str = await self.deps.db.redis.get(self.KEY_ATH)
if ath_str is None:
return PriceATH()
else:
return PriceATH.from_json(ath_str)
except (TypeError, ValueError, AttributeError):
return PriceATH()
async def reset_ath(self):
await self.deps.db.redis.delete(self.KEY_ATH)
async def update_ath(self, price):
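        # persist a new all-time high only when the incoming price beats the stored one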
last_ath = await self.get_prev_ath()
if price and price > 0 and last_ath.is_new_ath(price):
await self.deps.db.get_redis()
await self.deps.db.redis.set(self.KEY_ATH, PriceATH(
int(now_ts()), price
).to_json())
return True
return False
async def send_ath_sticker(self):
if self.ath_sticker_iter:
sticker = next(self.ath_sticker_iter)
user_lang_map = self.deps.broadcaster.telegram_chats_from_config(self.deps.loc_man)
await self.deps.broadcaster.broadcast(user_lang_map.keys(), sticker, message_type=MessageType.STICKER)
async def send_notification(self, p: PriceReport):
loc_man: LocalizationManager = self.deps.loc_man
text = loc_man.default.notification_text_price_update(p)
user_lang_map = self.deps.broadcaster.telegram_chats_from_config(self.deps.loc_man)
await self.deps.broadcaster.broadcast(user_lang_map.keys(), text)
if p.is_ath:
await self.send_ath_sticker()
def _is_it_time_for_regular_message(self):
h, m = parse_time(self.cfg.time_of_day)
return is_time_to_do(h, m)
async def on_data(self, sender, p: PriceReport):
d: DefiPulseKeeper = self.deps.defipulse
# p.price_and_cap.usd = 3.02 # todo: for ATH debugging
p.defipulse = await d.get_last_state()
p.price_ath = await self.get_prev_ath()
p.is_ath = (await self.update_ath(p.price_and_cap.usd))
if p.is_ath:
await self.send_notification(p)
elif self._is_it_time_for_regular_message():
if await self.regular_price_cd.can_do():
await self.send_notification(p)
await self.regular_price_cd.do()
# if await self.daily_once.can_do():
# await self.send_notification(p)
# await self.daily_once.write_today()
| [
"models.models.PriceHistoricalTriplet",
"collections.namedtuple",
"models.models.PriceATH.from_json",
"lib.utils.circular_shuffled_iterator",
"lib.cooldown.OnceADay",
"lib.cooldown.Cooldown",
"lib.datetime.parse_time",
"lib.datetime.is_time_to_do",
"lib.datetime.now_ts",
"models.models.DefiPulseEntry",
"models.models.PriceATH",
"lib.datetime.parse_timespan_to_seconds"
] | [((606, 656), 'collections.namedtuple', 'namedtuple', (['"""PriceAndDate"""', "('timestamp', 'price')"], {}), "('PriceAndDate', ('timestamp', 'price'))\n", (616, 656), False, 'from collections import namedtuple\n'), ((1586, 1594), 'lib.datetime.now_ts', 'now_ts', ([], {}), '()\n', (1592, 1594), False, 'from lib.datetime import parse_timespan_to_seconds, now_ts, HOUR, MINUTE, DAY, parse_time, is_time_to_do\n'), ((3999, 4041), 'lib.datetime.parse_timespan_to_seconds', 'parse_timespan_to_seconds', (['self.cfg.period'], {}), '(self.cfg.period)\n', (4024, 4041), False, 'from lib.datetime import parse_timespan_to_seconds, now_ts, HOUR, MINUTE, DAY, parse_time, is_time_to_do\n'), ((4074, 4115), 'lib.utils.circular_shuffled_iterator', 'circular_shuffled_iterator', (['self.stickers'], {}), '(self.stickers)\n', (4100, 4115), False, 'from lib.utils import circular_shuffled_iterator\n'), ((4142, 4178), 'lib.cooldown.OnceADay', 'OnceADay', (['self.deps.db', '"""PriceDaily"""'], {}), "(self.deps.db, 'PriceDaily')\n", (4150, 4178), False, 'from lib.cooldown import OnceADay, Cooldown\n'), ((4211, 4289), 'lib.cooldown.Cooldown', 'Cooldown', (['self.deps.db', '"""regular_price_notification"""', 'self.notification_period'], {}), "(self.deps.db, 'regular_price_notification', self.notification_period)\n", (4219, 4289), False, 'from lib.cooldown import OnceADay, Cooldown\n'), ((5932, 5964), 'lib.datetime.parse_time', 'parse_time', (['self.cfg.time_of_day'], {}), '(self.cfg.time_of_day)\n', (5942, 5964), False, 'from lib.datetime import parse_timespan_to_seconds, now_ts, HOUR, MINUTE, DAY, parse_time, is_time_to_do\n'), ((5980, 5999), 'lib.datetime.is_time_to_do', 'is_time_to_do', (['h', 'm'], {}), '(h, m)\n', (5993, 5999), False, 'from lib.datetime import parse_timespan_to_seconds, now_ts, HOUR, MINUTE, DAY, parse_time, is_time_to_do\n'), ((1445, 1488), 'lib.datetime.parse_timespan_to_seconds', 'parse_timespan_to_seconds', (['cfg.fetch_period'], {}), '(cfg.fetch_period)\n', (1470, 1488), False, 'from lib.datetime import parse_timespan_to_seconds, now_ts, HOUR, MINUTE, DAY, parse_time, is_time_to_do\n'), ((2193, 2293), 'models.models.PriceHistoricalTriplet', 'PriceHistoricalTriplet', ([], {'price_7d': 'p_7d[0].price', 'price_24h': 'p_24h[0].price', 'price_1h': 'p_1h[0].price'}), '(price_7d=p_7d[0].price, price_24h=p_24h[0].price,\n price_1h=p_1h[0].price)\n', (2215, 2293), False, 'from models.models import CoinPriceInfo, PriceReport, PriceHistoricalTriplet, DefiPulseEntry, PriceATH\n'), ((2375, 2391), 'models.models.DefiPulseEntry', 'DefiPulseEntry', ([], {}), '()\n', (2389, 2391), False, 'from models.models import CoinPriceInfo, PriceReport, PriceHistoricalTriplet, DefiPulseEntry, PriceATH\n'), ((2415, 2425), 'models.models.PriceATH', 'PriceATH', ([], {}), '()\n', (2423, 2425), False, 'from models.models import CoinPriceInfo, PriceReport, PriceHistoricalTriplet, DefiPulseEntry, PriceATH\n'), ((4513, 4523), 'models.models.PriceATH', 'PriceATH', ([], {}), '()\n', (4521, 4523), False, 'from models.models import CoinPriceInfo, PriceReport, PriceHistoricalTriplet, DefiPulseEntry, PriceATH\n'), ((4565, 4592), 'models.models.PriceATH.from_json', 'PriceATH.from_json', (['ath_str'], {}), '(ath_str)\n', (4583, 4592), False, 'from models.models import CoinPriceInfo, PriceReport, PriceHistoricalTriplet, DefiPulseEntry, PriceATH\n'), ((4668, 4678), 'models.models.PriceATH', 'PriceATH', ([], {}), '()\n', (4676, 4678), False, 'from models.models import CoinPriceInfo, PriceReport, PriceHistoricalTriplet, DefiPulseEntry, 
PriceATH\n'), ((5041, 5049), 'lib.datetime.now_ts', 'now_ts', ([], {}), '()\n', (5047, 5049), False, 'from lib.datetime import parse_timespan_to_seconds, now_ts, HOUR, MINUTE, DAY, parse_time, is_time_to_do\n')] |
import h5py
class AuthException(Exception):
pass
class NotFoundException(Exception):
pass
class LoginError(Exception):
def __init__(self, message, redirect_url=None):
super(LoginError, self).__init__(message)
self.redirect_url = redirect_url
def validate_file(path: str) -> bool:
"""
Check if the file is an HDF5 file
Modify this to allow for other file types!
(Note some records can be text or arbitrary binary files but we don't need to validate them)
:param path:
:return:
"""
return h5py.is_hdf5(path)
| [
"h5py.is_hdf5"
] | [((555, 573), 'h5py.is_hdf5', 'h5py.is_hdf5', (['path'], {}), '(path)\n', (567, 573), False, 'import h5py\n')] |
import pygame
from pygame import *
from random import randint
import time
pygame.init()
window = display.set_mode((700, 500))
display.set_caption('Ponies')
background = transform.scale(image.load("galaxy.jpg"), (700, 500))
FPS = 100
class GameSprite(sprite.Sprite):
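    # base sprite: loads and scales an image and keeps track of its position and speed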
def __init__(self, player_image, player_x, player_y, player_speed,width,height):
super().__init__()
self.w = width
self.h = height
self.image = transform.scale(image.load(player_image), (self.w, self.h))
self.speed = player_speed
self.rect = self.image.get_rect()
self.rect.x = player_x
self.rect.y = player_y
def reset(self):
window.blit(self.image, (self.rect.x, self.rect.y))
bullets = sprite.Group()
monsters = sprite.Group()
asteroids = sprite.Group()
class Player(GameSprite):
def update(self):
key_pressed = key.get_pressed()
if key_pressed[K_d] and self.rect.x <= 620:
self.rect.x += self.speed
if key_pressed[K_a] and self.rect.x >= 10:
self.rect.x -= self.speed
def fire(self):
firem.play()
bullet = Bullet("bullet.png", self.rect.centerx, self.rect.top, 10,15,15)
bullets.add(bullet)
lives = 3
lost = 0
kills = 0
num_b = 0
reload_on = False
ka = 0
i = 0
class Enemie(GameSprite):
def update(self):
global lost
global i
self.rect.y += speed
if self.rect.y >= 500:
lost += 1
i -= 1
self.rect.y = ycor
self.kill()
class Asteroid(GameSprite):
def update(self):
global ka
self.rect.y += self.speed
if self.rect.y >= 500:
ka -= 1
self.rect.y = ycor
self.kill()
class Bullet(GameSprite):
def update(self):
self.rect.y -= self.speed
mixer.init()
mixer.music.load("space.ogg")
firem = mixer.Sound('fire.ogg')
mixer.music.play()
a = 420
rocketa = Player("rocket.png", 350, a, 5,65,65)
lose_game = False
win_game = False
run = True
while run:
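    # main game loop: draw the scene and HUD, spawn enemies and asteroids, handle input, collisions and win/lose states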
check_time = time.time()
key_pressed = key.get_pressed()
myfont = font.SysFont("Arial", 30)
myfont3 = font.SysFont("Arial", 60)
    kill_l = myfont.render("Score:" + str(kills), 1, (255, 255, 255))
    label = myfont.render("Missed:" + str(lost), 1, (255, 255, 255))
    lives_label = myfont3.render(str(lives), 1, (255, 15, 87))
    reload_label = myfont.render("Reloading...", 1, (255, 0, 0))
myfont1 = font.SysFont("Arial", 100)
label1 = myfont1.render("You lose!!", 1, (255, 0, 0))
label2 = myfont1.render("You win!!", 1, (255, 191, 28))
window.blit(background, (0, 0))
rocketa.reset()
window.blit(lives_label,(670,0))
window.blit(kill_l,(0,0))
window.blit(label,(0,30))
while i != 5:
ycor = randint(-50,0)
xcor = randint(80, 620)
speed = randint(1,2)
monster = Enemie('ufo.png', xcor, ycor,speed,65,65)
monsters.add(monster)
i += 1
while ka != 3:
ycor = randint(-100,0)
xcor = randint(80,620)
speed = randint(2,3)
asteroid = Asteroid('asteroid.png', xcor,ycor,speed,65,65)
asteroids.add(asteroid)
ka +=1
rocketa.update()
bullets.draw(window)
monsters.draw(window)
asteroids.draw(window)
if sprite.groupcollide(monsters,bullets,True,True):
i-=1
kills+=1
if sprite.spritecollide(rocketa,monsters,True) or sprite.spritecollide(rocketa,asteroids,True):
lives -=1
ka -= 1
if sprite.groupcollide(monsters,asteroids,True,False):
i -= 1
if sprite.groupcollide(bullets,asteroids,True,False):
pass
if key_pressed[K_SPACE] and reload_on == False:
num_b +=1
rocketa.fire()
if num_b == 5:
reload_on = True
start_time = time.time()
end_time = int(start_time) + 3
for u in bullets:
u.update()
if reload_on == True:
window.blit(reload_label,(300,450))
if check_time >= end_time:
num_b = 0
reload_on = False
for o in asteroids:
o.update()
for m in monsters:
m.update()
for e in event.get():
if e.type == QUIT:
run = False
if lost == 10 or lives == 0:
lose_game=True
while lose_game:
window.blit(label1, (200, 250))
for e in event.get():
if e.type == QUIT:
lose_game = False
run = False
key_pressed = key.get_pressed()
if key_pressed[K_SPACE]:
kills = 0
lost = 0
i = 0
run = True
lose_game = False
display.update()
if kills == 5:
win_game=True
while win_game:
window.blit(label2, (200, 250))
for e in event.get():
if e.type == QUIT:
win_game = False
run = False
key_pressed = key.get_pressed()
if key_pressed[K_SPACE]:
kills = 0
lost = 0
i = 0
run = True
win_game = False
display.update()
display.update()
| [
"random.randint",
"time.time",
"pygame.init"
] | [((75, 88), 'pygame.init', 'pygame.init', ([], {}), '()\n', (86, 88), False, 'import pygame\n'), ((2075, 2086), 'time.time', 'time.time', ([], {}), '()\n', (2084, 2086), False, 'import time\n'), ((2819, 2834), 'random.randint', 'randint', (['(-50)', '(0)'], {}), '(-50, 0)\n', (2826, 2834), False, 'from random import randint\n'), ((2849, 2865), 'random.randint', 'randint', (['(80)', '(620)'], {}), '(80, 620)\n', (2856, 2865), False, 'from random import randint\n'), ((2882, 2895), 'random.randint', 'randint', (['(1)', '(2)'], {}), '(1, 2)\n', (2889, 2895), False, 'from random import randint\n'), ((3040, 3056), 'random.randint', 'randint', (['(-100)', '(0)'], {}), '(-100, 0)\n', (3047, 3056), False, 'from random import randint\n'), ((3071, 3087), 'random.randint', 'randint', (['(80)', '(620)'], {}), '(80, 620)\n', (3078, 3087), False, 'from random import randint\n'), ((3103, 3116), 'random.randint', 'randint', (['(2)', '(3)'], {}), '(2, 3)\n', (3110, 3116), False, 'from random import randint\n'), ((3872, 3883), 'time.time', 'time.time', ([], {}), '()\n', (3881, 3883), False, 'import time\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Civic Knowledge. This file is licensed under the terms of the
# MIT License, included in this distribution as LICENSE
"""
"""
from os import environ
from pathlib import Path
from .exceptions import ConfigurationError
import yaml
class Config(object):
def __init__(self, path=None, **kwargs):
"""
:param path: A path in which to look for config. Prepended to search paths
kwargs set top level value.
Special handling for cache keys; kwargs of the
form "cache_object" will set the 'object' key of the top level
'cache' dict. So to change the object cache: Config(cache_object="/tmp")
Equivalently, the values in the config file in the 'cache" dict are flattened,
so 'cache->meta' is translated to 'cache_meta'
"""
self.parameters = 'client_id secret bbox zone uaa_url metadata_url event_url _config_file ' \
'start_time timezone cache_meta cache_objects cache_errors'.split()
self.env_vars = {e: f"CITYIQ_{e.upper()}" for e in self.parameters}
if not path:
self._paths = [
Path.cwd().joinpath('.city-iq.yaml'),
Path.cwd().joinpath('city-iq.yaml'),
Path.home().joinpath('.city-iq.yaml'),
]
else:
if Path(path).is_dir():
self._paths = [ Path(path).joinpath(e) for e in ['.city-iq.yaml', 'city-iq.yaml']]
else:
self._paths = [Path(path)]
if environ.get('CITYIQ_CONFIG'):
self._paths = [Path(environ.get('CITYIQ_CONFIG'))] + self._paths
self._kwargs = kwargs
self._config = self._load_config()
if 'cache' in self._config:
for k, v in self._config['cache'].items():
self._config['cache_'+k] = v
del self._config['cache']
def _load_config(self):
"""
Load a YAML configuration from the first configuration file that is found to exist
:return:
"""
for p in self._paths:
if p.exists():
with p.open() as f:
c = yaml.safe_load(f)
if c:
c['_config_file'] = str(p)
return c
else:
raise ConfigurationError(f"Didn't find a config file in paths: {self._paths}")
return {}
@property
def which(self):
return self._config_file
@property
def dict(self):
d = {k: self[k] for k in self.parameters}
for k, v in self._config.items():
if k not in d:
d[k] = v
return d
def __getattr__(self, item):
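        # resolution order: explicit kwargs, then CITYIQ_* environment variables, then the loaded config file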
try:
return self._kwargs[item]
except KeyError:
pass
try:
return environ[self.env_vars[item]]
except KeyError:
pass
try:
return self._config[item]
except KeyError:
pass
if item not in self.parameters:
raise AttributeError(item)
return None
def __getitem__(self, item):
try:
return self.__getattr__(item)
except AttributeError:
raise IndexError(item)
def __str__(self):
import yaml
return yaml.dump(self.dict, default_flow_style=False)
| [
"pathlib.Path",
"pathlib.Path.cwd",
"yaml.dump",
"pathlib.Path.home",
"os.environ.get",
"yaml.safe_load"
] | [((1566, 1594), 'os.environ.get', 'environ.get', (['"""CITYIQ_CONFIG"""'], {}), "('CITYIQ_CONFIG')\n", (1577, 1594), False, 'from os import environ\n'), ((3368, 3414), 'yaml.dump', 'yaml.dump', (['self.dict'], {'default_flow_style': '(False)'}), '(self.dict, default_flow_style=False)\n', (3377, 3414), False, 'import yaml\n'), ((1373, 1383), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (1377, 1383), False, 'from pathlib import Path\n'), ((1542, 1552), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (1546, 1552), False, 'from pathlib import Path\n'), ((2206, 2223), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (2220, 2223), False, 'import yaml\n'), ((1184, 1194), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (1192, 1194), False, 'from pathlib import Path\n'), ((1238, 1248), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (1246, 1248), False, 'from pathlib import Path\n'), ((1291, 1302), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (1300, 1302), False, 'from pathlib import Path\n'), ((1628, 1656), 'os.environ.get', 'environ.get', (['"""CITYIQ_CONFIG"""'], {}), "('CITYIQ_CONFIG')\n", (1639, 1656), False, 'from os import environ\n'), ((1426, 1436), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (1430, 1436), False, 'from pathlib import Path\n')] |
# Sound field reconstruction in rooms: inpainting meets superresolution - 17.12.2019
# Inference.py
import sys
import os
import util
import sfun
import data
import copy
import numpy as np
sys.path.append('util')
def get_latest_checkpoint_path(session_dir):
""" Returns the path of the most recent checkpoint in session_dir.
Args:
session_dir: string
Returns: string
"""
checkpoints_path = os.path.join(session_dir, 'checkpoints')
if os.path.exists(checkpoints_path) and util.dir_contains_files(checkpoints_path):
checkpoints = os.listdir(checkpoints_path)
checkpoints.sort(key=lambda x: os.stat(os.path.join(checkpoints_path, x)).st_mtime)
last_checkpoint = checkpoints[-1]
return os.path.join(checkpoints_path, last_checkpoint)
else:
return ''
def get_results_dict():
""" Get the dictionary to save results.
Returns: dict
"""
return {'name': [], 'num_file': [], 'xDim': [], 'yDim': [], 'm2': [], 'num_mics': [], 'num_comb': [], 'freq': [], 'NMSE': [], 'SSIM': [], 'pattern': [], 'p_real': [], 'p_predicted': [], 'p_previous': []}
def get_test_filenames(test_path):
""" Get the .mat filenames given a folder path.
Args:
test_path: string
Returns: string
"""
filenames = [filename for filename in os.listdir(test_path) if filename.endswith('.mat')]
return filenames
def reconstruct_soundfield(model, sf_sample, mask, factor, frequencies, filename, num_file, com_num,
results_dict):
""" Reconstruct and evaluate sound field
Args:
model: keras model
sf_sample: np.ndarray
factor: int
frequencies: list
filename: string
num_file: int
com_num: int
results_dict: dict
Returns: dict
"""
# Create one sample batch. Expand dims
sf_sample = np.expand_dims(sf_sample, axis=0)
sf_gt = copy.deepcopy(sf_sample)
mask = np.expand_dims(mask, axis=0)
mask_gt = copy.deepcopy(mask)
# preprocessing
irregular_sf, mask = util.preprocessing(factor, sf_sample, mask)
#predict sound field
pred_sf = model.predict([irregular_sf, mask])
#measured observations. To use in postprocessing
measured_sf = util.downsampling(factor, copy.deepcopy(sf_gt))
measured_sf = util.apply_mask(measured_sf, mask_gt)
#compute csv fields
split_filename = filename[:-4].split('_')
pattern = np.where(mask_gt[0, :, :, 0].flatten() == 1)[0]
num_mic = len(pattern)
for freq_num, freq in enumerate(frequencies):
#Postprocessing
reconstructed_sf_slice = util.postprocessing(pred_sf, measured_sf, freq_num, pattern, factor)
        #Compute Metrics
nmse = util.compute_NMSE(sf_gt[0, :, :, freq_num], reconstructed_sf_slice)
data_range = sf_gt[0, :, :, freq_num].max() - sf_gt[0, :, :, freq_num].min()
ssim = util.compute_SSIM(sf_gt[0, :, :, freq_num].astype('float32'), reconstructed_sf_slice, data_range)
average_pressure_real = util.compute_average_pressure(sf_gt[0, :, :, freq_num])
average_pressure_predicted = util.compute_average_pressure(reconstructed_sf_slice)
average_pressure_previous = util.compute_average_pressure(measured_sf[0, :, :, freq_num])
#store results
results_dict['freq'].append(freq)
results_dict['name'].append(filename[:-4])
results_dict['xDim'].append(split_filename[2])
results_dict['yDim'].append(split_filename[3])
results_dict['m2'].append(split_filename[4])
results_dict['num_mics'].append(num_mic)
results_dict['num_comb'].append(com_num)
results_dict['num_file'].append(num_file)
results_dict['pattern'].append(pattern)
results_dict['NMSE'].append(nmse)
results_dict['SSIM'].append(ssim)
results_dict['p_real'].append(average_pressure_real)
results_dict['p_predicted'].append(average_pressure_predicted)
results_dict['p_previous'].append(average_pressure_previous)
return results_dict
def real_data_evaluation(config_path):
""" Evaluates a trained model on real data.
Args:
config_path: string
"""
config = util.load_config(config_path)
print('Loaded configuration from: %s' % config_path)
session_dir = config_path[:config_path.rfind('/')+1]
checkpoint_path = get_latest_checkpoint_path(session_dir)
if not checkpoint_path:
print('Error: No checkpoint found in same directory as configuration file.')
return
model = sfun.SFUN(config, train_bn=False)
predict_path = os.path.join(session_dir, 'real_data_evaluation', 'min_mics_' + str(config['evaluation']['min_mics']) +
'_max_mics_' + str(config['evaluation']['max_mics']) + '_step_mics_' +
str(config['evaluation']['step_mics']))
if not os.path.exists(predict_path): os.makedirs(predict_path)
filepath = os.path.join(config['dataset']['path'], 'real_soundfields','RoomB_soundfield.mat')
# Get Ground Truth
soundfield_1 = util.load_RoomB_soundfield(filepath, 0)
soundfield_2 = util.load_RoomB_soundfield(filepath, 1)
frequencies = util.get_frequencies()
results_dict = get_results_dict()
print('\nEvaluating model in real sound fields...\n')
for num_mics in range(config['evaluation']['min_mics'], config['evaluation']['max_mics'], config['evaluation']['step_mics']):
for source_num, source in enumerate(['source_1', 'source_2']):
mask_generator = data.MaskGenerator(config['dataset']['xSamples']//config['dataset']['factor'], config['dataset']['ySamples']//config['dataset']['factor'], len(frequencies), num_mics=num_mics, rand_seed=3)
for com_num in range(config['evaluation']['num_comb']):
print("\twith "+ str(num_mics) + " mics, pattern number " + str(com_num) + " and source position " + str(source_num))
if source_num:
input_soundfield = copy.deepcopy(soundfield_2)
else:
input_soundfield = copy.deepcopy(soundfield_1)
mask = mask_generator.sample()
filename = str(source_num) + '_d_4.159_6.459_26.862.mat'
results_dict = reconstruct_soundfield(model, input_soundfield, mask, config['dataset']['factor'],
frequencies, filename, source_num, com_num, results_dict)
print('\nWriting real room results...\n')
util.write_results(os.path.join(predict_path, 'results_RoomB_Dataset.csv'), results_dict)
print('Analysing and plotting results...')
util.analyze_and_plot_real_results(os.path.join(predict_path, 'results_RoomB_Dataset.csv'), config)
print('Evaluation completed!')
def simulated_data_evaluation(config_path):
""" Evaluates a trained model on simulated data.
Args:
config_path: string
"""
config = util.load_config(config_path)
print('Loaded configuration from: %s' % config_path)
session_dir = config_path[:config_path.rfind('/')+1]
checkpoint_path = get_latest_checkpoint_path(session_dir)
if not checkpoint_path: # Model weights are loaded when creating the model object
print('Error: No checkpoint found in same directory as configuration file.')
return
model = sfun.SFUN(config, train_bn=False)
evaluation_path = os.path.join(session_dir, 'simulated_data_evaluation', 'min_mics_' + str(config['evaluation']['min_mics']) +
'_max_mics_' + str(config['evaluation']['max_mics']) + '_step_mics_' +
str(config['evaluation']['step_mics']))
if not os.path.exists(evaluation_path): os.makedirs(evaluation_path)
test_path = os.path.join(config['dataset']['path'], 'simulated_soundfields', 'test')
filenames = get_test_filenames(test_path)
frequencies = util.get_frequencies()
for num_file, filename in enumerate(sorted(filenames)):
print('\nEvaluating model in simulated room ' + str(num_file) + '...\n')
aux_sound = util.load_soundfield(os.path.join(test_path, filename), frequencies)
results_dict = get_results_dict()
for num_mics in range(config['evaluation']['min_mics'], config['evaluation']['max_mics'], config['evaluation']['step_mics']):
mask_generator = data.MaskGenerator(config['dataset']['xSamples']//config['dataset']['factor'], config['dataset']['ySamples']//config['dataset']['factor'], len(frequencies), num_mics=num_mics, rand_seed=3)
for com_num in range(config['evaluation']['num_comb']):
soundfield_input = copy.deepcopy(aux_sound)
mask = mask_generator.sample()
print("\twith "+ str(num_mics) + " mics and pattern number " + str(com_num))
results_dict = reconstruct_soundfield(model, soundfield_input, mask, config['dataset']['factor'],
frequencies, filename, num_file, com_num, results_dict)
com_num += 1
filename = 'results_file_number_' + str(num_file) + '.csv'
print('\nWriting simulated room ' + str(num_file) + ' results...\n')
util.write_results(os.path.join(evaluation_path, filename), results_dict)
print('Analysing and plotting results...')
util.analyze_and_plot_simulated_results(evaluation_path, session_dir, config)
print('Evaluation completed!')
def visualize(config_path):
""" Plot predictions of trained model on real data.
Args:
config_path: string
"""
config = util.load_config(config_path)
print('Loaded configuration from: %s' % config_path)
frequencies = util.get_frequencies()
session_dir = config_path[:config_path.rfind('/')+1]
checkpoint_path = get_latest_checkpoint_path(session_dir)
if not checkpoint_path:
print('Error: No checkpoint found in same directory as configuration file.')
return
model = sfun.SFUN(config, train_bn=False)
visualization_path = os.path.join(session_dir, 'visualization')
if not os.path.exists(visualization_path): os.makedirs(visualization_path)
filepath = os.path.join(config['dataset']['path'], 'real_soundfields','RoomB_soundfield.mat')
mask_generator = data.MaskGenerator(config['dataset']['xSamples']//config['dataset']['factor'], config['dataset']['ySamples']//config['dataset']['factor'], len(frequencies), num_mics=config['visualization']['num_mics'])
# Get measured sound field
sf_sample = util.load_RoomB_soundfield(filepath, config['visualization']['source'])
sf_gt = np.expand_dims(copy.deepcopy(sf_sample), axis=0)
initial_sf = np.expand_dims(sf_sample, axis=0)
# Get mask samples
mask = mask_generator.sample()
mask = np.expand_dims(mask, axis=0)
# preprocessing
irregular_sf, mask = util.preprocessing(config['dataset']['factor'], initial_sf, mask)
# Scale ground truth sound field
sf_gt = util.scale(sf_gt)
print('\nPlotting Ground Truth Sound Field Scaled...')
for num_freq, freq in enumerate(frequencies):
print('\tat frequency ' + str(freq))
util.plot_2D(sf_gt[0, ..., num_freq], os.path.join(visualization_path, str(freq) + '_Hz_Ground_Truth.png'))
print('\nPlotting Irregular Sound Field...')
for num_freq, freq in enumerate(frequencies):
print('\tat frequency ' + str(freq))
util.plot_2D(irregular_sf[0, ..., num_freq], os.path.join(visualization_path, str(freq) + '_Hz_Irregular_SF.png'))
print('\nPlotting Mask...')
for num_freq, freq in enumerate(frequencies):
print('\tat frequency ' + str(freq))
util.plot_2D(mask[0, ..., num_freq], os.path.join(visualization_path, str(freq) + '_Hz_Mask.png'))
pred_sf = model.predict([irregular_sf, mask])
print('\nPlotting Predicted Sound Field...')
for num_freq, freq in enumerate(frequencies):
print('\tat frequency ' + str(freq))
util.plot_2D(pred_sf[0, ..., num_freq], os.path.join(visualization_path, str(freq) + '_Hz_Pred_SF.png'))
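# Editor-added sketch (hedged, not part of the original script): a possible command-line entry
# point that dispatches to the evaluation/visualization routines defined above. The flag names
# and choices below are illustrative assumptions.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Sound field reconstruction inference')
    parser.add_argument('config_path', help='path to the training session config file')
    parser.add_argument('--mode', choices=['simulated', 'real', 'visualize'], default='simulated')
    args = parser.parse_args()
    if args.mode == 'real':
        real_data_evaluation(args.config_path)
    elif args.mode == 'visualize':
        visualize(args.config_path)
    else:
        simulated_data_evaluation(args.config_path)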
| [
"sfun.SFUN",
"util.postprocessing",
"util.preprocessing",
"util.load_RoomB_soundfield",
"copy.deepcopy",
"sys.path.append",
"os.path.exists",
"os.listdir",
"util.analyze_and_plot_simulated_results",
"util.load_config",
"util.get_frequencies",
"util.compute_average_pressure",
"util.apply_mask",
"util.scale",
"util.compute_NMSE",
"util.dir_contains_files",
"os.makedirs",
"os.path.join",
"numpy.expand_dims"
] | [((190, 213), 'sys.path.append', 'sys.path.append', (['"""util"""'], {}), "('util')\n", (205, 213), False, 'import sys\n'), ((431, 471), 'os.path.join', 'os.path.join', (['session_dir', '"""checkpoints"""'], {}), "(session_dir, 'checkpoints')\n", (443, 471), False, 'import os\n'), ((1921, 1954), 'numpy.expand_dims', 'np.expand_dims', (['sf_sample'], {'axis': '(0)'}), '(sf_sample, axis=0)\n', (1935, 1954), True, 'import numpy as np\n'), ((1967, 1991), 'copy.deepcopy', 'copy.deepcopy', (['sf_sample'], {}), '(sf_sample)\n', (1980, 1991), False, 'import copy\n'), ((2004, 2032), 'numpy.expand_dims', 'np.expand_dims', (['mask'], {'axis': '(0)'}), '(mask, axis=0)\n', (2018, 2032), True, 'import numpy as np\n'), ((2047, 2066), 'copy.deepcopy', 'copy.deepcopy', (['mask'], {}), '(mask)\n', (2060, 2066), False, 'import copy\n'), ((2113, 2156), 'util.preprocessing', 'util.preprocessing', (['factor', 'sf_sample', 'mask'], {}), '(factor, sf_sample, mask)\n', (2131, 2156), False, 'import util\n'), ((2371, 2408), 'util.apply_mask', 'util.apply_mask', (['measured_sf', 'mask_gt'], {}), '(measured_sf, mask_gt)\n', (2386, 2408), False, 'import util\n'), ((4376, 4405), 'util.load_config', 'util.load_config', (['config_path'], {}), '(config_path)\n', (4392, 4405), False, 'import util\n'), ((4725, 4758), 'sfun.SFUN', 'sfun.SFUN', (['config'], {'train_bn': '(False)'}), '(config, train_bn=False)\n', (4734, 4758), False, 'import sfun\n'), ((5148, 5235), 'os.path.join', 'os.path.join', (["config['dataset']['path']", '"""real_soundfields"""', '"""RoomB_soundfield.mat"""'], {}), "(config['dataset']['path'], 'real_soundfields',\n 'RoomB_soundfield.mat')\n", (5160, 5235), False, 'import os\n'), ((5274, 5313), 'util.load_RoomB_soundfield', 'util.load_RoomB_soundfield', (['filepath', '(0)'], {}), '(filepath, 0)\n', (5300, 5313), False, 'import util\n'), ((5333, 5372), 'util.load_RoomB_soundfield', 'util.load_RoomB_soundfield', (['filepath', '(1)'], {}), '(filepath, 1)\n', (5359, 5372), False, 'import util\n'), ((5393, 5415), 'util.get_frequencies', 'util.get_frequencies', ([], {}), '()\n', (5413, 5415), False, 'import util\n'), ((7168, 7197), 'util.load_config', 'util.load_config', (['config_path'], {}), '(config_path)\n', (7184, 7197), False, 'import util\n'), ((7576, 7609), 'sfun.SFUN', 'sfun.SFUN', (['config'], {'train_bn': '(False)'}), '(config, train_bn=False)\n', (7585, 7609), False, 'import sfun\n'), ((8011, 8083), 'os.path.join', 'os.path.join', (["config['dataset']['path']", '"""simulated_soundfields"""', '"""test"""'], {}), "(config['dataset']['path'], 'simulated_soundfields', 'test')\n", (8023, 8083), False, 'import os\n'), ((8149, 8171), 'util.get_frequencies', 'util.get_frequencies', ([], {}), '()\n', (8169, 8171), False, 'import util\n'), ((9603, 9680), 'util.analyze_and_plot_simulated_results', 'util.analyze_and_plot_simulated_results', (['evaluation_path', 'session_dir', 'config'], {}), '(evaluation_path, session_dir, config)\n', (9642, 9680), False, 'import util\n'), ((9869, 9898), 'util.load_config', 'util.load_config', (['config_path'], {}), '(config_path)\n', (9885, 9898), False, 'import util\n'), ((9975, 9997), 'util.get_frequencies', 'util.get_frequencies', ([], {}), '()\n', (9995, 9997), False, 'import util\n'), ((10260, 10293), 'sfun.SFUN', 'sfun.SFUN', (['config'], {'train_bn': '(False)'}), '(config, train_bn=False)\n', (10269, 10293), False, 'import sfun\n'), ((10320, 10362), 'os.path.join', 'os.path.join', (['session_dir', '"""visualization"""'], {}), "(session_dir, 'visualization')\n", 
(10332, 10362), False, 'import os\n'), ((10458, 10545), 'os.path.join', 'os.path.join', (["config['dataset']['path']", '"""real_soundfields"""', '"""RoomB_soundfield.mat"""'], {}), "(config['dataset']['path'], 'real_soundfields',\n 'RoomB_soundfield.mat')\n", (10470, 10545), False, 'import os\n'), ((10814, 10885), 'util.load_RoomB_soundfield', 'util.load_RoomB_soundfield', (['filepath', "config['visualization']['source']"], {}), "(filepath, config['visualization']['source'])\n", (10840, 10885), False, 'import util\n'), ((10964, 10997), 'numpy.expand_dims', 'np.expand_dims', (['sf_sample'], {'axis': '(0)'}), '(sf_sample, axis=0)\n', (10978, 10997), True, 'import numpy as np\n'), ((11068, 11096), 'numpy.expand_dims', 'np.expand_dims', (['mask'], {'axis': '(0)'}), '(mask, axis=0)\n', (11082, 11096), True, 'import numpy as np\n'), ((11143, 11208), 'util.preprocessing', 'util.preprocessing', (["config['dataset']['factor']", 'initial_sf', 'mask'], {}), "(config['dataset']['factor'], initial_sf, mask)\n", (11161, 11208), False, 'import util\n'), ((11259, 11276), 'util.scale', 'util.scale', (['sf_gt'], {}), '(sf_gt)\n', (11269, 11276), False, 'import util\n'), ((479, 511), 'os.path.exists', 'os.path.exists', (['checkpoints_path'], {}), '(checkpoints_path)\n', (493, 511), False, 'import os\n'), ((516, 557), 'util.dir_contains_files', 'util.dir_contains_files', (['checkpoints_path'], {}), '(checkpoints_path)\n', (539, 557), False, 'import util\n'), ((581, 609), 'os.listdir', 'os.listdir', (['checkpoints_path'], {}), '(checkpoints_path)\n', (591, 609), False, 'import os\n'), ((759, 806), 'os.path.join', 'os.path.join', (['checkpoints_path', 'last_checkpoint'], {}), '(checkpoints_path, last_checkpoint)\n', (771, 806), False, 'import os\n'), ((2331, 2351), 'copy.deepcopy', 'copy.deepcopy', (['sf_gt'], {}), '(sf_gt)\n', (2344, 2351), False, 'import copy\n'), ((2678, 2746), 'util.postprocessing', 'util.postprocessing', (['pred_sf', 'measured_sf', 'freq_num', 'pattern', 'factor'], {}), '(pred_sf, measured_sf, freq_num, pattern, factor)\n', (2697, 2746), False, 'import util\n'), ((2806, 2874), 'util.postprocessing', 'util.postprocessing', (['pred_sf', 'measured_sf', 'freq_num', 'pattern', 'factor'], {}), '(pred_sf, measured_sf, freq_num, pattern, factor)\n', (2825, 2874), False, 'import util\n'), ((2890, 2957), 'util.compute_NMSE', 'util.compute_NMSE', (['sf_gt[0, :, :, freq_num]', 'reconstructed_sf_slice'], {}), '(sf_gt[0, :, :, freq_num], reconstructed_sf_slice)\n', (2907, 2957), False, 'import util\n'), ((3190, 3245), 'util.compute_average_pressure', 'util.compute_average_pressure', (['sf_gt[0, :, :, freq_num]'], {}), '(sf_gt[0, :, :, freq_num])\n', (3219, 3245), False, 'import util\n'), ((3283, 3336), 'util.compute_average_pressure', 'util.compute_average_pressure', (['reconstructed_sf_slice'], {}), '(reconstructed_sf_slice)\n', (3312, 3336), False, 'import util\n'), ((3373, 3434), 'util.compute_average_pressure', 'util.compute_average_pressure', (['measured_sf[0, :, :, freq_num]'], {}), '(measured_sf[0, :, :, freq_num])\n', (3402, 3434), False, 'import util\n'), ((5075, 5103), 'os.path.exists', 'os.path.exists', (['predict_path'], {}), '(predict_path)\n', (5089, 5103), False, 'import os\n'), ((5105, 5130), 'os.makedirs', 'os.makedirs', (['predict_path'], {}), '(predict_path)\n', (5116, 5130), False, 'import os\n'), ((6744, 6799), 'os.path.join', 'os.path.join', (['predict_path', '"""results_RoomB_Dataset.csv"""'], {}), "(predict_path, 'results_RoomB_Dataset.csv')\n", (6756, 6799), False, 'import 
os\n'), ((6902, 6957), 'os.path.join', 'os.path.join', (['predict_path', '"""results_RoomB_Dataset.csv"""'], {}), "(predict_path, 'results_RoomB_Dataset.csv')\n", (6914, 6957), False, 'import os\n'), ((7932, 7963), 'os.path.exists', 'os.path.exists', (['evaluation_path'], {}), '(evaluation_path)\n', (7946, 7963), False, 'import os\n'), ((7965, 7993), 'os.makedirs', 'os.makedirs', (['evaluation_path'], {}), '(evaluation_path)\n', (7976, 7993), False, 'import os\n'), ((10374, 10408), 'os.path.exists', 'os.path.exists', (['visualization_path'], {}), '(visualization_path)\n', (10388, 10408), False, 'import os\n'), ((10410, 10441), 'os.makedirs', 'os.makedirs', (['visualization_path'], {}), '(visualization_path)\n', (10421, 10441), False, 'import os\n'), ((10913, 10937), 'copy.deepcopy', 'copy.deepcopy', (['sf_sample'], {}), '(sf_sample)\n', (10926, 10937), False, 'import copy\n'), ((1351, 1372), 'os.listdir', 'os.listdir', (['test_path'], {}), '(test_path)\n', (1361, 1372), False, 'import os\n'), ((8355, 8388), 'os.path.join', 'os.path.join', (['test_path', 'filename'], {}), '(test_path, filename)\n', (8367, 8388), False, 'import os\n'), ((9496, 9535), 'os.path.join', 'os.path.join', (['evaluation_path', 'filename'], {}), '(evaluation_path, filename)\n', (9508, 9535), False, 'import os\n'), ((8902, 8926), 'copy.deepcopy', 'copy.deepcopy', (['aux_sound'], {}), '(aux_sound)\n', (8915, 8926), False, 'import copy\n'), ((6208, 6235), 'copy.deepcopy', 'copy.deepcopy', (['soundfield_2'], {}), '(soundfield_2)\n', (6221, 6235), False, 'import copy\n'), ((6297, 6324), 'copy.deepcopy', 'copy.deepcopy', (['soundfield_1'], {}), '(soundfield_1)\n', (6310, 6324), False, 'import copy\n'), ((657, 690), 'os.path.join', 'os.path.join', (['checkpoints_path', 'x'], {}), '(checkpoints_path, x)\n', (669, 690), False, 'import os\n')] |
"""
Author: <NAME>
Date: 15 May 2021
"""
import json
import pytest
from flask import Response
from tests.functional.utils import FlaskTestRig, login, token_auth_header_field, datetime_as_string
@FlaskTestRig.setup_app(n_users=3)
def test_get_user_me_no_auth(client_factory, make_users, **kwargs):
"""
Validate an Unauthorised error is returned when attempting to get
    a user's information on a GET request to the /api/v1/users/me endpoint without being
authenticated.
:endpoint: /api/v1/users/me
:method: GET
:auth: False
:params: None
:status: 401
:response: An unauthorised error.
"""
rig: FlaskTestRig = FlaskTestRig.extract_rig_from_kwargs(kwargs)
expected = {
"error": "Unauthorised",
"message": "Invalid credentials."
}
# Make request and gather response.
res: Response = rig.client.get("/api/v1/users/me")
# Get JSON data returned.
data = json.loads(res.data)
# Verify response matches expected.
assert data == expected
assert res.status_code == 401
@FlaskTestRig.setup_app(n_users=3)
def test_get_user_me_with_auth_user_200(client_factory, make_users, **kwargs):
"""
    Validate that the current user's information is returned on a
    GET request to the /api/v1/users/me endpoint.
:endpoint: /api/v1/user/me
:method: GET
:auth: True
:params: Auth Token
:status: 200
:response: A object describing the current user.
"""
rig: FlaskTestRig = FlaskTestRig.extract_rig_from_kwargs(kwargs)
expected = rig.get_first_user()
expected.pop("last_login")
# Acquire login token for first user.
user = rig.get_first_user(keep_password=True)
token = login(rig.client, user)
# Make request and gather response.
res: Response = rig.client.get("/api/v1/users/me", headers=token_auth_header_field(token))
# Get JSON data returned.
data = json.loads(res.data)
data.pop("last_login")
# Verify response matches expected.
assert data == expected
assert res.status_code == 200
@FlaskTestRig.setup_app(n_users=4)
@pytest.mark.parametrize("user_id", [1, 2, 3])
def test_get_user_with_id_with_auth_user(user_id, client_factory, make_users, **kwargs):
"""
    Validate that a user's information, looked up by ID, is returned on a
    GET request to the /api/v1/users/<id> endpoint as a User.
:endpoint: /api/v1/user/<int:id>
:method: GET
:auth: True
:params: Auth Token, A user ID to get information on.
:status: 200
:response: A object describing a user given the user's id,
including the id, username and last_login time.
"""
rig: FlaskTestRig = FlaskTestRig.extract_rig_from_kwargs(kwargs)
# The expected user information to be returned.
expected = rig.get_current_users(keep_email=False)[user_id]
print(expected)
# Acquire login token for first user.
user = rig.get_first_user(keep_password=True)
token = login(rig.client, user)
# Make request and gather response.
res: Response = rig.client.get(f"/api/v1/users/{user_id}", headers=token_auth_header_field(token))
# Get JSON data returned.
data = json.loads(res.data)
# Add the last_login field to the expected data.
expected = [{**user, "last_login": datetime_as_string(user["last_login"])} for user in [expected]][0]
# Verify response matches expected.
assert data == expected
assert res.status_code == 200
@FlaskTestRig.setup_app(n_users=3)
def test_get_user_with_id_with_auth_user_404(client_factory, make_users, **kwargs):
"""
Validate that a 404 error is raised when a non-existing id is queried
against the database as a User.
:endpoint: /api/v1/user/<int:id>
:method: GET
:auth: True
:params: Auth Token, A user ID to get information on.
:status: 404
:response: 404 error due to user not existing.
"""
rig: FlaskTestRig = FlaskTestRig.extract_rig_from_kwargs(kwargs)
user_id = 100
expected = {'error': 'Not Found', 'message': 'User does not exist.'}
# Acquire login token for first user.
user = rig.get_first_user(keep_password=True)
token = login(rig.client, user)
# Make request and gather response.
res: Response = rig.client.get(f"/api/v1/users/{user_id}", headers=token_auth_header_field(token))
# Get JSON data returned.
data = json.loads(res.data)
# Verify response matches expected.
assert data == expected
assert res.status_code == 404
@FlaskTestRig.setup_app(n_users=4)
@pytest.mark.parametrize("user_id", [1, 2, 3])
def test_get_users_with_auth_admin(user_id, client_factory, make_users, **kwargs):
"""
    Validate that a user, keyed by their ID, is returned on a
    GET request to the /api/v1/users/<id> endpoint as an Admin.
:endpoint: /api/v1/user/<int:id>
:method: GET
:auth: True
:params: Auth Token, A user ID to get information on.
:status: 200
:response: A object describing a user given the user's id,
including the id, username and last_login time.
"""
rig: FlaskTestRig = FlaskTestRig.extract_rig_from_kwargs(kwargs)
# The expected user information to be returned.
expected = rig.get_current_users(keep_email=True, keep_role_name=user_id)[user_id]
expected.pop("last_login")
print(expected)
# Acquire login token for first user.
user = rig.get_first_user(keep_email=True, admin_only=True)
token = login(rig.client, user)
# Make request and gather response.
res: Response = rig.client.get(f"/api/v1/users/{user_id}", headers=token_auth_header_field(token))
# Get JSON data returned.
data = json.loads(res.data)
data.pop("last_login")
# Verify response matches expected.
assert data == expected
assert res.status_code == 200
@FlaskTestRig.setup_app(n_users=3)
def test_get_user_with_id_with_auth_admin_404(client_factory, make_users, **kwargs):
"""
Validate that a 404 error is raised when a non-existing id is queried
    against the database as an Admin.
:endpoint: /api/v1/user/<int:id>
:method: GET
:auth: True
:params: Auth Token, A user ID to get information on.
:status: 404
:response: 404 error due to user not existing.
"""
rig: FlaskTestRig = FlaskTestRig.extract_rig_from_kwargs(kwargs)
user_id = 100
expected = {'error': 'Not Found', 'message': 'User does not exist.'}
# Acquire login token for first user.
user = rig.get_first_user(keep_password=True, admin_only=True)
token = login(rig.client, user)
# Make request and gather response.
res: Response = rig.client.get(f"/api/v1/users/{user_id}", headers=token_auth_header_field(token))
# Get JSON data returned.
data = json.loads(res.data)
# Verify response matches expected.
assert data == expected
assert res.status_code == 404
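# Editor-added refactoring sketch (hedged): the login-then-build-headers steps repeated in the
# tests above could be collected in one helper. It only reuses calls already present in this
# module (FlaskTestRig.get_first_user, login, token_auth_header_field).
def _authed_headers(rig: FlaskTestRig, admin_only: bool = False):
    """Log in as the first (optionally admin-only) user and return the auth headers."""
    user = rig.get_first_user(keep_password=True, admin_only=admin_only)
    token = login(rig.client, user)
    return token_auth_header_field(token)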
| [
"json.loads",
"tests.functional.utils.datetime_as_string",
"tests.functional.utils.FlaskTestRig.extract_rig_from_kwargs",
"pytest.mark.parametrize",
"tests.functional.utils.token_auth_header_field",
"tests.functional.utils.FlaskTestRig.setup_app",
"tests.functional.utils.login"
] | [((210, 243), 'tests.functional.utils.FlaskTestRig.setup_app', 'FlaskTestRig.setup_app', ([], {'n_users': '(3)'}), '(n_users=3)\n', (232, 243), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field, datetime_as_string\n'), ((1089, 1122), 'tests.functional.utils.FlaskTestRig.setup_app', 'FlaskTestRig.setup_app', ([], {'n_users': '(3)'}), '(n_users=3)\n', (1111, 1122), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field, datetime_as_string\n'), ((2088, 2121), 'tests.functional.utils.FlaskTestRig.setup_app', 'FlaskTestRig.setup_app', ([], {'n_users': '(4)'}), '(n_users=4)\n', (2110, 2121), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field, datetime_as_string\n'), ((2123, 2168), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""user_id"""', '[1, 2, 3]'], {}), "('user_id', [1, 2, 3])\n", (2146, 2168), False, 'import pytest\n'), ((3479, 3512), 'tests.functional.utils.FlaskTestRig.setup_app', 'FlaskTestRig.setup_app', ([], {'n_users': '(3)'}), '(n_users=3)\n', (3501, 3512), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field, datetime_as_string\n'), ((4540, 4573), 'tests.functional.utils.FlaskTestRig.setup_app', 'FlaskTestRig.setup_app', ([], {'n_users': '(4)'}), '(n_users=4)\n', (4562, 4573), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field, datetime_as_string\n'), ((4575, 4620), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""user_id"""', '[1, 2, 3]'], {}), "('user_id', [1, 2, 3])\n", (4598, 4620), False, 'import pytest\n'), ((5860, 5893), 'tests.functional.utils.FlaskTestRig.setup_app', 'FlaskTestRig.setup_app', ([], {'n_users': '(3)'}), '(n_users=3)\n', (5882, 5893), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field, datetime_as_string\n'), ((680, 724), 'tests.functional.utils.FlaskTestRig.extract_rig_from_kwargs', 'FlaskTestRig.extract_rig_from_kwargs', (['kwargs'], {}), '(kwargs)\n', (716, 724), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field, datetime_as_string\n'), ((962, 982), 'json.loads', 'json.loads', (['res.data'], {}), '(res.data)\n', (972, 982), False, 'import json\n'), ((1514, 1558), 'tests.functional.utils.FlaskTestRig.extract_rig_from_kwargs', 'FlaskTestRig.extract_rig_from_kwargs', (['kwargs'], {}), '(kwargs)\n', (1550, 1558), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field, datetime_as_string\n'), ((1732, 1755), 'tests.functional.utils.login', 'login', (['rig.client', 'user'], {}), '(rig.client, user)\n', (1737, 1755), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field, datetime_as_string\n'), ((1934, 1954), 'json.loads', 'json.loads', (['res.data'], {}), '(res.data)\n', (1944, 1954), False, 'import json\n'), ((2694, 2738), 'tests.functional.utils.FlaskTestRig.extract_rig_from_kwargs', 'FlaskTestRig.extract_rig_from_kwargs', (['kwargs'], {}), '(kwargs)\n', (2730, 2738), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field, datetime_as_string\n'), ((2982, 3005), 'tests.functional.utils.login', 'login', (['rig.client', 'user'], {}), '(rig.client, user)\n', (2987, 3005), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field, datetime_as_string\n'), ((3192, 3212), 'json.loads', 'json.loads', (['res.data'], {}), '(res.data)\n', (3202, 3212), False, 'import 
json\n'), ((3960, 4004), 'tests.functional.utils.FlaskTestRig.extract_rig_from_kwargs', 'FlaskTestRig.extract_rig_from_kwargs', (['kwargs'], {}), '(kwargs)\n', (3996, 4004), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field, datetime_as_string\n'), ((4203, 4226), 'tests.functional.utils.login', 'login', (['rig.client', 'user'], {}), '(rig.client, user)\n', (4208, 4226), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field, datetime_as_string\n'), ((4413, 4433), 'json.loads', 'json.loads', (['res.data'], {}), '(res.data)\n', (4423, 4433), False, 'import json\n'), ((5140, 5184), 'tests.functional.utils.FlaskTestRig.extract_rig_from_kwargs', 'FlaskTestRig.extract_rig_from_kwargs', (['kwargs'], {}), '(kwargs)\n', (5176, 5184), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field, datetime_as_string\n'), ((5496, 5519), 'tests.functional.utils.login', 'login', (['rig.client', 'user'], {}), '(rig.client, user)\n', (5501, 5519), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field, datetime_as_string\n'), ((5706, 5726), 'json.loads', 'json.loads', (['res.data'], {}), '(res.data)\n', (5716, 5726), False, 'import json\n'), ((6343, 6387), 'tests.functional.utils.FlaskTestRig.extract_rig_from_kwargs', 'FlaskTestRig.extract_rig_from_kwargs', (['kwargs'], {}), '(kwargs)\n', (6379, 6387), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field, datetime_as_string\n'), ((6603, 6626), 'tests.functional.utils.login', 'login', (['rig.client', 'user'], {}), '(rig.client, user)\n', (6608, 6626), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field, datetime_as_string\n'), ((6813, 6833), 'json.loads', 'json.loads', (['res.data'], {}), '(res.data)\n', (6823, 6833), False, 'import json\n'), ((1860, 1890), 'tests.functional.utils.token_auth_header_field', 'token_auth_header_field', (['token'], {}), '(token)\n', (1883, 1890), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field, datetime_as_string\n'), ((3118, 3148), 'tests.functional.utils.token_auth_header_field', 'token_auth_header_field', (['token'], {}), '(token)\n', (3141, 3148), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field, datetime_as_string\n'), ((4339, 4369), 'tests.functional.utils.token_auth_header_field', 'token_auth_header_field', (['token'], {}), '(token)\n', (4362, 4369), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field, datetime_as_string\n'), ((5632, 5662), 'tests.functional.utils.token_auth_header_field', 'token_auth_header_field', (['token'], {}), '(token)\n', (5655, 5662), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field, datetime_as_string\n'), ((6739, 6769), 'tests.functional.utils.token_auth_header_field', 'token_auth_header_field', (['token'], {}), '(token)\n', (6762, 6769), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field, datetime_as_string\n'), ((3306, 3344), 'tests.functional.utils.datetime_as_string', 'datetime_as_string', (["user['last_login']"], {}), "(user['last_login'])\n", (3324, 3344), False, 'from tests.functional.utils import FlaskTestRig, login, token_auth_header_field, datetime_as_string\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('userprofile', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='about_me',
field=models.TextField(default=b'Something about myself...'),
),
migrations.AddField(
model_name='userprofile',
name='city',
field=models.CharField(default=b'Single City', max_length=20),
),
migrations.AlterField(
model_name='userprofile',
name='gender',
field=models.CharField(max_length=1, choices=[(b'M', b'Male'), (b'F', b'Female')]),
),
migrations.AlterField(
model_name='userprofile',
name='proximity',
field=models.IntegerField(verbose_name=b'Proximity (km)'),
),
]
| [
"django.db.models.IntegerField",
"django.db.models.TextField",
"django.db.models.CharField"
] | [((350, 404), 'django.db.models.TextField', 'models.TextField', ([], {'default': "b'Something about myself...'"}), "(default=b'Something about myself...')\n", (366, 404), False, 'from django.db import migrations, models\n'), ((527, 582), 'django.db.models.CharField', 'models.CharField', ([], {'default': "b'Single City'", 'max_length': '(20)'}), "(default=b'Single City', max_length=20)\n", (543, 582), False, 'from django.db import migrations, models\n'), ((709, 785), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1)', 'choices': "[(b'M', b'Male'), (b'F', b'Female')]"}), "(max_length=1, choices=[(b'M', b'Male'), (b'F', b'Female')])\n", (725, 785), False, 'from django.db import migrations, models\n'), ((915, 966), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'verbose_name': "b'Proximity (km)'"}), "(verbose_name=b'Proximity (km)')\n", (934, 966), False, 'from django.db import migrations, models\n')] |
import json
import os
from os.path import join, dirname
import boto3
import requests
from boto3.session import Session
from jsonschema import validate, exceptions
from requests_aws4auth import AWS4Auth
template_config = {"name": "template", "url": "http://example-url.com"}
configs_to_test = []
def pytest_html_report_title(report):
report.title = "template integration tests"
def send_a_request(
test_config,
url=None,
method=None,
payload=None,
extra_headers=None,
content_type=None,
):
print(f"Using test_config: {test_config['name']}")
headers = {
"Content-Type": content_type if content_type else "application/json",
}
if extra_headers:
for h in extra_headers:
headers[h["header_name"]] = h["header_value"]
if payload:
body = json.dumps(payload)
else:
body = None
if "CI" in os.environ:
role_name = "integrations-ci"
else:
role_name = "operator"
boto3.setup_default_session(region_name="eu-west-1",)
client = boto3.client("sts")
client.get_caller_identity()["Account"]
role_to_assume = f"arn:aws:iam::288342028542:role/{role_name}"
response = client.assume_role(
RoleArn=role_to_assume, RoleSessionName="assumed_role"
)
session = Session(
aws_access_key_id=response["Credentials"]["AccessKeyId"],
aws_secret_access_key=response["Credentials"]["SecretAccessKey"],
aws_session_token=response["Credentials"]["SessionToken"],
)
client = session.client("sts")
client.get_caller_identity()["Account"]
credentials = session.get_credentials()
credentials = credentials.get_frozen_credentials()
access_key = credentials.access_key
secret_key = credentials.secret_key
token = credentials.token
auth = AWS4Auth(
access_key, secret_key, "eu-west-1", "execute-api", session_token=token,
)
response = requests.request(method, url, auth=auth, data=body, headers=headers)
print(f"response.status_code: {response.status_code}")
print(f"response: {json.dumps(response.json(), indent=4)}")
return response.status_code, response.text
def is_valid_schema(data, schema_file):
""" Checks whether the given data matches the schema """
schema = load_data(schema_file, as_json=False)
try:
validate(data, schema)
result = True
except exceptions.ValidationError as e:
print("well-formed but invalid JSON:", e)
result = False
except json.decoder.JSONDecodeError as e:
print("poorly-formed text, not JSON:", e)
result = False
return result
def load_data(filename, as_json=True):
relative_path = join("response_schemas", filename)
absolute_path = join(dirname(__file__), relative_path)
with open(absolute_path) as data_file:
if as_json:
return data_file.read()
else:
return json.loads(data_file.read())
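# Editor-added usage sketch (hedged): how send_a_request is expected to be driven. The schema
# filename is an illustrative assumption, and running this requires AWS credentials that can
# assume the hard-coded role.
if __name__ == "__main__":
    status_code, body = send_a_request(template_config, url=template_config["url"], method="GET")
    print(status_code)
    print(is_valid_schema(json.loads(body), "template_schema.json"))  # schema file name is an assumption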
| [
"boto3.setup_default_session",
"boto3.session.Session",
"boto3.client",
"requests_aws4auth.AWS4Auth",
"json.dumps",
"os.path.join",
"requests.request",
"os.path.dirname",
"jsonschema.validate"
] | [((987, 1039), 'boto3.setup_default_session', 'boto3.setup_default_session', ([], {'region_name': '"""eu-west-1"""'}), "(region_name='eu-west-1')\n", (1014, 1039), False, 'import boto3\n'), ((1055, 1074), 'boto3.client', 'boto3.client', (['"""sts"""'], {}), "('sts')\n", (1067, 1074), False, 'import boto3\n'), ((1307, 1505), 'boto3.session.Session', 'Session', ([], {'aws_access_key_id': "response['Credentials']['AccessKeyId']", 'aws_secret_access_key': "response['Credentials']['SecretAccessKey']", 'aws_session_token': "response['Credentials']['SessionToken']"}), "(aws_access_key_id=response['Credentials']['AccessKeyId'],\n aws_secret_access_key=response['Credentials']['SecretAccessKey'],\n aws_session_token=response['Credentials']['SessionToken'])\n", (1314, 1505), False, 'from boto3.session import Session\n'), ((1832, 1918), 'requests_aws4auth.AWS4Auth', 'AWS4Auth', (['access_key', 'secret_key', '"""eu-west-1"""', '"""execute-api"""'], {'session_token': 'token'}), "(access_key, secret_key, 'eu-west-1', 'execute-api', session_token=\n token)\n", (1840, 1918), False, 'from requests_aws4auth import AWS4Auth\n'), ((1945, 2013), 'requests.request', 'requests.request', (['method', 'url'], {'auth': 'auth', 'data': 'body', 'headers': 'headers'}), '(method, url, auth=auth, data=body, headers=headers)\n', (1961, 2013), False, 'import requests\n'), ((2718, 2752), 'os.path.join', 'join', (['"""response_schemas"""', 'filename'], {}), "('response_schemas', filename)\n", (2722, 2752), False, 'from os.path import join, dirname\n'), ((825, 844), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (835, 844), False, 'import json\n'), ((2357, 2379), 'jsonschema.validate', 'validate', (['data', 'schema'], {}), '(data, schema)\n', (2365, 2379), False, 'from jsonschema import validate, exceptions\n'), ((2778, 2795), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (2785, 2795), False, 'from os.path import join, dirname\n')] |
import einops
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
def callbacks(args, monitor="val/Accuracy"):
callbacks_list = []
# log learning rate
callbacks_list.append(pl.callbacks.LearningRateMonitor(logging_interval="step"))
# best model checkpoint
callbacks_list.append(
pl.callbacks.ModelCheckpoint(
monitor=monitor,
mode="max",
save_top_k=1,
save_last=True,
# filename
# new style will be supported in pl-1.3
filename="{epoch:02d}",
# auto_insert_metric_name=False,
# filename="epoch{epoch:02d}-val_iou{val/IoU:.2f}",
)
)
# early stop
if "patience" in args:
callbacks_list.append(
pl.callbacks.early_stopping.EarlyStopping(
monitor=monitor, patience=args.patience, mode="max"
)
)
return callbacks_list
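# Editor-added usage sketch (hedged): the list built above is meant to be passed straight to a
# PyTorch Lightning Trainer; `args` is whatever config object the surrounding project uses.
def build_trainer(args, monitor="val/Accuracy", **trainer_kwargs):
    """Wire the callback list into a Trainer (illustrative helper, not in the original module)."""
    return pl.Trainer(callbacks=callbacks(args, monitor=monitor), **trainer_kwargs)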
| [
"pytorch_lightning.callbacks.ModelCheckpoint",
"pytorch_lightning.callbacks.early_stopping.EarlyStopping",
"pytorch_lightning.callbacks.LearningRateMonitor"
] | [((234, 291), 'pytorch_lightning.callbacks.LearningRateMonitor', 'pl.callbacks.LearningRateMonitor', ([], {'logging_interval': '"""step"""'}), "(logging_interval='step')\n", (266, 291), True, 'import pytorch_lightning as pl\n'), ((357, 472), 'pytorch_lightning.callbacks.ModelCheckpoint', 'pl.callbacks.ModelCheckpoint', ([], {'monitor': 'monitor', 'mode': '"""max"""', 'save_top_k': '(1)', 'save_last': '(True)', 'filename': '"""{epoch:02d}"""'}), "(monitor=monitor, mode='max', save_top_k=1,\n save_last=True, filename='{epoch:02d}')\n", (385, 472), True, 'import pytorch_lightning as pl\n'), ((818, 917), 'pytorch_lightning.callbacks.early_stopping.EarlyStopping', 'pl.callbacks.early_stopping.EarlyStopping', ([], {'monitor': 'monitor', 'patience': 'args.patience', 'mode': '"""max"""'}), "(monitor=monitor, patience=args.\n patience, mode='max')\n", (859, 917), True, 'import pytorch_lightning as pl\n')] |
import os
import subprocess
import json
from multiprocessing import Pool
import numpy as np
class Config:
def __init__(self, test_number):
self.ls_rounds = 5
self.ls_time = 5
self.ml_pruning = 0.95
self.test_number = test_number
def write_to_file(self):
path = os.path.join(output_directory, self.test_number, "config.json")
with open(path, "w") as output_file:
json.dump(self.__dict__, output_file)
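# Editor note (hedged): with the defaults above, write_to_file() emits a config.json of the form
#   {"ls_rounds": 5, "ls_time": 5, "ml_pruning": 0.95, "test_number": <test_number>}
# under <output_directory>/<test_number>/, where output_directory is defined elsewhere in the script.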
def exec_iterative_ml(config, graph_filename, test_number):
    args = [executable,
            f"{graph_directory}/{graph_filename}",
            f"--output={output_directory}/{test_number}/{graph_filename}.mis",
            f"--ls_rounds={config.ls_rounds}",
            f"--ls_time={config.ls_time}",
            f"--ml_pruning={config.ml_pruning}",
            "--console_log"
            ]
    # subprocess.run() does not interpret a shell-style ">" redirection when given an argument
    # list (and the original list was also missing the comma after "--console_log"), so redirect
    # stdout to the per-graph log file explicitly instead.
    log_path = f"{output_directory}/{test_number}/{graph_filename}.mis.log"
    with open(log_path, "w") as log_file:
        subprocess.run(args, stdout=log_file)
    print(args)
# run tests
with Pool(processes=os.cpu_count()) as pool:
    for graph_filename in os.listdir(graph_directory):
        graph_filepath = os.path.join(graph_directory, graph_filename)
        if os.path.isfile(graph_filepath):
            # Config requires the test number (see __init__ above)
            pool.apply_async(exec_iterative_ml, (Config(test_number), graph_filename, test_number))
    # wait for the queued jobs to finish before the context manager tears the pool down
    pool.close()
    pool.join()
| [
"os.listdir",
"subprocess.run",
"os.path.join",
"os.path.isfile",
"os.cpu_count",
"json.dump"
] | [((956, 976), 'subprocess.run', 'subprocess.run', (['args'], {}), '(args)\n', (970, 976), False, 'import subprocess\n'), ((1080, 1107), 'os.listdir', 'os.listdir', (['graph_directory'], {}), '(graph_directory)\n', (1090, 1107), False, 'import os\n'), ((314, 377), 'os.path.join', 'os.path.join', (['output_directory', 'self.test_number', '"""config.json"""'], {}), "(output_directory, self.test_number, 'config.json')\n", (326, 377), False, 'import os\n'), ((1134, 1179), 'os.path.join', 'os.path.join', (['graph_directory', 'graph_filename'], {}), '(graph_directory, graph_filename)\n', (1146, 1179), False, 'import os\n'), ((1191, 1221), 'os.path.isfile', 'os.path.isfile', (['graph_filepath'], {}), '(graph_filepath)\n', (1205, 1221), False, 'import os\n'), ((435, 472), 'json.dump', 'json.dump', (['self.__dict__', 'output_file'], {}), '(self.__dict__, output_file)\n', (444, 472), False, 'import json\n'), ((1029, 1043), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (1041, 1043), False, 'import os\n')] |
import csv
import numpy as np
from matplotlib import pyplot as plt
import os
log_names = [
'10x5_1S',
'10x5_1W',
'10x5_3S',
'10x5_3W',
'5x5_1W',
'5x5_1S',
'5x5_3W',
'5x5_3S',
'10x20_1W',
'10x20_1S',
'10x20_3W',
'10x20_3S',
'10x2_1W',
'10x2_1S',
'10x2_3W',
'10x2_3S',
'50x5_1W',
'50x5_1S',
'50x5_3W',
'50x5_3S',
'BPI2017_50k',
]
def _parse_scores(scores_filepath):
with open(scores_filepath, 'r') as f:
csv_reader = csv.reader(f, delimiter=',', quotechar='|')
next(csv_reader, None)
line = next(csv_reader, None)
activity_id_accuracy = float(line[1])
activity_id_std = float(line[2])
line = next(csv_reader, None)
resource_id_accuracy = float(line[1])
resource_id_std = float(line[2])
next(csv_reader, None)
line = next(csv_reader, None)
time_mse = float(line[1])
time_std = float(line[2])
next(csv_reader, None)
next(csv_reader, None)
line = next(csv_reader, None)
activity_id_nlevenshtein = float(line[1])
activity_id_nlevenshtein_std = float(line[2])
line = next(csv_reader, None)
resource_id_nlevenshtein = float(line[1])
resource_id_nlevenshtein_std = float(line[2])
return activity_id_accuracy, activity_id_std, resource_id_accuracy, resource_id_std, time_mse, time_std, activity_id_nlevenshtein, activity_id_nlevenshtein_std, resource_id_nlevenshtein, resource_id_nlevenshtein_std
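# Editor note (hedged): the fixed sequence of next() calls above implies a results.csv laid out as
#   row 0: header
#   row 1: activity_id accuracy      (column 1 = mean, column 2 = std)
#   row 2: resource_id accuracy      (columns 1 and 2)
#   row 3: skipped
#   row 4: time MSE                  (columns 1 and 2)
#   rows 5-6: skipped
#   row 7: activity_id nlevenshtein  (columns 1 and 2)
#   row 8: resource_id nlevenshtein  (columns 1 and 2)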
def plot_scores_figure(scores, index, title, top=1):
models = sorted(scores.keys())
bar_width = 0.2
axis = np.arange(len(log_names))
plt.figure()
plt.title(title)
y_min = 1
y_max = 0
for i, model in enumerate(models):
model_scores = np.array(scores[model])
plt.bar(axis - bar_width / 2 * len(models) / 2 + i * bar_width, model_scores[:, index], yerr=model_scores[:, index+1],
width=bar_width, label=model)
y_min_temp = np.min(model_scores[:, index])
y_max_temp = np.max(model_scores[:, index])
if y_min_temp < y_min:
y_min = y_min_temp
if y_max_temp > y_max:
y_max = y_max_temp
if top > 0:
plt.ylim(top=top)
y_min -= 0.1 * (y_max - y_min)
plt.ylim(bottom=max(0, y_min))
plt.xticks(axis, log_names, rotation=90)
plt.legend()
plt.show()
def _plot_scores(scores):
plot_scores_figure(scores, 0, 'activity_id_accuracy')
plot_scores_figure(scores, 2, 'resource_id accuracy')
plot_scores_figure(scores, 4, 'time mse', top=0)
plot_scores_figure(scores, 6, 'activity_id nlevenshtein')
plot_scores_figure(scores, 8, 'resource_id nlevenshtein')
def compare_models(model_type):
scores = {}
models = [f.name for f in os.scandir('/'.join(['..', 'outputs', model_type])) if f.is_dir()]
for model in models:
scores[model] = []
for log_name in log_names:
for model in models:
results_filepath = '/'.join(['..', 'outputs', model_type, model, log_name, 'results.csv'])
scores[model].append(_parse_scores(results_filepath))
_plot_scores(scores)
if __name__ == "__main__":
compare_models('sequence')
| [
"matplotlib.pyplot.xticks",
"numpy.max",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.min",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"csv.reader",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((1696, 1708), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1706, 1708), True, 'from matplotlib import pyplot as plt\n'), ((1713, 1729), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1722, 1729), True, 'from matplotlib import pyplot as plt\n'), ((2364, 2404), 'matplotlib.pyplot.xticks', 'plt.xticks', (['axis', 'log_names'], {'rotation': '(90)'}), '(axis, log_names, rotation=90)\n', (2374, 2404), True, 'from matplotlib import pyplot as plt\n'), ((2409, 2421), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2419, 2421), True, 'from matplotlib import pyplot as plt\n'), ((2426, 2436), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2434, 2436), True, 'from matplotlib import pyplot as plt\n'), ((515, 558), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '""","""', 'quotechar': '"""|"""'}), "(f, delimiter=',', quotechar='|')\n", (525, 558), False, 'import csv\n'), ((1821, 1844), 'numpy.array', 'np.array', (['scores[model]'], {}), '(scores[model])\n', (1829, 1844), True, 'import numpy as np\n'), ((2039, 2069), 'numpy.min', 'np.min', (['model_scores[:, index]'], {}), '(model_scores[:, index])\n', (2045, 2069), True, 'import numpy as np\n'), ((2091, 2121), 'numpy.max', 'np.max', (['model_scores[:, index]'], {}), '(model_scores[:, index])\n', (2097, 2121), True, 'import numpy as np\n'), ((2271, 2288), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'top': 'top'}), '(top=top)\n', (2279, 2288), True, 'from matplotlib import pyplot as plt\n')] |
#!/usr/bin/env python3
import pywind.lib.timer as timer
import freenet.lib.utils as utils
class ip6_dgram_proxy(object):
"""处理IPv6 UDP 和 UDPLite数据分包
"""
__ok_packets = None
__timer = None
__TIMEOUT = 5
__fragdata = None
def __init__(self):
self.__ok_packets = []
self.__timer = timer.timer()
self.__fragdata = {}
def add_frag(self, mbuf):
mbuf.offset = 8
saddr = mbuf.get_part(16)
mbuf.offset = 24
daddr = mbuf.get_part(16)
mbuf.offset = 42
frag_off = utils.bytes2number(mbuf.get_part(2))
m_flag = frag_off & 1
frag_off = frag_off >> 3
mbuf.offset = 44
frag_id = mbuf.get_part(4)
if frag_off == 0 and m_flag == 0:
sport, dport = self.__get_pkt_port_info(mbuf)
mbuf.offset = 56
self.__ok_packets.append((saddr, daddr, sport, dport, mbuf.get_data(),))
return
uniq_id = b"".join([saddr, frag_id, ])
if frag_off == 0 and m_flag == 1:
sport, dport = self.__get_pkt_port_info(mbuf)
mbuf.offset = 56
self.__fragdata[uniq_id] = (saddr, daddr, sport, dport, [mbuf.get_data()])
self.__timer.set_timeout(uniq_id, self.__TIMEOUT)
return
if uniq_id not in self.__fragdata: return
mbuf.offset = 48
content = mbuf.get_data()
saddr, daddr, sport, dport, data_list = self.__fragdata[uniq_id]
data_list.append(content)
if m_flag != 0: return
self.__ok_packets.append(
(saddr, daddr, sport, dport, b"".join(data_list))
)
self.__timer.drop(uniq_id)
del self.__fragdata[uniq_id]
def __get_pkt_port_info(self, mbuf):
mbuf.offset = 48
sport = utils.bytes2number(mbuf.get_part(2))
mbuf.offset = 50
dport = utils.bytes2number(mbuf.get_part(2))
return (sport, dport,)
def get_data(self):
self.recycle()
try:
return self.__ok_packets.pop(0)
except IndexError:
return None
def recycle(self):
uniq_ids = self.__timer.get_timeout_names()
for uniq_id in uniq_ids:
if not self.__timer.exists(uniq_id): continue
self.__timer.drop(uniq_id)
del self.__fragdata[uniq_id]
return
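# Editor-added usage sketch (hedged): expected call pattern for the reassembler above. `mbuf` is
# the project's own buffer type (an object exposing .offset, get_part() and get_data()), so only
# the control flow is shown here.
#
#   proxy = ip6_dgram_proxy()
#   proxy.add_frag(mbuf)        # feed every IPv6 fragment packet as it arrives
#   pkt = proxy.get_data()      # (saddr, daddr, sport, dport, payload) once complete, else None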
| [
"pywind.lib.timer.timer"
] | [((325, 338), 'pywind.lib.timer.timer', 'timer.timer', ([], {}), '()\n', (336, 338), True, 'import pywind.lib.timer as timer\n')] |
import numpy as np
import scipy.stats
from scipy import ndimage
from scipy.optimize import curve_fit
from imutils import nan_to_zero
# try to use cv2 for faster image processing
try:
import cv2
cv2.connectedComponents # relatively recent addition, so check presence
opencv_found = True
except (ImportError, AttributeError):
opencv_found = False
def measure_of_chaos(im, nlevels, overwrite=True, statistic=None):
"""
Compute a measure for the spatial chaos in given image using the level sets method.
:param im: 2d array
:param nlevels: how many levels to use
:type nlevels: int
:param overwrite: Whether the input image can be overwritten to save memory
:type overwrite: bool
:param statistic: callable that calculates a score (a number) for the object counts in the level sets. If
specified, this statistic will be used instead of the default one. The callable must take two arguments - the
object counts (sequence of ints) and the number of non-zero pixels in the original image (int) - and output a number
:return: the measured value
:rtype: float
:raises ValueError: if nlevels <= 0 or q_val is an invalid percentile or an unknown interp value is used
"""
statistic = statistic or _default_measure
# don't process empty images
if np.sum(im) <= 0:
return np.nan
sum_notnull = np.sum(im > 0)
if not overwrite:
# don't modify original image, make a copy
im = im.copy()
notnull_mask = nan_to_zero(im)
im_clean = im / np.max(im) # normalize to 1
# Level Sets Calculation
object_counts = _level_sets(im_clean, nlevels)
return statistic(object_counts, sum_notnull)
def measure_of_chaos_fit(im, nlevels, overwrite=True):
"""
This function is identical to measure_of_chaos except that it uses a different statistic.
"""
return measure_of_chaos(im, nlevels, overwrite=overwrite, statistic=_fit)
def _dilation_and_erosion(im, dilate_mask=None, erode_mask=None):
dilate_mask = dilate_mask or [[0, 1, 0], [1, 1, 1], [0, 1, 0]]
erode_mask = erode_mask or [[1, 1, 1], [1, 1, 1], [1, 1, 1]]
if opencv_found:
im = np.asarray(im, dtype=np.uint8)
im = cv2.dilate(im, np.asarray(dilate_mask, dtype=np.uint8))
im = cv2.erode(im, np.asarray(erode_mask, dtype=np.uint8))
return im
return ndimage.binary_erosion(ndimage.morphology.binary_dilation(im, structure=dilate_mask), structure=erode_mask, border_value=1)
def _level_sets(im_clean, nlevels, prep=_dilation_and_erosion):
"""
Divide the image into level sets and count the number of objects in each of them.
:param im_clean: 2d array with :code:`im_clean.max() == 1`
:param int nlevels: number of levels to search for objects (positive integer)
:param prep: callable that takes a 2d array as its only argument and returns a 2d array
:return: sequence with the number of objects in each respective level
"""
if nlevels <= 0:
raise ValueError("nlevels must be positive")
prep = prep or (lambda x: x) # if no preprocessing should be done, use the identity function
# TODO change the levels. Reason:
# - in the for loop, the > operator is used. The highest level is 1, therefore the highest level set will always
# be empty. The ndimage.label function then returns 1 as the number of objects in the empty image, although it
# should be zero.
# Proposed solution:
# levels = np.linspace(0, 1, nlevels + 2)[1:-1]
# That is, create nlevels + 2 levels, then throw away the zero level and the one level
# or:
# levels = np.linspace(0, 1, nlevels)[1:-1]
# That is, only use nlevels - 2 levels. This means that the output array will have a size of nlevels - 2
levels = np.linspace(0, 1, nlevels+1)[:-1] # np.amin(im), np.amax(im)
# Go through levels and calculate number of objects
num_objs = []
count_func = (lambda im: cv2.connectedComponents(im, connectivity=4)[0] - 1) if opencv_found else (lambda im: ndimage.label(im)[1])
for lev in levels:
# Threshold at level
bw = (im_clean > lev)
bw = prep(bw)
# Record objects at this level
num_objs.append(count_func(bw))
return num_objs
def _default_measure(num_objs, sum_notnull):
"""
Calculate a statistic for the object counts.
:param num_objs: number of objects found in each level, respectively
:param float sum_notnull: sum of all non-zero elements in the original array (positive number)
:return: the calculated value between 0 and 1, bigger is better
"""
num_objs = np.asarray(num_objs, dtype=np.int_)
nlevels = len(num_objs)
if sum_notnull <= 0:
raise ValueError("sum_notnull must be positive")
if min(num_objs) < 0:
raise ValueError("cannot have negative object counts")
if nlevels < 1:
raise ValueError("array of object counts is empty")
if np.unique(num_objs).shape[0] <= 1:
return np.nan
sum_vals = float(np.sum(num_objs))
return 1 - sum_vals / (sum_notnull * nlevels)
# this updates the scoring function from the main algorithm.
def _fit(num_objs, _):
"""
An alternative statistic for measure_of_chaos.
:param num_objs: number of objects found in each level, respectively
:param _: unused dummy parameter, kept for signature compatibility with _default_measure
:return: the calculated value
"""
num_objs = np.asarray(num_objs, dtype=np.int_)
nlevels = len(num_objs)
if min(num_objs) < 0:
raise ValueError("must have at least one object in each level")
if nlevels < 1:
raise ValueError("array of object counts is empty")
if np.unique(num_objs).shape[0] < 2:
return np.nan
def func(x, a, b):
return scipy.stats.norm.cdf(x, loc=a, scale=b)
# measure_value, im, levels, num_objs = measure_of_chaos(im, nlevels)
# if measure_value == np.nan: # if basic algorithm failed then we're going to fail here too
# return np.nan
cdf_curve = np.cumsum(num_objs) / float(np.sum(num_objs))
popt, pcov = curve_fit(func, np.linspace(0, 1, nlevels), cdf_curve, p0=(0.5, 0.05))
pdf_fitted = func(np.linspace(0, 1, nlevels), popt[0], popt[1])
# return 1-np.sqrt(np.sum((pdf_fitted - cdf_curve)**2))
return 1 - np.sum(np.abs((pdf_fitted - cdf_curve)))
def isotope_pattern_match(images_flat, theor_iso_intensities):
"""
This function calculates a match between a list of isotope ion images and a theoretical intensity vector.
:param images_flat: 2d array (or sequence of 1d arrays) of pixel intensities with shape (d1, d2) where d1 is the number of images and d2 is the number of pixels per image, i.e. :code:`images_flat[i]` is the i-th flattened image
:param theor_iso_intensities: 1d array (or sequence) of theoretical isotope intensities with shape d1, i.e :code:`theor_iso_intensities[i]` is the theoretical isotope intensity corresponding to the i-th image
:return: measure value between 0 and 1, bigger is better
:rtype: float
:raise TypeError: if images are not 1d
:raise ValueError: if images are not equally shaped or if the number of images and the number of intensities differ
"""
d1 = len(images_flat)
if d1 != len(theor_iso_intensities):
raise ValueError("amount of images and theoretical intensities must be equal")
if any(np.shape(im) != np.shape(images_flat[0]) for im in images_flat):
raise ValueError("images are not equally sized")
if any(len(np.shape(im)) != 1 for im in images_flat):
raise TypeError("images are not 1d")
if any(intensity < 0 for intensity in theor_iso_intensities):
raise ValueError("intensities must be >= 0")
image_ints = []
not_null = images_flat[0] > 0
for ii, _ in enumerate(theor_iso_intensities):
image_ints.append(np.sum(images_flat[ii][not_null]))
pattern_match = 1 - np.mean(abs(theor_iso_intensities / np.linalg.norm(theor_iso_intensities) -
image_ints / np.linalg.norm(image_ints)))
if pattern_match == 1.:
return 0
return pattern_match
def isotope_image_correlation(images_flat, weights=None):
"""
    Function for calculating a weighted average measure of image correlation with the principal image.
:param images_flat: 2d array (or sequence of 1d arrays) of pixel intensities with shape (d1, d2) where d1 is the number of images and d2 is the number of pixels per image, i.e. :code:`images_flat[i]` is the i-th flattened image
:param weights: 1d array (or sequence) of weights with shape (d1 - 1), i.e :code:`weights[i]` is the weight to put on the correlation between the first and the i-th image. If omitted, all correlations are weighted equally
:return: measure_value (zero if less than 2 images are given)
:raise TypeError: if images are not 1d
:raise ValueError: if images are not equally shaped or if the number of correlations and the number of weights (if given) differ
"""
if len(images_flat) < 2:
return 0
if any(len(np.shape(im)) != 1 for im in images_flat):
raise TypeError("images are not 1d")
else:
# first image mask
mask = images_flat[0] > 0
if mask.sum() < 2:
return 0
flt_images_flat = [img[mask] for img in images_flat]
# slightly faster to compute all correlations and pull the elements needed
iso_correlation = np.corrcoef(flt_images_flat)[1:, 0]
# when all values are the same (e.g. zeros) then correlation is undefined
iso_correlation[np.isinf(iso_correlation) | np.isnan(iso_correlation)] = 0
try:
return np.clip(np.average(iso_correlation, weights=weights),0,1)# coerce between [0 1]
except TypeError:
raise ValueError("Number of images is not equal to the number of weights + 1")
| [
"numpy.abs",
"numpy.unique",
"numpy.corrcoef",
"numpy.average",
"imutils.nan_to_zero",
"numpy.asarray",
"scipy.ndimage.label",
"numpy.max",
"numpy.sum",
"numpy.linspace",
"numpy.isnan",
"cv2.connectedComponents",
"scipy.ndimage.morphology.binary_dilation",
"numpy.linalg.norm",
"numpy.cumsum",
"numpy.shape",
"numpy.isinf"
] | [((1382, 1396), 'numpy.sum', 'np.sum', (['(im > 0)'], {}), '(im > 0)\n', (1388, 1396), True, 'import numpy as np\n'), ((1514, 1529), 'imutils.nan_to_zero', 'nan_to_zero', (['im'], {}), '(im)\n', (1525, 1529), False, 'from imutils import nan_to_zero\n'), ((4651, 4686), 'numpy.asarray', 'np.asarray', (['num_objs'], {'dtype': 'np.int_'}), '(num_objs, dtype=np.int_)\n', (4661, 4686), True, 'import numpy as np\n'), ((5489, 5524), 'numpy.asarray', 'np.asarray', (['num_objs'], {'dtype': 'np.int_'}), '(num_objs, dtype=np.int_)\n', (5499, 5524), True, 'import numpy as np\n'), ((1325, 1335), 'numpy.sum', 'np.sum', (['im'], {}), '(im)\n', (1331, 1335), True, 'import numpy as np\n'), ((1550, 1560), 'numpy.max', 'np.max', (['im'], {}), '(im)\n', (1556, 1560), True, 'import numpy as np\n'), ((2188, 2218), 'numpy.asarray', 'np.asarray', (['im'], {'dtype': 'np.uint8'}), '(im, dtype=np.uint8)\n', (2198, 2218), True, 'import numpy as np\n'), ((2407, 2468), 'scipy.ndimage.morphology.binary_dilation', 'ndimage.morphology.binary_dilation', (['im'], {'structure': 'dilate_mask'}), '(im, structure=dilate_mask)\n', (2441, 2468), False, 'from scipy import ndimage\n'), ((3808, 3838), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(nlevels + 1)'], {}), '(0, 1, nlevels + 1)\n', (3819, 3838), True, 'import numpy as np\n'), ((5052, 5068), 'numpy.sum', 'np.sum', (['num_objs'], {}), '(num_objs)\n', (5058, 5068), True, 'import numpy as np\n'), ((6085, 6104), 'numpy.cumsum', 'np.cumsum', (['num_objs'], {}), '(num_objs)\n', (6094, 6104), True, 'import numpy as np\n'), ((6164, 6190), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'nlevels'], {}), '(0, 1, nlevels)\n', (6175, 6190), True, 'import numpy as np\n'), ((6241, 6267), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'nlevels'], {}), '(0, 1, nlevels)\n', (6252, 6267), True, 'import numpy as np\n'), ((2247, 2286), 'numpy.asarray', 'np.asarray', (['dilate_mask'], {'dtype': 'np.uint8'}), '(dilate_mask, dtype=np.uint8)\n', (2257, 2286), True, 'import numpy as np\n'), ((2315, 2353), 'numpy.asarray', 'np.asarray', (['erode_mask'], {'dtype': 'np.uint8'}), '(erode_mask, dtype=np.uint8)\n', (2325, 2353), True, 'import numpy as np\n'), ((6113, 6129), 'numpy.sum', 'np.sum', (['num_objs'], {}), '(num_objs)\n', (6119, 6129), True, 'import numpy as np\n'), ((6369, 6399), 'numpy.abs', 'np.abs', (['(pdf_fitted - cdf_curve)'], {}), '(pdf_fitted - cdf_curve)\n', (6375, 6399), True, 'import numpy as np\n'), ((7923, 7956), 'numpy.sum', 'np.sum', (['images_flat[ii][not_null]'], {}), '(images_flat[ii][not_null])\n', (7929, 7956), True, 'import numpy as np\n'), ((9524, 9552), 'numpy.corrcoef', 'np.corrcoef', (['flt_images_flat'], {}), '(flt_images_flat)\n', (9535, 9552), True, 'import numpy as np\n'), ((4058, 4075), 'scipy.ndimage.label', 'ndimage.label', (['im'], {}), '(im)\n', (4071, 4075), False, 'from scipy import ndimage\n'), ((4973, 4992), 'numpy.unique', 'np.unique', (['num_objs'], {}), '(num_objs)\n', (4982, 4992), True, 'import numpy as np\n'), ((5738, 5757), 'numpy.unique', 'np.unique', (['num_objs'], {}), '(num_objs)\n', (5747, 5757), True, 'import numpy as np\n'), ((7447, 7459), 'numpy.shape', 'np.shape', (['im'], {}), '(im)\n', (7455, 7459), True, 'import numpy as np\n'), ((7463, 7487), 'numpy.shape', 'np.shape', (['images_flat[0]'], {}), '(images_flat[0])\n', (7471, 7487), True, 'import numpy as np\n'), ((9666, 9691), 'numpy.isinf', 'np.isinf', (['iso_correlation'], {}), '(iso_correlation)\n', (9674, 9691), True, 'import numpy as np\n'), ((9694, 9719), 'numpy.isnan', 
'np.isnan', (['iso_correlation'], {}), '(iso_correlation)\n', (9702, 9719), True, 'import numpy as np\n'), ((9765, 9809), 'numpy.average', 'np.average', (['iso_correlation'], {'weights': 'weights'}), '(iso_correlation, weights=weights)\n', (9775, 9809), True, 'import numpy as np\n'), ((3973, 4016), 'cv2.connectedComponents', 'cv2.connectedComponents', (['im'], {'connectivity': '(4)'}), '(im, connectivity=4)\n', (3996, 4016), False, 'import cv2\n'), ((7584, 7596), 'numpy.shape', 'np.shape', (['im'], {}), '(im)\n', (7592, 7596), True, 'import numpy as np\n'), ((9147, 9159), 'numpy.shape', 'np.shape', (['im'], {}), '(im)\n', (9155, 9159), True, 'import numpy as np\n'), ((8018, 8055), 'numpy.linalg.norm', 'np.linalg.norm', (['theor_iso_intensities'], {}), '(theor_iso_intensities)\n', (8032, 8055), True, 'import numpy as np\n'), ((8107, 8133), 'numpy.linalg.norm', 'np.linalg.norm', (['image_ints'], {}), '(image_ints)\n', (8121, 8133), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import statsmodels.api as sm
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sns
import os
import sys
import scipy.stats
from scipy.stats.mstats import gmean
import scipy.stats as stats
import math
import matplotlib as mpl
from sklearn.cluster import KMeans
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams["font.sans-serif"] = "Arial"
#1.Z-score Normalize DiseaseSP_DF:
Cell='Monocytes'
outDir=os.path.join('{}/DiffPeaks/mean3_fc2_p0.001_fdr0.05/KmeansCluster'.format(Cell))
if not os.path.exists(outDir):
os.mkdir(outDir)
DiseaseSP_F='{}/DiffPeaks/mean3_fc2_p0.001_fdr0.05/TwoTwoCompare_Merge.sortCol.txt'.format(Cell)
DiseaseSP_DF=pd.read_table(DiseaseSP_F,sep='\t',index_col=0)
DiseaseSP_DFz= DiseaseSP_DF.apply(scipy.stats.zscore,axis=1,result_type='broadcast')
#decide K: 1. elbow method (sum of squared errors, SSE); 2. silhouette coefficient method
SSE = []  # stores the sum of squared errors for each k
for k in range(1,10):
estimator = KMeans(n_clusters=k)
estimator.fit(DiseaseSP_DFz)
SSE.append(estimator.inertia_)
X = range(1,10)
plt.style.use('seaborn-white')
fig=plt.figure(figsize=(3.5,2))
ax=fig.add_axes([0.2,0.2,0.7,0.7])
ax.set_ylabel('Sum of the squared errors',fontsize=10)
ax.set_xlabel('k number',fontsize=10)
ax.tick_params(axis='y',length=7,labelsize=8,direction='out')
ax.tick_params(axis='x',length=7,labelsize=8,direction='out')
ax.spines['bottom'].set_linewidth(0.5)
ax.spines['left'].set_linewidth(0.5)
ax.spines['right'].set_linewidth(0.5)
ax.spines['top'].set_linewidth(0.5)
plt.plot(X,SSE,color='purple', marker='o', linestyle='dashed',linewidth=1, markersize=5)
fig.savefig(outDir+'/Kvalue_SSE.pdf')
#print('Sum of squared errors:')
plt.show()
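#Hedged sketch (not part of the original notebook): the '#decide K' comment above
#also lists the silhouette coefficient method; with sklearn's silhouette_score on
#the same z-scored matrix it could look like this.
from sklearn.metrics import silhouette_score
sil = []
for k in range(2,10): #silhouette score is only defined for k >= 2
    labels = KMeans(n_clusters=k).fit_predict(DiseaseSP_DFz)
    sil.append(silhouette_score(DiseaseSP_DFz, labels))
print('Best k by silhouette:', range(2,10)[int(np.argmax(sil))])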
#2. Run KMeans clustering with the best K (KMeans clustering uses the z-score-normalized DF!!!)
def KMean_Cluster(DF,outDirPrefix,k):
#print 'Do KMean Cluster, k={}'.format(k)
kmeans=KMeans(n_clusters=k)
kmeans.fit(DF)
Kcluster=pd.DataFrame(kmeans.labels_,index=list(DF.index),columns=['Cluster'])
Kcluster.to_csv(outDir+'/TwoTwoCompareMerge_zscore_k{}.txt'.format(k),sep='\t')
#return Kcluster
KMean_Cluster(DiseaseSP_DFz,outDir,2)
KMean_Cluster(DiseaseSP_DFz,outDir,3)
print ('K-means Done !')
# In[5]:
k='3'
Cell='Monocytes'
DiseaseSP_F='{}/DiffPeaks/mean3_fc2_p0.001_fdr0.05/TwoTwoCompare_Merge.sortCol.txt'.format(Cell)
DiseaseSP_DF=pd.read_table(DiseaseSP_F,sep='\t',index_col=0)
RAs=[i for i in list(DiseaseSP_DF) if 'RA' in i]
OAs=[i for i in list(DiseaseSP_DF) if 'OA' in i]
HCs=[i for i in list(DiseaseSP_DF) if 'HC' in i]
BedF= '{}/RAOAHC.removeY.bed'.format(Cell) #read PeakBed
BedDF=pd.read_table(BedF,sep='\t',header=None)
BedDF.index=BedDF[3]
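#the 4th BED field (column 3) is assumed to hold the peak name, matching the row
#index of the count matrix so that peaks can be looked up with .loc below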
def PlotKmeanCluster_K3(k):
kmeansDir=os.path.join('{}/DiffPeaks/mean3_fc2_p0.001_fdr0.05/KmeansCluster/kvalue_k{}/'.format(Cell,k))
if not os.path.exists(kmeansDir):
os.mkdir(kmeansDir)
KClusterF='{}/DiffPeaks/mean3_fc2_p0.001_fdr0.05/KmeansCluster/TwoTwoCompareMerge_zscore_k{}.txt'.format(Cell,k)
KCluster=pd.read_table(KClusterF,sep='\t',index_col=0)
k1=KCluster[KCluster['Cluster']==0]
k2=KCluster[KCluster['Cluster']==1]
k3=KCluster[KCluster['Cluster']==2]
k1DF=DiseaseSP_DF.loc[k1.index]
k2DF=DiseaseSP_DF.loc[k2.index]
k3DF=DiseaseSP_DF.loc[k3.index]
k1Bed=BedDF.loc[k1DF.index]
k2Bed=BedDF.loc[k2DF.index]
k3Bed=BedDF.loc[k3DF.index]
a1=k1DF.iloc[:,-2:-1].mean(axis=0)[0]
a2=k2DF.iloc[:,-2:-1].mean(axis=0)[0]
a3=k3DF.iloc[:,-2:-1].mean(axis=0)[0]
    # Rank the three clusters by their mean signal (ascending) so that c1 has the
    # lowest and c3 the highest mean; sorting on the pre-computed means also
    # behaves sensibly if two means happen to be equal.
    ordered=sorted(zip((a1, a2, a3), (k1DF, k2DF, k3DF)), key=lambda pair: pair[0])
    KclusterDF_c1=ordered[0][1].copy()
    KclusterDF_c2=ordered[1][1].copy()
    KclusterDF_c3=ordered[2][1].copy()
KclusterBed_c1=BedDF.loc[KclusterDF_c1.index]
KclusterBed_c2=BedDF.loc[KclusterDF_c2.index]
KclusterBed_c3=BedDF.loc[KclusterDF_c3.index]
KclusterBed_c1.to_csv(kmeansDir+'KmeansCluster_c1.bed',sep='\t',header=False,index=False)
KclusterBed_c2.to_csv(kmeansDir+'KmeansCluster_c2.bed',sep='\t',header=False,index=False)
KclusterBed_c3.to_csv(kmeansDir+'KmeansCluster_c3.bed',sep='\t',header=False,index=False)
KclusterDF_c1.to_csv(kmeansDir+'KmeansCluster_c1.txt',sep='\t')
KclusterDF_c2.to_csv(kmeansDir+'KmeansCluster_c2.txt',sep='\t')
KclusterDF_c3.to_csv(kmeansDir+'KmeansCluster_c3.txt',sep='\t')
KclusterDF_c1c2c3=pd.concat([KclusterDF_c1,KclusterDF_c2,KclusterDF_c3],axis=0)
KclusterDF_c1c2c3.to_csv(kmeansDir+'KmeansCluster_all.txt',sep='\t')
KclusterBed_c1c2c3=BedDF.loc[KclusterDF_c1c2c3.index]
KclusterBed_c1c2c3.to_csv(kmeansDir+'KmeansCluster_all.bed',sep='\t',header=False,index=False)
def DFmean(inputDF,C):
Df=DiseaseSP_DF.loc[inputDF.index]
hc=Df[HCs]
oa=Df[OAs]
ra=Df[RAs]
hcmean=hc.mean(axis=1)
hcmeanDF = hcmean.to_frame()
hcmeanDF.rename(columns={0:'HC'}, inplace = True)
oamean=oa.mean(axis=1)
oameanDF = oamean.to_frame()
oameanDF.rename(columns={0:'OA'}, inplace = True)
ramean=ra.mean(axis=1)
rameanDF = ramean.to_frame()
rameanDF.rename(columns={0:'RA'}, inplace = True)
MergeM = pd.concat([hcmeanDF,oameanDF,rameanDF],axis=1)
MergeM.to_csv(kmeansDir+'KmeansCluster_{}.average.txt'.format(C),sep='\t')
#Boxplot
plt.style.use('seaborn-white')
fig=plt.figure(figsize=(1.5,2))
ax=fig.add_axes([0.2,0.2,0.75,0.75])
#sns.violinplot(data=AA,ax=ax1,palette=(['steelblue','gold','orangered']))
sns.boxplot(data=MergeM,ax=ax,palette=(['steelblue','gold','orangered']),whis=0.5,fliersize=0.5,width=0.7,showfliers=False,medianprops={'linewidth':0.5},whiskerprops={'linewidth':0.5},boxprops={'linewidth':0.5},capprops={'linewidth':0.5})
ax.tick_params(labelsize=8,width=0.5,direction='out')
#ax.set_ylim([0,10])
ax.spines['bottom'].set_linewidth(0.25)
ax.spines['left'].set_linewidth(0.25)
ax.spines['right'].set_linewidth(0.25)
ax.spines['top'].set_linewidth(0.25)
fig.savefig(kmeansDir+'KmeansCluster_{}_average.boxplot.pdf'.format(C))
plt.show()
DFmean(KclusterDF_c1,'c1')
DFmean(KclusterDF_c2,'c2')
DFmean(KclusterDF_c3,'c3')
    #z-score, plot heatmaps:
KclusterDFall_Z=KclusterDF_c1c2c3.apply(scipy.stats.zscore,axis=1,result_type='broadcast')
KclusterDFc1_Z=KclusterDF_c1.apply(scipy.stats.zscore,axis=1,result_type='broadcast')
KclusterDFc2_Z=KclusterDF_c2.apply(scipy.stats.zscore,axis=1,result_type='broadcast')
KclusterDFc3_Z=KclusterDF_c3.apply(scipy.stats.zscore,axis=1,result_type='broadcast')
fig1=sns.clustermap(KclusterDFall_Z,figsize=(4,5),center=0,vmin=-2,vmax=2,col_cluster=False,row_cluster=False,cmap='RdYlBu_r')
fig1.savefig(kmeansDir+'KmeansCluster_all.heatmap.png',dpi=200)
plt.show()
plt.close('all')
fig2=sns.clustermap(KclusterDFc1_Z,figsize=(4,0.0009*len(KclusterDFc1_Z)),center=0,vmin=-2,vmax=2,col_cluster=False,row_cluster=False,cmap='RdYlBu_r')
fig2.savefig(kmeansDir+'KmeansCluster_c1.heatmap.png',dpi=500)
plt.show()
plt.close('all')
fig3=sns.clustermap(KclusterDFc2_Z,figsize=(4,0.0009*len(KclusterDFc2_Z)),center=0,vmin=-2,vmax=2,col_cluster=False,row_cluster=False,cmap='RdYlBu_r')
fig3.savefig(kmeansDir+'KmeansCluster_c2.heatmap.png',dpi=500)
plt.show()
plt.close('all')
fig4=sns.clustermap(KclusterDFc3_Z,figsize=(4,0.0009*len(KclusterDFc3_Z)),center=0,vmin=-2,vmax=2,col_cluster=False,row_cluster=False,cmap='RdYlBu_r')
fig4.savefig(kmeansDir+'KmeansCluster_c3.heatmap.png',dpi=500)
plt.show()
plt.close('all')
HCz=KclusterDFall_Z[HCs]
OAz=KclusterDFall_Z[OAs]
RAz=KclusterDFall_Z[RAs]
HCmean=HCz.mean(axis=1)
HCmeanDF = HCmean.to_frame()
HCmeanDF.rename(columns={0:'HC'}, inplace = True)
OAmean=OAz.mean(axis=1)
OAmeanDF = OAmean.to_frame()
OAmeanDF.rename(columns={0:'OA'}, inplace = True)
RAmean=RAz.mean(axis=1)
RAmeanDF = RAmean.to_frame()
RAmeanDF.rename(columns={0:'RA'}, inplace = True)
KclusterDFall_Z_average = pd.concat([HCmeanDF,OAmeanDF,RAmeanDF],axis=1)
fig4=sns.clustermap(KclusterDFall_Z_average,figsize=(1,6),center=0,vmin=-2,vmax=2,col_cluster=False,row_cluster=False,cmap='RdYlBu_r')
fig4.savefig(kmeansDir+'KmeansCluster_all.heatmap.average.pdf')
plt.show()
plt.close('all')
# In[6]:
k='3'
PlotKmeanCluster_K3(k)
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
| [
"sklearn.cluster.KMeans",
"os.path.exists",
"seaborn.clustermap",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.close",
"seaborn.boxplot",
"matplotlib.pyplot.figure",
"os.mkdir",
"pandas.read_table",
"pandas.concat",
"matplotlib.pyplot.show"
] | [((808, 857), 'pandas.read_table', 'pd.read_table', (['DiseaseSP_F'], {'sep': '"""\t"""', 'index_col': '(0)'}), "(DiseaseSP_F, sep='\\t', index_col=0)\n", (821, 857), True, 'import pandas as pd\n'), ((1144, 1174), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-white"""'], {}), "('seaborn-white')\n", (1157, 1174), True, 'import matplotlib.pyplot as plt\n'), ((1179, 1207), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(3.5, 2)'}), '(figsize=(3.5, 2))\n', (1189, 1207), True, 'import matplotlib.pyplot as plt\n'), ((1609, 1705), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'SSE'], {'color': '"""purple"""', 'marker': '"""o"""', 'linestyle': '"""dashed"""', 'linewidth': '(1)', 'markersize': '(5)'}), "(X, SSE, color='purple', marker='o', linestyle='dashed', linewidth=\n 1, markersize=5)\n", (1617, 1705), True, 'import matplotlib.pyplot as plt\n'), ((1752, 1762), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1760, 1762), True, 'import matplotlib.pyplot as plt\n'), ((2385, 2434), 'pandas.read_table', 'pd.read_table', (['DiseaseSP_F'], {'sep': '"""\t"""', 'index_col': '(0)'}), "(DiseaseSP_F, sep='\\t', index_col=0)\n", (2398, 2434), True, 'import pandas as pd\n'), ((2645, 2687), 'pandas.read_table', 'pd.read_table', (['BedF'], {'sep': '"""\t"""', 'header': 'None'}), "(BedF, sep='\\t', header=None)\n", (2658, 2687), True, 'import pandas as pd\n'), ((652, 674), 'os.path.exists', 'os.path.exists', (['outDir'], {}), '(outDir)\n', (666, 674), False, 'import os\n'), ((680, 696), 'os.mkdir', 'os.mkdir', (['outDir'], {}), '(outDir)\n', (688, 696), False, 'import os\n'), ((1039, 1059), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'k'}), '(n_clusters=k)\n', (1045, 1059), False, 'from sklearn.cluster import KMeans\n'), ((1909, 1929), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'k'}), '(n_clusters=k)\n', (1915, 1929), False, 'from sklearn.cluster import KMeans\n'), ((3043, 3090), 'pandas.read_table', 'pd.read_table', (['KClusterF'], {'sep': '"""\t"""', 'index_col': '(0)'}), "(KClusterF, sep='\\t', index_col=0)\n", (3056, 3090), True, 'import pandas as pd\n'), ((5002, 5066), 'pandas.concat', 'pd.concat', (['[KclusterDF_c1, KclusterDF_c2, KclusterDF_c3]'], {'axis': '(0)'}), '([KclusterDF_c1, KclusterDF_c2, KclusterDF_c3], axis=0)\n', (5011, 5066), True, 'import pandas as pd\n'), ((7288, 7421), 'seaborn.clustermap', 'sns.clustermap', (['KclusterDFall_Z'], {'figsize': '(4, 5)', 'center': '(0)', 'vmin': '(-2)', 'vmax': '(2)', 'col_cluster': '(False)', 'row_cluster': '(False)', 'cmap': '"""RdYlBu_r"""'}), "(KclusterDFall_Z, figsize=(4, 5), center=0, vmin=-2, vmax=2,\n col_cluster=False, row_cluster=False, cmap='RdYlBu_r')\n", (7302, 7421), True, 'import seaborn as sns\n'), ((7482, 7492), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7490, 7492), True, 'import matplotlib.pyplot as plt\n'), ((7497, 7513), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (7506, 7513), True, 'import matplotlib.pyplot as plt\n'), ((7740, 7750), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7748, 7750), True, 'import matplotlib.pyplot as plt\n'), ((7755, 7771), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (7764, 7771), True, 'import matplotlib.pyplot as plt\n'), ((7998, 8008), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8006, 8008), True, 'import matplotlib.pyplot as plt\n'), ((8013, 8029), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (8022, 8029), True, 
'import matplotlib.pyplot as plt\n'), ((8256, 8266), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8264, 8266), True, 'import matplotlib.pyplot as plt\n'), ((8271, 8287), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (8280, 8287), True, 'import matplotlib.pyplot as plt\n'), ((8751, 8800), 'pandas.concat', 'pd.concat', (['[HCmeanDF, OAmeanDF, RAmeanDF]'], {'axis': '(1)'}), '([HCmeanDF, OAmeanDF, RAmeanDF], axis=1)\n', (8760, 8800), True, 'import pandas as pd\n'), ((8808, 8949), 'seaborn.clustermap', 'sns.clustermap', (['KclusterDFall_Z_average'], {'figsize': '(1, 6)', 'center': '(0)', 'vmin': '(-2)', 'vmax': '(2)', 'col_cluster': '(False)', 'row_cluster': '(False)', 'cmap': '"""RdYlBu_r"""'}), "(KclusterDFall_Z_average, figsize=(1, 6), center=0, vmin=-2,\n vmax=2, col_cluster=False, row_cluster=False, cmap='RdYlBu_r')\n", (8822, 8949), True, 'import seaborn as sns\n'), ((9010, 9020), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9018, 9020), True, 'import matplotlib.pyplot as plt\n'), ((9025, 9041), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (9034, 9041), True, 'import matplotlib.pyplot as plt\n'), ((2856, 2881), 'os.path.exists', 'os.path.exists', (['kmeansDir'], {}), '(kmeansDir)\n', (2870, 2881), False, 'import os\n'), ((2891, 2910), 'os.mkdir', 'os.mkdir', (['kmeansDir'], {}), '(kmeansDir)\n', (2899, 2910), False, 'import os\n'), ((5817, 5866), 'pandas.concat', 'pd.concat', (['[hcmeanDF, oameanDF, rameanDF]'], {'axis': '(1)'}), '([hcmeanDF, oameanDF, rameanDF], axis=1)\n', (5826, 5866), True, 'import pandas as pd\n'), ((5972, 6002), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-white"""'], {}), "('seaborn-white')\n", (5985, 6002), True, 'import matplotlib.pyplot as plt\n'), ((6015, 6043), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1.5, 2)'}), '(figsize=(1.5, 2))\n', (6025, 6043), True, 'import matplotlib.pyplot as plt\n'), ((6179, 6445), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'MergeM', 'ax': 'ax', 'palette': "['steelblue', 'gold', 'orangered']", 'whis': '(0.5)', 'fliersize': '(0.5)', 'width': '(0.7)', 'showfliers': '(False)', 'medianprops': "{'linewidth': 0.5}", 'whiskerprops': "{'linewidth': 0.5}", 'boxprops': "{'linewidth': 0.5}", 'capprops': "{'linewidth': 0.5}"}), "(data=MergeM, ax=ax, palette=['steelblue', 'gold', 'orangered'],\n whis=0.5, fliersize=0.5, width=0.7, showfliers=False, medianprops={\n 'linewidth': 0.5}, whiskerprops={'linewidth': 0.5}, boxprops={\n 'linewidth': 0.5}, capprops={'linewidth': 0.5})\n", (6190, 6445), True, 'import seaborn as sns\n'), ((6783, 6793), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6791, 6793), True, 'import matplotlib.pyplot as plt\n')] |